diff --git a/cv/classification/convnext/pytorch/CODE_OF_CONDUCT.md b/cv/classification/convnext/pytorch/CODE_OF_CONDUCT.md deleted file mode 100644 index 83f431e8feeb7e80d571f39c9f6c1b96857b5f85..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,80 +0,0 @@ -# Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to make participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, sex characteristics, gender identity and expression, -level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic -address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a -professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies within all project spaces, and it also applies when -an individual is representing the project or its community in public spaces. -Examples of representing a project or community include using an official -project e-mail address, posting via an official social media account, or acting -as an appointed representative at an online or offline event. Representation of -a project may be further defined and clarified by project maintainers. - -This Code of Conduct also applies outside the project spaces when there is a -reasonable belief that an individual's behavior may have a negative impact on -the project or its community. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at . All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. 
- -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/cv/classification/convnext/pytorch/CONTRIBUTING.md b/cv/classification/convnext/pytorch/CONTRIBUTING.md deleted file mode 100644 index 38e6a78d9e94c0d1c73edd60f628c15659c6c8b4..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing to ConvNeXt -We want to make contributing to this project as easy and transparent as -possible. - -## Pull Requests -We actively welcome your pull requests. - -1. Fork the repo and create your branch from `main`. -2. If you've added code that should be tested, add tests. -3. If you've changed APIs, update the documentation. -4. Ensure the test suite passes. -5. Make sure your code lints. -6. If you haven't already, complete the Contributor License Agreement ("CLA"). - -## Contributor License Agreement ("CLA") -In order to accept your pull request, we need you to submit a CLA. You only need -to do this once to work on any of Meta's open source projects. - -Complete your CLA here: - -## Issues -We use GitHub issues to track public bugs. Please ensure your description is -clear and includes sufficient instructions to reproduce the issue. - -Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe -disclosure of security bugs. In those cases, please go through the process -outlined on that page and do not file a public issue. - -## License -By contributing to ConvNeXt, you agree that your contributions will be licensed -under the LICENSE file in the root directory of this source tree. diff --git a/cv/classification/convnext/pytorch/INSTALL.md b/cv/classification/convnext/pytorch/INSTALL.md deleted file mode 100644 index c090eb611c97d0cc0c11e346686348ebf4e08202..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/INSTALL.md +++ /dev/null @@ -1,53 +0,0 @@ -# Installation - -We provide installation instructions for ImageNet classification experiments here. - -## Dependency Setup -Create a new conda virtual environment: -``` -conda create -n convnext python=3.8 -y -conda activate convnext -``` - -Install [PyTorch](https://pytorch.org/)>=1.8.0 and [torchvision](https://pytorch.org/vision/stable/index.html)>=0.9.0 following the official instructions. For example: -``` -pip install torch==1.8.0+cu111 torchvision==0.9.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html -``` - -Clone this repo and install the required packages: -``` -git clone https://github.com/facebookresearch/ConvNeXt -pip install timm==0.3.2 tensorboardX six -``` - -The results in the paper are produced with `torch==1.8.0+cu111 torchvision==0.9.0+cu111 timm==0.3.2`.
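To verify that the environment matches these versions, a quick sanity check (a minimal sketch; the expected values are the ones listed above) is:

```python
# Print the installed versions and confirm CUDA is visible.
import torch
import torchvision
import timm

print("torch:", torch.__version__)              # expect 1.8.0+cu111
print("torchvision:", torchvision.__version__)  # expect 0.9.0+cu111
print("timm:", timm.__version__)                # expect 0.3.2
print("CUDA available:", torch.cuda.is_available())
```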
- -## Dataset Preparation - -Download the [ImageNet-1K](http://image-net.org/) classification dataset and structure the data as follows: -``` -/path/to/imagenet-1k/ - train/ - class1/ - img1.jpeg - class2/ - img2.jpeg - val/ - class1/ - img3.jpeg - class2/ - img4.jpeg -``` - -For pre-training on [ImageNet-22K](http://image-net.org/), download the dataset and structure the data as follows: -``` -/path/to/imagenet-22k/ - class1/ - img1.jpeg - class2/ - img2.jpeg - class3/ - img3.jpeg - class4/ - img4.jpeg -``` diff --git a/cv/classification/convnext/pytorch/LICENSE b/cv/classification/convnext/pytorch/LICENSE deleted file mode 100644 index b93be90515ccd0b9daedaa589e42bf5929693f1f..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) Meta Platforms, Inc. and affiliates. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/cv/classification/convnext/pytorch/README.md b/cv/classification/convnext/pytorch/README.md index 6b4c56b5a84213d945557db5010ed40d83cc1cf8..0f9b1553d4829d066d1cfa737af56545d266d340 100644 --- a/cv/classification/convnext/pytorch/README.md +++ b/cv/classification/convnext/pytorch/README.md @@ -29,6 +29,9 @@ imagenet ## Step 2: Training ### Multiple GPUs on one machine ```bash +git clone https://github.com/facebookresearch/ConvNeXt.git +cd /path/to/ConvNeXt +git checkout 048efcea897d999aed302f2639b6270aedf8d4c8 python3 -m torch.distributed.launch --nproc_per_node=8 main.py \ --model convnext_tiny \ --drop_path 0.1 \ diff --git a/cv/classification/convnext/pytorch/TRAINING.md b/cv/classification/convnext/pytorch/TRAINING.md deleted file mode 100644 index 4abe34548c1f40ebe354737ae150b43223f04404..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/TRAINING.md +++ /dev/null @@ -1,543 +0,0 @@ -# Training - -We provide ImageNet-1K training, ImageNet-22K pre-training, and ImageNet-1K fine-tuning commands here. -Please check [INSTALL.md](INSTALL.md) for installation instructions first. - -## Multi-node Training -We use multi-node training on a SLURM cluster with [submitit](https://github.com/facebookincubator/submitit) for producing the results and models in the paper. Please install: -``` -pip install submitit -``` -We will give example commands for both multi-node and single-machine training below. 
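For orientation, the minimal submitit sketch below shows how a training function is submitted to a SLURM cluster; the resource values are placeholders, and the repo's `run_with_submitit.py` wraps `main.py` with additional cluster-specific arguments.

```python
import submitit

def train():
    # Placeholder for the actual training entry point (main.py's main(args)).
    pass

executor = submitit.AutoExecutor(folder="submitit_logs")
executor.update_parameters(
    nodes=4,               # matches --nodes
    gpus_per_node=8,       # matches --ngpus
    tasks_per_node=8,      # one process per GPU
    cpus_per_task=10,
    timeout_min=72 * 60,
    slurm_partition="your_partition",  # cluster-specific placeholder
)
job = executor.submit(train)
print("Submitted job:", job.job_id)
```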
- -## ImageNet-1K Training -ConvNeXt-T training on ImageNet-1K with 4 8-GPU nodes: -``` -python run_with_submitit.py --nodes 4 --ngpus 8 \ ---model convnext_tiny --drop_path 0.1 \ ---batch_size 128 --lr 4e-3 --update_freq 1 \ ---model_ema true --model_ema_eval true \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results -``` - -- You may need to change cluster-specific arguments in `run_with_submitit.py`. -- You can add `--use_amp true` to train in PyTorch's Automatic Mixed Precision (AMP). -- Use `--resume /path_or_url/to/checkpoint.pth` to resume training from a previous checkpoint; use `--auto_resume true` to auto-resume from the latest checkpoint in the specified output folder. -- `--batch_size`: batch size per GPU; `--update_freq`: gradient accumulation steps. -- The effective batch size = `--nodes` * `--ngpus` * `--batch_size` * `--update_freq`. In the example above, the effective batch size is `4*8*128*1 = 4096`. You can adjust these four arguments together to keep the effective batch size at 4096 and avoid OOM issues, based on the model size, number of nodes, and GPU memory; a small helper sketch for picking these values is given at the end of this section. - -You can use the following command to run this experiment on a single machine: -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_tiny --drop_path 0.1 \ ---batch_size 128 --lr 4e-3 --update_freq 4 \ ---model_ema true --model_ema_eval true \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results -``` - -- Here, the effective batch size = `--nproc_per_node` * `--batch_size` * `--update_freq`. In the example above, the effective batch size is `8*128*4 = 4096`. Running on one machine, we increased `update_freq` so that the total batch size is unchanged. - -To train other ConvNeXt variants, `--model` and `--drop_path` need to be changed. Examples are given below, each with both multi-node and single-machine commands: - -
- -ConvNeXt-S - - -Multi-node -``` -python run_with_submitit.py --nodes 4 --ngpus 8 \ ---model convnext_small --drop_path 0.4 \ ---batch_size 128 --lr 4e-3 --update_freq 1 \ ---model_ema true --model_ema_eval true \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_small --drop_path 0.4 \ ---batch_size 128 --lr 4e-3 --update_freq 4 \ ---model_ema true --model_ema_eval true \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results -``` -
-
- -ConvNeXt-B - - -Multi-node -``` -python run_with_submitit.py --nodes 4 --ngpus 8 \ ---model convnext_base --drop_path 0.5 \ ---batch_size 128 --lr 4e-3 --update_freq 1 \ ---model_ema true --model_ema_eval true \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_base --drop_path 0.5 \ ---batch_size 128 --lr 4e-3 --update_freq 4 \ ---model_ema true --model_ema_eval true \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results -``` - -
-
- -ConvNeXt-L - - -Multi-node -``` -python run_with_submitit.py --nodes 8 --ngpus 8 \ ---model convnext_large --drop_path 0.5 \ ---batch_size 64 --lr 4e-3 --update_freq 1 \ ---model_ema true --model_ema_eval true \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_large --drop_path 0.5 \ ---batch_size 64 --lr 4e-3 --update_freq 8 \ ---model_ema true --model_ema_eval true \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results -``` - -
- -
- -ConvNeXt-S (isotropic) - - -Multi-node -``` -python run_with_submitit.py --nodes 4 --ngpus 8 \ ---model convnext_isotropic_small --drop_path 0.1 \ ---batch_size 128 --lr 4e-3 --update_freq 1 \ ---layer_scale_init_value 0 \ ---warmup_epochs 50 --model_ema true --model_ema_eval true \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_isotropic_small --drop_path 0.1 \ ---batch_size 128 --lr 4e-3 --update_freq 4 \ ---layer_scale_init_value 0 \ ---warmup_epochs 50 --model_ema true --model_ema_eval true \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results -``` - -
- -
- -ConvNeXt-B (isotropic) - - -Multi-node -``` -python run_with_submitit.py --nodes 4 --ngpus 8 \ ---model convnext_isotropic_base --drop_path 0.2 \ ---batch_size 128 --lr 4e-3 --update_freq 1 \ ---layer_scale_init_value 0 \ ---warmup_epochs 50 --model_ema true --model_ema_eval true \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_isotropic_base --drop_path 0.2 \ ---batch_size 128 --lr 4e-3 --update_freq 4 \ ---layer_scale_init_value 0 \ ---warmup_epochs 50 --model_ema true --model_ema_eval true \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results -``` - -
- -
- -ConvNeXt-L (isotropic) - - -Multi-node -``` -python run_with_submitit.py --nodes 8 --ngpus 8 \ ---model convnext_isotropic_large --drop_path 0.5 \ ---batch_size 64 --lr 4e-3 --update_freq 1 \ ---warmup_epochs 50 --model_ema true --model_ema_eval true \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_isotropic_large --drop_path 0.5 \ ---batch_size 64 --lr 4e-3 --update_freq 8 \ ---warmup_epochs 50 --model_ema true --model_ema_eval true \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results -``` - -
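To make the batch-size bookkeeping from the top of this section concrete, here is a small helper sketch (plain Python, not part of the repo) that derives `--update_freq` for a target effective batch size of 4096:

```python
def pick_update_freq(nodes, ngpus, batch_size, target=4096):
    """Gradient-accumulation steps so that nodes * ngpus * batch_size * update_freq == target."""
    per_step = nodes * ngpus * batch_size
    assert target % per_step == 0, "pick a per-GPU batch size that divides the target"
    return target // per_step

# Single machine, 8 GPUs, 128 images per GPU -> --update_freq 4
print(pick_update_freq(nodes=1, ngpus=8, batch_size=128))  # 4
# 4 nodes x 8 GPUs, 128 images per GPU -> --update_freq 1
print(pick_update_freq(nodes=4, ngpus=8, batch_size=128))  # 1
```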
- -## ImageNet-22K Pre-training -ImageNet-22K is significantly larger than ImageNet-1K in terms of data size, so we use 16 8-GPU nodes for pre-training on ImageNet-22K. - -ConvNeXt-B pre-training on ImageNet-22K: - -Multi-node -``` -python run_with_submitit.py --nodes 16 --ngpus 8 \ ---model convnext_base --drop_path 0.1 \ ---batch_size 32 --lr 4e-3 --update_freq 1 \ ---warmup_epochs 5 --epochs 90 \ ---data_set image_folder --nb_classes 21841 --disable_eval true \ ---data_path /path/to/imagenet-22k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_base --drop_path 0.1 \ ---batch_size 32 --lr 4e-3 --update_freq 16 \ ---warmup_epochs 5 --epochs 90 \ ---data_set image_folder --nb_classes 21841 --disable_eval true \ ---data_path /path/to/imagenet-22k \ ---output_dir /path/to/save_results -``` - -
- -ConvNeXt-L - - -Multi-node -``` -python run_with_submitit.py --nodes 16 --ngpus 8 \ ---model convnext_large --drop_path 0.1 \ ---batch_size 32 --lr 4e-3 --update_freq 1 \ ---warmup_epochs 5 --epochs 90 \ ---data_set image_folder --nb_classes 21841 --disable_eval true \ ---data_path /path/to/imagenet-22k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_large --drop_path 0.1 \ ---batch_size 32 --lr 4e-3 --update_freq 16 \ ---warmup_epochs 5 --epochs 90 \ ---data_set image_folder --nb_classes 21841 --disable_eval true \ ---data_path /path/to/imagenet-22k \ ---output_dir /path/to/save_results -``` - -
- -
- -ConvNeXt-XL - - -Multi-node -``` -python run_with_submitit.py --nodes 16 --ngpus 8 \ ---model convnext_xlarge --drop_path 0.2 \ ---batch_size 32 --lr 4e-3 --update_freq 1 \ ---warmup_epochs 5 --epochs 90 \ ---data_set image_folder --nb_classes 21841 --disable_eval true \ ---data_path /path/to/imagenet-22k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_xlarge --drop_path 0.2 \ ---batch_size 32 --lr 4e-3 --update_freq 16 \ ---warmup_epochs 5 --epochs 90 \ ---data_set image_folder --nb_classes 21841 --disable_eval true \ ---data_path /path/to/imagenet-22k \ ---output_dir /path/to/save_results -``` - -
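`--nb_classes 21841` has to match the number of class folders under `--data_path`; the `image_folder` code path in `datasets.py` asserts this. A quick check (torchvision only; the path is a placeholder):

```python
from torchvision import datasets

# ImageFolder discovers one class per sub-directory, exactly as the
# image_folder branch of build_dataset() does.
ds = datasets.ImageFolder("/path/to/imagenet-22k")
print(len(ds.class_to_idx))  # should print 21841 for full ImageNet-22K
```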
- - -## ImageNet-1K Fine-tuning -### Finetune from ImageNet-1K pre-training -The training commands given above for ImageNet-1K use the default resolution (224). We also fine-tune these trained models with a larger resolution (384). Please specify the path or url to the checkpoint in `--finetune`. - -ConvNeXt-B fine-tuning on ImageNet-1K (384x384): - -Multi-node -``` -python run_with_submitit.py --nodes 2 --ngpus 8 \ ---model convnext_base --drop_path 0.8 --input_size 384 \ ---batch_size 32 --lr 5e-5 --update_freq 1 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.7 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_base --drop_path 0.8 --input_size 384 \ ---batch_size 32 --lr 5e-5 --update_freq 2 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.7 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results -``` - -
- -ConvNeXt-L (384x384) - - -Multi-node -``` -python run_with_submitit.py --nodes 2 --ngpus 8 \ ---model convnext_large --drop_path 0.95 --input_size 384 \ ---batch_size 32 --lr 5e-5 --update_freq 1 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.7 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_large --drop_path 0.95 --input_size 384 \ ---batch_size 32 --lr 5e-5 --update_freq 2 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.7 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results -``` - -- Fine-tuning of the ImageNet-1K pre-trained ConvNeXt-L starts from the best EMA weights obtained during pre-training. To load EMA weights from a checkpoint that stores them under a `model_ema` key (e.g., one saved by training with `--model_ema true`), add `--model_key model_ema`. Note that our provided pre-trained checkpoints contain only the `model` key. - -
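The `--model_key` handling corresponds to the checkpoint-loading logic in `main.py`, sketched here in simplified form (the path is a placeholder):

```python
import torch

checkpoint = torch.load("/path/to/checkpoint.pth", map_location="cpu")

# --model_key is a '|'-separated list of candidate keys; the first one
# present in the checkpoint wins (e.g. --model_key model_ema).
state_dict = None
for key in "model_ema".split("|"):
    if key in checkpoint:
        state_dict = checkpoint[key]
        break
if state_dict is None:
    # Checkpoint stores the weights at the top level.
    state_dict = checkpoint
```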
- -### Fine-tune from ImageNet-22K pre-training -We finetune from ImageNet-22K pre-trained models, in both 224 and 384 resolutions. - -ConvNeXt-B fine-tuning on ImageNet-1K (224x224) - -Multi-node -``` -python run_with_submitit.py --nodes 2 --ngpus 8 \ ---model convnext_base --drop_path 0.2 --input_size 224 \ ---batch_size 32 --lr 5e-5 --update_freq 1 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.8 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_base --drop_path 0.2 --input_size 224 \ ---batch_size 32 --lr 5e-5 --update_freq 2 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.8 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results -``` - -
- -ConvNeXt-L (224x224) - - -Multi-node -``` -python run_with_submitit.py --nodes 2 --ngpus 8 \ ---model convnext_large --drop_path 0.3 --input_size 224 \ ---batch_size 32 --lr 5e-5 --update_freq 1 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.8 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_large --drop_path 0.3 --input_size 224 \ ---batch_size 32 --lr 5e-5 --update_freq 2 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.8 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results -``` - -
- -
- -ConvNeXt-XL (224x224) - - -Multi-node -``` -python run_with_submitit.py --nodes 4 --ngpus 8 \ ---model convnext_xlarge --drop_path 0.4 --input_size 224 \ ---batch_size 16 --lr 5e-5 --update_freq 1 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.8 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results \ ---model_ema true --model_ema_eval true -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_xlarge --drop_path 0.4 --input_size 224 \ ---batch_size 16 --lr 5e-5 --update_freq 4 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.8 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results \ ---model_ema true --model_ema_eval true -``` - -
- -
- -ConvNeXt-B (384x384) - - -Multi-node -``` -python run_with_submitit.py --nodes 4 --ngpus 8 \ ---model convnext_base --drop_path 0.2 --input_size 384 \ ---batch_size 16 --lr 5e-5 --update_freq 1 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.8 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_base --drop_path 0.2 --input_size 384 \ ---batch_size 16 --lr 5e-5 --update_freq 4 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.8 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results -``` - -
- -
- -ConvNeXt-L (384x384) - - -Multi-node -``` -python run_with_submitit.py --nodes 4 --ngpus 8 \ ---model convnext_large --drop_path 0.3 --input_size 384 \ ---batch_size 16 --lr 5e-5 --update_freq 1 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.8 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_large --drop_path 0.3 --input_size 384 \ ---batch_size 16 --lr 5e-5 --update_freq 4 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.8 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results -``` - -
- -
- -ConvNeXt-XL (384x384) - - -Multi-node -``` -python run_with_submitit.py --nodes 8 --ngpus 8 \ ---model convnext_xlarge --drop_path 0.4 --input_size 384 \ ---batch_size 8 --lr 5e-5 --update_freq 1 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.8 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---job_dir /path/to/save_results \ ---model_ema true --model_ema_eval true -``` - -Single-machine -``` -python -m torch.distributed.launch --nproc_per_node=8 main.py \ ---model convnext_xlarge --drop_path 0.4 --input_size 384 \ ---batch_size 8 --lr 5e-5 --update_freq 8 \ ---warmup_epochs 0 --epochs 30 --weight_decay 1e-8 \ ---layer_decay 0.8 --head_init_scale 0.001 --cutmix 0 --mixup 0 \ ---finetune /path/to/checkpoint.pth \ ---data_path /path/to/imagenet-1k \ ---output_dir /path/to/save_results \ ---model_ema true --model_ema_eval true -``` - -
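The fine-tuning commands above rely on layer-wise learning-rate decay (`--layer_decay`). A sketch of how the per-layer multipliers are derived, mirroring the `LayerDecayValueAssigner` setup in `main.py`, which splits ConvNeXt into 12 parts:

```python
layer_decay = 0.8  # e.g. --layer_decay 0.8 for fine-tuning from ImageNet-22K
num_layers = 12    # ConvNeXt blocks are grouped into 12 parts in main.py

# Earlier layers get smaller learning-rate multipliers; the last entry (head) is 1.0.
scales = [layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)]
print([round(s, 3) for s in scales])
```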
- diff --git a/cv/classification/convnext/pytorch/datasets.py b/cv/classification/convnext/pytorch/datasets.py deleted file mode 100644 index ee96135fdba427e2ef147a23ecb0ae7edb87c63b..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/datasets.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -import os -from torchvision import datasets, transforms - -from timm.data.constants import \ - IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD -from timm.data import create_transform - -def build_dataset(is_train, args): - transform = build_transform(is_train, args) - - print("Transform = ") - if isinstance(transform, tuple): - for trans in transform: - print(" - - - - - - - - - - ") - for t in trans.transforms: - print(t) - else: - for t in transform.transforms: - print(t) - print("---------------------------") - - if args.data_set == 'CIFAR': - dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform, download=True) - nb_classes = 100 - elif args.data_set == 'IMNET': - print("reading from datapath", args.data_path) - root = os.path.join(args.data_path, 'train' if is_train else 'val') - dataset = datasets.ImageFolder(root, transform=transform) - nb_classes = 1000 - elif args.data_set == "image_folder": - root = args.data_path if is_train else args.eval_data_path - dataset = datasets.ImageFolder(root, transform=transform) - nb_classes = args.nb_classes - assert len(dataset.class_to_idx) == nb_classes - else: - raise NotImplementedError() - print("Number of the class = %d" % nb_classes) - - return dataset, nb_classes - - -def build_transform(is_train, args): - resize_im = args.input_size > 32 - imagenet_default_mean_and_std = args.imagenet_default_mean_and_std - mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN - std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD - - if is_train: - # this should always dispatch to transforms_imagenet_train - transform = create_transform( - input_size=args.input_size, - is_training=True, - color_jitter=args.color_jitter, - auto_augment=args.aa, - interpolation=args.train_interpolation, - re_prob=args.reprob, - re_mode=args.remode, - re_count=args.recount, - mean=mean, - std=std, - ) - if not resize_im: - transform.transforms[0] = transforms.RandomCrop( - args.input_size, padding=4) - return transform - - t = [] - if resize_im: - # warping (no cropping) when evaluated at 384 or larger - if args.input_size >= 384: - t.append( - transforms.Resize((args.input_size, args.input_size), - interpolation=transforms.InterpolationMode.BICUBIC), - ) - print(f"Warping {args.input_size} size input images...") - else: - if args.crop_pct is None: - args.crop_pct = 224 / 256 - size = int(args.input_size / args.crop_pct) - t.append( - # to maintain same ratio w.r.t. 
224 images - transforms.Resize(size, interpolation=transforms.InterpolationMode.BICUBIC), - ) - t.append(transforms.CenterCrop(args.input_size)) - - t.append(transforms.ToTensor()) - t.append(transforms.Normalize(mean, std)) - return transforms.Compose(t) diff --git a/cv/classification/convnext/pytorch/engine.py b/cv/classification/convnext/pytorch/engine.py deleted file mode 100644 index 6903b2f7b578441a8b29a15352bf3aa65c4c65a6..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/engine.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -import math -from typing import Iterable, Optional -import torch -from timm.data import Mixup -from timm.utils import accuracy, ModelEma - -import utils - -def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, - data_loader: Iterable, optimizer: torch.optim.Optimizer, - device: torch.device, epoch: int, loss_scaler, max_norm: float = 0, - model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None, log_writer=None, - wandb_logger=None, start_steps=None, lr_schedule_values=None, wd_schedule_values=None, - num_training_steps_per_epoch=None, update_freq=None, use_amp=False): - model.train(True) - metric_logger = utils.MetricLogger(delimiter=" ") - metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) - metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) - header = 'Epoch: [{}]'.format(epoch) - print_freq = 10 - - optimizer.zero_grad() - - for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)): - step = data_iter_step // update_freq - if step >= num_training_steps_per_epoch: - continue - it = start_steps + step # global training iteration - # Update LR & WD for the first acc - if lr_schedule_values is not None or wd_schedule_values is not None and data_iter_step % update_freq == 0: - for i, param_group in enumerate(optimizer.param_groups): - if lr_schedule_values is not None: - param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"] - if wd_schedule_values is not None and param_group["weight_decay"] > 0: - param_group["weight_decay"] = wd_schedule_values[it] - - samples = samples.to(device, non_blocking=True) - targets = targets.to(device, non_blocking=True) - - if mixup_fn is not None: - samples, targets = mixup_fn(samples, targets) - - if use_amp: - with torch.cuda.amp.autocast(): - output = model(samples) - loss = criterion(output, targets) - else: # full precision - output = model(samples) - loss = criterion(output, targets) - - loss_value = loss.item() - - if not math.isfinite(loss_value): # this could trigger if using AMP - print("Loss is {}, stopping training".format(loss_value)) - assert math.isfinite(loss_value) - - if use_amp: - # this attribute is added by timm on one optimizer (adahessian) - is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order - loss /= update_freq - grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm, - parameters=model.parameters(), create_graph=is_second_order, - update_grad=(data_iter_step + 1) % update_freq == 0) - if (data_iter_step + 1) % update_freq == 0: - optimizer.zero_grad() - if model_ema is not None: - model_ema.update(model) - else: # full precision - loss /= update_freq - loss.backward() - if 
(data_iter_step + 1) % update_freq == 0: - optimizer.step() - optimizer.zero_grad() - if model_ema is not None: - model_ema.update(model) - - torch.cuda.synchronize() - - if mixup_fn is None: - class_acc = (output.max(-1)[-1] == targets).float().mean() - else: - class_acc = None - metric_logger.update(loss=loss_value) - metric_logger.update(class_acc=class_acc) - min_lr = 10. - max_lr = 0. - for group in optimizer.param_groups: - min_lr = min(min_lr, group["lr"]) - max_lr = max(max_lr, group["lr"]) - - metric_logger.update(lr=max_lr) - metric_logger.update(min_lr=min_lr) - weight_decay_value = None - for group in optimizer.param_groups: - if group["weight_decay"] > 0: - weight_decay_value = group["weight_decay"] - metric_logger.update(weight_decay=weight_decay_value) - if use_amp: - metric_logger.update(grad_norm=grad_norm) - - if log_writer is not None: - log_writer.update(loss=loss_value, head="loss") - log_writer.update(class_acc=class_acc, head="loss") - log_writer.update(lr=max_lr, head="opt") - log_writer.update(min_lr=min_lr, head="opt") - log_writer.update(weight_decay=weight_decay_value, head="opt") - if use_amp: - log_writer.update(grad_norm=grad_norm, head="opt") - log_writer.set_step() - - if wandb_logger: - wandb_logger._wandb.log({ - 'Rank-0 Batch Wise/train_loss': loss_value, - 'Rank-0 Batch Wise/train_max_lr': max_lr, - 'Rank-0 Batch Wise/train_min_lr': min_lr - }, commit=False) - if class_acc: - wandb_logger._wandb.log({'Rank-0 Batch Wise/train_class_acc': class_acc}, commit=False) - if use_amp: - wandb_logger._wandb.log({'Rank-0 Batch Wise/train_grad_norm': grad_norm}, commit=False) - wandb_logger._wandb.log({'Rank-0 Batch Wise/global_train_step': it}) - - - # gather the stats from all processes - metric_logger.synchronize_between_processes() - print("Averaged stats:", metric_logger) - return {k: meter.global_avg for k, meter in metric_logger.meters.items()} - -@torch.no_grad() -def evaluate(data_loader, model, device, use_amp=False): - criterion = torch.nn.CrossEntropyLoss() - - metric_logger = utils.MetricLogger(delimiter=" ") - header = 'Test:' - - # switch to evaluation mode - model.eval() - for batch in metric_logger.log_every(data_loader, 10, header): - images = batch[0] - target = batch[-1] - - images = images.to(device, non_blocking=True) - target = target.to(device, non_blocking=True) - - # compute output - if use_amp: - with torch.cuda.amp.autocast(): - output = model(images) - loss = criterion(output, target) - else: - output = model(images) - loss = criterion(output, target) - - acc1, acc5 = accuracy(output, target, topk=(1, 5)) - - batch_size = images.shape[0] - metric_logger.update(loss=loss.item()) - metric_logger.meters['acc1'].update(acc1.item(), n=batch_size) - metric_logger.meters['acc5'].update(acc5.item(), n=batch_size) - # gather the stats from all processes - metric_logger.synchronize_between_processes() - print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}' - .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss)) - - return {k: meter.global_avg for k, meter in metric_logger.meters.items()} diff --git a/cv/classification/convnext/pytorch/main.py b/cv/classification/convnext/pytorch/main.py deleted file mode 100644 index 53ef545bea8fcf7f272dc90a8e6fde743629fe73..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/main.py +++ /dev/null @@ -1,477 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. 
- -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -import argparse -import datetime -import numpy as np -import time -import torch -import torch.nn as nn -import torch.backends.cudnn as cudnn -import json -import os - -from pathlib import Path - -from timm.data.mixup import Mixup -from timm.models import create_model -from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy -from timm.utils import ModelEma -from optim_factory import create_optimizer, LayerDecayValueAssigner - -from datasets import build_dataset -from engine import train_one_epoch, evaluate - -from utils import NativeScalerWithGradNormCount as NativeScaler -import utils -import models.convnext -import models.convnext_isotropic - -def str2bool(v): - """ - Converts string to bool type; enables command line - arguments in the format of '--arg1 true --arg2 false' - """ - if isinstance(v, bool): - return v - if v.lower() in ('yes', 'true', 't', 'y', '1'): - return True - elif v.lower() in ('no', 'false', 'f', 'n', '0'): - return False - else: - raise argparse.ArgumentTypeError('Boolean value expected.') - -def get_args_parser(): - parser = argparse.ArgumentParser('ConvNeXt training and evaluation script for image classification', add_help=False) - parser.add_argument('--batch_size', default=64, type=int, - help='Per GPU batch size') - parser.add_argument('--epochs', default=300, type=int) - parser.add_argument('--update_freq', default=1, type=int, - help='gradient accumulation steps') - - # Model parameters - parser.add_argument('--model', default='convnext_tiny', type=str, metavar='MODEL', - help='Name of model to train') - parser.add_argument('--drop_path', type=float, default=0, metavar='PCT', - help='Drop path rate (default: 0.0)') - parser.add_argument('--input_size', default=224, type=int, - help='image input size') - parser.add_argument('--layer_scale_init_value', default=1e-6, type=float, - help="Layer scale initial values") - - # EMA related parameters - parser.add_argument('--model_ema', type=str2bool, default=False) - parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='') - parser.add_argument('--model_ema_force_cpu', type=str2bool, default=False, help='') - parser.add_argument('--model_ema_eval', type=str2bool, default=False, help='Using ema to eval during training.') - - # Optimization parameters - parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', - help='Optimizer (default: "adamw"') - parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON', - help='Optimizer Epsilon (default: 1e-8)') - parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA', - help='Optimizer Betas (default: None, use opt default)') - parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', - help='Clip gradient norm (default: None, no clipping)') - parser.add_argument('--momentum', type=float, default=0.9, metavar='M', - help='SGD momentum (default: 0.9)') - parser.add_argument('--weight_decay', type=float, default=0.05, - help='weight decay (default: 0.05)') - parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the - weight decay. 
We use a cosine schedule for WD and using a larger decay by - the end of training improves performance for ViTs.""") - - parser.add_argument('--lr', type=float, default=4e-3, metavar='LR', - help='learning rate (default: 4e-3), with total batch size 4096') - parser.add_argument('--layer_decay', type=float, default=1.0) - parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR', - help='lower lr bound for cyclic schedulers that hit 0 (1e-6)') - parser.add_argument('--warmup_epochs', type=int, default=20, metavar='N', - help='epochs to warmup LR, if scheduler supports') - parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N', - help='num of steps to warmup LR, will overload warmup_epochs if set > 0') - - # Augmentation parameters - parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT', - help='Color jitter factor (default: 0.4)') - parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', - help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)'), - parser.add_argument('--smoothing', type=float, default=0.1, - help='Label smoothing (default: 0.1)') - parser.add_argument('--train_interpolation', type=str, default='bicubic', - help='Training interpolation (random, bilinear, bicubic default: "bicubic")') - - # Evaluation parameters - parser.add_argument('--crop_pct', type=float, default=None) - - # * Random Erase params - parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', - help='Random erase prob (default: 0.25)') - parser.add_argument('--remode', type=str, default='pixel', - help='Random erase mode (default: "pixel")') - parser.add_argument('--recount', type=int, default=1, - help='Random erase count (default: 1)') - parser.add_argument('--resplit', type=str2bool, default=False, - help='Do not random erase first (clean) augmentation split') - - # * Mixup params - parser.add_argument('--mixup', type=float, default=0.8, - help='mixup alpha, mixup enabled if > 0.') - parser.add_argument('--cutmix', type=float, default=1.0, - help='cutmix alpha, cutmix enabled if > 0.') - parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, - help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') - parser.add_argument('--mixup_prob', type=float, default=1.0, - help='Probability of performing mixup or cutmix when either/both is enabled') - parser.add_argument('--mixup_switch_prob', type=float, default=0.5, - help='Probability of switching to cutmix when both mixup and cutmix enabled') - parser.add_argument('--mixup_mode', type=str, default='batch', - help='How to apply mixup/cutmix params. 
Per "batch", "pair", or "elem"') - - # * Finetuning params - parser.add_argument('--finetune', default='', - help='finetune from checkpoint') - parser.add_argument('--head_init_scale', default=1.0, type=float, - help='classifier head initial scale, typically adjusted in fine-tuning') - parser.add_argument('--model_key', default='model|module', type=str, - help='which key to load from saved state dict, usually model or model_ema') - parser.add_argument('--model_prefix', default='', type=str) - - # Dataset parameters - parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str, - help='dataset path') - parser.add_argument('--eval_data_path', default=None, type=str, - help='dataset path for evaluation') - parser.add_argument('--nb_classes', default=1000, type=int, - help='number of the classification types') - parser.add_argument('--imagenet_default_mean_and_std', type=str2bool, default=True) - parser.add_argument('--data_set', default='IMNET', choices=['CIFAR', 'IMNET', 'image_folder'], - type=str, help='ImageNet dataset path') - parser.add_argument('--output_dir', default='', - help='path where to save, empty for no saving') - parser.add_argument('--log_dir', default=None, - help='path where to tensorboard log') - parser.add_argument('--device', default='cuda', - help='device to use for training / testing') - parser.add_argument('--seed', default=0, type=int) - - parser.add_argument('--resume', default='', - help='resume from checkpoint') - parser.add_argument('--auto_resume', type=str2bool, default=True) - parser.add_argument('--save_ckpt', type=str2bool, default=True) - parser.add_argument('--save_ckpt_freq', default=1, type=int) - parser.add_argument('--save_ckpt_num', default=3, type=int) - - parser.add_argument('--start_epoch', default=0, type=int, metavar='N', - help='start epoch') - parser.add_argument('--eval', type=str2bool, default=False, - help='Perform evaluation only') - parser.add_argument('--dist_eval', type=str2bool, default=True, - help='Enabling distributed evaluation') - parser.add_argument('--disable_eval', type=str2bool, default=False, - help='Disabling evaluation during training') - parser.add_argument('--num_workers', default=10, type=int) - parser.add_argument('--pin_mem', type=str2bool, default=True, - help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') - - # distributed training parameters - parser.add_argument('--world_size', default=1, type=int, - help='number of distributed processes') - parser.add_argument('--local_rank', default=-1, type=int) - parser.add_argument('--dist_on_itp', type=str2bool, default=False) - parser.add_argument('--dist_url', default='env://', - help='url used to set up distributed training') - - parser.add_argument('--use_amp', type=str2bool, default=False, - help="Use PyTorch's AMP (Automatic Mixed Precision) or not") - - # Weights and Biases arguments - parser.add_argument('--enable_wandb', type=str2bool, default=False, - help="enable logging to Weights and Biases") - parser.add_argument('--project', default='convnext', type=str, - help="The name of the W&B project where you're sending the new run.") - parser.add_argument('--wandb_ckpt', type=str2bool, default=False, - help="Save model checkpoints as W&B Artifacts.") - - return parser - -def main(args): - utils.init_distributed_mode(args) - print(args) - device = torch.device(args.device) - - # fix the seed for reproducibility - seed = args.seed + utils.get_rank() - torch.manual_seed(seed) - np.random.seed(seed) - 
cudnn.benchmark = True - - dataset_train, args.nb_classes = build_dataset(is_train=True, args=args) - if args.disable_eval: - args.dist_eval = False - dataset_val = None - else: - dataset_val, _ = build_dataset(is_train=False, args=args) - - num_tasks = utils.get_world_size() - global_rank = utils.get_rank() - - sampler_train = torch.utils.data.DistributedSampler( - dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True, seed=args.seed, - ) - print("Sampler_train = %s" % str(sampler_train)) - if args.dist_eval: - if len(dataset_val) % num_tasks != 0: - print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. ' - 'This will slightly alter validation results as extra duplicate entries are added to achieve ' - 'equal num of samples per-process.') - sampler_val = torch.utils.data.DistributedSampler( - dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False) - else: - sampler_val = torch.utils.data.SequentialSampler(dataset_val) - - if global_rank == 0 and args.log_dir is not None: - os.makedirs(args.log_dir, exist_ok=True) - log_writer = utils.TensorboardLogger(log_dir=args.log_dir) - else: - log_writer = None - - if global_rank == 0 and args.enable_wandb: - wandb_logger = utils.WandbLogger(args) - else: - wandb_logger = None - - data_loader_train = torch.utils.data.DataLoader( - dataset_train, sampler=sampler_train, - batch_size=args.batch_size, - num_workers=args.num_workers, - pin_memory=args.pin_mem, - drop_last=True, - ) - - if dataset_val is not None: - data_loader_val = torch.utils.data.DataLoader( - dataset_val, sampler=sampler_val, - batch_size=int(1.5 * args.batch_size), - num_workers=args.num_workers, - pin_memory=args.pin_mem, - drop_last=False - ) - else: - data_loader_val = None - - mixup_fn = None - mixup_active = args.mixup > 0 or args.cutmix > 0. 
or args.cutmix_minmax is not None - if mixup_active: - print("Mixup is activated!") - mixup_fn = Mixup( - mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, - prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, - label_smoothing=args.smoothing, num_classes=args.nb_classes) - - model = create_model( - args.model, - pretrained=False, - num_classes=args.nb_classes, - drop_path_rate=args.drop_path, - layer_scale_init_value=args.layer_scale_init_value, - head_init_scale=args.head_init_scale, - ) - - if args.finetune: - if args.finetune.startswith('https'): - checkpoint = torch.hub.load_state_dict_from_url( - args.finetune, map_location='cpu', check_hash=True) - else: - checkpoint = torch.load(args.finetune, map_location='cpu') - - print("Load ckpt from %s" % args.finetune) - checkpoint_model = None - for model_key in args.model_key.split('|'): - if model_key in checkpoint: - checkpoint_model = checkpoint[model_key] - print("Load state_dict by model_key = %s" % model_key) - break - if checkpoint_model is None: - checkpoint_model = checkpoint - state_dict = model.state_dict() - for k in ['head.weight', 'head.bias']: - if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape: - print(f"Removing key {k} from pretrained checkpoint") - del checkpoint_model[k] - utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix) - model.to(device) - - model_ema = None - if args.model_ema: - # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper - model_ema = ModelEma( - model, - decay=args.model_ema_decay, - device='cpu' if args.model_ema_force_cpu else '', - resume='') - print("Using EMA with decay = %.8f" % args.model_ema_decay) - - model_without_ddp = model - n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) - - print("Model = %s" % str(model_without_ddp)) - print('number of params:', n_parameters) - - total_batch_size = args.batch_size * args.update_freq * utils.get_world_size() - num_training_steps_per_epoch = len(dataset_train) // total_batch_size - print("LR = %.8f" % args.lr) - print("Batch size = %d" % total_batch_size) - print("Update frequent = %d" % args.update_freq) - print("Number of training examples = %d" % len(dataset_train)) - print("Number of training training per epoch = %d" % num_training_steps_per_epoch) - - if args.layer_decay < 1.0 or args.layer_decay > 1.0: - num_layers = 12 # convnext layers divided into 12 parts, each with a different decayed lr value. 
- assert args.model in ['convnext_small', 'convnext_base', 'convnext_large', 'convnext_xlarge'], \ - "Layer Decay impl only supports convnext_small/base/large/xlarge" - assigner = LayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2))) - else: - assigner = None - - if assigner is not None: - print("Assigned values = %s" % str(assigner.values)) - - if args.distributed: - model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False) - model_without_ddp = model.module - - optimizer = create_optimizer( - args, model_without_ddp, skip_list=None, - get_num_layer=assigner.get_layer_id if assigner is not None else None, - get_layer_scale=assigner.get_scale if assigner is not None else None) - - loss_scaler = NativeScaler() # if args.use_amp is False, this won't be used - - print("Use Cosine LR scheduler") - lr_schedule_values = utils.cosine_scheduler( - args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch, - warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps, - ) - - if args.weight_decay_end is None: - args.weight_decay_end = args.weight_decay - wd_schedule_values = utils.cosine_scheduler( - args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch) - print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values))) - - if mixup_fn is not None: - # smoothing is handled with mixup label transform - criterion = SoftTargetCrossEntropy() - elif args.smoothing > 0.: - criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing) - else: - criterion = torch.nn.CrossEntropyLoss() - - print("criterion = %s" % str(criterion)) - - utils.auto_load_model( - args=args, model=model, model_without_ddp=model_without_ddp, - optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema) - - if args.eval: - print(f"Eval only mode") - test_stats = evaluate(data_loader_val, model, device, use_amp=args.use_amp) - print(f"Accuracy of the network on {len(dataset_val)} test images: {test_stats['acc1']:.5f}%") - return - - max_accuracy = 0.0 - if args.model_ema and args.model_ema_eval: - max_accuracy_ema = 0.0 - - print("Start training for %d epochs" % args.epochs) - start_time = time.time() - for epoch in range(args.start_epoch, args.epochs): - if args.distributed: - data_loader_train.sampler.set_epoch(epoch) - if log_writer is not None: - log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq) - if wandb_logger: - wandb_logger.set_steps() - train_stats = train_one_epoch( - model, criterion, data_loader_train, optimizer, - device, epoch, loss_scaler, args.clip_grad, model_ema, mixup_fn, - log_writer=log_writer, wandb_logger=wandb_logger, start_steps=epoch * num_training_steps_per_epoch, - lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values, - num_training_steps_per_epoch=num_training_steps_per_epoch, update_freq=args.update_freq, - use_amp=args.use_amp - ) - if args.output_dir and args.save_ckpt: - if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs: - utils.save_model( - args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, - loss_scaler=loss_scaler, epoch=epoch, model_ema=model_ema) - if data_loader_val is not None: - test_stats = evaluate(data_loader_val, model, device, use_amp=args.use_amp) - print(f"Accuracy of the model on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%") - if max_accuracy < test_stats["acc1"]: - max_accuracy = 
test_stats["acc1"] - if args.output_dir and args.save_ckpt: - utils.save_model( - args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, - loss_scaler=loss_scaler, epoch="best", model_ema=model_ema) - print(f'Max accuracy: {max_accuracy:.2f}%') - - if log_writer is not None: - log_writer.update(test_acc1=test_stats['acc1'], head="perf", step=epoch) - log_writer.update(test_acc5=test_stats['acc5'], head="perf", step=epoch) - log_writer.update(test_loss=test_stats['loss'], head="perf", step=epoch) - - log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, - **{f'test_{k}': v for k, v in test_stats.items()}, - 'epoch': epoch, - 'n_parameters': n_parameters} - - # repeat testing routines for EMA, if ema eval is turned on - if args.model_ema and args.model_ema_eval: - test_stats_ema = evaluate(data_loader_val, model_ema.ema, device, use_amp=args.use_amp) - print(f"Accuracy of the model EMA on {len(dataset_val)} test images: {test_stats_ema['acc1']:.1f}%") - if max_accuracy_ema < test_stats_ema["acc1"]: - max_accuracy_ema = test_stats_ema["acc1"] - if args.output_dir and args.save_ckpt: - utils.save_model( - args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, - loss_scaler=loss_scaler, epoch="best-ema", model_ema=model_ema) - print(f'Max EMA accuracy: {max_accuracy_ema:.2f}%') - if log_writer is not None: - log_writer.update(test_acc1_ema=test_stats_ema['acc1'], head="perf", step=epoch) - log_stats.update({**{f'test_{k}_ema': v for k, v in test_stats_ema.items()}}) - else: - log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, - 'epoch': epoch, - 'n_parameters': n_parameters} - - if args.output_dir and utils.is_main_process(): - if log_writer is not None: - log_writer.flush() - with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f: - f.write(json.dumps(log_stats) + "\n") - - if wandb_logger: - wandb_logger.log_epoch_metrics(log_stats) - - if wandb_logger and args.wandb_ckpt and args.save_ckpt and args.output_dir: - wandb_logger.log_checkpoints() - - - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print('Training time {}'.format(total_time_str)) - -if __name__ == '__main__': - parser = argparse.ArgumentParser('ConvNeXt training and evaluation script', parents=[get_args_parser()]) - args = parser.parse_args() - if args.output_dir: - Path(args.output_dir).mkdir(parents=True, exist_ok=True) - main(args) diff --git a/cv/classification/convnext/pytorch/models/convnext.py b/cv/classification/convnext/pytorch/models/convnext.py deleted file mode 100644 index 74c1e9407bb4096a3b27127515573e32865b40f7..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/models/convnext.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -import torch -import torch.nn as nn -import torch.nn.functional as F -from timm.models.layers import trunc_normal_, DropPath -from timm.models.registry import register_model - -class Block(nn.Module): - r""" ConvNeXt Block. 
There are two equivalent implementations: - (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) - (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back - We use (2) as we find it slightly faster in PyTorch - - Args: - dim (int): Number of input channels. - drop_path (float): Stochastic depth rate. Default: 0.0 - layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. - """ - def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6): - super().__init__() - self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv - self.norm = LayerNorm(dim, eps=1e-6) - self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers - self.act = nn.GELU() - self.pwconv2 = nn.Linear(4 * dim, dim) - self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)), - requires_grad=True) if layer_scale_init_value > 0 else None - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - - def forward(self, x): - input = x - x = self.dwconv(x) - x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) - x = self.norm(x) - x = self.pwconv1(x) - x = self.act(x) - x = self.pwconv2(x) - if self.gamma is not None: - x = self.gamma * x - x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) - - x = input + self.drop_path(x) - return x - -class ConvNeXt(nn.Module): - r""" ConvNeXt - A PyTorch impl of : `A ConvNet for the 2020s` - - https://arxiv.org/pdf/2201.03545.pdf - - Args: - in_chans (int): Number of input image channels. Default: 3 - num_classes (int): Number of classes for classification head. Default: 1000 - depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3] - dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768] - drop_path_rate (float): Stochastic depth rate. Default: 0. - layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. - head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1. 
- """ - def __init__(self, in_chans=3, num_classes=1000, - depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], drop_path_rate=0., - layer_scale_init_value=1e-6, head_init_scale=1., - ): - super().__init__() - - self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers - stem = nn.Sequential( - nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4), - LayerNorm(dims[0], eps=1e-6, data_format="channels_first") - ) - self.downsample_layers.append(stem) - for i in range(3): - downsample_layer = nn.Sequential( - LayerNorm(dims[i], eps=1e-6, data_format="channels_first"), - nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2), - ) - self.downsample_layers.append(downsample_layer) - - self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks - dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] - cur = 0 - for i in range(4): - stage = nn.Sequential( - *[Block(dim=dims[i], drop_path=dp_rates[cur + j], - layer_scale_init_value=layer_scale_init_value) for j in range(depths[i])] - ) - self.stages.append(stage) - cur += depths[i] - - self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer - self.head = nn.Linear(dims[-1], num_classes) - - self.apply(self._init_weights) - self.head.weight.data.mul_(head_init_scale) - self.head.bias.data.mul_(head_init_scale) - - def _init_weights(self, m): - if isinstance(m, (nn.Conv2d, nn.Linear)): - trunc_normal_(m.weight, std=.02) - nn.init.constant_(m.bias, 0) - - def forward_features(self, x): - for i in range(4): - x = self.downsample_layers[i](x) - x = self.stages[i](x) - return self.norm(x.mean([-2, -1])) # global average pooling, (N, C, H, W) -> (N, C) - - def forward(self, x): - x = self.forward_features(x) - x = self.head(x) - return x - -class LayerNorm(nn.Module): - r""" LayerNorm that supports two data formats: channels_last (default) or channels_first. - The ordering of the dimensions in the inputs. channels_last corresponds to inputs with - shape (batch_size, height, width, channels) while channels_first corresponds to inputs - with shape (batch_size, channels, height, width). 
- """ - def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): - super().__init__() - self.weight = nn.Parameter(torch.ones(normalized_shape)) - self.bias = nn.Parameter(torch.zeros(normalized_shape)) - self.eps = eps - self.data_format = data_format - if self.data_format not in ["channels_last", "channels_first"]: - raise NotImplementedError - self.normalized_shape = (normalized_shape, ) - - def forward(self, x): - if self.data_format == "channels_last": - return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) - elif self.data_format == "channels_first": - u = x.mean(1, keepdim=True) - s = (x - u).pow(2).mean(1, keepdim=True) - x = (x - u) / torch.sqrt(s + self.eps) - x = self.weight[:, None, None] * x + self.bias[:, None, None] - return x - - -model_urls = { - "convnext_tiny_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth", - "convnext_small_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth", - "convnext_base_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth", - "convnext_large_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth", - "convnext_tiny_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth", - "convnext_small_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth", - "convnext_base_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth", - "convnext_large_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth", - "convnext_xlarge_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth", -} - -@register_model -def convnext_tiny(pretrained=False,in_22k=False, **kwargs): - model = ConvNeXt(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs) - if pretrained: - url = model_urls['convnext_tiny_22k'] if in_22k else model_urls['convnext_tiny_1k'] - checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu", check_hash=True) - model.load_state_dict(checkpoint["model"]) - return model - -@register_model -def convnext_small(pretrained=False,in_22k=False, **kwargs): - model = ConvNeXt(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs) - if pretrained: - url = model_urls['convnext_small_22k'] if in_22k else model_urls['convnext_small_1k'] - checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu") - model.load_state_dict(checkpoint["model"]) - return model - -@register_model -def convnext_base(pretrained=False, in_22k=False, **kwargs): - model = ConvNeXt(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs) - if pretrained: - url = model_urls['convnext_base_22k'] if in_22k else model_urls['convnext_base_1k'] - checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu") - model.load_state_dict(checkpoint["model"]) - return model - -@register_model -def convnext_large(pretrained=False, in_22k=False, **kwargs): - model = ConvNeXt(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs) - if pretrained: - url = model_urls['convnext_large_22k'] if in_22k else model_urls['convnext_large_1k'] - checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu") - model.load_state_dict(checkpoint["model"]) - return model - -@register_model -def convnext_xlarge(pretrained=False, in_22k=False, **kwargs): - model = ConvNeXt(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs) - if pretrained: - assert in_22k, "only ImageNet-22K pre-trained ConvNeXt-XL is available; please 
set in_22k=True" - url = model_urls['convnext_xlarge_22k'] - checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu") - model.load_state_dict(checkpoint["model"]) - return model diff --git a/cv/classification/convnext/pytorch/models/convnext_isotropic.py b/cv/classification/convnext/pytorch/models/convnext_isotropic.py deleted file mode 100644 index 50217b2e431e2862376034ae25969b19b704f6e2..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/models/convnext_isotropic.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -from functools import partial -import torch -import torch.nn as nn -import torch.nn.functional as F -from timm.models.layers import trunc_normal_, DropPath -from timm.models.registry import register_model -from .convnext import Block, LayerNorm - -class ConvNeXtIsotropic(nn.Module): - r""" ConvNeXt - A PyTorch impl of : `A ConvNet for the 2020s` - - https://arxiv.org/pdf/2201.03545.pdf - Isotropic ConvNeXts (Section 3.3 in paper) - - Args: - in_chans (int): Number of input image channels. Default: 3 - num_classes (int): Number of classes for classification head. Default: 1000 - depth (tuple(int)): Number of blocks. Default: 18. - dims (int): Feature dimension. Default: 384 - drop_path_rate (float): Stochastic depth rate. Default: 0. - layer_scale_init_value (float): Init value for Layer Scale. Default: 0. - head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1. - """ - def __init__(self, in_chans=3, num_classes=1000, - depth=18, dim=384, drop_path_rate=0., - layer_scale_init_value=0, head_init_scale=1., - ): - super().__init__() - - self.stem = nn.Conv2d(in_chans, dim, kernel_size=16, stride=16) - dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, depth)] - self.blocks = nn.Sequential(*[Block(dim=dim, drop_path=dp_rates[i], - layer_scale_init_value=layer_scale_init_value) - for i in range(depth)]) - - self.norm = LayerNorm(dim, eps=1e-6) # final norm layer - self.head = nn.Linear(dim, num_classes) - - self.apply(self._init_weights) - self.head.weight.data.mul_(head_init_scale) - self.head.bias.data.mul_(head_init_scale) - - def _init_weights(self, m): - if isinstance(m, (nn.Conv2d, nn.Linear)): - trunc_normal_(m.weight, std=.02) - nn.init.constant_(m.bias, 0) - - def forward_features(self, x): - x = self.stem(x) - x = self.blocks(x) - return self.norm(x.mean([-2, -1])) # global average pooling, (N, C, H, W) -> (N, C) - - def forward(self, x): - x = self.forward_features(x) - x = self.head(x) - return x - -@register_model -def convnext_isotropic_small(pretrained=False, **kwargs): - model = ConvNeXtIsotropic(depth=18, dim=384, **kwargs) - if pretrained: - url = 'https://dl.fbaipublicfiles.com/convnext/convnext_iso_small_1k_224_ema.pth' - checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu") - model.load_state_dict(checkpoint["model"]) - return model - -@register_model -def convnext_isotropic_base(pretrained=False, **kwargs): - model = ConvNeXtIsotropic(depth=18, dim=768, **kwargs) - if pretrained: - url = 'https://dl.fbaipublicfiles.com/convnext/convnext_iso_base_1k_224_ema.pth' - checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu") - model.load_state_dict(checkpoint["model"]) - return model - -@register_model -def 
convnext_isotropic_large(pretrained=False, **kwargs): - model = ConvNeXtIsotropic(depth=36, dim=1024, **kwargs) - if pretrained: - url = 'https://dl.fbaipublicfiles.com/convnext/convnext_iso_large_1k_224_ema.pth' - checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu") - model.load_state_dict(checkpoint["model"]) - return model diff --git a/cv/classification/convnext/pytorch/object_detection/README.md b/cv/classification/convnext/pytorch/object_detection/README.md deleted file mode 100644 index 68e11906e4315f63f6f7cc6432b23091c1727fc7..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# COCO Object detection with ConvNeXt - -## Getting started - -We add ConvNeXt model and config files to [Swin Detection](https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/tree/6a979e2164e3fb0de0ca2546545013a4d71b2f7d). -Our code has been tested with commit `6a979e2`. Please refer to [README.md](https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/6a979e2164e3fb0de0ca2546545013a4d71b2f7d/README.md) for installation and dataset preparation instructions. - -## Results and Fine-tuned Models - -| name | Pretrained Model | Method | Lr Schd | box mAP | mask mAP | #params | FLOPs | Fine-tuned Model | -|:---:|:---:|:---:|:---:| :---:|:---:|:---:|:---:| :---:| -| ConvNeXt-T | [ImageNet-1K](https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224.pth) | Mask R-CNN | 3x | 46.2 | 41.7 | 48M | 262G | [model](https://dl.fbaipublicfiles.com/convnext/coco/mask_rcnn_convnext_tiny_1k_3x.pth) | -| ConvNeXt-T | [ImageNet-1K](https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224.pth) | Cascade Mask R-CNN | 3x | 50.4 | 43.7 | 86M | 741G | [model](https://dl.fbaipublicfiles.com/convnext/coco/cascade_mask_rcnn_convnext_tiny_1k_3x.pth) | -| ConvNeXt-S | [ImageNet-1K](https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224.pth) | Cascade Mask R-CNN | 3x | 51.9 | 45.0 | 108M | 827G | [model](https://dl.fbaipublicfiles.com/convnext/coco/cascade_mask_rcnn_convnext_small_1k_3x.pth) | -| ConvNeXt-B | [ImageNet-1K](https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224.pth) | Cascade Mask R-CNN | 3x | 52.7 | 45.6 | 146M | 964G | [model](https://dl.fbaipublicfiles.com/convnext/coco/cascade_mask_rcnn_convnext_base_1k_3x.pth) | -| ConvNeXt-B | [ImageNet-22K](https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth) | Cascade Mask R-CNN | 3x | 54.0 | 46.9 | 146M | 964G | [model](https://dl.fbaipublicfiles.com/convnext/coco/cascade_mask_rcnn_convnext_base_22k_3x.pth) | -| ConvNeXt-L | [ImageNet-22K](https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth) | Cascade Mask R-CNN | 3x | 54.8 | 47.6 | 255M | 1354G | [model](https://dl.fbaipublicfiles.com/convnext/coco/cascade_mask_rcnn_convnext_large_22k_3x.pth) | -| ConvNeXt-XL | [ImageNet-22K](https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth) | Cascade Mask R-CNN | 3x | 55.2 | 47.7 | 407M | 1898G | [model](https://dl.fbaipublicfiles.com/convnext/coco/cascade_mask_rcnn_convnext_xlarge_22k_3x.pth) | - - -### Training - -To train a detector with pre-trained models, run: -``` -# single-gpu training -python tools/train.py --cfg-options model.pretrained= [other optional arguments] - -# multi-gpu training -tools/dist_train.sh --cfg-options model.pretrained= [other optional arguments] -``` -For example, to train a Cascade Mask R-CNN model with a `ConvNeXt-T` backbone and 8 gpus, run: -``` 
-tools/dist_train.sh configs/convnext/cascade_mask_rcnn_convnext_tiny_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in1k.py 8 --cfg-options model.pretrained=https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224.pth -``` - -More config files can be found at [`configs/convnext`](configs/convnext). - -### Inference -``` -# single-gpu testing -python tools/test.py --eval bbox segm - -# multi-gpu testing -tools/dist_test.sh --eval bbox segm -``` - -## Acknowledgment - -This code is built using [mmdetection](https://github.com/open-mmlab/mmdetection), [timm](https://github.com/rwightman/pytorch-image-models) libraries, and [BeiT](https://github.com/microsoft/unilm/tree/f8f3df80c65eb5e5fc6d6d3c9bd3137621795d1e/beit), [Swin Transformer](https://github.com/microsoft/Swin-Transformer) repositories. \ No newline at end of file diff --git a/cv/classification/convnext/pytorch/object_detection/configs/_base_/default_runtime.py b/cv/classification/convnext/pytorch/object_detection/configs/_base_/default_runtime.py deleted file mode 100644 index 3aa9bc043f970b1043697ab00d7b84f97c07350c..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/configs/_base_/default_runtime.py +++ /dev/null @@ -1,16 +0,0 @@ -checkpoint_config = dict(interval=1) -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='CustomizedTextLoggerHook'), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -custom_hooks = [dict(type='NumClassCheckHook')] - -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] diff --git a/cv/classification/convnext/pytorch/object_detection/configs/_base_/models/cascade_mask_rcnn_convnext_fpn.py b/cv/classification/convnext/pytorch/object_detection/configs/_base_/models/cascade_mask_rcnn_convnext_fpn.py deleted file mode 100644 index 624b7df31768d4ca00cf3a9b3dea7802d6077b70..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/configs/_base_/models/cascade_mask_rcnn_convnext_fpn.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
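The base model config that follows wires a ConvNeXt backbone into an FPN neck. Stage `i` of the backbone emits a feature map with `dims[i]` channels at each index in `out_indices`, so a usable config has to keep `neck.in_channels` equal to `dims`; the base file below pairs Tiny-sized `dims` with Base-sized FPN channels, and the per-model configs later in this diff override both fields together so they line up. A minimal sketch of that invariant (the helper name is ours, not part of the repo):

```
# Sketch (helper name is ours, not part of the repo): the FPN must be told the
# channel count of each backbone stage selected by out_indices.
def fpn_in_channels(dims, out_indices=(0, 1, 2, 3)):
    return [dims[i] for i in out_indices]

print(fpn_in_channels([96, 192, 384, 768]))      # ConvNeXt-T/S
print(fpn_in_channels([128, 256, 512, 1024]))    # ConvNeXt-B
print(fpn_in_channels([192, 384, 768, 1536]))    # ConvNeXt-L
```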
- - -# model settings -model = dict( - type='CascadeRCNN', - pretrained=None, - backbone=dict( - type='ConvNeXt', - in_chans=3, - depths=[3, 3, 9, 3], - dims=[96, 192, 384, 768], - drop_path_rate=0.2, - layer_scale_init_value=1e-6, - out_indices=[0, 1, 2, 3], - ), - neck=dict( - type='FPN', - in_channels=[128, 256, 512, 1024], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - roi_head=dict( - type='CascadeRoIHead', - num_stages=3, - stage_loss_weights=[1, 0.5, 0.25], - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=[ - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ], - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_head=dict( - type='FCNMaskHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), - # model training and testing settings - train_cfg = dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_across_levels=False, - nms_pre=2000, - nms_post=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=[ - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - 
neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.6, - neg_iou_thr=0.6, - min_pos_iou=0.6, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.7, - min_pos_iou=0.7, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False) - ]), - test_cfg = dict( - rpn=dict( - nms_across_levels=False, - nms_pre=1000, - nms_post=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5))) diff --git a/cv/classification/convnext/pytorch/object_detection/configs/_base_/models/mask_rcnn_convnext_fpn.py b/cv/classification/convnext/pytorch/object_detection/configs/_base_/models/mask_rcnn_convnext_fpn.py deleted file mode 100644 index a7775b5d9304ff815c6267e67e48bce5d85a1d41..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/configs/_base_/models/mask_rcnn_convnext_fpn.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -# model settings -model = dict( - type='MaskRCNN', - pretrained=None, - backbone=dict( - type='ConvNeXt', - in_chans=3, - depths=[3, 3, 9, 3], - dims=[96, 192, 384, 768], - drop_path_rate=0.2, - layer_scale_init_value=1e-6, - out_indices=[0, 1, 2, 3], - ), - neck=dict( - type='FPN', - in_channels=[128, 256, 512, 1024], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_head=dict( - type='FCNMaskHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), - # model training and testing 
settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5))) diff --git a/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_base_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in1k.py b/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_base_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in1k.py deleted file mode 100644 index 3e179080b786be822dbac38e56b66c5619a6ca0c..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_base_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in1k.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
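The config that follows fine-tunes with layer-wise learning-rate decay: parameters are grouped by depth and deeper groups receive larger LR multipliers, controlled by `paramwise_cfg` and the `LearningRateDecayOptimizerConstructor`. A rough sketch of the resulting scale schedule, assuming it mirrors the `LayerDecayValueAssigner` formula used by the classification training script (the exact bucketing lives in `mmcv_custom/layer_decay_optimizer_constructor.py`, which is not shown in this diff):

```
# Sketch only: per-bucket LR multipliers for the settings used below
# (decay_rate=0.8, num_layers=12), assuming the same formula as the
# classification script's LayerDecayValueAssigner.
decay_rate, num_layers = 0.8, 12
scales = [decay_rate ** (num_layers + 1 - i) for i in range(num_layers + 2)]
print(round(scales[0], 4))    # earliest bucket: 0.8**13 ~ 0.055 x base LR
print(scales[-1])             # last bucket (head): 1.0
```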
- - -_base_ = [ - '../_base_/models/cascade_mask_rcnn_convnext_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - in_chans=3, - depths=[3, 3, 27, 3], - dims=[128, 256, 512, 1024], - drop_path_rate=0.7, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - neck=dict(in_channels=[128, 256, 512, 1024]), - roi_head=dict( - bbox_head=[ - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)) - ])) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# augmentation strategy originates from DETR / Sparse RCNN -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='AutoAugment', - policies=[ - [ - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict(type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0002, betas=(0.9, 0.999), 
weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.8, - 'decay_type': 'layer_wise', - 'num_layers': 12}) -lr_config = dict(step=[27, 33]) -runner = dict(type='EpochBasedRunnerAmp', max_epochs=36) - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_base_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in22k.py b/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_base_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in22k.py deleted file mode 100644 index 3248dbe2b24be63227e895ec3248d2d36b303485..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_base_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in22k.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -_base_ = [ - '../_base_/models/cascade_mask_rcnn_convnext_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - in_chans=3, - depths=[3, 3, 27, 3], - dims=[128, 256, 512, 1024], - drop_path_rate=0.6, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - neck=dict(in_channels=[128, 256, 512, 1024]), - roi_head=dict( - bbox_head=[ - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)) - ])) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# augmentation strategy originates from DETR / Sparse RCNN -train_pipeline = [ - 
dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='AutoAugment', - policies=[ - [ - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict(type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.8, - 'decay_type': 'layer_wise', - 'num_layers': 12}) -lr_config = dict(step=[27, 33]) -runner = dict(type='EpochBasedRunnerAmp', max_epochs=36) - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_large_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in22k.py b/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_large_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in22k.py deleted file mode 100644 index 2d8c835f0ea03387a91ec45dd3ce5a8b172a1780..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_large_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in22k.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
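The ConvNeXt-L config below raises `drop_path_rate` to 0.7. As in `models/convnext.py`, that value is the stochastic-depth rate of the last block only; earlier blocks are interpolated linearly from 0. A quick sketch of the per-block rates this produces:

```
import torch

# Per-block stochastic-depth rates, computed exactly as in ConvNeXt.__init__
# (models/convnext.py): linear ramp from 0 to drop_path_rate over all blocks.
depths, drop_path_rate = [3, 3, 27, 3], 0.7
dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
print(round(dp_rates[0], 3), round(dp_rates[-1], 3))   # 0.0 ... 0.7 over 36 blocks
```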
- - -_base_ = [ - '../_base_/models/cascade_mask_rcnn_convnext_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - in_chans=3, - depths=[3, 3, 27, 3], - dims=[192, 384, 768, 1536], - drop_path_rate=0.7, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - neck=dict(in_channels=[192, 384, 768, 1536]), - roi_head=dict( - bbox_head=[ - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)) - ])) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# augmentation strategy originates from DETR / Sparse RCNN -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='AutoAugment', - policies=[ - [ - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict(type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0001, betas=(0.9, 0.999), 
weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.7, - 'decay_type': 'layer_wise', - 'num_layers': 12}) -lr_config = dict(step=[27, 33]) -runner = dict(type='EpochBasedRunnerAmp', max_epochs=36) - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=False, -) diff --git a/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in1k.py b/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in1k.py deleted file mode 100644 index c1eee63bcc24903fd21eb658add9bf7ce1a98c81..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_small_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in1k.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -_base_ = [ - '../_base_/models/cascade_mask_rcnn_convnext_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - in_chans=3, - depths=[3, 3, 27, 3], - dims=[96, 192, 384, 768], - drop_path_rate=0.6, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - neck=dict(in_channels=[96, 192, 384, 768]), - roi_head=dict( - bbox_head=[ - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)) - ])) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# augmentation strategy originates from DETR / Sparse RCNN -train_pipeline = [ - 
dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='AutoAugment', - policies=[ - [ - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict(type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0002, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.7, - 'decay_type': 'layer_wise', - 'num_layers': 12}) -lr_config = dict(step=[27, 33]) -runner = dict(type='EpochBasedRunnerAmp', max_epochs=36) - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_tiny_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in1k.py b/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_tiny_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in1k.py deleted file mode 100644 index 614fd702fa91257ec575a8d2f88c68cece1ec6a9..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_tiny_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in1k.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -_base_ = [ - '../_base_/models/cascade_mask_rcnn_convnext_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - in_chans=3, - depths=[3, 3, 9, 3], - dims=[96, 192, 384, 768], - drop_path_rate=0.4, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - neck=dict(in_channels=[96, 192, 384, 768]), - roi_head=dict( - bbox_head=[ - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)) - ])) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# augmentation strategy originates from DETR / Sparse RCNN -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='AutoAugment', - policies=[ - [ - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict(type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0002, betas=(0.9, 0.999), 
weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.7, - 'decay_type': 'layer_wise', - 'num_layers': 6}) -lr_config = dict(step=[27, 33]) -runner = dict(type='EpochBasedRunnerAmp', max_epochs=36) - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_xlarge_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in22k.py b/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_xlarge_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in22k.py deleted file mode 100644 index cbb4cb1353008ca14b7c58120029e11b4c9fcdbb..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/configs/convnext/cascade_mask_rcnn_convnext_xlarge_patch4_window7_mstrain_480-800_giou_4conv1f_adamw_3x_coco_in22k.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -_base_ = [ - '../_base_/models/cascade_mask_rcnn_convnext_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - in_chans=3, - depths=[3, 3, 27, 3], - dims=[256, 512, 1024, 2048], - drop_path_rate=0.8, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - neck=dict(in_channels=[256, 512, 1024, 2048]), - roi_head=dict( - bbox_head=[ - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)) - ])) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# augmentation strategy originates from DETR / Sparse RCNN -train_pipeline = [ - 
dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='AutoAugment', - policies=[ - [ - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict(type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline), samples_per_gpu=2) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.7, - 'decay_type': 'layer_wise', - 'num_layers': 12}) -lr_config = dict(step=[27, 33]) -runner = dict(type='EpochBasedRunnerAmp', max_epochs=36) - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=False, -) diff --git a/cv/classification/convnext/pytorch/object_detection/configs/convnext/mask_rcnn_convnext_tiny_patch4_window7_mstrain_480-800_adamw_3x_coco_in1k.py b/cv/classification/convnext/pytorch/object_detection/configs/convnext/mask_rcnn_convnext_tiny_patch4_window7_mstrain_480-800_adamw_3x_coco_in1k.py deleted file mode 100644 index 2ed11608225caa35e034655cd26d75d15fa57d62..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/configs/convnext/mask_rcnn_convnext_tiny_patch4_window7_mstrain_480-800_adamw_3x_coco_in1k.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -_base_ = [ - '../_base_/models/mask_rcnn_convnext_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - in_chans=3, - depths=[3, 3, 9, 3], - dims=[96, 192, 384, 768], - drop_path_rate=0.4, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - neck=dict(in_channels=[96, 192, 384, 768])) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# augmentation strategy originates from DETR / Sparse RCNN -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='AutoAugment', - policies=[ - [ - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict(type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict(type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.95, - 'decay_type': 'layer_wise', - 'num_layers': 6}) - -lr_config = dict(step=[27, 33]) -runner = dict(type='EpochBasedRunnerAmp', max_epochs=36) - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/cv/classification/convnext/pytorch/object_detection/mmcv_custom/__init__.py b/cv/classification/convnext/pytorch/object_detection/mmcv_custom/__init__.py deleted file mode 100644 index 7d7e6c46bbd7b7386bbbfab523f9b2c41c3b9a7d..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/mmcv_custom/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
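The `mmcv_custom` package that follows only takes effect if it is imported before the config is built: importing it executes the `register_module()` decorators, which is what lets `default_runtime.py` and the optimizer blocks above refer to these classes by string name. A minimal sketch of that wiring, assuming the training entry point has `mmcv_custom` on its path (as in the Swin detection codebase this is built on):

```
# Sketch (assumption: mmcv_custom is importable from the working directory):
# importing the package registers the custom hook and optimizer constructor
# with mmcv's registries, so configs can reference them purely by name.
import mmcv_custom  # noqa: F401

log_config = dict(interval=50, hooks=[dict(type='CustomizedTextLoggerHook')])
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor',
                 type='AdamW', lr=0.0002, weight_decay=0.05,
                 paramwise_cfg=dict(decay_rate=0.7, decay_type='layer_wise', num_layers=6))
```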
- - -# -*- coding: utf-8 -*- - -from .checkpoint import load_checkpoint -from .layer_decay_optimizer_constructor import LearningRateDecayOptimizerConstructor -from .customized_text import CustomizedTextLoggerHook - -__all__ = ['load_checkpoint', 'LearningRateDecayOptimizerConstructor', 'CustomizedTextLoggerHook'] diff --git a/cv/classification/convnext/pytorch/object_detection/mmcv_custom/customized_text.py b/cv/classification/convnext/pytorch/object_detection/mmcv_custom/customized_text.py deleted file mode 100644 index bfbc9633f9587a6815d2eceb030e5b6a8944e1d2..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/mmcv_custom/customized_text.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -import datetime -from collections import OrderedDict - -import torch - -import mmcv -from mmcv.runner import HOOKS -from mmcv.runner import TextLoggerHook - - -@HOOKS.register_module() -class CustomizedTextLoggerHook(TextLoggerHook): - """Customized Text Logger hook. - - This logger prints out both lr and layer_0_lr. - - """ - - def _log_info(self, log_dict, runner): - # print exp name for users to distinguish experiments - # at every ``interval_exp_name`` iterations and the end of each epoch - if runner.meta is not None and 'exp_name' in runner.meta: - if (self.every_n_iters(runner, self.interval_exp_name)) or ( - self.by_epoch and self.end_of_epoch(runner)): - exp_info = f'Exp name: {runner.meta["exp_name"]}' - runner.logger.info(exp_info) - - if log_dict['mode'] == 'train': - lr_str = {} - for lr_type in ['lr', 'layer_0_lr']: - if isinstance(log_dict[lr_type], dict): - lr_str[lr_type] = [] - for k, val in log_dict[lr_type].items(): - lr_str.append(f'{lr_type}_{k}: {val:.3e}') - lr_str[lr_type] = ' '.join(lr_str) - else: - lr_str[lr_type] = f'{lr_type}: {log_dict[lr_type]:.3e}' - - # by epoch: Epoch [4][100/1000] - # by iter: Iter [100/100000] - if self.by_epoch: - log_str = f'Epoch [{log_dict["epoch"]}]' \ - f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t' - else: - log_str = f'Iter [{log_dict["iter"]}/{runner.max_iters}]\t' - log_str += f'{lr_str["lr"]}, {lr_str["layer_0_lr"]}, ' - - if 'time' in log_dict.keys(): - self.time_sec_tot += (log_dict['time'] * self.interval) - time_sec_avg = self.time_sec_tot / ( - runner.iter - self.start_iter + 1) - eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1) - eta_str = str(datetime.timedelta(seconds=int(eta_sec))) - log_str += f'eta: {eta_str}, ' - log_str += f'time: {log_dict["time"]:.3f}, ' \ - f'data_time: {log_dict["data_time"]:.3f}, ' - # statistic memory - if torch.cuda.is_available(): - log_str += f'memory: {log_dict["memory"]}, ' - else: - # val/test time - # here 1000 is the length of the val dataloader - # by epoch: Epoch[val] [4][1000] - # by iter: Iter[val] [1000] - if self.by_epoch: - log_str = f'Epoch({log_dict["mode"]}) ' \ - f'[{log_dict["epoch"]}][{log_dict["iter"]}]\t' - else: - log_str = f'Iter({log_dict["mode"]}) [{log_dict["iter"]}]\t' - - log_items = [] - for name, val in log_dict.items(): - # TODO: resolve this hack - # these items have been in log_str - if name in [ - 'mode', 'Epoch', 'iter', 'lr', 'layer_0_lr', 'time', 'data_time', - 'memory', 'epoch' - ]: - continue - if isinstance(val, float): - val = f'{val:.4f}' - log_items.append(f'{name}: {val}') - log_str += ', '.join(log_items) 
- - runner.logger.info(log_str) - - - def log(self, runner): - if 'eval_iter_num' in runner.log_buffer.output: - # this doesn't modify runner.iter and is regardless of by_epoch - cur_iter = runner.log_buffer.output.pop('eval_iter_num') - else: - cur_iter = self.get_iter(runner, inner_iter=True) - - log_dict = OrderedDict( - mode=self.get_mode(runner), - epoch=self.get_epoch(runner), - iter=cur_iter) - - # record lr and layer_0_lr - cur_lr = runner.current_lr() - if isinstance(cur_lr, list): - log_dict['layer_0_lr'] = min(cur_lr) - log_dict['lr'] = max(cur_lr) - else: - assert isinstance(cur_lr, dict) - log_dict['lr'], log_dict['layer_0_lr'] = {}, {} - for k, lr_ in cur_lr.items(): - assert isinstance(lr_, list) - log_dict['layer_0_lr'].update({k: min(lr_)}) - log_dict['lr'].update({k: max(lr_)}) - - if 'time' in runner.log_buffer.output: - # statistic memory - if torch.cuda.is_available(): - log_dict['memory'] = self._get_max_memory(runner) - - log_dict = dict(log_dict, **runner.log_buffer.output) - - self._log_info(log_dict, runner) - self._dump_log(log_dict, runner) - return log_dict diff --git a/cv/classification/convnext/pytorch/object_detection/mmcv_custom/layer_decay_optimizer_constructor.py b/cv/classification/convnext/pytorch/object_detection/mmcv_custom/layer_decay_optimizer_constructor.py deleted file mode 100644 index 8fd5869e2e5eb80e9ca08ae1d24f4a79b7f7545c..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/mmcv_custom/layer_decay_optimizer_constructor.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -import json -from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor -from mmcv.runner import get_dist_info - - -def get_num_layer_layer_wise(var_name, num_max_layer=12): - - if var_name in ("backbone.cls_token", "backbone.mask_token", "backbone.pos_embed"): - return 0 - elif var_name.startswith("backbone.downsample_layers"): - stage_id = int(var_name.split('.')[2]) - if stage_id == 0: - layer_id = 0 - elif stage_id == 1: - layer_id = 2 - elif stage_id == 2: - layer_id = 3 - elif stage_id == 3: - layer_id = num_max_layer - return layer_id - elif var_name.startswith("backbone.stages"): - stage_id = int(var_name.split('.')[2]) - block_id = int(var_name.split('.')[3]) - if stage_id == 0: - layer_id = 1 - elif stage_id == 1: - layer_id = 2 - elif stage_id == 2: - layer_id = 3 + block_id // 3 - elif stage_id == 3: - layer_id = num_max_layer - return layer_id - else: - return num_max_layer + 1 - - -def get_num_layer_stage_wise(var_name, num_max_layer): - if var_name in ("backbone.cls_token", "backbone.mask_token", "backbone.pos_embed"): - return 0 - elif var_name.startswith("backbone.downsample_layers"): - return 0 - elif var_name.startswith("backbone.stages"): - stage_id = int(var_name.split('.')[2]) - return stage_id + 1 - else: - return num_max_layer - 1 - - -@OPTIMIZER_BUILDERS.register_module() -class LearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor): - def add_params(self, params, module, prefix='', is_dcn_module=None): - """Add all parameters of module to the params list. - The parameters of the given module will be added to the list of param - groups, with specific rules defined by paramwise_cfg. - Args: - params (list[dict]): A list of param groups, it will be modified - in place. 
- module (nn.Module): The module to be added. - prefix (str): The prefix of the module - is_dcn_module (int|float|None): If the current module is a - submodule of DCN, `is_dcn_module` will be passed to - control conv_offset layer's learning rate. Defaults to None. - """ - parameter_groups = {} - print(self.paramwise_cfg) - num_layers = self.paramwise_cfg.get('num_layers') + 2 - decay_rate = self.paramwise_cfg.get('decay_rate') - decay_type = self.paramwise_cfg.get('decay_type', "layer_wise") - print("Build LearningRateDecayOptimizerConstructor %s %f - %d" % (decay_type, decay_rate, num_layers)) - weight_decay = self.base_wd - - for name, param in module.named_parameters(): - if not param.requires_grad: - continue # frozen weights - if len(param.shape) == 1 or name.endswith(".bias") or name in ('pos_embed', 'cls_token'): - group_name = "no_decay" - this_weight_decay = 0. - else: - group_name = "decay" - this_weight_decay = weight_decay - - if decay_type == "layer_wise": - layer_id = get_num_layer_layer_wise(name, self.paramwise_cfg.get('num_layers')) - elif decay_type == "stage_wise": - layer_id = get_num_layer_stage_wise(name, num_layers) - - group_name = "layer_%d_%s" % (layer_id, group_name) - - if group_name not in parameter_groups: - scale = decay_rate ** (num_layers - layer_id - 1) - - parameter_groups[group_name] = { - "weight_decay": this_weight_decay, - "params": [], - "param_names": [], - "lr_scale": scale, - "group_name": group_name, - "lr": scale * self.base_lr, - } - - parameter_groups[group_name]["params"].append(param) - parameter_groups[group_name]["param_names"].append(name) - rank, _ = get_dist_info() - if rank == 0: - to_display = {} - for key in parameter_groups: - to_display[key] = { - "param_names": parameter_groups[key]["param_names"], - "lr_scale": parameter_groups[key]["lr_scale"], - "lr": parameter_groups[key]["lr"], - "weight_decay": parameter_groups[key]["weight_decay"], - } - print("Param groups = %s" % json.dumps(to_display, indent=2)) - - params.extend(parameter_groups.values()) diff --git a/cv/classification/convnext/pytorch/object_detection/mmcv_custom/runner/checkpoint.py b/cv/classification/convnext/pytorch/object_detection/mmcv_custom/runner/checkpoint.py deleted file mode 100644 index e2fb383a75242a5ed3a83c78a7f1c93ab5a28627..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/mmcv_custom/runner/checkpoint.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) Open-MMLab. All rights reserved. -import os.path as osp -import time -from tempfile import TemporaryDirectory - -import torch -from torch.optim import Optimizer - -import mmcv -from mmcv.parallel import is_module_wrapper -from mmcv.runner.checkpoint import weights_to_cpu, get_state_dict - -try: - import apex -except: - print('apex is not installed') - - -def save_checkpoint(model, filename, optimizer=None, meta=None): - """Save checkpoint to file. - - The checkpoint will have 4 fields: ``meta``, ``state_dict`` and - ``optimizer``, ``amp``. By default ``meta`` will contain version - and time info. - - Args: - model (Module): Module whose params are to be saved. - filename (str): Checkpoint filename. - optimizer (:obj:`Optimizer`, optional): Optimizer to be saved. - meta (dict, optional): Metadata to be saved in checkpoint. 
- """ - if meta is None: - meta = {} - elif not isinstance(meta, dict): - raise TypeError(f'meta must be a dict or None, but got {type(meta)}') - meta.update(mmcv_version=mmcv.__version__, time=time.asctime()) - - if is_module_wrapper(model): - model = model.module - - if hasattr(model, 'CLASSES') and model.CLASSES is not None: - # save class name to the meta - meta.update(CLASSES=model.CLASSES) - - checkpoint = { - 'meta': meta, - 'state_dict': weights_to_cpu(get_state_dict(model)) - } - # save optimizer state dict in the checkpoint - if isinstance(optimizer, Optimizer): - checkpoint['optimizer'] = optimizer.state_dict() - elif isinstance(optimizer, dict): - checkpoint['optimizer'] = {} - for name, optim in optimizer.items(): - checkpoint['optimizer'][name] = optim.state_dict() - - # save amp state dict in the checkpoint - # checkpoint['amp'] = apex.amp.state_dict() - - if filename.startswith('pavi://'): - try: - from pavi import modelcloud - from pavi.exception import NodeNotFoundError - except ImportError: - raise ImportError( - 'Please install pavi to load checkpoint from modelcloud.') - model_path = filename[7:] - root = modelcloud.Folder() - model_dir, model_name = osp.split(model_path) - try: - model = modelcloud.get(model_dir) - except NodeNotFoundError: - model = root.create_training_model(model_dir) - with TemporaryDirectory() as tmp_dir: - checkpoint_file = osp.join(tmp_dir, model_name) - with open(checkpoint_file, 'wb') as f: - torch.save(checkpoint, f) - f.flush() - model.create_file(checkpoint_file, name=model_name) - else: - mmcv.mkdir_or_exist(osp.dirname(filename)) - # immediately flush buffer - with open(filename, 'wb') as f: - torch.save(checkpoint, f) - f.flush() diff --git a/cv/classification/convnext/pytorch/object_detection/mmdet/models/backbones/__init__.py b/cv/classification/convnext/pytorch/object_detection/mmdet/models/backbones/__init__.py deleted file mode 100644 index 58d4507f42819976610bb649c4d850bd3b3851b8..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/mmdet/models/backbones/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -from .darknet import Darknet -from .detectors_resnet import DetectoRS_ResNet -from .detectors_resnext import DetectoRS_ResNeXt -from .hourglass import HourglassNet -from .hrnet import HRNet -from .regnet import RegNet -from .res2net import Res2Net -from .resnest import ResNeSt -from .resnet import ResNet, ResNetV1d -from .resnext import ResNeXt -from .ssd_vgg import SSDVGG -from .trident_resnet import TridentResNet -from .swin_transformer import SwinTransformer -from .convnext import ConvNeXt - -__all__ = [ - 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'Res2Net', - 'HourglassNet', 'DetectoRS_ResNet', 'DetectoRS_ResNeXt', 'Darknet', - 'ResNeSt', 'TridentResNet', 'SwinTransformer', 'ConvNeXt' -] diff --git a/cv/classification/convnext/pytorch/object_detection/mmdet/models/backbones/convnext.py b/cv/classification/convnext/pytorch/object_detection/mmdet/models/backbones/convnext.py deleted file mode 100644 index 06ed077a347a6fff9e9242beaa9eb3c2ba301701..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/object_detection/mmdet/models/backbones/convnext.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -from functools import partial -import torch -import torch.nn as nn -import torch.nn.functional as F -from timm.models.layers import trunc_normal_, DropPath - -from mmcv_custom import load_checkpoint -from mmdet.utils import get_root_logger -from ..builder import BACKBONES - -class Block(nn.Module): - r""" ConvNeXt Block. There are two equivalent implementations: - (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) - (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back - We use (2) as we find it slightly faster in PyTorch - - Args: - dim (int): Number of input channels. - drop_path (float): Stochastic depth rate. Default: 0.0 - layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. - """ - def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6): - super().__init__() - self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv - self.norm = LayerNorm(dim, eps=1e-6) - self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers - self.act = nn.GELU() - self.pwconv2 = nn.Linear(4 * dim, dim) - self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)), - requires_grad=True) if layer_scale_init_value > 0 else None - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - - def forward(self, x): - input = x - x = self.dwconv(x) - x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) - x = self.norm(x) - x = self.pwconv1(x) - x = self.act(x) - x = self.pwconv2(x) - if self.gamma is not None: - x = self.gamma * x - x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) - - x = input + self.drop_path(x) - return x - -@BACKBONES.register_module() -class ConvNeXt(nn.Module): - r""" ConvNeXt - A PyTorch impl of : `A ConvNet for the 2020s` - - https://arxiv.org/pdf/2201.03545.pdf - - Args: - in_chans (int): Number of input image channels. Default: 3 - num_classes (int): Number of classes for classification head. Default: 1000 - depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3] - dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768] - drop_path_rate (float): Stochastic depth rate. Default: 0. - layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. - head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1. 
- """ - def __init__(self, in_chans=3, depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], - drop_path_rate=0., layer_scale_init_value=1e-6, out_indices=[0, 1, 2, 3], - ): - super().__init__() - - self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers - stem = nn.Sequential( - nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4), - LayerNorm(dims[0], eps=1e-6, data_format="channels_first") - ) - self.downsample_layers.append(stem) - for i in range(3): - downsample_layer = nn.Sequential( - LayerNorm(dims[i], eps=1e-6, data_format="channels_first"), - nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2), - ) - self.downsample_layers.append(downsample_layer) - - self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks - dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] - cur = 0 - for i in range(4): - stage = nn.Sequential( - *[Block(dim=dims[i], drop_path=dp_rates[cur + j], - layer_scale_init_value=layer_scale_init_value) for j in range(depths[i])] - ) - self.stages.append(stage) - cur += depths[i] - - self.out_indices = out_indices - - norm_layer = partial(LayerNorm, eps=1e-6, data_format="channels_first") - for i_layer in range(4): - layer = norm_layer(dims[i_layer]) - layer_name = f'norm{i_layer}' - self.add_module(layer_name, layer) - - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, (nn.Conv2d, nn.Linear)): - trunc_normal_(m.weight, std=.02) - nn.init.constant_(m.bias, 0) - - def init_weights(self, pretrained=None): - """Initialize the weights in backbone. - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - - def _init_weights(m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - if isinstance(pretrained, str): - self.apply(_init_weights) - logger = get_root_logger() - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - self.apply(_init_weights) - else: - raise TypeError('pretrained must be a str or None') - - def forward_features(self, x): - outs = [] - for i in range(4): - x = self.downsample_layers[i](x) - x = self.stages[i](x) - if i in self.out_indices: - norm_layer = getattr(self, f'norm{i}') - x_out = norm_layer(x) - outs.append(x_out) - - return tuple(outs) - - def forward(self, x): - x = self.forward_features(x) - return x - -class LayerNorm(nn.Module): - r""" LayerNorm that supports two data formats: channels_last (default) or channels_first. - The ordering of the dimensions in the inputs. channels_last corresponds to inputs with - shape (batch_size, height, width, channels) while channels_first corresponds to inputs - with shape (batch_size, channels, height, width). 
- """ - def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): - super().__init__() - self.weight = nn.Parameter(torch.ones(normalized_shape)) - self.bias = nn.Parameter(torch.zeros(normalized_shape)) - self.eps = eps - self.data_format = data_format - if self.data_format not in ["channels_last", "channels_first"]: - raise NotImplementedError - self.normalized_shape = (normalized_shape, ) - - def forward(self, x): - if self.data_format == "channels_last": - return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) - elif self.data_format == "channels_first": - u = x.mean(1, keepdim=True) - s = (x - u).pow(2).mean(1, keepdim=True) - x = (x - u) / torch.sqrt(s + self.eps) - x = self.weight[:, None, None] * x + self.bias[:, None, None] - return x diff --git a/cv/classification/convnext/pytorch/optim_factory.py b/cv/classification/convnext/pytorch/optim_factory.py deleted file mode 100644 index ec5c003c3d8f66583f3971a6289a560c61d70111..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/optim_factory.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -import torch -from torch import optim as optim - -from timm.optim.adafactor import Adafactor -from timm.optim.adahessian import Adahessian -from timm.optim.adamp import AdamP -from timm.optim.lookahead import Lookahead -from timm.optim.nadam import Nadam -from timm.optim.novograd import NovoGrad -from timm.optim.nvnovograd import NvNovoGrad -from timm.optim.radam import RAdam -from timm.optim.rmsprop_tf import RMSpropTF -from timm.optim.sgdp import SGDP - -import json - -try: - from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD - has_apex = True -except ImportError: - has_apex = False - - -def get_num_layer_for_convnext(var_name): - """ - Divide [3, 3, 27, 3] layers into 12 groups; each group is three - consecutive blocks, including possible neighboring downsample layers; - adapted from https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py - """ - num_max_layer = 12 - if var_name.startswith("downsample_layers"): - stage_id = int(var_name.split('.')[1]) - if stage_id == 0: - layer_id = 0 - elif stage_id == 1 or stage_id == 2: - layer_id = stage_id + 1 - elif stage_id == 3: - layer_id = 12 - return layer_id - - elif var_name.startswith("stages"): - stage_id = int(var_name.split('.')[1]) - block_id = int(var_name.split('.')[2]) - if stage_id == 0 or stage_id == 1: - layer_id = stage_id + 1 - elif stage_id == 2: - layer_id = 3 + block_id // 3 - elif stage_id == 3: - layer_id = 12 - return layer_id - else: - return num_max_layer + 1 - -class LayerDecayValueAssigner(object): - def __init__(self, values): - self.values = values - - def get_scale(self, layer_id): - return self.values[layer_id] - - def get_layer_id(self, var_name): - return get_num_layer_for_convnext(var_name) - - -def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None): - parameter_group_names = {} - parameter_group_vars = {} - - for name, param in model.named_parameters(): - if not param.requires_grad: - continue # frozen weights - if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: - group_name = "no_decay" - this_weight_decay = 0. 
- else: - group_name = "decay" - this_weight_decay = weight_decay - if get_num_layer is not None: - layer_id = get_num_layer(name) - group_name = "layer_%d_%s" % (layer_id, group_name) - else: - layer_id = None - - if group_name not in parameter_group_names: - if get_layer_scale is not None: - scale = get_layer_scale(layer_id) - else: - scale = 1. - - parameter_group_names[group_name] = { - "weight_decay": this_weight_decay, - "params": [], - "lr_scale": scale - } - parameter_group_vars[group_name] = { - "weight_decay": this_weight_decay, - "params": [], - "lr_scale": scale - } - - parameter_group_vars[group_name]["params"].append(param) - parameter_group_names[group_name]["params"].append(name) - print("Param groups = %s" % json.dumps(parameter_group_names, indent=2)) - return list(parameter_group_vars.values()) - - -def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None): - opt_lower = args.opt.lower() - weight_decay = args.weight_decay - # if weight_decay and filter_bias_and_bn: - if filter_bias_and_bn: - skip = {} - if skip_list is not None: - skip = skip_list - elif hasattr(model, 'no_weight_decay'): - skip = model.no_weight_decay() - parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale) - weight_decay = 0. - else: - parameters = model.parameters() - - if 'fused' in opt_lower: - assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' - - opt_args = dict(lr=args.lr, weight_decay=weight_decay) - if hasattr(args, 'opt_eps') and args.opt_eps is not None: - opt_args['eps'] = args.opt_eps - if hasattr(args, 'opt_betas') and args.opt_betas is not None: - opt_args['betas'] = args.opt_betas - - opt_split = opt_lower.split('_') - opt_lower = opt_split[-1] - if opt_lower == 'sgd' or opt_lower == 'nesterov': - opt_args.pop('eps', None) - optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args) - elif opt_lower == 'momentum': - opt_args.pop('eps', None) - optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args) - elif opt_lower == 'adam': - optimizer = optim.Adam(parameters, **opt_args) - elif opt_lower == 'adamw': - optimizer = optim.AdamW(parameters, **opt_args) - elif opt_lower == 'nadam': - optimizer = Nadam(parameters, **opt_args) - elif opt_lower == 'radam': - optimizer = RAdam(parameters, **opt_args) - elif opt_lower == 'adamp': - optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args) - elif opt_lower == 'sgdp': - optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args) - elif opt_lower == 'adadelta': - optimizer = optim.Adadelta(parameters, **opt_args) - elif opt_lower == 'adafactor': - if not args.lr: - opt_args['lr'] = None - optimizer = Adafactor(parameters, **opt_args) - elif opt_lower == 'adahessian': - optimizer = Adahessian(parameters, **opt_args) - elif opt_lower == 'rmsprop': - optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args) - elif opt_lower == 'rmsproptf': - optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args) - elif opt_lower == 'novograd': - optimizer = NovoGrad(parameters, **opt_args) - elif opt_lower == 'nvnovograd': - optimizer = NvNovoGrad(parameters, **opt_args) - elif opt_lower == 'fusedsgd': - opt_args.pop('eps', None) - optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args) - elif opt_lower == 'fusedmomentum': - opt_args.pop('eps', None) - 
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args) - elif opt_lower == 'fusedadam': - optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args) - elif opt_lower == 'fusedadamw': - optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args) - elif opt_lower == 'fusedlamb': - optimizer = FusedLAMB(parameters, **opt_args) - elif opt_lower == 'fusednovograd': - opt_args.setdefault('betas', (0.95, 0.98)) - optimizer = FusedNovoGrad(parameters, **opt_args) - else: - assert False and "Invalid optimizer" - - if len(opt_split) > 1: - if opt_split[0] == 'lookahead': - optimizer = Lookahead(optimizer) - - return optimizer diff --git a/cv/classification/convnext/pytorch/run_with_submitit.py b/cv/classification/convnext/pytorch/run_with_submitit.py deleted file mode 100644 index 03c2e5fac0bcec25addec82bb923aa8799384ca3..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/run_with_submitit.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -import argparse -import os -import uuid -from pathlib import Path - -import main as classification -import submitit - -def parse_args(): - classification_parser = classification.get_args_parser() - parser = argparse.ArgumentParser("Submitit for ConvNeXt", parents=[classification_parser]) - parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node") - parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request") - parser.add_argument("--timeout", default=72, type=int, help="Duration of the job, in hours") - parser.add_argument("--job_name", default="convnext", type=str, help="Job name") - parser.add_argument("--job_dir", default="", type=str, help="Job directory; leave empty for default") - parser.add_argument("--partition", default="learnlab", type=str, help="Partition where to submit") - parser.add_argument("--use_volta32", action='store_true', default=True, help="Big models? Use this") - parser.add_argument('--comment', default="", type=str, - help='Comment to pass to scheduler, e.g. priority message') - return parser.parse_args() - -def get_shared_folder() -> Path: - user = os.getenv("USER") - if Path("/checkpoint/").is_dir(): - p = Path(f"/checkpoint/{user}/convnext") - p.mkdir(exist_ok=True) - return p - raise RuntimeError("No shared folder available") - -def get_init_file(): - # Init file must not exist, but it's parent dir must exist. 
- os.makedirs(str(get_shared_folder()), exist_ok=True) - init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init" - if init_file.exists(): - os.remove(str(init_file)) - return init_file - -class Trainer(object): - def __init__(self, args): - self.args = args - - def __call__(self): - import main as classification - - self._setup_gpu_args() - classification.main(self.args) - - def checkpoint(self): - import os - import submitit - - self.args.dist_url = get_init_file().as_uri() - self.args.auto_resume = True - print("Requeuing ", self.args) - empty_trainer = type(self)(self.args) - return submitit.helpers.DelayedSubmission(empty_trainer) - - def _setup_gpu_args(self): - import submitit - from pathlib import Path - - job_env = submitit.JobEnvironment() - self.args.output_dir = Path(self.args.job_dir) - self.args.gpu = job_env.local_rank - self.args.rank = job_env.global_rank - self.args.world_size = job_env.num_tasks - print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}") - - -def main(): - args = parse_args() - - if args.job_dir == "": - args.job_dir = get_shared_folder() / "%j" - - executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30) - - num_gpus_per_node = args.ngpus - nodes = args.nodes - timeout_min = args.timeout * 60 - - partition = args.partition - kwargs = {} - if args.use_volta32: - kwargs['slurm_constraint'] = 'volta32gb' - if args.comment: - kwargs['slurm_comment'] = args.comment - - executor.update_parameters( - mem_gb=40 * num_gpus_per_node, - gpus_per_node=num_gpus_per_node, - tasks_per_node=num_gpus_per_node, # one task per GPU - cpus_per_task=10, - nodes=nodes, - timeout_min=timeout_min, # max is 60 * 72 - # Below are cluster dependent parameters - slurm_partition=partition, - slurm_signal_delay_s=120, - **kwargs - ) - - executor.update_parameters(name=args.job_name) - - args.dist_url = get_init_file().as_uri() - args.output_dir = args.job_dir - - trainer = Trainer(args) - job = executor.submit(trainer) - - print("Submitted job_id:", job.job_id) - -if __name__ == "__main__": - main() diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/README.md b/cv/classification/convnext/pytorch/semantic_segmentation/README.md deleted file mode 100644 index e7585765e5dc767e768fbc1ea4137e6a40b6ae95..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# ADE20k Semantic segmentation with ConvNeXt - -## Getting started - -### [Update: ConvNeXt now in official OpenMMLab/MMSegmentation] -If you want to try ConvNeXt with the latest MMSegmentation toolbox (recommended), please follow the instructions in -https://github.com/open-mmlab/mmsegmentation/tree/master/configs/convnext - -Alternatively, if you want to run ConvNeXt in the original codebase, follow instructions below: - -We add ConvNeXt model and config files to the semantic_segmentation folder of [BeiT](https://github.com/microsoft/unilm/tree/f8f3df80c65eb5e5fc6d6d3c9bd3137621795d1e/beit/semantic_segmentation). -Our code has been tested with commit `8b57ed1`. Please refer to [README.md](https://github.com/microsoft/unilm/tree/f8f3df80c65eb5e5fc6d6d3c9bd3137621795d1e/beit/semantic_segmentation/README.md) for installation and dataset preparation instructions. 
- -## Results and Fine-tuned Models - -| name | Pretrained Model | Method | Crop Size | Lr Schd | mIoU | mIoU (ms+flip) | #params | FLOPs | Fine-tuned Model | -|:---:|:---:|:---:|:---:| :---:|:---:|:---:|:---:| :---:|:---:| -| ConvNeXt-T | [ImageNet-1K](https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224.pth) | UPerNet | 512x512 | 160K | 46.0 | 46.7 | 60M | 939G | [model](https://dl.fbaipublicfiles.com/convnext/ade20k/upernet_convnext_tiny_1k_512x512.pth) | -| ConvNeXt-S | [ImageNet-1K](https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224.pth) | UPerNet | 512x512 | 160K | 48.7 | 49.6 | 82M | 1027G | [model](https://dl.fbaipublicfiles.com/convnext/ade20k/upernet_convnext_small_1k_512x512.pth) | -| ConvNeXt-B | [ImageNet-1K](https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224.pth) | UPerNet | 512x512 | 160K | 49.1 | 49.9 | 122M | 1170G | [model](https://dl.fbaipublicfiles.com/convnext/ade20k/upernet_convnext_base_1k_512x512.pth) | -| ConvNeXt-B | [ImageNet-22K](https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth) | UPerNet | 640x640 | 160K | 52.6 | 53.1 | 122M | 1828G | [model](https://dl.fbaipublicfiles.com/convnext/ade20k/upernet_convnext_base_22k_640x640.pth) | -| ConvNeXt-L | [ImageNet-22K](https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth) | UPerNet | 640x640 | 160K | 53.2 | 53.7 | 235M | 2458G | [model](https://dl.fbaipublicfiles.com/convnext/ade20k/upernet_convnext_large_22k_640x640.pth) | -| ConvNeXt-XL | [ImageNet-22K](https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth) | UPerNet | 640x640 | 160K | 53.6 | 54.0 | 391M | 3335G | [model](https://dl.fbaipublicfiles.com/convnext/ade20k/upernet_convnext_xlarge_22k_640x640.pth) | - -### Training -Note: Please add `from backbone import convnext` to tools/train.py. - -Command format: -``` -tools/dist_train.sh --work-dir --seed 0 --deterministic --options model.pretrained= -``` - -For example, using a `ConvNeXt-T` backbone with UperNet: -```bash -bash tools/dist_train.sh \ - configs/convnext/upernet_convnext_tiny_512_160k_ade20k_ms.py 8 \ - --work-dir /path/to/save --seed 0 --deterministic \ - --options model.pretrained=https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224.pth -``` - -More config files can be found at [`configs/convnext`](configs/convnext). - - -## Evaluation -Note: Please add `from backbone import convnext` to tools/test.py. 
- -Command format for multi-scale testing: -``` -tools/dist_test.sh --eval mIoU --aug-test -``` - -For example, evaluate a `ConvNeXt-T` backbone with UperNet: -```bash -bash tools/dist_test.sh configs/convnext/upernet_convnext_tiny_512_160k_ade20k_ms.py \ - https://dl.fbaipublicfiles.com/convnext/ade20k/upernet_convnext_tiny_1k_512x512.pth 4 --eval mIoU --aug-test -``` - -Command format for single-scale testing: -``` -tools/dist_test.sh --eval mIoU -``` - -For example, evaluate a `ConvNeXt-T` backbone with UperNet: -```bash -bash tools/dist_test.sh configs/convnext/upernet_convnext_tiny_512_160k_ade20k_ss.py \ - https://dl.fbaipublicfiles.com/convnext/ade20k/upernet_convnext_tiny_1k_512x512.pth 4 --eval mIoU -``` - -## Acknowledgment - -This code is built using [mmsegmentation](https://github.com/open-mmlab/mmsegmentation), [timm](https://github.com/rwightman/pytorch-image-models) libraries, and [BeiT](https://github.com/microsoft/unilm/tree/f8f3df80c65eb5e5fc6d6d3c9bd3137621795d1e/beit), [Swin Transformer](https://github.com/microsoft/Swin-Transformer), [XCiT](https://github.com/facebookresearch/xcit), [SETR](https://github.com/fudan-zvg/SETR) repositories. diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/backbone/convnext.py b/cv/classification/convnext/pytorch/semantic_segmentation/backbone/convnext.py deleted file mode 100644 index b6737e2aaa3a89c51ca0ea5821b9e38de637f59f..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/backbone/convnext.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -from functools import partial - -import torch -import torch.nn as nn -import torch.nn.functional as F -from timm.models.layers import trunc_normal_, DropPath - -from mmcv_custom import load_checkpoint -from mmseg.utils import get_root_logger -from mmseg.models.builder import BACKBONES - - -class Block(nn.Module): - r""" ConvNeXt Block. There are two equivalent implementations: - (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) - (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back - We use (2) as we find it slightly faster in PyTorch - - Args: - dim (int): Number of input channels. - drop_path (float): Stochastic depth rate. Default: 0.0 - layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. - """ - def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6): - super().__init__() - self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv - self.norm = LayerNorm(dim, eps=1e-6) - self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers - self.act = nn.GELU() - self.pwconv2 = nn.Linear(4 * dim, dim) - self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)), - requires_grad=True) if layer_scale_init_value > 0 else None - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - - def forward(self, x): - input = x - x = self.dwconv(x) - x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) - x = self.norm(x) - x = self.pwconv1(x) - x = self.act(x) - x = self.pwconv2(x) - if self.gamma is not None: - x = self.gamma * x - x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) - - x = input + self.drop_path(x) - return x - -@BACKBONES.register_module() -class ConvNeXt(nn.Module): - r""" ConvNeXt - A PyTorch impl of : `A ConvNet for the 2020s` - - https://arxiv.org/pdf/2201.03545.pdf - - Args: - in_chans (int): Number of input image channels. Default: 3 - num_classes (int): Number of classes for classification head. Default: 1000 - depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3] - dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768] - drop_path_rate (float): Stochastic depth rate. Default: 0. - layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. - head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1. - """ - def __init__(self, in_chans=3, depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], - drop_path_rate=0., layer_scale_init_value=1e-6, out_indices=[0, 1, 2, 3], - ): - super().__init__() - - self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers - stem = nn.Sequential( - nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4), - LayerNorm(dims[0], eps=1e-6, data_format="channels_first") - ) - self.downsample_layers.append(stem) - for i in range(3): - downsample_layer = nn.Sequential( - LayerNorm(dims[i], eps=1e-6, data_format="channels_first"), - nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2), - ) - self.downsample_layers.append(downsample_layer) - - self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks - dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] - cur = 0 - for i in range(4): - stage = nn.Sequential( - *[Block(dim=dims[i], drop_path=dp_rates[cur + j], - layer_scale_init_value=layer_scale_init_value) for j in range(depths[i])] - ) - self.stages.append(stage) - cur += depths[i] - - self.out_indices = out_indices - - norm_layer = partial(LayerNorm, eps=1e-6, data_format="channels_first") - for i_layer in range(4): - layer = norm_layer(dims[i_layer]) - layer_name = f'norm{i_layer}' - self.add_module(layer_name, layer) - - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, (nn.Conv2d, nn.Linear)): - trunc_normal_(m.weight, std=.02) - nn.init.constant_(m.bias, 0) - - def init_weights(self, pretrained=None): - """Initialize the weights in backbone. - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. 
- """ - - def _init_weights(m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - if isinstance(pretrained, str): - self.apply(_init_weights) - logger = get_root_logger() - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - self.apply(_init_weights) - else: - raise TypeError('pretrained must be a str or None') - - def forward_features(self, x): - outs = [] - for i in range(4): - x = self.downsample_layers[i](x) - x = self.stages[i](x) - if i in self.out_indices: - norm_layer = getattr(self, f'norm{i}') - x_out = norm_layer(x) - outs.append(x_out) - - return tuple(outs) - - def forward(self, x): - x = self.forward_features(x) - return x - -class LayerNorm(nn.Module): - r""" LayerNorm that supports two data formats: channels_last (default) or channels_first. - The ordering of the dimensions in the inputs. channels_last corresponds to inputs with - shape (batch_size, height, width, channels) while channels_first corresponds to inputs - with shape (batch_size, channels, height, width). - """ - def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): - super().__init__() - self.weight = nn.Parameter(torch.ones(normalized_shape)) - self.bias = nn.Parameter(torch.zeros(normalized_shape)) - self.eps = eps - self.data_format = data_format - if self.data_format not in ["channels_last", "channels_first"]: - raise NotImplementedError - self.normalized_shape = (normalized_shape, ) - - def forward(self, x): - if self.data_format == "channels_last": - return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) - elif self.data_format == "channels_first": - u = x.mean(1, keepdim=True) - s = (x - u).pow(2).mean(1, keepdim=True) - x = (x - u) / torch.sqrt(s + self.eps) - x = self.weight[:, None, None] * x + self.bias[:, None, None] - return x diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/configs/_base_/default_runtime.py b/cv/classification/convnext/pytorch/semantic_segmentation/configs/_base_/default_runtime.py deleted file mode 100644 index 5cdf029d08c174e3ac8190858295013d0985ab42..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/configs/_base_/default_runtime.py +++ /dev/null @@ -1,14 +0,0 @@ -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='CustomizedTextLoggerHook', by_epoch=False), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] -cudnn_benchmark = True diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/configs/_base_/models/upernet_convnext.py b/cv/classification/convnext/pytorch/semantic_segmentation/configs/_base_/models/upernet_convnext.py deleted file mode 100644 index 355d02ea714772075a1790c26bb4c418bc4e9aad..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/configs/_base_/models/upernet_convnext.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained=None, - backbone=dict( - type='ConvNeXt', - in_chans=3, - depths=[3, 3, 9, 3], - dims=[96, 192, 384, 768], - drop_path_rate=0.2, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - decode_head=dict( - type='UPerHead', - in_channels=[128, 256, 512, 1024], - in_index=[0, 1, 2, 3], - pool_scales=(1, 2, 3, 6), - channels=512, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=384, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_base_512_160k_ade20k_ms.py b/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_base_512_160k_ade20k_ms.py deleted file mode 100644 index c6aabfbe53e554fc20591ad2c2db811757510a00..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_base_512_160k_ade20k_ms.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -_base_ = [ - '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -crop_size = (512, 512) - -model = dict( - backbone=dict( - type='ConvNeXt', - in_chans=3, - depths=[3, 3, 27, 3], - dims=[128, 256, 512, 1024], - drop_path_rate=0.4, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - decode_head=dict( - in_channels=[128, 256, 512, 1024], - num_classes=150, - ), - auxiliary_head=dict( - in_channels=512, - num_classes=150 - ), -) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.9, - 'decay_type': 'stage_wise', - 'num_layers': 12}) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -# By default, models are trained on 8 GPUs with 2 images per GPU -data=dict(samples_per_gpu=2) - -runner = dict(type='IterBasedRunnerAmp') - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_base_512_160k_ade20k_ss.py b/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_base_512_160k_ade20k_ss.py deleted file mode 100644 index 8ae703f16522cd213ba5d9b0b30fcd2525a0f77e..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_base_512_160k_ade20k_ss.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) Meta 
Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -_base_ = [ - '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -crop_size = (512, 512) - -model = dict( - backbone=dict( - type='ConvNeXt', - in_chans=3, - depths=[3, 3, 27, 3], - dims=[128, 256, 512, 1024], - drop_path_rate=0.4, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - decode_head=dict( - in_channels=[128, 256, 512, 1024], - num_classes=150, - ), - auxiliary_head=dict( - in_channels=512, - num_classes=150 - ), - test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341)), -) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.9, - 'decay_type': 'stage_wise', - 'num_layers': 12}) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -# By default, models are trained on 8 GPUs with 2 images per GPU -data=dict(samples_per_gpu=2) - -runner = dict(type='IterBasedRunnerAmp') - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_base_640_160k_ade20k_ms.py b/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_base_640_160k_ade20k_ms.py deleted file mode 100644 index c2b47130fc14af4247cd376ce053b7f3f267e592..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_base_640_160k_ade20k_ms.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
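The `_ss` configs above evaluate with sliding-window ('slide') inference; a stride of 341 for a 512 crop (and 426 for 640 in the configs below) leaves roughly one third of each window overlapping its neighbour. A rough tiling check (a generic sketch, not mmseg's exact slide-inference code):

```python
# Generic sliding-window tiling check for crop 512 / stride 341 (not mmseg's code).
def window_starts(length, crop, stride):
    starts = list(range(0, max(length - crop, 0) + 1, stride))
    if starts[-1] + crop < length:          # ensure the last window reaches the border
        starts.append(length - crop)
    return starts

for size in (512, 1024, 1365):
    print(size, window_starts(size, crop=512, stride=341))
# Adjacent windows overlap by 512 - 341 = 171 pixels, so every pixel is covered.
```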
- - -_base_ = [ - '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k_640x640.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -crop_size = (640, 640) - -model = dict( - backbone=dict( - type='ConvNeXt', - in_chans=3, - depths=[3, 3, 27, 3], - dims=[128, 256, 512, 1024], - drop_path_rate=0.4, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - decode_head=dict( - in_channels=[128, 256, 512, 1024], - num_classes=150, - ), - auxiliary_head=dict( - in_channels=512, - num_classes=150 - ), -) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.9, - 'decay_type': 'stage_wise', - 'num_layers': 12}) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -# By default, models are trained on 8 GPUs with 2 images per GPU -data=dict(samples_per_gpu=2) - -runner = dict(type='IterBasedRunnerAmp') - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_base_640_160k_ade20k_ss.py b/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_base_640_160k_ade20k_ss.py deleted file mode 100644 index 52185558d0a4c0af03b8966c70999dad9aeb1542..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_base_640_160k_ade20k_ss.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
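The `lr_config` above combines a 1500-iteration linear warmup (starting at 1e-6 of the base LR) with polynomial decay; since `power=1.0` and `min_lr=0.0`, the decay is simply linear to zero over the 160k iterations of `schedule_160k`. A rough sketch of the resulting multiplier (an approximation of the mmcv scheduler's behaviour, not a verbatim copy):

```python
# Approximate LR multiplier for poly decay (power=1.0, min_lr=0.0) with linear
# warmup (warmup_iters=1500, warmup_ratio=1e-6) over 160k iterations.
# This mirrors the intent of lr_config above; mmcv's hook may differ in details.
def lr_multiplier(it, max_iters=160000, warmup_iters=1500, warmup_ratio=1e-6, power=1.0):
    poly = (1 - it / max_iters) ** power
    if it < warmup_iters:
        k = (1 - it / warmup_iters) * (1 - warmup_ratio)   # linear warmup factor
        return poly * (1 - k)
    return poly

for it in (0, 750, 1500, 80000, 160000):
    print(it, round(lr_multiplier(it), 6))
```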
- - -_base_ = [ - '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k_640x640.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -crop_size = (640, 640) - -model = dict( - backbone=dict( - type='ConvNeXt', - in_chans=3, - depths=[3, 3, 27, 3], - dims=[128, 256, 512, 1024], - drop_path_rate=0.4, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - decode_head=dict( - in_channels=[128, 256, 512, 1024], - num_classes=150, - ), - auxiliary_head=dict( - in_channels=512, - num_classes=150 - ), - test_cfg = dict(mode='slide', crop_size=crop_size, stride=(426, 426)), -) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.9, - 'decay_type': 'stage_wise', - 'num_layers': 12}) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -# By default, models are trained on 8 GPUs with 2 images per GPU -data=dict(samples_per_gpu=2) - -runner = dict(type='IterBasedRunnerAmp') - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_large_640_160k_ade20k_ms.py b/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_large_640_160k_ade20k_ms.py deleted file mode 100644 index 5d75db86cfde679b2d31b9a60433354eb1a11a29..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_large_640_160k_ade20k_ms.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -_base_ = [ - '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k_640x640.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -crop_size = (640, 640) - -model = dict( - backbone=dict( - type='ConvNeXt', - in_chans=3, - depths=[3, 3, 27, 3], - dims=[192, 384, 768, 1536], - drop_path_rate=0.4, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - decode_head=dict( - in_channels=[192, 384, 768, 1536], - num_classes=150, - ), - auxiliary_head=dict( - in_channels=768, - num_classes=150 - ), -) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.9, - 'decay_type': 'stage_wise', - 'num_layers': 12}) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -# By default, models are trained on 8 GPUs with 2 images per GPU -data=dict(samples_per_gpu=2) - -runner = dict(type='IterBasedRunnerAmp') - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=False, -) diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_large_640_160k_ade20k_ss.py b/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_large_640_160k_ade20k_ss.py deleted file mode 100644 index 663af418eee9baa25887146c8cefd51ef9e933b1..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_large_640_160k_ade20k_ss.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -_base_ = [ - '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k_640x640.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -crop_size = (640, 640) - -model = dict( - backbone=dict( - type='ConvNeXt', - in_chans=3, - depths=[3, 3, 27, 3], - dims=[192, 384, 768, 1536], - drop_path_rate=0.4, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - decode_head=dict( - in_channels=[192, 384, 768, 1536], - num_classes=150, - ), - auxiliary_head=dict( - in_channels=768, - num_classes=150 - ), - test_cfg = dict(mode='slide', crop_size=crop_size, stride=(426, 426)), -) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.9, - 'decay_type': 'stage_wise', - 'num_layers': 12}) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -# By default, models are trained on 8 GPUs with 2 images per GPU -data=dict(samples_per_gpu=2) - -runner = dict(type='IterBasedRunnerAmp') - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=False, -) diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_small_512_160k_ade20k_ms.py b/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_small_512_160k_ade20k_ms.py deleted file mode 100644 index 624ca1f4e120a522da7efba547a6f51cae9ad60c..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_small_512_160k_ade20k_ms.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -_base_ = [ - '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -crop_size = (512, 512) - -model = dict( - backbone=dict( - in_chans=3, - depths=[3, 3, 27, 3], - dims=[96, 192, 384, 768], - drop_path_rate=0.3, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - decode_head=dict( - in_channels=[96, 192, 384, 768], - num_classes=150, - ), - auxiliary_head=dict( - in_channels=384, - num_classes=150 - ), -) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.9, - 'decay_type': 'stage_wise', - 'num_layers': 12}) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -# By default, models are trained on 8 GPUs with 2 images per GPU -data=dict(samples_per_gpu=2) - -runner = dict(type='IterBasedRunnerAmp') - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_small_512_160k_ade20k_ss.py b/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_small_512_160k_ade20k_ss.py deleted file mode 100644 index c7f3f895b4ff72265e82fc029e6bfbe04d3ce09d..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_small_512_160k_ade20k_ss.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -_base_ = [ - '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -crop_size = (512, 512) - -model = dict( - backbone=dict( - in_chans=3, - depths=[3, 3, 27, 3], - dims=[96, 192, 384, 768], - drop_path_rate=0.3, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - decode_head=dict( - in_channels=[96, 192, 384, 768], - num_classes=150, - ), - auxiliary_head=dict( - in_channels=384, - num_classes=150 - ), - test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341)), -) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.9, - 'decay_type': 'stage_wise', - 'num_layers': 12}) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -# By default, models are trained on 8 GPUs with 2 images per GPU -data=dict(samples_per_gpu=2) - -runner = dict(type='IterBasedRunnerAmp') - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_tiny_512_160k_ade20k_ms.py b/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_tiny_512_160k_ade20k_ms.py deleted file mode 100644 index 9b9cd8b22224c454c351a9fde315b110bbf08040..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_tiny_512_160k_ade20k_ms.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -_base_ = [ - '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -crop_size = (512, 512) - -model = dict( - backbone=dict( - type='ConvNeXt', - in_chans=3, - depths=[3, 3, 9, 3], - dims=[96, 192, 384, 768], - drop_path_rate=0.4, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - decode_head=dict( - in_channels=[96, 192, 384, 768], - num_classes=150, - ), - auxiliary_head=dict( - in_channels=384, - num_classes=150 - ), -) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.9, - 'decay_type': 'stage_wise', - 'num_layers': 6}) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -# By default, models are trained on 8 GPUs with 2 images per GPU -data=dict(samples_per_gpu=2) - -runner = dict(type='IterBasedRunnerAmp') - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_tiny_512_160k_ade20k_ss.py b/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_tiny_512_160k_ade20k_ss.py deleted file mode 100644 index 7c5418ba1c96e1411ec1e90eeac8b7773b75879c..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_tiny_512_160k_ade20k_ss.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -_base_ = [ - '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -crop_size = (512, 512) - -model = dict( - backbone=dict( - type='ConvNeXt', - in_chans=3, - depths=[3, 3, 9, 3], - dims=[96, 192, 384, 768], - drop_path_rate=0.4, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - decode_head=dict( - in_channels=[96, 192, 384, 768], - num_classes=150, - ), - auxiliary_head=dict( - in_channels=384, - num_classes=150 - ), - test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341)), -) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.9, - 'decay_type': 'stage_wise', - 'num_layers': 6}) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -# By default, models are trained on 8 GPUs with 2 images per GPU -data=dict(samples_per_gpu=2) - -runner = dict(type='IterBasedRunnerAmp') - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=True, -) diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_xlarge_640_160k_ade20k_ms.py b/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_xlarge_640_160k_ade20k_ms.py deleted file mode 100644 index 0118b9a6f8f22006313f65533978b2226eccd9cd..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_xlarge_640_160k_ade20k_ms.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -_base_ = [ - '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k_640x640.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -crop_size = (640, 640) - -model = dict( - backbone=dict( - type='ConvNeXt', - in_chans=3, - depths=[3, 3, 27, 3], - dims=[256, 512, 1024, 2048], - drop_path_rate=0.4, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - decode_head=dict( - in_channels=[256, 512, 1024, 2048], - num_classes=150, - ), - auxiliary_head=dict( - in_channels=1024, - num_classes=150 - ), -) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.00008, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.9, - 'decay_type': 'stage_wise', - 'num_layers': 12}) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -# By default, models are trained on 8 GPUs with 2 images per GPU -data=dict(samples_per_gpu=2) - -runner = dict(type='IterBasedRunnerAmp') - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=False, -) diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_xlarge_640_160k_ade20k_ss.py b/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_xlarge_640_160k_ade20k_ss.py deleted file mode 100644 index 06cd5447675f808f48eea1c3702c11120bac1922..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/configs/convnext/upernet_convnext_xlarge_640_160k_ade20k_ss.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -_base_ = [ - '../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k_640x640.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -crop_size = (640, 640) - -model = dict( - backbone=dict( - type='ConvNeXt', - in_chans=3, - depths=[3, 3, 27, 3], - dims=[256, 512, 1024, 2048], - drop_path_rate=0.4, - layer_scale_init_value=1.0, - out_indices=[0, 1, 2, 3], - ), - decode_head=dict( - in_channels=[256, 512, 1024, 2048], - num_classes=150, - ), - auxiliary_head=dict( - in_channels=1024, - num_classes=150 - ), - test_cfg = dict(mode='slide', crop_size=crop_size, stride=(426, 426)), -) - -optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW', - lr=0.00008, betas=(0.9, 0.999), weight_decay=0.05, - paramwise_cfg={'decay_rate': 0.9, - 'decay_type': 'stage_wise', - 'num_layers': 12}) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -# By default, models are trained on 8 GPUs with 2 images per GPU -data=dict(samples_per_gpu=2) - -runner = dict(type='IterBasedRunnerAmp') - -# do not use mmdet version fp16 -fp16 = None -optimizer_config = dict( - type="DistOptimizerHook", - update_interval=1, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - use_fp16=False, -) diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/mmcv_custom/__init__.py b/cv/classification/convnext/pytorch/semantic_segmentation/mmcv_custom/__init__.py deleted file mode 100644 index 381638c40e03975132de4575e10ad1e50ceb6e47..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/mmcv_custom/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -# -*- coding: utf-8 -*- - -from .checkpoint import load_checkpoint -from .layer_decay_optimizer_constructor import LearningRateDecayOptimizerConstructor -from .resize_transform import SETR_Resize -from .apex_runner.optimizer import DistOptimizerHook -from .train_api import train_segmentor -from .customized_text import CustomizedTextLoggerHook - -__all__ = ['load_checkpoint', 'LearningRateDecayOptimizerConstructor', 'SETR_Resize', 'DistOptimizerHook', 'train_segmentor', 'CustomizedTextLoggerHook'] diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/mmcv_custom/apex_runner/checkpoint.py b/cv/classification/convnext/pytorch/semantic_segmentation/mmcv_custom/apex_runner/checkpoint.py deleted file mode 100644 index e2fb383a75242a5ed3a83c78a7f1c93ab5a28627..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/mmcv_custom/apex_runner/checkpoint.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) Open-MMLab. All rights reserved. -import os.path as osp -import time -from tempfile import TemporaryDirectory - -import torch -from torch.optim import Optimizer - -import mmcv -from mmcv.parallel import is_module_wrapper -from mmcv.runner.checkpoint import weights_to_cpu, get_state_dict - -try: - import apex -except: - print('apex is not installed') - - -def save_checkpoint(model, filename, optimizer=None, meta=None): - """Save checkpoint to file. - - The checkpoint will have 4 fields: ``meta``, ``state_dict`` and - ``optimizer``, ``amp``. 
By default ``meta`` will contain version - and time info. - - Args: - model (Module): Module whose params are to be saved. - filename (str): Checkpoint filename. - optimizer (:obj:`Optimizer`, optional): Optimizer to be saved. - meta (dict, optional): Metadata to be saved in checkpoint. - """ - if meta is None: - meta = {} - elif not isinstance(meta, dict): - raise TypeError(f'meta must be a dict or None, but got {type(meta)}') - meta.update(mmcv_version=mmcv.__version__, time=time.asctime()) - - if is_module_wrapper(model): - model = model.module - - if hasattr(model, 'CLASSES') and model.CLASSES is not None: - # save class name to the meta - meta.update(CLASSES=model.CLASSES) - - checkpoint = { - 'meta': meta, - 'state_dict': weights_to_cpu(get_state_dict(model)) - } - # save optimizer state dict in the checkpoint - if isinstance(optimizer, Optimizer): - checkpoint['optimizer'] = optimizer.state_dict() - elif isinstance(optimizer, dict): - checkpoint['optimizer'] = {} - for name, optim in optimizer.items(): - checkpoint['optimizer'][name] = optim.state_dict() - - # save amp state dict in the checkpoint - # checkpoint['amp'] = apex.amp.state_dict() - - if filename.startswith('pavi://'): - try: - from pavi import modelcloud - from pavi.exception import NodeNotFoundError - except ImportError: - raise ImportError( - 'Please install pavi to load checkpoint from modelcloud.') - model_path = filename[7:] - root = modelcloud.Folder() - model_dir, model_name = osp.split(model_path) - try: - model = modelcloud.get(model_dir) - except NodeNotFoundError: - model = root.create_training_model(model_dir) - with TemporaryDirectory() as tmp_dir: - checkpoint_file = osp.join(tmp_dir, model_name) - with open(checkpoint_file, 'wb') as f: - torch.save(checkpoint, f) - f.flush() - model.create_file(checkpoint_file, name=model_name) - else: - mmcv.mkdir_or_exist(osp.dirname(filename)) - # immediately flush buffer - with open(filename, 'wb') as f: - torch.save(checkpoint, f) - f.flush() diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/mmcv_custom/customized_text.py b/cv/classification/convnext/pytorch/semantic_segmentation/mmcv_custom/customized_text.py deleted file mode 100644 index bfbc9633f9587a6815d2eceb030e5b6a8944e1d2..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/mmcv_custom/customized_text.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -import datetime -from collections import OrderedDict - -import torch - -import mmcv -from mmcv.runner import HOOKS -from mmcv.runner import TextLoggerHook - - -@HOOKS.register_module() -class CustomizedTextLoggerHook(TextLoggerHook): - """Customized Text Logger hook. - - This logger prints out both lr and layer_0_lr. 
- - """ - - def _log_info(self, log_dict, runner): - # print exp name for users to distinguish experiments - # at every ``interval_exp_name`` iterations and the end of each epoch - if runner.meta is not None and 'exp_name' in runner.meta: - if (self.every_n_iters(runner, self.interval_exp_name)) or ( - self.by_epoch and self.end_of_epoch(runner)): - exp_info = f'Exp name: {runner.meta["exp_name"]}' - runner.logger.info(exp_info) - - if log_dict['mode'] == 'train': - lr_str = {} - for lr_type in ['lr', 'layer_0_lr']: - if isinstance(log_dict[lr_type], dict): - lr_str[lr_type] = [] - for k, val in log_dict[lr_type].items(): - lr_str.append(f'{lr_type}_{k}: {val:.3e}') - lr_str[lr_type] = ' '.join(lr_str) - else: - lr_str[lr_type] = f'{lr_type}: {log_dict[lr_type]:.3e}' - - # by epoch: Epoch [4][100/1000] - # by iter: Iter [100/100000] - if self.by_epoch: - log_str = f'Epoch [{log_dict["epoch"]}]' \ - f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t' - else: - log_str = f'Iter [{log_dict["iter"]}/{runner.max_iters}]\t' - log_str += f'{lr_str["lr"]}, {lr_str["layer_0_lr"]}, ' - - if 'time' in log_dict.keys(): - self.time_sec_tot += (log_dict['time'] * self.interval) - time_sec_avg = self.time_sec_tot / ( - runner.iter - self.start_iter + 1) - eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1) - eta_str = str(datetime.timedelta(seconds=int(eta_sec))) - log_str += f'eta: {eta_str}, ' - log_str += f'time: {log_dict["time"]:.3f}, ' \ - f'data_time: {log_dict["data_time"]:.3f}, ' - # statistic memory - if torch.cuda.is_available(): - log_str += f'memory: {log_dict["memory"]}, ' - else: - # val/test time - # here 1000 is the length of the val dataloader - # by epoch: Epoch[val] [4][1000] - # by iter: Iter[val] [1000] - if self.by_epoch: - log_str = f'Epoch({log_dict["mode"]}) ' \ - f'[{log_dict["epoch"]}][{log_dict["iter"]}]\t' - else: - log_str = f'Iter({log_dict["mode"]}) [{log_dict["iter"]}]\t' - - log_items = [] - for name, val in log_dict.items(): - # TODO: resolve this hack - # these items have been in log_str - if name in [ - 'mode', 'Epoch', 'iter', 'lr', 'layer_0_lr', 'time', 'data_time', - 'memory', 'epoch' - ]: - continue - if isinstance(val, float): - val = f'{val:.4f}' - log_items.append(f'{name}: {val}') - log_str += ', '.join(log_items) - - runner.logger.info(log_str) - - - def log(self, runner): - if 'eval_iter_num' in runner.log_buffer.output: - # this doesn't modify runner.iter and is regardless of by_epoch - cur_iter = runner.log_buffer.output.pop('eval_iter_num') - else: - cur_iter = self.get_iter(runner, inner_iter=True) - - log_dict = OrderedDict( - mode=self.get_mode(runner), - epoch=self.get_epoch(runner), - iter=cur_iter) - - # record lr and layer_0_lr - cur_lr = runner.current_lr() - if isinstance(cur_lr, list): - log_dict['layer_0_lr'] = min(cur_lr) - log_dict['lr'] = max(cur_lr) - else: - assert isinstance(cur_lr, dict) - log_dict['lr'], log_dict['layer_0_lr'] = {}, {} - for k, lr_ in cur_lr.items(): - assert isinstance(lr_, list) - log_dict['layer_0_lr'].update({k: min(lr_)}) - log_dict['lr'].update({k: max(lr_)}) - - if 'time' in runner.log_buffer.output: - # statistic memory - if torch.cuda.is_available(): - log_dict['memory'] = self._get_max_memory(runner) - - log_dict = dict(log_dict, **runner.log_buffer.output) - - self._log_info(log_dict, runner) - self._dump_log(log_dict, runner) - return log_dict diff --git a/cv/classification/convnext/pytorch/semantic_segmentation/mmcv_custom/layer_decay_optimizer_constructor.py 
b/cv/classification/convnext/pytorch/semantic_segmentation/mmcv_custom/layer_decay_optimizer_constructor.py deleted file mode 100644 index 8fd5869e2e5eb80e9ca08ae1d24f4a79b7f7545c..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/semantic_segmentation/mmcv_custom/layer_decay_optimizer_constructor.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -import json -from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor -from mmcv.runner import get_dist_info - - -def get_num_layer_layer_wise(var_name, num_max_layer=12): - - if var_name in ("backbone.cls_token", "backbone.mask_token", "backbone.pos_embed"): - return 0 - elif var_name.startswith("backbone.downsample_layers"): - stage_id = int(var_name.split('.')[2]) - if stage_id == 0: - layer_id = 0 - elif stage_id == 1: - layer_id = 2 - elif stage_id == 2: - layer_id = 3 - elif stage_id == 3: - layer_id = num_max_layer - return layer_id - elif var_name.startswith("backbone.stages"): - stage_id = int(var_name.split('.')[2]) - block_id = int(var_name.split('.')[3]) - if stage_id == 0: - layer_id = 1 - elif stage_id == 1: - layer_id = 2 - elif stage_id == 2: - layer_id = 3 + block_id // 3 - elif stage_id == 3: - layer_id = num_max_layer - return layer_id - else: - return num_max_layer + 1 - - -def get_num_layer_stage_wise(var_name, num_max_layer): - if var_name in ("backbone.cls_token", "backbone.mask_token", "backbone.pos_embed"): - return 0 - elif var_name.startswith("backbone.downsample_layers"): - return 0 - elif var_name.startswith("backbone.stages"): - stage_id = int(var_name.split('.')[2]) - return stage_id + 1 - else: - return num_max_layer - 1 - - -@OPTIMIZER_BUILDERS.register_module() -class LearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor): - def add_params(self, params, module, prefix='', is_dcn_module=None): - """Add all parameters of module to the params list. - The parameters of the given module will be added to the list of param - groups, with specific rules defined by paramwise_cfg. - Args: - params (list[dict]): A list of param groups, it will be modified - in place. - module (nn.Module): The module to be added. - prefix (str): The prefix of the module - is_dcn_module (int|float|None): If the current module is a - submodule of DCN, `is_dcn_module` will be passed to - control conv_offset layer's learning rate. Defaults to None. - """ - parameter_groups = {} - print(self.paramwise_cfg) - num_layers = self.paramwise_cfg.get('num_layers') + 2 - decay_rate = self.paramwise_cfg.get('decay_rate') - decay_type = self.paramwise_cfg.get('decay_type', "layer_wise") - print("Build LearningRateDecayOptimizerConstructor %s %f - %d" % (decay_type, decay_rate, num_layers)) - weight_decay = self.base_wd - - for name, param in module.named_parameters(): - if not param.requires_grad: - continue # frozen weights - if len(param.shape) == 1 or name.endswith(".bias") or name in ('pos_embed', 'cls_token'): - group_name = "no_decay" - this_weight_decay = 0. 
- else: - group_name = "decay" - this_weight_decay = weight_decay - - if decay_type == "layer_wise": - layer_id = get_num_layer_layer_wise(name, self.paramwise_cfg.get('num_layers')) - elif decay_type == "stage_wise": - layer_id = get_num_layer_stage_wise(name, num_layers) - - group_name = "layer_%d_%s" % (layer_id, group_name) - - if group_name not in parameter_groups: - scale = decay_rate ** (num_layers - layer_id - 1) - - parameter_groups[group_name] = { - "weight_decay": this_weight_decay, - "params": [], - "param_names": [], - "lr_scale": scale, - "group_name": group_name, - "lr": scale * self.base_lr, - } - - parameter_groups[group_name]["params"].append(param) - parameter_groups[group_name]["param_names"].append(name) - rank, _ = get_dist_info() - if rank == 0: - to_display = {} - for key in parameter_groups: - to_display[key] = { - "param_names": parameter_groups[key]["param_names"], - "lr_scale": parameter_groups[key]["lr_scale"], - "lr": parameter_groups[key]["lr"], - "weight_decay": parameter_groups[key]["weight_decay"], - } - print("Param groups = %s" % json.dumps(to_display, indent=2)) - - params.extend(parameter_groups.values()) diff --git a/cv/classification/convnext/pytorch/utils.py b/cv/classification/convnext/pytorch/utils.py deleted file mode 100644 index a34c480f610b61195762771d3af7980571b9d17a..0000000000000000000000000000000000000000 --- a/cv/classification/convnext/pytorch/utils.py +++ /dev/null @@ -1,507 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. - -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -import os -import math -import time -from collections import defaultdict, deque -import datetime -import numpy as np -from timm.utils import get_state_dict - -from pathlib import Path - -import torch -import torch.distributed as dist -from torch._six import inf - -from tensorboardX import SummaryWriter - -class SmoothedValue(object): - """Track a series of values and provide access to smoothed values over a - window or the global series average. - """ - - def __init__(self, window_size=20, fmt=None): - if fmt is None: - fmt = "{median:.4f} ({global_avg:.4f})" - self.deque = deque(maxlen=window_size) - self.total = 0.0 - self.count = 0 - self.fmt = fmt - - def update(self, value, n=1): - self.deque.append(value) - self.count += n - self.total += value * n - - def synchronize_between_processes(self): - """ - Warning: does not synchronize the deque! 
- """ - if not is_dist_avail_and_initialized(): - return - t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') - dist.barrier() - dist.all_reduce(t) - t = t.tolist() - self.count = int(t[0]) - self.total = t[1] - - @property - def median(self): - d = torch.tensor(list(self.deque)) - return d.median().item() - - @property - def avg(self): - d = torch.tensor(list(self.deque), dtype=torch.float32) - return d.mean().item() - - @property - def global_avg(self): - return self.total / self.count - - @property - def max(self): - return max(self.deque) - - @property - def value(self): - return self.deque[-1] - - def __str__(self): - return self.fmt.format( - median=self.median, - avg=self.avg, - global_avg=self.global_avg, - max=self.max, - value=self.value) - - -class MetricLogger(object): - def __init__(self, delimiter="\t"): - self.meters = defaultdict(SmoothedValue) - self.delimiter = delimiter - - def update(self, **kwargs): - for k, v in kwargs.items(): - if v is None: - continue - if isinstance(v, torch.Tensor): - v = v.item() - assert isinstance(v, (float, int)) - self.meters[k].update(v) - - def __getattr__(self, attr): - if attr in self.meters: - return self.meters[attr] - if attr in self.__dict__: - return self.__dict__[attr] - raise AttributeError("'{}' object has no attribute '{}'".format( - type(self).__name__, attr)) - - def __str__(self): - loss_str = [] - for name, meter in self.meters.items(): - loss_str.append( - "{}: {}".format(name, str(meter)) - ) - return self.delimiter.join(loss_str) - - def synchronize_between_processes(self): - for meter in self.meters.values(): - meter.synchronize_between_processes() - - def add_meter(self, name, meter): - self.meters[name] = meter - - def log_every(self, iterable, print_freq, header=None): - i = 0 - if not header: - header = '' - start_time = time.time() - end = time.time() - iter_time = SmoothedValue(fmt='{avg:.4f}') - data_time = SmoothedValue(fmt='{avg:.4f}') - space_fmt = ':' + str(len(str(len(iterable)))) + 'd' - log_msg = [ - header, - '[{0' + space_fmt + '}/{1}]', - 'eta: {eta}', - '{meters}', - 'time: {time}', - 'data: {data}' - ] - if torch.cuda.is_available(): - log_msg.append('max mem: {memory:.0f}') - log_msg = self.delimiter.join(log_msg) - MB = 1024.0 * 1024.0 - for obj in iterable: - data_time.update(time.time() - end) - yield obj - iter_time.update(time.time() - end) - if i % print_freq == 0 or i == len(iterable) - 1: - eta_seconds = iter_time.global_avg * (len(iterable) - i) - eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) - if torch.cuda.is_available(): - print(log_msg.format( - i, len(iterable), eta=eta_string, - meters=str(self), - time=str(iter_time), data=str(data_time), - memory=torch.cuda.max_memory_allocated() / MB)) - else: - print(log_msg.format( - i, len(iterable), eta=eta_string, - meters=str(self), - time=str(iter_time), data=str(data_time))) - i += 1 - end = time.time() - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print('{} Total time: {} ({:.4f} s / it)'.format( - header, total_time_str, total_time / len(iterable))) - - -class TensorboardLogger(object): - def __init__(self, log_dir): - self.writer = SummaryWriter(logdir=log_dir) - self.step = 0 - - def set_step(self, step=None): - if step is not None: - self.step = step - else: - self.step += 1 - - def update(self, head='scalar', step=None, **kwargs): - for k, v in kwargs.items(): - if v is None: - continue - if isinstance(v, torch.Tensor): - v = 
v.item() - assert isinstance(v, (float, int)) - self.writer.add_scalar(head + "/" + k, v, self.step if step is None else step) - - def flush(self): - self.writer.flush() - - -class WandbLogger(object): - def __init__(self, args): - self.args = args - - try: - import wandb - self._wandb = wandb - except ImportError: - raise ImportError( - "To use the Weights and Biases Logger please install wandb." - "Run `pip install wandb` to install it." - ) - - # Initialize a W&B run - if self._wandb.run is None: - self._wandb.init( - project=args.project, - config=args - ) - - def log_epoch_metrics(self, metrics, commit=True): - """ - Log train/test metrics onto W&B. - """ - # Log number of model parameters as W&B summary - self._wandb.summary['n_parameters'] = metrics.get('n_parameters', None) - metrics.pop('n_parameters', None) - - # Log current epoch - self._wandb.log({'epoch': metrics.get('epoch')}, commit=False) - metrics.pop('epoch') - - for k, v in metrics.items(): - if 'train' in k: - self._wandb.log({f'Global Train/{k}': v}, commit=False) - elif 'test' in k: - self._wandb.log({f'Global Test/{k}': v}, commit=False) - - self._wandb.log({}) - - def log_checkpoints(self): - output_dir = self.args.output_dir - model_artifact = self._wandb.Artifact( - self._wandb.run.id + "_model", type="model" - ) - - model_artifact.add_dir(output_dir) - self._wandb.log_artifact(model_artifact, aliases=["latest", "best"]) - - def set_steps(self): - # Set global training step - self._wandb.define_metric('Rank-0 Batch Wise/*', step_metric='Rank-0 Batch Wise/global_train_step') - # Set epoch-wise step - self._wandb.define_metric('Global Train/*', step_metric='epoch') - self._wandb.define_metric('Global Test/*', step_metric='epoch') - - -def setup_for_distributed(is_master): - """ - This function disables printing when not in master process - """ - import builtins as __builtin__ - builtin_print = __builtin__.print - - def print(*args, **kwargs): - force = kwargs.pop('force', False) - if is_master or force: - builtin_print(*args, **kwargs) - - __builtin__.print = print - - -def is_dist_avail_and_initialized(): - if not dist.is_available(): - return False - if not dist.is_initialized(): - return False - return True - - -def get_world_size(): - if not is_dist_avail_and_initialized(): - return 1 - return dist.get_world_size() - - -def get_rank(): - if not is_dist_avail_and_initialized(): - return 0 - return dist.get_rank() - - -def is_main_process(): - return get_rank() == 0 - - -def save_on_master(*args, **kwargs): - if is_main_process(): - torch.save(*args, **kwargs) - - -def init_distributed_mode(args): - - if args.dist_on_itp: - args.rank = int(os.environ['OMPI_COMM_WORLD_RANK']) - args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE']) - args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) - args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']) - os.environ['LOCAL_RANK'] = str(args.gpu) - os.environ['RANK'] = str(args.rank) - os.environ['WORLD_SIZE'] = str(args.world_size) - # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"] - elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: - args.rank = int(os.environ["RANK"]) - args.world_size = int(os.environ['WORLD_SIZE']) - args.gpu = int(os.environ['LOCAL_RANK']) - elif 'SLURM_PROCID' in os.environ: - args.rank = int(os.environ['SLURM_PROCID']) - args.gpu = args.rank % torch.cuda.device_count() - - os.environ['RANK'] = str(args.rank) - os.environ['LOCAL_RANK'] = str(args.gpu) - os.environ['WORLD_SIZE'] = 
str(args.world_size) - else: - print('Not using distributed mode') - args.distributed = False - return - - args.distributed = True - - torch.cuda.set_device(args.gpu) - args.dist_backend = 'nccl' - print('| distributed init (rank {}): {}, gpu {}'.format( - args.rank, args.dist_url, args.gpu), flush=True) - torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, - world_size=args.world_size, rank=args.rank) - torch.distributed.barrier() - setup_for_distributed(args.rank == 0) - - -def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"): - missing_keys = [] - unexpected_keys = [] - error_msgs = [] - # copy state_dict so _load_from_state_dict can modify it - metadata = getattr(state_dict, '_metadata', None) - state_dict = state_dict.copy() - if metadata is not None: - state_dict._metadata = metadata - - def load(module, prefix=''): - local_metadata = {} if metadata is None else metadata.get( - prefix[:-1], {}) - module._load_from_state_dict( - state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) - for name, child in module._modules.items(): - if child is not None: - load(child, prefix + name + '.') - - load(model, prefix=prefix) - - warn_missing_keys = [] - ignore_missing_keys = [] - for key in missing_keys: - keep_flag = True - for ignore_key in ignore_missing.split('|'): - if ignore_key in key: - keep_flag = False - break - if keep_flag: - warn_missing_keys.append(key) - else: - ignore_missing_keys.append(key) - - missing_keys = warn_missing_keys - - if len(missing_keys) > 0: - print("Weights of {} not initialized from pretrained model: {}".format( - model.__class__.__name__, missing_keys)) - if len(unexpected_keys) > 0: - print("Weights from pretrained model not used in {}: {}".format( - model.__class__.__name__, unexpected_keys)) - if len(ignore_missing_keys) > 0: - print("Ignored weights of {} not initialized from pretrained model: {}".format( - model.__class__.__name__, ignore_missing_keys)) - if len(error_msgs) > 0: - print('\n'.join(error_msgs)) - - -class NativeScalerWithGradNormCount: - state_dict_key = "amp_scaler" - - def __init__(self): - self._scaler = torch.cuda.amp.GradScaler() - - def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True): - self._scaler.scale(loss).backward(create_graph=create_graph) - if update_grad: - if clip_grad is not None: - assert parameters is not None - self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place - norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad) - else: - self._scaler.unscale_(optimizer) - norm = get_grad_norm_(parameters) - self._scaler.step(optimizer) - self._scaler.update() - else: - norm = None - return norm - - def state_dict(self): - return self._scaler.state_dict() - - def load_state_dict(self, state_dict): - self._scaler.load_state_dict(state_dict) - - -def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor: - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = [p for p in parameters if p.grad is not None] - norm_type = float(norm_type) - if len(parameters) == 0: - return torch.tensor(0.) 
- device = parameters[0].grad.device - if norm_type == inf: - total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters) - else: - total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type) - return total_norm - - -def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, - start_warmup_value=0, warmup_steps=-1): - warmup_schedule = np.array([]) - warmup_iters = warmup_epochs * niter_per_ep - if warmup_steps > 0: - warmup_iters = warmup_steps - print("Set warmup steps = %d" % warmup_iters) - if warmup_epochs > 0: - warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters) - - iters = np.arange(epochs * niter_per_ep - warmup_iters) - schedule = np.array( - [final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters]) - - schedule = np.concatenate((warmup_schedule, schedule)) - - assert len(schedule) == epochs * niter_per_ep - return schedule - -def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None): - output_dir = Path(args.output_dir) - epoch_name = str(epoch) - checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)] - for checkpoint_path in checkpoint_paths: - to_save = { - 'model': model_without_ddp.state_dict(), - 'optimizer': optimizer.state_dict(), - 'epoch': epoch, - 'scaler': loss_scaler.state_dict(), - 'args': args, - } - - if model_ema is not None: - to_save['model_ema'] = get_state_dict(model_ema) - - save_on_master(to_save, checkpoint_path) - - if is_main_process() and isinstance(epoch, int): - to_del = epoch - args.save_ckpt_num * args.save_ckpt_freq - old_ckpt = output_dir / ('checkpoint-%s.pth' % to_del) - if os.path.exists(old_ckpt): - os.remove(old_ckpt) - - -def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None): - output_dir = Path(args.output_dir) - if args.auto_resume and len(args.resume) == 0: - import glob - all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth')) - latest_ckpt = -1 - for ckpt in all_checkpoints: - t = ckpt.split('-')[-1].split('.')[0] - if t.isdigit(): - latest_ckpt = max(int(t), latest_ckpt) - if latest_ckpt >= 0: - args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt) - print("Auto resume checkpoint: %s" % args.resume) - - if args.resume: - if args.resume.startswith('https'): - checkpoint = torch.hub.load_state_dict_from_url( - args.resume, map_location='cpu', check_hash=True) - else: - checkpoint = torch.load(args.resume, map_location='cpu') - model_without_ddp.load_state_dict(checkpoint['model']) - print("Resume checkpoint %s" % args.resume) - if 'optimizer' in checkpoint and 'epoch' in checkpoint: - optimizer.load_state_dict(checkpoint['optimizer']) - if not isinstance(checkpoint['epoch'], str): # does not support resuming with 'best', 'best-ema' - args.start_epoch = checkpoint['epoch'] + 1 - else: - assert args.eval, 'Does not support resuming with checkpoint-best' - if hasattr(args, 'model_ema') and args.model_ema: - if 'model_ema' in checkpoint.keys(): - model_ema.ema.load_state_dict(checkpoint['model_ema']) - else: - model_ema.ema.load_state_dict(checkpoint['model']) - if 'scaler' in checkpoint: - loss_scaler.load_state_dict(checkpoint['scaler']) - print("With optim & sched!") diff --git a/cv/detection/co-detr/pytorch/.gitignore b/cv/detection/co-detr/pytorch/.gitignore deleted file mode 100644 index 
a892739eba9ba45c732061cd43b10b8169167aae..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/.gitignore +++ /dev/null @@ -1,125 +0,0 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/en/_build/ -docs/zh_cn/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# pyenv -.python-version - -# celery beat schedule file -celerybeat-schedule - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ - -data/ -data -.vscode -.idea -.DS_Store - -# custom -*.pkl -*.pkl.json -*.log.json -docs/modelzoo_statistics.md -mmdet/.mim -work_dirs/ - -# Pytorch -*.pth -*.py~ -*.sh~ - diff --git a/cv/detection/co-detr/pytorch/LICENSE b/cv/detection/co-detr/pytorch/LICENSE deleted file mode 100644 index c7aabcf1e5bc5f5103fdd3a9e1cc8628bb1bf9a8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2022 SenseTime X-Lab - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/cv/detection/co-detr/pytorch/README-origin.md b/cv/detection/co-detr/pytorch/README-origin.md deleted file mode 100644 index dd2ea20ee69fa245cd3bbd966a7c61e3db8f3636..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/README-origin.md +++ /dev/null @@ -1,147 +0,0 @@ -# DETRs with Collaborative Hybrid Assignments Training - -[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/detrs-with-collaborative-hybrid-assignments/object-detection-on-coco-minival)](https://paperswithcode.com/sota/object-detection-on-coco-minival?p=detrs-with-collaborative-hybrid-assignments) -[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/detrs-with-collaborative-hybrid-assignments/object-detection-on-coco)](https://paperswithcode.com/sota/object-detection-on-coco?p=detrs-with-collaborative-hybrid-assignments) -[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/detrs-with-collaborative-hybrid-assignments/object-detection-on-lvis-v1-0-minival)](https://paperswithcode.com/sota/object-detection-on-lvis-v1-0-minival?p=detrs-with-collaborative-hybrid-assignments) -[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/detrs-with-collaborative-hybrid-assignments/object-detection-on-lvis-v1-0-val)](https://paperswithcode.com/sota/object-detection-on-lvis-v1-0-val?p=detrs-with-collaborative-hybrid-assignments) -[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/detrs-with-collaborative-hybrid-assignments/instance-segmentation-on-lvis-v1-0-val)](https://paperswithcode.com/sota/instance-segmentation-on-lvis-v1-0-val?p=detrs-with-collaborative-hybrid-assignments) - -This repo is the official implementation of ["DETRs with Collaborative Hybrid Assignments Training"](https://arxiv.org/pdf/2211.12860.pdf) by Zhuofan Zong, Guanglu Song, and Yu Liu. - - -## News - -* ***[10/19/2023]*** Our SOTA model Co-DETR w/ ViT-L is released now. Please refer to [this page](https://github.com/Sense-X/Co-DETR/blob/main/docs/en/sota_release.md) for more details. -* ***[09/10/2023]*** We release LVIS inference configs and a stronger LVIS detector that achieves **64.5 box AP**. -* ***[08/21/2023]*** Our O365 pre-trained Co-DETR with Swin-L achieves **64.8 AP** on COCO test-dev. The config and weights are released. -* ***[07/20/2023]*** Code for Co-DINO is released: **55.4 AP** with ResNet-50 and **60.7 AP** with Swin-L. -* ***[07/14/2023]*** Co-DETR is accepted to ICCV 2023! -* ***[07/12/2023]*** We finetune Co-DETR on LVIS and achieve the best results **without TTA**: **71.9 box AP** and **59.7 mask AP** on LVIS minival, **67.9 box AP** and **56.0 mask AP** on LVIS val. For instance segmentation, we report the performance of the auxiliary mask branch. -* ***[07/03/2023]*** Co-DETR with [ViT-L](https://github.com/baaivision/EVA/tree/master/EVA-02) **(304M parameters)** sets a new record of 65.6 **66.0 AP** on COCO test-dev, surpassing the previous best model [InternImage-G](https://github.com/OpenGVLab/InternImage) **(~3000M parameters)**. It is the **first model to exceed 66.0 AP on COCO test-dev**. -* ***[07/03/2023]*** Code for Co-Deformable-DETR is released. -* ***[11/19/2022]*** We achieved 64.4 AP on COCO minival and 64.5 AP on COCO test-dev with only ImageNet-1K as pre-training data. Codes will be available soon. 
- - -## Introduction - -![teaser](figures/framework.png) - -In this paper, we present a novel collaborative hybrid assignments training scheme, namely Co-DETR, to learn more efficient and effective DETR-based detectors from versatile label assignment manners. -1. **Encoder optimization**: The proposed training scheme can easily enhance the encoder's learning ability in end-to-end detectors by training multiple parallel auxiliary heads supervised by one-to-many label assignments. -2. **Decoder optimization**: We conduct extra customized positive queries by extracting the positive coordinates from these auxiliary heads to improve attention learning of the decoder. -3. **State-of-the-art performance**: Co-DETR with [ViT-L](https://github.com/baaivision/EVA/tree/master/EVA-02) (304M parameters) is **the first model to achieve 66.0 AP on COCO test-dev.** - -![teaser](figures/performance.png) - -## Model Zoo - -### Objects365 pre-trained Co-DETR - -Note: the inconsistent pre-training and fine-tuning augmentation settings (DETR and LSJ aug) for the Swin-L model degenerate the performance on LVIS. -| Model | Backbone | Epochs | Aug | Dataset | box AP (val) | Config | Download | -| ------ | -------- | ------ | --- | ------- | ------------ | ------ | ----- | -| Co-DINO | Swin-L | 16 | DETR | COCO | 64.1 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_dino/co_dino_5scale_swin_large_16e_o365tococo.py) | [model](https://drive.google.com/drive/folders/1nAXOkzqrEgz-YnXxIEs4d5j9li_kmrnv?usp=sharing) | -| Co-DINO | Swin-L | 16 | LSJ | LVIS | 64.5 | [config (test)](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_16e_o365tolvis.py) | [model](https://drive.google.com/drive/folders/1nAXOkzqrEgz-YnXxIEs4d5j9li_kmrnv?usp=sharing) | - -### Co-DETR with ResNet-50 - -| Model | Backbone | Epochs | Aug | Dataset | box AP | Config | Download | -| ------ | -------- | ------ | --- | ------- | ------ | ------ | ----- | -| Co-DINO | R50 | 12 | DETR | COCO | 52.1 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_dino/co_dino_5scale_r50_1x_coco.py) | [model](https://drive.google.com/drive/folders/1nAXOkzqrEgz-YnXxIEs4d5j9li_kmrnv?usp=sharing) | -| Co-DINO | R50 | 12 | LSJ | COCO | 52.1 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_dino/co_dino_5scale_lsj_r50_1x_coco.py) | [model](https://drive.google.com/drive/folders/1nAXOkzqrEgz-YnXxIEs4d5j9li_kmrnv?usp=sharing) | -| Co-DINO-9enc | R50 | 12 | LSJ | COCO | 52.6 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_dino/co_dino_5scale_9encoder_lsj_r50_1x_coco.py) | [model](https://drive.google.com/drive/folders/1nAXOkzqrEgz-YnXxIEs4d5j9li_kmrnv?usp=sharing) | -| Co-DINO | R50 | 36 | LSJ | COCO | 54.8 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_dino/co_dino_5scale_lsj_r50_3x_coco.py) | [model](https://drive.google.com/drive/folders/1nAXOkzqrEgz-YnXxIEs4d5j9li_kmrnv?usp=sharing) | -| Co-DINO-9enc | R50 | 36 | LSJ | COCO | 55.4 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_dino/co_dino_5scale_9encoder_lsj_r50_3x_coco.py) | [model](https://drive.google.com/drive/folders/1nAXOkzqrEgz-YnXxIEs4d5j9li_kmrnv?usp=sharing) | - - -### Co-DETR with Swin-L - -| Model | Backbone | Epochs | Aug | Dataset | box AP | Config | Download | -| ------ | -------- | ------ | --- | ------- | ------ | ------ | ----- | -| Co-DINO | Swin-L | 12 | DETR | COCO | 58.9 | 
[config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_dino/co_dino_5scale_swin_large_1x_coco.py) | [model](https://drive.google.com/drive/folders/1nAXOkzqrEgz-YnXxIEs4d5j9li_kmrnv?usp=sharing) | -| Co-DINO | Swin-L | 24 | DETR | COCO | 59.8 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_dino/co_dino_5scale_swin_large_2x_coco.py) | [model](https://drive.google.com/drive/folders/1nAXOkzqrEgz-YnXxIEs4d5j9li_kmrnv?usp=sharing) | -| Co-DINO | Swin-L | 36 | DETR | COCO | 60.0 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_dino/co_dino_5scale_swin_large_3x_coco.py) | [model](https://drive.google.com/drive/folders/1nAXOkzqrEgz-YnXxIEs4d5j9li_kmrnv?usp=sharing) | -| Co-DINO | Swin-L | 12 | LSJ | COCO | 59.3 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_1x_coco.py) | [model](https://drive.google.com/drive/folders/1nAXOkzqrEgz-YnXxIEs4d5j9li_kmrnv?usp=sharing) | -| Co-DINO | Swin-L | 24 | LSJ | COCO | 60.4 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_2x_coco.py) | [model](https://drive.google.com/drive/folders/1nAXOkzqrEgz-YnXxIEs4d5j9li_kmrnv?usp=sharing) | -| Co-DINO | Swin-L | 36 | LSJ | COCO | 60.7 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_3x_coco.py) | [model](https://drive.google.com/drive/folders/1nAXOkzqrEgz-YnXxIEs4d5j9li_kmrnv?usp=sharing) | -| Co-DINO | Swin-L | 36 | LSJ | LVIS | 56.9 | [config (test)](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_3x_lvis.py) | [model](https://drive.google.com/drive/folders/1nAXOkzqrEgz-YnXxIEs4d5j9li_kmrnv?usp=sharing) | - -### Co-Deformable-DETR - -| Model | Backbone | Epochs | Queries | box AP | Config | Download | -| ------ | -------- | ------ | ------- | ------ | ---- | --- | -| Co-Deformable-DETR | R50 | 12 | 300 | 49.5 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_deformable_detr/co_deformable_detr_r50_1x_coco.py) | [model](https://drive.google.com/drive/folders/1asWoZ3SuM6APTL9D-QUF_YW9mjULNdh9?usp=sharing) \| [log](https://drive.google.com/drive/folders/1GktHRm2oAxmOzdK3jPaRqNu4uOQhecgZ?usp=sharing) | -| Co-Deformable-DETR | Swin-T | 12 | 300 | 51.7 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_deformable_detr/co_deformable_detr_swin_tiny_1x_coco.py) | [model](https://drive.google.com/drive/folders/1asWoZ3SuM6APTL9D-QUF_YW9mjULNdh9?usp=sharing) \| [log](https://drive.google.com/drive/folders/1GktHRm2oAxmOzdK3jPaRqNu4uOQhecgZ?usp=sharing) | -| Co-Deformable-DETR | Swin-T | 36 | 300 | 54.1 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_deformable_detr/co_deformable_detr_swin_tiny_3x_coco.py) | [model](https://drive.google.com/drive/folders/1asWoZ3SuM6APTL9D-QUF_YW9mjULNdh9?usp=sharing) \| [log](https://drive.google.com/drive/folders/1GktHRm2oAxmOzdK3jPaRqNu4uOQhecgZ?usp=sharing) | -| Co-Deformable-DETR | Swin-S | 12 | 300 | 53.4 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_deformable_detr/co_deformable_detr_swin_small_1x_coco.py) | [model](https://drive.google.com/drive/folders/1asWoZ3SuM6APTL9D-QUF_YW9mjULNdh9?usp=sharing) \| [log](https://drive.google.com/drive/folders/1GktHRm2oAxmOzdK3jPaRqNu4uOQhecgZ?usp=sharing) | -| Co-Deformable-DETR | Swin-S | 36 | 300 | 55.3 | 
[config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_deformable_detr/co_deformable_detr_swin_small_3x_coco.py) | [model](https://drive.google.com/drive/folders/1asWoZ3SuM6APTL9D-QUF_YW9mjULNdh9?usp=sharing) \| [log](https://drive.google.com/drive/folders/1GktHRm2oAxmOzdK3jPaRqNu4uOQhecgZ?usp=sharing) | -| Co-Deformable-DETR | Swin-B | 12 | 300 | 55.5 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_deformable_detr/co_deformable_detr_swin_base_1x_coco.py) | [model](https://drive.google.com/drive/folders/1asWoZ3SuM6APTL9D-QUF_YW9mjULNdh9?usp=sharing) \| [log](https://drive.google.com/drive/folders/1GktHRm2oAxmOzdK3jPaRqNu4uOQhecgZ?usp=sharing) | -| Co-Deformable-DETR | Swin-B | 36 | 300 | 57.5 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_deformable_detr/co_deformable_detr_swin_base_3x_coco.py) | [model](https://drive.google.com/drive/folders/1asWoZ3SuM6APTL9D-QUF_YW9mjULNdh9?usp=sharing) \| [log](https://drive.google.com/drive/folders/1GktHRm2oAxmOzdK3jPaRqNu4uOQhecgZ?usp=sharing) | -| Co-Deformable-DETR | Swin-L | 12 | 300 | 56.9 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_deformable_detr/co_deformable_detr_swin_large_1x_coco.py) | [model](https://drive.google.com/drive/folders/1asWoZ3SuM6APTL9D-QUF_YW9mjULNdh9?usp=sharing) \| [log](https://drive.google.com/drive/folders/1GktHRm2oAxmOzdK3jPaRqNu4uOQhecgZ?usp=sharing) | -| Co-Deformable-DETR | Swin-L | 36 | 900 | 58.5 | [config](https://github.com/Sense-X/Co-DETR/blob/main/projects/configs/co_deformable_detr/co_deformable_detr_swin_large_900q_3x_coco.py) | [model](https://drive.google.com/drive/folders/1asWoZ3SuM6APTL9D-QUF_YW9mjULNdh9?usp=sharing) \| [log](https://drive.google.com/drive/folders/1GktHRm2oAxmOzdK3jPaRqNu4uOQhecgZ?usp=sharing) | - -## Running - -### Install -We implement Co-DETR using [MMDetection V2.25.3](https://github.com/open-mmlab/mmdetection/releases/tag/v2.25.3) and [MMCV V1.5.0](https://github.com/open-mmlab/mmcv/releases/tag/v1.5.0). -The source code of MMdetection has been included in this repo and you only need to build MMCV following [official instructions](https://github.com/open-mmlab/mmcv/tree/v1.5.0#installation). -We test our models under ```python=3.7.11,pytorch=1.11.0,cuda=11.3```. Other versions may not be compatible. 
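For anyone rebuilding the test environment described above (`python=3.7.11, pytorch=1.11.0, cuda=11.3` with MMCV 1.5.0), a minimal setup sketch follows. The conda environment name and the wheel index URLs are assumptions based on the standard PyTorch and OpenMMLab distribution layouts, not commands taken from this repository; verify them before use.

```shell
# Minimal sketch of the tested environment; wheel index URLs are assumed standard locations.
conda create -n codetr python=3.7.11 -y
conda activate codetr

# PyTorch 1.11.0 / torchvision 0.12.0 built against CUDA 11.3
pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 \
    --extra-index-url https://download.pytorch.org/whl/cu113

# Pre-built MMCV 1.5.0 matching the torch/CUDA combination above
pip install mmcv-full==1.5.0 \
    -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.11.0/index.html
```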
- -### Data -The COCO dataset and LVIS dataset should be organized as: -``` -Co-DETR -└── data - ├── coco - │ ├── annotations - │ │ ├── instances_train2017.json - │ │ └── instances_val2017.json - │ ├── train2017 - │ └── val2017 - │ - └── lvis_v1 - ├── annotations - │ ├── lvis_v1_train.json - │ └── lvis_v1_val.json - ├── train2017 - └── val2017 -``` - -### Training -Train Co-Deformable-DETR + ResNet-50 with 8 GPUs: -```shell -sh tools/dist_train.sh projects/configs/co_deformable_detr/co_deformable_detr_r50_1x_coco.py 8 path_to_exp -``` -Train using slurm: -```shell -sh tools/slurm_train.sh partition job_name projects/configs/co_deformable_detr/co_deformable_detr_r50_1x_coco.py path_to_exp -``` - -### Testing -Test Co-Deformable-DETR + ResNet-50 with 8 GPUs, and evaluate: -```shell -sh tools/dist_test.sh projects/configs/co_deformable_detr/co_deformable_detr_r50_1x_coco.py path_to_checkpoint 8 --eval bbox -``` -Test using slurm: -```shell -sh tools/slurm_test.sh partition job_name projects/configs/co_deformable_detr/co_deformable_detr_r50_1x_coco.py path_to_checkpoint --eval bbox -``` - -## Cite Co-DETR - -If you find this repository useful, please use the following BibTeX entry for citation. - -```latex -@misc{codetr2022, - title={DETRs with Collaborative Hybrid Assignments Training}, - author={Zhuofan Zong and Guanglu Song and Yu Liu}, - year={2022}, - eprint={2211.12860}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` - -## License - -This project is released under the MIT license. Please see the [LICENSE](LICENSE) file for more information. diff --git a/cv/detection/co-detr/pytorch/README.md b/cv/detection/co-detr/pytorch/README.md index 5515d13c43aff9dc8f1cb07eb2cbc93543e7875b..e7ace99fe8cf470e8999573e8b9fdc58626a21f8 100644 --- a/cv/detection/co-detr/pytorch/README.md +++ b/cv/detection/co-detr/pytorch/README.md @@ -21,6 +21,13 @@ pip3 install urllib3==1.26.15 yum install -y mesa-libGL ``` +### (3) download repo +```bash +git clone https://github.com/Sense-X/Co-DETR.git +cd /path/to/Co-DETR +git checkout bf3d49d7c02929788dfe2f251b6b01cbe196b736 +``` + ## Step 2: Preparing datasets Go to visit [COCO official website](https://cocodataset.org/#download), then select the COCO dataset you want to download. 
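The dataset step above only links to the COCO download page. As a convenience, the sketch below fetches the 2017 train/val images and annotations from the command line; the archive URLs are the publicly documented cocodataset.org locations, assumed here rather than taken from this repository's instructions.

```shell
# Sketch: download the COCO 2017 splits expected by the configs (URLs assumed standard).
mkdir -p coco && cd coco
wget http://images.cocodataset.org/zips/train2017.zip
wget http://images.cocodataset.org/zips/val2017.zip
wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
unzip -q train2017.zip && unzip -q val2017.zip && unzip -q annotations_trainval2017.zip
```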
@@ -57,7 +64,7 @@ ln -s /path/to/coco ./data ```bash # One GPU export CUDA_VISIBLE_DEVICES=0 -python3 train.py projects/configs/co_deformable_detr/co_deformable_detr_r50_1x_coco.py --work-dir path_to_exp --no-validate --auto-resume +python3 tools/train.py projects/configs/co_deformable_detr/co_deformable_detr_r50_1x_coco.py --work-dir path_to_exp --no-validate --auto-resume # Eight GPUs bash tools/dist_train.sh projects/configs/co_deformable_detr/co_deformable_detr_r50_1x_coco.py 8 path_to_exp --no-validate --auto-resume diff --git a/cv/detection/co-detr/pytorch/configs/_base_/datasets/cityscapes_detection.py b/cv/detection/co-detr/pytorch/configs/_base_/datasets/cityscapes_detection.py deleted file mode 100644 index e341b59d6fa6265c2d17dc32aae2341871670a3d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/datasets/cityscapes_detection.py +++ /dev/null @@ -1,56 +0,0 @@ -# dataset settings -dataset_type = 'CityscapesDataset' -data_root = 'data/cityscapes/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=1, - workers_per_gpu=2, - train=dict( - type='RepeatDataset', - times=8, - dataset=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/instancesonly_filtered_gtFine_train.json', - img_prefix=data_root + 'leftImg8bit/train/', - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/instancesonly_filtered_gtFine_val.json', - img_prefix=data_root + 'leftImg8bit/val/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/instancesonly_filtered_gtFine_test.json', - img_prefix=data_root + 'leftImg8bit/test/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='bbox') diff --git a/cv/detection/co-detr/pytorch/configs/_base_/datasets/cityscapes_instance.py b/cv/detection/co-detr/pytorch/configs/_base_/datasets/cityscapes_instance.py deleted file mode 100644 index 4e3c34e2c85b4fc2ba854e1b409af70dc2c34e94..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/datasets/cityscapes_instance.py +++ /dev/null @@ -1,56 +0,0 @@ -# dataset settings -dataset_type = 'CityscapesDataset' -data_root = 'data/cityscapes/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - 
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=1, - workers_per_gpu=2, - train=dict( - type='RepeatDataset', - times=8, - dataset=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/instancesonly_filtered_gtFine_train.json', - img_prefix=data_root + 'leftImg8bit/train/', - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/instancesonly_filtered_gtFine_val.json', - img_prefix=data_root + 'leftImg8bit/val/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/instancesonly_filtered_gtFine_test.json', - img_prefix=data_root + 'leftImg8bit/test/', - pipeline=test_pipeline)) -evaluation = dict(metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/datasets/coco_detection.py b/cv/detection/co-detr/pytorch/configs/_base_/datasets/coco_detection.py deleted file mode 100644 index 149f590bb45fa65c29fd4c005e4a237d7dd2e117..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/datasets/coco_detection.py +++ /dev/null @@ -1,49 +0,0 @@ -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='bbox') diff --git a/cv/detection/co-detr/pytorch/configs/_base_/datasets/coco_instance.py b/cv/detection/co-detr/pytorch/configs/_base_/datasets/coco_instance.py deleted file mode 100644 index 9901a858414465d19d8ec6ced316b460166176b4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/datasets/coco_instance.py +++ /dev/null @@ -1,49 +0,0 @@ -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - 
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/datasets/coco_instance_semantic.py b/cv/detection/co-detr/pytorch/configs/_base_/datasets/coco_instance_semantic.py deleted file mode 100644 index 6c8bf07b278f615e7ff5e67490d7a92068574b5b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/datasets/coco_instance_semantic.py +++ /dev/null @@ -1,54 +0,0 @@ -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='SegRescale', scale_factor=1 / 8), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - seg_prefix=data_root + 'stuffthingmaps/train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = 
dict(metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/datasets/coco_panoptic.py b/cv/detection/co-detr/pytorch/configs/_base_/datasets/coco_panoptic.py deleted file mode 100644 index dbade7c0ac20141806b93f0ea7b5ca26d748246e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/datasets/coco_panoptic.py +++ /dev/null @@ -1,59 +0,0 @@ -# dataset settings -dataset_type = 'CocoPanopticDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadPanopticAnnotations', - with_bbox=True, - with_mask=True, - with_seg=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='SegRescale', scale_factor=1 / 4), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/panoptic_train2017.json', - img_prefix=data_root + 'train2017/', - seg_prefix=data_root + 'annotations/panoptic_train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/panoptic_val2017.json', - img_prefix=data_root + 'val2017/', - seg_prefix=data_root + 'annotations/panoptic_val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/panoptic_val2017.json', - img_prefix=data_root + 'val2017/', - seg_prefix=data_root + 'annotations/panoptic_val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric=['PQ']) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/datasets/deepfashion.py b/cv/detection/co-detr/pytorch/configs/_base_/datasets/deepfashion.py deleted file mode 100644 index 308b4b2ac4d9e3516ba4a57e9d3b6af91e97f24b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/datasets/deepfashion.py +++ /dev/null @@ -1,53 +0,0 @@ -# dataset settings -dataset_type = 'DeepFashionDataset' -data_root = 'data/DeepFashion/In-shop/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(750, 1101), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(750, 1101), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', 
keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - imgs_per_gpu=2, - workers_per_gpu=1, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json', - img_prefix=data_root + 'Img/', - pipeline=train_pipeline, - data_root=data_root), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json', - img_prefix=data_root + 'Img/', - pipeline=test_pipeline, - data_root=data_root), - test=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/DeepFashion_segmentation_gallery.json', - img_prefix=data_root + 'Img/', - pipeline=test_pipeline, - data_root=data_root)) -evaluation = dict(interval=5, metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/datasets/lvis_v0.5_instance.py b/cv/detection/co-detr/pytorch/configs/_base_/datasets/lvis_v0.5_instance.py deleted file mode 100644 index 207e0053c24d73e05e78c764d05e65c102675320..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/datasets/lvis_v0.5_instance.py +++ /dev/null @@ -1,24 +0,0 @@ -# dataset settings -_base_ = 'coco_instance.py' -dataset_type = 'LVISV05Dataset' -data_root = 'data/lvis_v0.5/' -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - _delete_=True, - type='ClassBalancedDataset', - oversample_thr=1e-3, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v0.5_train.json', - img_prefix=data_root + 'train2017/')), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v0.5_val.json', - img_prefix=data_root + 'val2017/'), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v0.5_val.json', - img_prefix=data_root + 'val2017/')) -evaluation = dict(metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/datasets/lvis_v1_instance.py b/cv/detection/co-detr/pytorch/configs/_base_/datasets/lvis_v1_instance.py deleted file mode 100644 index be791edd79495dce88d010eea63e33d398f242b0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/datasets/lvis_v1_instance.py +++ /dev/null @@ -1,24 +0,0 @@ -# dataset settings -_base_ = 'coco_instance.py' -dataset_type = 'LVISV1Dataset' -data_root = 'data/lvis_v1/' -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - _delete_=True, - type='ClassBalancedDataset', - oversample_thr=1e-3, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_train.json', - img_prefix=data_root)), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_val.json', - img_prefix=data_root), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_val.json', - img_prefix=data_root)) -evaluation = dict(metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/datasets/openimages_detection.py b/cv/detection/co-detr/pytorch/configs/_base_/datasets/openimages_detection.py deleted file mode 100644 index a65d30634adbdc7ce21c1bd24fed6c99adc50f09..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/datasets/openimages_detection.py +++ /dev/null @@ -1,65 +0,0 @@ -# dataset settings -dataset_type = 'OpenImagesDataset' -data_root = 'data/OpenImages/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, 
denorm_bbox=True), - dict(type='Resize', img_scale=(1024, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1024, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ], - ), -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=0, # workers_per_gpu > 0 may occur out of memory - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/oidv6-train-annotations-bbox.csv', - img_prefix=data_root + 'OpenImages/train/', - label_file=data_root + 'annotations/class-descriptions-boxable.csv', - hierarchy_file=data_root + - 'annotations/bbox_labels_600_hierarchy.json', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/validation-annotations-bbox.csv', - img_prefix=data_root + 'OpenImages/validation/', - label_file=data_root + 'annotations/class-descriptions-boxable.csv', - hierarchy_file=data_root + - 'annotations/bbox_labels_600_hierarchy.json', - meta_file=data_root + 'annotations/validation-image-metas.pkl', - image_level_ann_file=data_root + - 'annotations/validation-annotations-human-imagelabels-boxable.csv', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/validation-annotations-bbox.csv', - img_prefix=data_root + 'OpenImages/validation/', - label_file=data_root + 'annotations/class-descriptions-boxable.csv', - hierarchy_file=data_root + - 'annotations/bbox_labels_600_hierarchy.json', - meta_file=data_root + 'annotations/validation-image-metas.pkl', - image_level_ann_file=data_root + - 'annotations/validation-annotations-human-imagelabels-boxable.csv', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='mAP') diff --git a/cv/detection/co-detr/pytorch/configs/_base_/datasets/voc0712.py b/cv/detection/co-detr/pytorch/configs/_base_/datasets/voc0712.py deleted file mode 100644 index ae09acdd5c9580217815300abbad9f08b71b37ed..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/datasets/voc0712.py +++ /dev/null @@ -1,55 +0,0 @@ -# dataset settings -dataset_type = 'VOCDataset' -data_root = 'data/VOCdevkit/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1000, 600), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1000, 600), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - 
type='RepeatDataset', - times=3, - dataset=dict( - type=dataset_type, - ann_file=[ - data_root + 'VOC2007/ImageSets/Main/trainval.txt', - data_root + 'VOC2012/ImageSets/Main/trainval.txt' - ], - img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'], - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', - img_prefix=data_root + 'VOC2007/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', - img_prefix=data_root + 'VOC2007/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='mAP') diff --git a/cv/detection/co-detr/pytorch/configs/_base_/datasets/wider_face.py b/cv/detection/co-detr/pytorch/configs/_base_/datasets/wider_face.py deleted file mode 100644 index d1d649be42bca2955fb56a784fe80bcc2fdce4e1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/datasets/wider_face.py +++ /dev/null @@ -1,63 +0,0 @@ -# dataset settings -dataset_type = 'WIDERFaceDataset' -data_root = 'data/WIDERFace/' -img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 4)), - dict( - type='MinIoURandomCrop', - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(300, 300), keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(300, 300), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=60, - workers_per_gpu=2, - train=dict( - type='RepeatDataset', - times=2, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'train.txt', - img_prefix=data_root + 'WIDER_train/', - min_size=17, - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + 'val.txt', - img_prefix=data_root + 'WIDER_val/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'val.txt', - img_prefix=data_root + 'WIDER_val/', - pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/default_runtime.py b/cv/detection/co-detr/pytorch/configs/_base_/default_runtime.py deleted file mode 100644 index 5b0b1452c0a625e331be7b1e6c5cf341cc91ff64..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/default_runtime.py +++ /dev/null @@ -1,27 +0,0 @@ -checkpoint_config = dict(interval=1) -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -custom_hooks = [dict(type='NumClassCheckHook')] - -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] - -# disable opencv multithreading to avoid system being overloaded -opencv_num_threads = 0 -# set 
multi-process start method as `fork` to speed up the training -mp_start_method = 'fork' - -# Default setting for scaling LR automatically -# - `enable` means enable scaling LR automatically -# or not by default. -# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). -auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py b/cv/detection/co-detr/pytorch/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py deleted file mode 100644 index 2902ccae5a8ffaa6ae9c49212b68a71035c83e60..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py +++ /dev/null @@ -1,196 +0,0 @@ -# model settings -model = dict( - type='CascadeRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - roi_head=dict( - type='CascadeRoIHead', - num_stages=3, - stage_loss_weights=[1, 0.5, 0.25], - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=[ - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ], - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_head=dict( - type='FCNMaskHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), - # model 
training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=[ - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.6, - neg_iou_thr=0.6, - min_pos_iou=0.6, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.7, - min_pos_iou=0.7, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False) - ]), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5))) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/models/cascade_rcnn_r50_fpn.py b/cv/detection/co-detr/pytorch/configs/_base_/models/cascade_rcnn_r50_fpn.py deleted file mode 100644 index 42f74ae748a32bdce10ab9003fd45f87721d02ff..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/models/cascade_rcnn_r50_fpn.py +++ /dev/null @@ -1,179 +0,0 @@ -# model settings -model = dict( - type='CascadeRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - roi_head=dict( - type='CascadeRoIHead', - num_stages=3, - stage_loss_weights=[1, 0.5, 0.25], - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=[ - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - 
reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ]), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=[ - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.6, - neg_iou_thr=0.6, - min_pos_iou=0.6, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.7, - min_pos_iou=0.7, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False) - ]), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100))) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/models/fast_rcnn_r50_fpn.py b/cv/detection/co-detr/pytorch/configs/_base_/models/fast_rcnn_r50_fpn.py deleted file mode 100644 index 9982fe0956d60022a2c702a824ffaff192e93e1e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/models/fast_rcnn_r50_fpn.py +++ /dev/null @@ -1,62 +0,0 @@ -# model settings -model = dict( - type='FastRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - roi_head=dict( - type='StandardRoIHead', - 
bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - test_cfg=dict( - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100))) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/models/faster_rcnn_r50_caffe_c4.py b/cv/detection/co-detr/pytorch/configs/_base_/models/faster_rcnn_r50_caffe_c4.py deleted file mode 100644 index dbf965afe3de8e91505cf5deeae0d32c55f93c4f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/models/faster_rcnn_r50_caffe_c4.py +++ /dev/null @@ -1,117 +0,0 @@ -# model settings -norm_cfg = dict(type='BN', requires_grad=False) -model = dict( - type='FasterRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=3, - strides=(1, 2, 2), - dilations=(1, 1, 1), - out_indices=(2, ), - frozen_stages=1, - norm_cfg=norm_cfg, - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - rpn_head=dict( - type='RPNHead', - in_channels=1024, - feat_channels=1024, - anchor_generator=dict( - type='AnchorGenerator', - scales=[2, 4, 8, 16, 32], - ratios=[0.5, 1.0, 2.0], - strides=[16]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - shared_head=dict( - type='ResLayer', - depth=50, - stage=3, - stride=2, - dilation=1, - style='caffe', - norm_cfg=norm_cfg, - norm_eval=True, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=1024, - featmap_strides=[16]), - bbox_head=dict( - type='BBoxHead', - with_avg_pool=True, - roi_feat_size=7, - in_channels=2048, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), 
- rpn_proposal=dict( - nms_pre=12000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=6000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100))) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py b/cv/detection/co-detr/pytorch/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py deleted file mode 100644 index a377a6f09664b5eca189fa77dcb47c69842fdbf2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py +++ /dev/null @@ -1,105 +0,0 @@ -# model settings -norm_cfg = dict(type='BN', requires_grad=False) -model = dict( - type='FasterRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - strides=(1, 2, 2, 1), - dilations=(1, 1, 1, 2), - out_indices=(3, ), - frozen_stages=1, - norm_cfg=norm_cfg, - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - rpn_head=dict( - type='RPNHead', - in_channels=2048, - feat_channels=2048, - anchor_generator=dict( - type='AnchorGenerator', - scales=[2, 4, 8, 16, 32], - ratios=[0.5, 1.0, 2.0], - strides=[16]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=2048, - featmap_strides=[16]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=2048, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=12000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms=dict(type='nms', iou_threshold=0.7), - nms_pre=6000, - max_per_img=1000, - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', 
iou_threshold=0.5), - max_per_img=100))) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/models/faster_rcnn_r50_fpn.py b/cv/detection/co-detr/pytorch/configs/_base_/models/faster_rcnn_r50_fpn.py deleted file mode 100644 index 1ef8e7b2579504e7614429609524ae38239701cc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/models/faster_rcnn_r50_fpn.py +++ /dev/null @@ -1,108 +0,0 @@ -# model settings -model = dict( - type='FasterRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100) - # soft-nms is also supported for rcnn testing - # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) - )) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/models/mask_rcnn_r50_caffe_c4.py b/cv/detection/co-detr/pytorch/configs/_base_/models/mask_rcnn_r50_caffe_c4.py deleted file mode 100644 index 122202e1a5d6b3367de9a8c632864cf168ca5b9d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/models/mask_rcnn_r50_caffe_c4.py +++ /dev/null @@ -1,125 +0,0 @@ -# model settings -norm_cfg = dict(type='BN', 
requires_grad=False) -model = dict( - type='MaskRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=3, - strides=(1, 2, 2), - dilations=(1, 1, 1), - out_indices=(2, ), - frozen_stages=1, - norm_cfg=norm_cfg, - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - rpn_head=dict( - type='RPNHead', - in_channels=1024, - feat_channels=1024, - anchor_generator=dict( - type='AnchorGenerator', - scales=[2, 4, 8, 16, 32], - ratios=[0.5, 1.0, 2.0], - strides=[16]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - shared_head=dict( - type='ResLayer', - depth=50, - stage=3, - stride=2, - dilation=1, - style='caffe', - norm_cfg=norm_cfg, - norm_eval=True), - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=1024, - featmap_strides=[16]), - bbox_head=dict( - type='BBoxHead', - with_avg_pool=True, - roi_feat_size=7, - in_channels=2048, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - mask_roi_extractor=None, - mask_head=dict( - type='FCNMaskHead', - num_convs=0, - in_channels=2048, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=12000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=14, - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=6000, - nms=dict(type='nms', iou_threshold=0.7), - max_per_img=1000, - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5))) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/models/mask_rcnn_r50_fpn.py b/cv/detection/co-detr/pytorch/configs/_base_/models/mask_rcnn_r50_fpn.py deleted file mode 100644 index d903e55e2d95135b1448e566d4d5ec8146597a6a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/models/mask_rcnn_r50_fpn.py +++ /dev/null @@ -1,120 +0,0 @@ -# model settings -model = dict( - type='MaskRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', 
checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_head=dict( - type='FCNMaskHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5))) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/models/retinanet_r50_fpn.py b/cv/detection/co-detr/pytorch/configs/_base_/models/retinanet_r50_fpn.py deleted file mode 100644 index 56e43fa7764cb0f48510415f21888ba0df0c6eb5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/models/retinanet_r50_fpn.py +++ /dev/null @@ -1,60 +0,0 @@ -# model settings -model = dict( - type='RetinaNet', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_input', - num_outs=5), - bbox_head=dict( - 
type='RetinaHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - # model training and testing settings - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/models/rpn_r50_caffe_c4.py b/cv/detection/co-detr/pytorch/configs/_base_/models/rpn_r50_caffe_c4.py deleted file mode 100644 index 8b32ca99258e5ddf249d11eadcd46630d88bd55e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/models/rpn_r50_caffe_c4.py +++ /dev/null @@ -1,58 +0,0 @@ -# model settings -model = dict( - type='RPN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=3, - strides=(1, 2, 2), - dilations=(1, 1, 1), - out_indices=(2, ), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - neck=None, - rpn_head=dict( - type='RPNHead', - in_channels=1024, - feat_channels=1024, - anchor_generator=dict( - type='AnchorGenerator', - scales=[2, 4, 8, 16, 32], - ratios=[0.5, 1.0, 2.0], - strides=[16]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=12000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0))) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/models/rpn_r50_fpn.py b/cv/detection/co-detr/pytorch/configs/_base_/models/rpn_r50_fpn.py deleted file mode 100644 index edaf4d4b06b64b88a4ddd64419fc026e64a6af1d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/models/rpn_r50_fpn.py +++ /dev/null @@ -1,58 +0,0 @@ -# model settings -model = dict( - type='RPN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 
8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0))) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/models/ssd300.py b/cv/detection/co-detr/pytorch/configs/_base_/models/ssd300.py deleted file mode 100644 index f17df010069e300f9f0b6eb456f87e61b8582787..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/models/ssd300.py +++ /dev/null @@ -1,56 +0,0 @@ -# model settings -input_size = 300 -model = dict( - type='SingleStageDetector', - backbone=dict( - type='SSDVGG', - depth=16, - with_last_pool=False, - ceil_mode=True, - out_indices=(3, 4), - out_feature_indices=(22, 34), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://vgg16_caffe')), - neck=dict( - type='SSDNeck', - in_channels=(512, 1024), - out_channels=(512, 1024, 512, 256, 256, 256), - level_strides=(2, 2, 1, 1), - level_paddings=(1, 1, 0, 0), - l2_norm_scale=20), - bbox_head=dict( - type='SSDHead', - in_channels=(512, 1024, 512, 256, 256, 256), - num_classes=80, - anchor_generator=dict( - type='SSDAnchorGenerator', - scale_major=False, - input_size=input_size, - basesize_ratio_range=(0.15, 0.9), - strides=[8, 16, 32, 64, 100, 300], - ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2])), - # model training and testing settings - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0., - ignore_iof_thr=-1, - gt_max_assign_all=False), - smoothl1_beta=1., - allowed_border=-1, - pos_weight=-1, - neg_pos_ratio=3, - debug=False), - test_cfg=dict( - nms_pre=1000, - nms=dict(type='nms', iou_threshold=0.45), - min_bbox_size=0, - score_thr=0.02, - max_per_img=200)) -cudnn_benchmark = True diff --git a/cv/detection/co-detr/pytorch/configs/_base_/schedules/schedule_1x.py b/cv/detection/co-detr/pytorch/configs/_base_/schedules/schedule_1x.py deleted file mode 100644 index 13b3783cbbe93b6c32bc415dc50f633dffa4aec7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/schedules/schedule_1x.py +++ /dev/null @@ -1,11 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[8, 11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/schedules/schedule_20e.py b/cv/detection/co-detr/pytorch/configs/_base_/schedules/schedule_20e.py deleted file mode 100644 index 00e859022156dcbef6501c04d03f335639f2c1f6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/schedules/schedule_20e.py +++ /dev/null @@ -1,11 
+0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[16, 19]) -runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/cv/detection/co-detr/pytorch/configs/_base_/schedules/schedule_2x.py b/cv/detection/co-detr/pytorch/configs/_base_/schedules/schedule_2x.py deleted file mode 100644 index 69dc9ee8080649ce3646b5775b0ca2e9c863d0f5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/_base_/schedules/schedule_2x.py +++ /dev/null @@ -1,11 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/albu_example/README.md b/cv/detection/co-detr/pytorch/configs/albu_example/README.md deleted file mode 100644 index 9a180f0fde7dc498fbf44e8ccfb31c732bff0425..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/albu_example/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Albu Example - -> [Albumentations: fast and flexible image augmentations](https://arxiv.org/abs/1809.06839) - - - -## Abstract - -Data augmentation is a commonly used technique for increasing both the size and the diversity of labeled training sets by leveraging input transformations that preserve output labels. In the computer vision domain, image augmentations have become a common implicit regularization technique to combat overfitting in deep convolutional neural networks and are ubiquitously used to improve performance. While most deep learning frameworks implement basic image transformations, the list is typically limited to some variations and combinations of flipping, rotating, scaling, and cropping. Moreover, the image processing speed varies in existing tools for image augmentation. We present Albumentations, a fast and flexible library for image augmentations with many different image transform operations available, that is also an easy-to-use wrapper around other augmentation libraries. We provide examples of image augmentations for different computer vision tasks and show that Albumentations is faster than other commonly used image augmentation tools on most of the commonly used image transformations. - -
- -
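For context on how the `Albu` transform in the deleted config below hands images, boxes, and labels to Albumentations, here is a minimal standalone sketch of the same pattern. It assumes only that the `albumentations` package is installed; the toy image, boxes, and labels are made up for illustration.

```python
# Minimal Albumentations usage sketch mirroring the bbox_params/keymap idea in
# the MMDetection `Albu` wrapper below (illustrative, not the wrapper itself).
import numpy as np
import albumentations as A

transform = A.Compose(
    [
        A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.0,
                           rotate_limit=0, p=0.5),
        A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.2),
    ],
    # 'pascal_voc' = absolute [x_min, y_min, x_max, y_max] boxes, matching
    # MMDetection's gt_bboxes; label_fields keeps labels aligned with surviving boxes.
    bbox_params=A.BboxParams(format='pascal_voc', label_fields=['gt_labels']),
)

image = np.zeros((480, 640, 3), dtype=np.uint8)      # toy image
bboxes = [[50, 60, 200, 220], [300, 100, 400, 300]]  # toy gt_bboxes
labels = [1, 3]                                       # toy gt_labels

out = transform(image=image, bboxes=bboxes, gt_labels=labels)
print(len(out['bboxes']), out['gt_labels'])
```

The `keymap` entry in the config plays roughly the same role as the keyword names in the call above, translating MMDetection's `img`/`gt_bboxes`/`gt_masks` keys into the `image`/`bboxes`/`masks` names Albumentations expects.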
- -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | pytorch | 1x | 4.4 | 16.6 | 38.0 | 34.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208-ab203bcd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208_225520.log.json) | - -## Citation - -```latex -@article{2018arXiv180906839B, - author = {A. Buslaev, A. Parinov, E. Khvedchenya, V.~I. Iglovikov and A.~A. Kalinin}, - title = "{Albumentations: fast and flexible image augmentations}", - journal = {ArXiv e-prints}, - eprint = {1809.06839}, - year = 2018 -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py b/cv/detection/co-detr/pytorch/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py deleted file mode 100644 index b3f879a6c573871ea17b2bf158173aadf14457b6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py +++ /dev/null @@ -1,73 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -albu_train_transforms = [ - dict( - type='ShiftScaleRotate', - shift_limit=0.0625, - scale_limit=0.0, - rotate_limit=0, - interpolation=1, - p=0.5), - dict( - type='RandomBrightnessContrast', - brightness_limit=[0.1, 0.3], - contrast_limit=[0.1, 0.3], - p=0.2), - dict( - type='OneOf', - transforms=[ - dict( - type='RGBShift', - r_shift_limit=10, - g_shift_limit=10, - b_shift_limit=10, - p=1.0), - dict( - type='HueSaturationValue', - hue_shift_limit=20, - sat_shift_limit=30, - val_shift_limit=20, - p=1.0) - ], - p=0.1), - dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2), - dict(type='ChannelShuffle', p=0.1), - dict( - type='OneOf', - transforms=[ - dict(type='Blur', blur_limit=3, p=1.0), - dict(type='MedianBlur', blur_limit=3, p=1.0) - ], - p=0.1), -] -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='Pad', size_divisor=32), - dict( - type='Albu', - transforms=albu_train_transforms, - bbox_params=dict( - type='BboxParams', - format='pascal_voc', - label_fields=['gt_labels'], - min_visibility=0.0, - filter_lost_elements=True), - keymap={ - 'img': 'image', - 'gt_masks': 'masks', - 'gt_bboxes': 'bboxes' - }, - update_pad_shape=False, - skip_img_without_anno=True), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'], - meta_keys=('filename', 'ori_shape', 'img_shape', 
'img_norm_cfg', - 'pad_shape', 'scale_factor')) -] -data = dict(train=dict(pipeline=train_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/atss/README.md b/cv/detection/co-detr/pytorch/configs/atss/README.md deleted file mode 100644 index 055ed0598753309ed2c8522fed163a94a10debf2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/atss/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# ATSS - -> [Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection](https://arxiv.org/abs/1912.02424) - - - -## Abstract - -Object detection has been dominated by anchor-based detectors for several years. Recently, anchor-free detectors have become popular due to the proposal of FPN and Focal Loss. In this paper, we first point out that the essential difference between anchor-based and anchor-free detection is actually how to define positive and negative training samples, which leads to the performance gap between them. If they adopt the same definition of positive and negative samples during training, there is no obvious difference in the final performance, no matter regressing from a box or a point. This shows that how to select positive and negative training samples is important for current object detectors. Then, we propose an Adaptive Training Sample Selection (ATSS) to automatically select positive and negative samples according to the statistical characteristics of objects. It significantly improves the performance of anchor-based and anchor-free detectors and bridges the gap between them. Finally, we discuss the necessity of tiling multiple anchors per location on the image to detect objects. Extensive experiments conducted on MS COCO support our aforementioned analysis and conclusions. With the newly introduced ATSS, we improve state-of-the-art detectors by a large margin to 50.7% AP without introducing any overhead. - -
- -
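To make the selection rule sketched in the abstract concrete, the snippet below is a rough NumPy illustration of the adaptive threshold for a single ground-truth box: gather the top-k anchors closest to the GT center on each pyramid level, then use the mean plus standard deviation of their IoUs as the positive threshold. It is a simplified illustration, not the `ATSSAssigner` used by the configs that follow, and it omits the final center-inside-box check.

```python
# Simplified ATSS positive-sample selection for one GT box (illustration only).
import numpy as np

def atss_select(anchors_per_level, ious_per_level, gt_box, topk=9):
    """anchors_per_level: list of (N_l, 4) arrays, boxes as [x1, y1, x2, y2].
    ious_per_level: list of (N_l,) arrays, IoU of each anchor with gt_box."""
    gt_cx = (gt_box[0] + gt_box[2]) / 2.0
    gt_cy = (gt_box[1] + gt_box[3]) / 2.0
    cand_idx, cand_iou, offset = [], [], 0
    for anchors, ious in zip(anchors_per_level, ious_per_level):
        cx = (anchors[:, 0] + anchors[:, 2]) / 2.0
        cy = (anchors[:, 1] + anchors[:, 3]) / 2.0
        dist = np.hypot(cx - gt_cx, cy - gt_cy)
        top = np.argsort(dist)[:topk]         # top-k anchors closest to the GT center
        cand_idx.append(top + offset)
        cand_iou.append(ious[top])
        offset += len(anchors)
    cand_idx = np.concatenate(cand_idx)
    cand_iou = np.concatenate(cand_iou)
    thr = cand_iou.mean() + cand_iou.std()    # adaptive IoU threshold for this GT
    return cand_idx[cand_iou >= thr]          # center-inside-GT check omitted
```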
- -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :------: | :-----: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | pytorch | 1x | 3.7 | 19.7 | 39.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/atss/atss_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209_102539.log.json) | -| R-101 | pytorch | 1x | 5.6 | 12.3 | 41.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/atss/atss_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.log.json) | - -## Citation - -```latex -@article{zhang2019bridging, - title = {Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection}, - author = {Zhang, Shifeng and Chi, Cheng and Yao, Yongqiang and Lei, Zhen and Li, Stan Z.}, - journal = {arXiv preprint arXiv:1912.02424}, - year = {2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/atss/atss_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/atss/atss_r101_fpn_1x_coco.py deleted file mode 100644 index 5225d2ab672738d4d427eba252e92bd554252476..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/atss/atss_r101_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './atss_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/atss/atss_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/atss/atss_r50_fpn_1x_coco.py deleted file mode 100644 index 42ff4c598f94f221ded7c91ce330e43310beddae..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/atss/atss_r50_fpn_1x_coco.py +++ /dev/null @@ -1,62 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - type='ATSS', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5), - bbox_head=dict( - type='ATSSHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( 
- type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=2.0), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), - # training and testing settings - train_cfg=dict( - assigner=dict(type='ATSSAssigner', topk=9), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/atss/metafile.yml b/cv/detection/co-detr/pytorch/configs/atss/metafile.yml deleted file mode 100644 index f4c567ef29ba9ea4fddd7bc00d63a4bca41b1cfa..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/atss/metafile.yml +++ /dev/null @@ -1,60 +0,0 @@ -Collections: - - Name: ATSS - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ATSS - - FPN - - ResNet - Paper: - URL: https://arxiv.org/abs/1912.02424 - Title: 'Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection' - README: configs/atss/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/atss.py#L6 - Version: v2.0.0 - -Models: - - Name: atss_r50_fpn_1x_coco - In Collection: ATSS - Config: configs/atss/atss_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 3.7 - inference time (ms/im): - - value: 50.76 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth - - - Name: atss_r101_fpn_1x_coco - In Collection: ATSS - Config: configs/atss/atss_r101_fpn_1x_coco.py - Metadata: - Training Memory (GB): 5.6 - inference time (ms/im): - - value: 81.3 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth diff --git a/cv/detection/co-detr/pytorch/configs/autoassign/README.md b/cv/detection/co-detr/pytorch/configs/autoassign/README.md deleted file mode 100644 index 12972068fb9de413c2fac372b520b6494b229110..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/autoassign/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# AutoAssign - -> [AutoAssign: Differentiable Label Assignment for Dense Object Detection](https://arxiv.org/abs/2007.03496) - - - -## Abstract - -Determining positive/negative samples for object detection is known as label assignment. Here we present an anchor-free detector named AutoAssign. It requires little human knowledge and achieves appearance-aware through a fully differentiable weighting mechanism. During training, to both satisfy the prior distribution of data and adapt to category characteristics, we present Center Weighting to adjust the category-specific prior distributions. To adapt to object appearances, Confidence Weighting is proposed to adjust the specific assign strategy of each instance. 
The two weighting modules are then combined to generate positive and negative weights to adjust each location's confidence. Extensive experiments on the MS COCO show that our method steadily surpasses other best sampling strategies by large margins with various backbones. Moreover, our best model achieves 52.1% AP, outperforming all existing one-stage detectors. Besides, experiments on other datasets, e.g., PASCAL VOC, Objects365, and WiderFace, demonstrate the broad applicability of AutoAssign. - -
- -
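As a rough illustration of the Center Weighting idea mentioned above (and only that part; Confidence Weighting and the final positive/negative weight normalization are omitted), a toy PyTorch version of a learnable Gaussian center prior over the locations inside one GT box could look like the following. The function name and argument shapes are made up for the sketch and do not come from the AutoAssign code.

```python
# Toy center prior for one GT box: Gaussian weight around a (learnably shifted)
# box center, masked to locations inside the box. Illustration only; not the
# actual AutoAssignHead implementation.
import torch

def center_prior(points, gt_box, mu, sigma):
    """points: (N, 2) feature-map locations (x, y); gt_box: (4,) [x1, y1, x2, y2];
    mu, sigma: (2,) per-category learnable shift and spread."""
    center = torch.stack([(gt_box[0] + gt_box[2]) / 2,
                          (gt_box[1] + gt_box[3]) / 2])
    d = (points - center - mu) / sigma
    weight = torch.exp(-0.5 * (d ** 2).sum(dim=-1))
    # Only locations inside the GT box are eligible positives.
    inside = ((points[:, 0] >= gt_box[0]) & (points[:, 0] <= gt_box[2]) &
              (points[:, 1] >= gt_box[1]) & (points[:, 1] <= gt_box[3]))
    return weight * inside.float()
```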
- -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | box AP | Config | Download | -| :------: | :---: | :-----: | :------: | :----: | :------: | :------: | -| R-50 | caffe | 1x | 4.08 | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.log.json) | - -**Note**: - -1. We find that the performance is unstable with the 1x setting and may fluctuate by about 0.3 mAP; results in the range of 40.3 ~ 40.6 mAP are acceptable. The same fluctuation can also be observed with the original implementation. -2. You can get more stable results (~40.6 mAP) with a 13-epoch schedule in which the learning rate is divided by 10 at the 10th and 13th epochs. - -## Citation - -```latex -@article{zhu2020autoassign, - title={AutoAssign: Differentiable Label Assignment for Dense Object Detection}, - author={Zhu, Benjin and Wang, Jianfeng and Jiang, Zhengkai and Zong, Fuhang and Liu, Songtao and Li, Zeming and Sun, Jian}, - journal={arXiv preprint arXiv:2007.03496}, - year={2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py b/cv/detection/co-detr/pytorch/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py deleted file mode 100644 index db548dc3ca4e54f631668f880eb53586bc17579c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py +++ /dev/null @@ -1,85 +0,0 @@ -# We follow the original implementation which -# adopts the Caffe pre-trained backbone.
-_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - type='AutoAssign', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs=True, - num_outs=5, - relu_before_extra_convs=True, - init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')), - bbox_head=dict( - type='AutoAssignHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - strides=[8, 16, 32, 64, 128], - loss_bbox=dict(type='GIoULoss', loss_weight=5.0)), - train_cfg=None, - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) -img_norm_cfg = dict( - mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(lr=0.01, paramwise_cfg=dict(norm_decay_mult=0.)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=1.0 / 1000, - step=[8, 11]) -total_epochs = 12 diff --git a/cv/detection/co-detr/pytorch/configs/autoassign/metafile.yml b/cv/detection/co-detr/pytorch/configs/autoassign/metafile.yml deleted file mode 100644 index f1e9051934e737736cfe6f3fb3ed3562b517f9ec..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/autoassign/metafile.yml +++ /dev/null @@ -1,33 +0,0 @@ -Collections: - - Name: AutoAssign - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - AutoAssign - - FPN - - ResNet - Paper: - URL: https://arxiv.org/abs/2007.03496 - Title: 'AutoAssign: Differentiable Label Assignment for Dense Object Detection' - README: configs/autoassign/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/autoassign.py#L6 - Version: v2.12.0 - -Models: - - Name: autoassign_r50_fpn_8x2_1x_coco - In Collection: AutoAssign - Config: configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py - Metadata: - Training Memory (GB): 4.08 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.4 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth diff --git a/cv/detection/co-detr/pytorch/configs/carafe/README.md b/cv/detection/co-detr/pytorch/configs/carafe/README.md deleted file mode 100644 index 803abe036d98f63ed56c1c32285a0d420e9faa8d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/carafe/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# CARAFE - -> [CARAFE: Content-Aware ReAssembly of FEatures](https://arxiv.org/abs/1905.02188) - - - -## Abstract - -Feature upsampling is a key operation in a number of modern convolutional network architectures, e.g. feature pyramids. Its design is critical for dense prediction tasks such as object detection and semantic/instance segmentation. In this work, we propose Content-Aware ReAssembly of FEatures (CARAFE), a universal, lightweight and highly effective operator to fulfill this goal. CARAFE has several appealing properties: (1) Large field of view. Unlike previous works (e.g. bilinear interpolation) that only exploit sub-pixel neighborhood, CARAFE can aggregate contextual information within a large receptive field. (2) Content-aware handling. Instead of using a fixed kernel for all samples (e.g. deconvolution), CARAFE enables instance-specific content-aware handling, which generates adaptive kernels on-the-fly. (3) Lightweight and fast to compute. CARAFE introduces little computational overhead and can be readily integrated into modern network architectures. We conduct comprehensive evaluations on standard benchmarks in object detection, instance/semantic segmentation and inpainting. CARAFE shows consistent and substantial gains across all the tasks (1.2%, 1.3%, 1.8%, 1.1db respectively) with negligible computational overhead. It has great potential to serve as a strong building block for future research. - -
- -
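The reassembly described above can be written out naively in plain PyTorch: predict a softmax-normalized k×k kernel for every upsampled location, then apply it to the k×k neighborhood of the corresponding source location. The sketch below is an unoptimized illustration of that formulation under assumed layer choices, not the CUDA operator referenced in the Implementation section further down.

```python
# Naive CARAFE-style upsampler (illustration only; unoptimized).
import torch
import torch.nn as nn
import torch.nn.functional as F

class NaiveCARAFE(nn.Module):
    def __init__(self, channels, scale=2, up_kernel=5, compressed=64):
        super().__init__()
        self.scale, self.k = scale, up_kernel
        self.compress = nn.Conv2d(channels, compressed, 1)
        # One k*k reassembly kernel for each of the scale*scale output positions.
        self.encoder = nn.Conv2d(compressed,
                                 scale * scale * up_kernel * up_kernel,
                                 3, padding=1)

    def forward(self, x):
        b, c, h, w = x.shape
        masks = self.encoder(self.compress(x))              # (B, s^2*k^2, H, W)
        masks = F.pixel_shuffle(masks, self.scale)          # (B, k^2, sH, sW)
        masks = F.softmax(masks, dim=1)                     # normalize each kernel
        # k x k neighborhood of every *source* location, shared by the s x s
        # output positions that map back to it.
        patches = F.unfold(x, self.k, padding=self.k // 2)  # (B, C*k^2, H*W)
        patches = patches.view(b, c * self.k * self.k, h, w)
        patches = F.interpolate(patches, scale_factor=self.scale, mode='nearest')
        patches = patches.view(b, c, self.k * self.k,
                               h * self.scale, w * self.scale)
        return (patches * masks.unsqueeze(1)).sum(dim=2)    # content-aware reassembly
```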
- -## Results and Models - -The results on COCO 2017 val are shown in the table below. - -| Method | Backbone | Style | Lr schd | Test Proposal Num | Inf time (fps) | Box AP | Mask AP | Config | Download | -| :--------------------: | :------: | :-----: | :-----: | :---------------: | :------------: | :----: | :-----: | :------: | :------: | -| Faster R-CNN w/ CARAFE | R-50-FPN | pytorch | 1x | 1000 | 16.5 | 38.6 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_20200504_175733.log.json) | -| - | - | - | - | 2000 | | | | | | -| Mask R-CNN w/ CARAFE | R-50-FPN | pytorch | 1x | 1000 | 14.0 | 39.3 | 35.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/carafe/mask_rcnn_r50_fpn_carafe_1x_coco/mask_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.393__segm_mAP-0.358_20200503_135957-8687f195.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/carafe/mask_rcnn_r50_fpn_carafe_1x_coco/mask_rcnn_r50_fpn_carafe_1x_coco_20200503_135957.log.json) | -| - | - | - | - | 2000 | | | | | | - -## Implementation - -The CUDA implementation of CARAFE can be found at https://github.com/myownskyW7/CARAFE. - -## Citation - -We provide config files to reproduce the object detection & instance segmentation results in the ICCV 2019 Oral paper for [CARAFE: Content-Aware ReAssembly of FEatures](https://arxiv.org/abs/1905.02188).
- -```latex -@inproceedings{Wang_2019_ICCV, - title = {CARAFE: Content-Aware ReAssembly of FEatures}, - author = {Wang, Jiaqi and Chen, Kai and Xu, Rui and Liu, Ziwei and Loy, Chen Change and Lin, Dahua}, - booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, - month = {October}, - year = {2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py b/cv/detection/co-detr/pytorch/configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py deleted file mode 100644 index dedac3f46b4710d16a8bc66f00663e379b2ebdc7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py +++ /dev/null @@ -1,50 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - neck=dict( - type='FPN_CARAFE', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5, - start_level=0, - end_level=-1, - norm_cfg=None, - act_cfg=None, - order=('conv', 'norm', 'act'), - upsample_cfg=dict( - type='carafe', - up_kernel=5, - up_group=1, - encoder_kernel=3, - encoder_dilation=1, - compressed_channels=64))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=64), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=64), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py b/cv/detection/co-detr/pytorch/configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py deleted file mode 100644 index 668c023981b9d421e5b51a48757c3819d090307f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py +++ /dev/null @@ -1,60 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - neck=dict( - type='FPN_CARAFE', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5, - start_level=0, - end_level=-1, - norm_cfg=None, - act_cfg=None, - order=('conv', 'norm', 'act'), - upsample_cfg=dict( - type='carafe', - up_kernel=5, - up_group=1, - encoder_kernel=3, - encoder_dilation=1, - compressed_channels=64)), - roi_head=dict( - mask_head=dict( - upsample_cfg=dict( - type='carafe', - scale_factor=2, - up_kernel=5, - up_group=1, - encoder_kernel=3, - encoder_dilation=1, - compressed_channels=64)))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', 
size_divisor=64), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=64), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/carafe/metafile.yml b/cv/detection/co-detr/pytorch/configs/carafe/metafile.yml deleted file mode 100644 index b58a3f69ee94b5aa8cee1f2a294e57d098fe2552..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/carafe/metafile.yml +++ /dev/null @@ -1,55 +0,0 @@ -Collections: - - Name: CARAFE - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RPN - - FPN_CARAFE - - ResNet - - RoIPool - Paper: - URL: https://arxiv.org/abs/1905.02188 - Title: 'CARAFE: Content-Aware ReAssembly of FEatures' - README: configs/carafe/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/necks/fpn_carafe.py#L11 - Version: v2.12.0 - -Models: - - Name: faster_rcnn_r50_fpn_carafe_1x_coco - In Collection: CARAFE - Config: configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py - Metadata: - Training Memory (GB): 4.26 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.6 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth - - - Name: mask_rcnn_r50_fpn_carafe_1x_coco - In Collection: CARAFE - Config: configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py - Metadata: - Training Memory (GB): 4.31 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 35.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/carafe/mask_rcnn_r50_fpn_carafe_1x_coco/mask_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.393__segm_mAP-0.358_20200503_135957-8687f195.pth diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/README.md b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/README.md deleted file mode 100644 index 5a9e817618e1b5611ed90f21fa9708e61618f068..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# Cascade R-CNN - -> [Cascade R-CNN: High Quality Object Detection and Instance Segmentation](https://arxiv.org/abs/1906.09756) - - - -## Abstract - -In object detection, the intersection over union (IoU) threshold is frequently used to define positives/negatives. The threshold used to train a detector defines its quality. While the commonly used threshold of 0.5 leads to noisy (low-quality) detections, detection performance frequently degrades for larger thresholds. This paradox of high-quality detection has two causes: 1) overfitting, due to vanishing positive samples for large thresholds, and 2) inference-time quality mismatch between detector and test hypotheses. 
A multi-stage object detection architecture, the Cascade R-CNN, composed of a sequence of detectors trained with increasing IoU thresholds, is proposed to address these problems. The detectors are trained sequentially, using the output of a detector as training set for the next. This resampling progressively improves hypotheses quality, guaranteeing a positive training set of equivalent size for all detectors and minimizing overfitting. The same cascade is applied at inference, to eliminate quality mismatches between hypotheses and detectors. An implementation of the Cascade R-CNN without bells or whistles achieves state-of-the-art performance on the COCO dataset, and significantly improves high-quality detection on generic and specific object detection datasets, including VOC, KITTI, CityPerson, and WiderFace. Finally, the Cascade R-CNN is generalized to instance segmentation, with nontrivial improvements over the Mask R-CNN. - -
- -
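The "sequence of detectors trained with increasing IoU thresholds" shows up directly in the MMDetection-style cascade configs referenced below: `train_cfg.rcnn` is a list with one assigner per stage. The snippet is a trimmed sketch of that pattern; the actual `_base_` cascade config also carries samplers, per-stage bbox heads, and further options.

```python
# Trimmed sketch: positive IoU threshold raised from 0.5 to 0.7 across stages.
train_cfg = dict(
    rcnn=[
        dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5,
                           neg_iou_thr=0.5, min_pos_iou=0.5)),  # stage 1
        dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.6,
                           neg_iou_thr=0.6, min_pos_iou=0.6)),  # stage 2
        dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.7,
                           neg_iou_thr=0.7, min_pos_iou=0.7)),  # stage 3
    ])
```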
- -## Results and Models - -### Cascade R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | caffe | 1x | 4.2 | | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.404_20200504_174853-b857be87.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_20200504_174853.log.json) | -| R-50-FPN | pytorch | 1x | 4.4 | 16.1 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316_214748.log.json) | -| R-50-FPN | pytorch | 20e | - | - | 41.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_bbox_mAP-0.41_20200504_175131-e9872a90.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_20200504_175131.log.json) | -| R-101-FPN | caffe | 1x | 6.2 | | 42.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.423_20200504_175649-cab8dbd5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_20200504_175649.log.json) | -| R-101-FPN | pytorch | 1x | 6.4 | 13.5 | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317-0b6a2fbf.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317_101744.log.json) | -| R-101-FPN | pytorch | 20e | - | - | 42.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_bbox_mAP-0.425_20200504_231812-5057dcc5.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_20200504_231812.log.json) | -| X-101-32x4d-FPN | pytorch | 1x | 7.6 | 10.9 | 43.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316-95c2deb6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316_055608.log.json) | -| X-101-32x4d-FPN | pytorch | 20e | 7.6 | | 43.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608-9ae0a720.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608.log.json) | -| X-101-64x4d-FPN | pytorch | 1x | 10.7 | | 44.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702-43ce6a30.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702.log.json) | -| X-101-64x4d-FPN | pytorch | 20e | 10.7 | | 44.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357.log.json) | - -### Cascade Mask R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | caffe | 1x | 5.9 | | 41.2 | 36.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.412__segm_mAP-0.36_20200504_174659-5004b251.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_20200504_174659.log.json) | -| 
R-50-FPN | pytorch | 1x | 6.0 | 11.2 | 41.2 | 35.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203_170449.log.json) | -| R-50-FPN | pytorch | 20e | - | - | 41.9 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_20200504_174711.log.json) | -| R-101-FPN | caffe | 1x | 7.8 | | 43.2 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.432__segm_mAP-0.376_20200504_174813-5c1e9599.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_20200504_174813.log.json) | -| R-101-FPN | pytorch | 1x | 7.9 | 9.8 | 42.9 | 37.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203-befdf6ee.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203_092521.log.json) | -| R-101-FPN | pytorch | 20e | - | - | 43.4 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_bbox_mAP-0.434__segm_mAP-0.378_20200504_174836-005947da.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_20200504_174836.log.json) | -| X-101-32x4d-FPN | pytorch | 1x | 9.2 | 8.6 | 44.3 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201-0f411b1f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201_052416.log.json) | -| X-101-32x4d-FPN | pytorch | 20e | 9.2 | - | 45.0 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917-ed1f4751.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917.log.json) | -| X-101-64x4d-FPN | pytorch | 1x | 12.2 | 6.7 | 45.3 | 39.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203-9a2db89d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203_044059.log.json) | -| X-101-64x4d-FPN | pytorch | 20e | 12.2 | | 45.6 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033-bdb5126a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033.log.json) | - -**Notes:** - -- The `20e` schedule in Cascade (Mask) R-CNN indicates decreasing the lr at 16 and 19 epochs, with a total of 20 epochs. - -## Pre-trained Models - -We also train some models with longer schedules and multi-scale training for Cascade Mask R-CNN. The users could finetune them for downstream tasks. 
- -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | caffe | 3x | 5.7 | | 44.0 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210707_002651-6e29b3a6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210707_002651.log.json) | -| R-50-FPN | pytorch | 3x | 5.9 | | 44.3 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco_20210628_164719-5bdc3824.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco_20210628_164719.log.json) | -| R-101-FPN | caffe | 3x | 7.7 | | 45.4 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210707_002620-a5bd2389.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210707_002620.log.json) | -| R-101-FPN | pytorch | 3x | 7.8 | | 45.5 | 39.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco_20210628_165236-51a2d363.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco_20210628_165236.log.json) | -| X-101-32x4d-FPN | pytorch | 3x | 9.0 | | 46.3 | 40.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210706_225234-40773067.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210706_225234.log.json) | -| X-101-32x8d-FPN | pytorch | 3x | 12.1 | | 46.1 | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210719_180640-9ff7e76f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210719_180640.log.json) | -| X-101-64x4d-FPN | pytorch | 3x | 12.0 | | 46.6 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210719_210311-d3e64ba0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210719_210311.log.json) | - -## Citation - -```latex -@article{Cai_2019, - title={Cascade R-CNN: High Quality Object Detection and Instance Segmentation}, - ISSN={1939-3539}, - url={http://dx.doi.org/10.1109/tpami.2019.2956516}, - DOI={10.1109/tpami.2019.2956516}, - journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, - publisher={Institute of Electrical and Electronics Engineers (IEEE)}, - author={Cai, Zhaowei and Vasconcelos, Nuno}, - year={2019}, - pages={1–1} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py deleted file mode 100644 index 5ee6231034a2fccc42b11b99830f748091551851..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py deleted file mode 100644 index 1df87fc6f30b027f459f0d246987b9de67c4b6bd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py deleted file mode 100644 index f59c155848d6a40ec31c4de880f7900d9067c6ab..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py deleted file mode 100644 index 45ab7edffd33063022e95c6e2b44e503e69eda2c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py deleted file mode 100644 index 1b20f167082d8927b59785dfd97d3652640c0e21..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 12d37efc90ba33f59bb653f00f0166a0c548a5d4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,41 +0,0 @@ -_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py'] - -model = dict( - backbone=dict( - norm_cfg=dict(requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe'))) -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py deleted file mode 100644 index 
9fb817e82cf330cfa6b962fa88ad8c4eafb4899b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,49 +0,0 @@ -_base_ = ['./cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'] -model = dict( - backbone=dict( - norm_cfg=dict(requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe'))) - -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], -# multiscale_mode='range' -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - train=dict(dataset=dict(pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index 49ab539aa4cdf7c396b6f109efe2dc7a6d596a2a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py deleted file mode 100644 index 1296dc45dd89da9c0801e1242080c67957cace74..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_20e.py', '../_base_/default_runtime.py' -] diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py deleted file mode 100644 index ed0c6d1a88d7c05105c741613d558f92f13b9a9a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../common/mstrain_3x_coco_instance.py', - '../_base_/models/cascade_mask_rcnn_r50_fpn.py' -] diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py 
b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index 06cbbe70dc84f25ba588e80d0061c634e63e94f9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py deleted file mode 100644 index 4e352362b17919bb2ebfffb5b442292880cfb27a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py deleted file mode 100644 index 7d37d17dcb37c5d25cd3bcd3d207e4edab6667a1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py deleted file mode 100644 index eeec1aa1aefabd12cd769eb039f26441b1bd584a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,60 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py' - -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=8, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - style='pytorch', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) - -# ResNeXt-101-32x8d model trained with Caffe2 at FB, -# so the mean and std need to be changed. 
-img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], - std=[57.375, 57.120, 58.395], - to_rgb=False) - -# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], -# multiscale_mode='range' -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - train=dict(dataset=dict(pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index 7dbef5fa2a3a3d962df78ffb1b0b4357b783fd67..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py deleted file mode 100644 index 579b1aca49383f9d3874f4797bc1dbb2a1311e7c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py deleted file mode 100644 index ed6cf4b53b709ecc81fc8a09d18e0f11e1ae8df5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - 
init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py deleted file mode 100644 index 1e90f4bb004798265af98489d6ed584a6a09d434..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './cascade_rcnn_r50_caffe_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py deleted file mode 100644 index 5c077760dd20dc5e00b3b2a1ca6de89347657231..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './cascade_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py deleted file mode 100644 index b1719c25d59bc6dbe1c0ef71f08160057c21d5bf..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './cascade_rcnn_r50_fpn_20e_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 696bcfb939e91c16898c2e039ec9a05d23105d1e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,42 +0,0 @@ -_base_ = './cascade_rcnn_r50_fpn_1x_coco.py' - -model = dict( - backbone=dict( - norm_cfg=dict(requires_grad=False), - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe'))) - -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff 
--git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index 87e21fbff82763caf0e14ba641493870a15578b1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py deleted file mode 100644 index 6f886e1c407ff9376929a7092f82e5508d2b1ac9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './cascade_rcnn_r50_fpn_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 19]) -runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index 5ac02c10d743d0ce4b9cc4bb5f1e29cbc6aff06a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './cascade_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py deleted file mode 100644 index 486e45ead418d83a80224f241bc2355b82877640..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './cascade_rcnn_r50_fpn_20e_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index 78229f0da3f5a1ac1dfc628821327efd5f34668d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,15 +0,0 @@ -_base_ = './cascade_rcnn_r50_fpn_1x_coco.py' -model = dict( - type='CascadeRCNN', - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git 
a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py deleted file mode 100644 index 58812dec5a85d86d85b79d7b53ba33bc6327a815..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py +++ /dev/null @@ -1,15 +0,0 @@ -_base_ = './cascade_rcnn_r50_fpn_20e_coco.py' -model = dict( - type='CascadeRCNN', - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/metafile.yml b/cv/detection/co-detr/pytorch/configs/cascade_rcnn/metafile.yml deleted file mode 100644 index 65863259cdd12d9cf46c2993b4578579d7dc884a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rcnn/metafile.yml +++ /dev/null @@ -1,545 +0,0 @@ -Collections: - - Name: Cascade R-CNN - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Cascade R-CNN - - FPN - - RPN - - ResNet - - RoIAlign - Paper: - URL: http://dx.doi.org/10.1109/tpami.2019.2956516 - Title: 'Cascade R-CNN: Delving into High Quality Object Detection' - README: configs/cascade_rcnn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/cascade_rcnn.py#L6 - Version: v2.0.0 - - Name: Cascade Mask R-CNN - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Cascade R-CNN - - FPN - - RPN - - ResNet - - RoIAlign - Paper: - URL: http://dx.doi.org/10.1109/tpami.2019.2956516 - Title: 'Cascade R-CNN: Delving into High Quality Object Detection' - README: configs/cascade_rcnn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/cascade_rcnn.py#L6 - Version: v2.0.0 - -Models: - - Name: cascade_rcnn_r50_caffe_fpn_1x_coco - In Collection: Cascade R-CNN - Config: configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 4.2 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.404_20200504_174853-b857be87.pth - - - Name: cascade_rcnn_r50_fpn_1x_coco - In Collection: Cascade R-CNN - Config: configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 4.4 - inference time (ms/im): - - value: 62.11 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth - - - Name: cascade_rcnn_r50_fpn_20e_coco - In Collection: Cascade R-CNN - Config: configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py - Metadata: - Training Memory (GB): 4.4 - inference time (ms/im): - - value: 62.11 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - 
resolution: (800, 1333) - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_bbox_mAP-0.41_20200504_175131-e9872a90.pth - - - Name: cascade_rcnn_r101_caffe_fpn_1x_coco - In Collection: Cascade R-CNN - Config: configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.2 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.423_20200504_175649-cab8dbd5.pth - - - Name: cascade_rcnn_r101_fpn_1x_coco - In Collection: Cascade R-CNN - Config: configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.4 - inference time (ms/im): - - value: 74.07 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317-0b6a2fbf.pth - - - Name: cascade_rcnn_r101_fpn_20e_coco - In Collection: Cascade R-CNN - Config: configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py - Metadata: - Training Memory (GB): 6.4 - inference time (ms/im): - - value: 74.07 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_bbox_mAP-0.425_20200504_231812-5057dcc5.pth - - - Name: cascade_rcnn_x101_32x4d_fpn_1x_coco - In Collection: Cascade R-CNN - Config: configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.6 - inference time (ms/im): - - value: 91.74 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316-95c2deb6.pth - - - Name: cascade_rcnn_x101_32x4d_fpn_20e_coco - In Collection: Cascade R-CNN - Config: configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py - Metadata: - Training Memory (GB): 7.6 - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608-9ae0a720.pth - - - Name: cascade_rcnn_x101_64x4d_fpn_1x_coco - In Collection: Cascade R-CNN - Config: configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 10.7 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702-43ce6a30.pth - - - Name: cascade_rcnn_x101_64x4d_fpn_20e_coco - In Collection: Cascade R-CNN - Config: configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py - 
Metadata: - Training Memory (GB): 10.7 - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth - - - Name: cascade_mask_rcnn_r50_caffe_fpn_1x_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 5.9 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.412__segm_mAP-0.36_20200504_174659-5004b251.pth - - - Name: cascade_mask_rcnn_r50_fpn_1x_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.0 - inference time (ms/im): - - value: 89.29 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 35.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth - - - Name: cascade_mask_rcnn_r50_fpn_20e_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py - Metadata: - Training Memory (GB): 6.0 - inference time (ms/im): - - value: 89.29 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.9 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth - - - Name: cascade_mask_rcnn_r101_caffe_fpn_1x_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.8 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.432__segm_mAP-0.376_20200504_174813-5c1e9599.pth - - - Name: cascade_mask_rcnn_r101_fpn_1x_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.9 - inference time (ms/im): - - value: 102.04 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.9 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203-befdf6ee.pth - - - Name: cascade_mask_rcnn_r101_fpn_20e_coco - In Collection: 
Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py - Metadata: - Training Memory (GB): 7.9 - inference time (ms/im): - - value: 102.04 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_bbox_mAP-0.434__segm_mAP-0.378_20200504_174836-005947da.pth - - - Name: cascade_mask_rcnn_x101_32x4d_fpn_1x_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 9.2 - inference time (ms/im): - - value: 116.28 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201-0f411b1f.pth - - - Name: cascade_mask_rcnn_x101_32x4d_fpn_20e_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py - Metadata: - Training Memory (GB): 9.2 - inference time (ms/im): - - value: 116.28 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917-ed1f4751.pth - - - Name: cascade_mask_rcnn_x101_64x4d_fpn_1x_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 12.2 - inference time (ms/im): - - value: 149.25 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203-9a2db89d.pth - - - Name: cascade_mask_rcnn_x101_64x4d_fpn_20e_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py - Metadata: - Training Memory (GB): 12.2 - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.6 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033-bdb5126a.pth - - - Name: cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 5.7 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - 
box AP: 44.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210707_002651-6e29b3a6.pth - - - Name: cascade_mask_rcnn_r50_fpn_mstrain_3x_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 5.9 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco_20210628_164719-5bdc3824.pth - - - Name: cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 7.7 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210707_002620-a5bd2389.pth - - - Name: cascade_mask_rcnn_r101_fpn_mstrain_3x_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 7.8 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco_20210628_165236-51a2d363.pth - - - Name: cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 9.0 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 40.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210706_225234-40773067.pth - - - Name: cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 12.1 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210719_180640-9ff7e76f.pth - - - Name: cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco - In Collection: Cascade Mask R-CNN - Config: configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 12.0 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.6 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 
40.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210719_210311-d3e64ba0.pth diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rpn/README.md b/cv/detection/co-detr/pytorch/configs/cascade_rpn/README.md deleted file mode 100644 index fb2b482b021162c9e47615e3a46533aceeeb4e0d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rpn/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# Cascade RPN - -> [Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution](https://arxiv.org/abs/1909.06720) - - - -## Abstract - -This paper considers an architecture referred to as Cascade Region Proposal Network (Cascade RPN) for improving the region-proposal quality and detection performance by systematically addressing the limitation of the conventional RPN that heuristically defines the anchors and aligns the features to the anchors. First, instead of using multiple anchors with predefined scales and aspect ratios, Cascade RPN relies on a single anchor per location and performs multi-stage refinement. Each stage is progressively more stringent in defining positive samples by starting out with an anchor-free metric followed by anchor-based metrics in the ensuing stages. Second, to attain alignment between the features and the anchors throughout the stages, adaptive convolution is proposed that takes the anchors in addition to the image features as its input and learns the sampled features guided by the anchors. A simple implementation of a two-stage Cascade RPN achieves AR 13.4 points higher than that of the conventional RPN, surpassing any existing region proposal methods. When adopting to Fast R-CNN and Faster R-CNN, Cascade RPN can improve the detection mAP by 3.1 and 3.5 points, respectively. - -
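The deleted `crpn_*` configs further down in this diff implement the two-stage refinement described above as a `CascadeRPNHead` holding two `StageCascadeRPNHead` stages. Below is a condensed, commented sketch of that head, with field values copied from the deleted `crpn_r50_caffe_fpn_1x_coco.py`; it is an orientation aid, not an additional config file.

```python
# Condensed from the deleted crpn_r50_caffe_fpn_1x_coco.py below; comments added.
rpn_head = dict(
    type='CascadeRPNHead',
    num_stages=2,
    stages=[
        # Stage 1: a single anchor per location (one scale, one aspect ratio),
        # positives defined with an anchor-free metric (with_cls=False, and the
        # deleted train_cfg uses a RegionAssigner), features adapted by a
        # dilated ("adaptive") convolution.
        dict(
            type='StageCascadeRPNHead',
            in_channels=256,
            feat_channels=256,
            anchor_generator=dict(
                type='AnchorGenerator',
                scales=[8],
                ratios=[1.0],
                strides=[4, 8, 16, 32, 64]),
            adapt_cfg=dict(type='dilation', dilation=3),
            bridged_feature=True,
            sampling=False,
            with_cls=False,
            reg_decoded_bbox=True,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=(0., 0., 0., 0.),
                target_stds=(0.1, 0.1, 0.5, 0.5)),
            loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0)),
        # Stage 2: anchor-based refinement on features re-aligned to the
        # stage-1 boxes via offset-based adaptive convolution; this stage also
        # predicts the objectness scores.
        dict(
            type='StageCascadeRPNHead',
            in_channels=256,
            feat_channels=256,
            adapt_cfg=dict(type='offset'),
            bridged_feature=False,
            sampling=True,
            with_cls=True,
            reg_decoded_bbox=True,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=(0., 0., 0., 0.),
                target_stds=(0.05, 0.05, 0.1, 0.1)),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
            loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0)),
    ])
```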
- -
- -## Results and Models - -### Region proposal performance - -| Method | Backbone | Style | Mem (GB) | Train time (s/iter) | Inf time (fps) | AR 1000 | Config | Download | -| :----: | :------: | :---: | :------: | :-----------------: | :------------: | :-----: | :---------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------: | -| CRPN | R-50-FPN | caffe | - | - | - | 72.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_r50_caffe_fpn_1x_coco/cascade_rpn_r50_caffe_fpn_1x_coco-7aa93cef.pth) | - -### Detection performance - -| Method | Proposal | Backbone | Style | Schedule | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Config | Download | -| :----------: | :---------: | :------: | :---: | :------: | :------: | :-----------------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Fast R-CNN | Cascade RPN | R-50-FPN | caffe | 1x | - | - | - | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco/crpn_fast_rcnn_r50_caffe_fpn_1x_coco-cb486e66.pth) | -| Faster R-CNN | Cascade RPN | R-50-FPN | caffe | 1x | - | - | - | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth) | - -## Citation - -We provide the code for reproducing experiment results of [Cascade RPN](https://arxiv.org/abs/1909.06720). 
- -```latex -@inproceedings{vu2019cascade, - title={Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution}, - author={Vu, Thang and Jang, Hyunjun and Pham, Trung X and Yoo, Chang D}, - booktitle={Conference on Neural Information Processing Systems (NeurIPS)}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 29f5d0745b5689178bcbadc3c30b91ecc8cd5140..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,77 +0,0 @@ -_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - roi_head=dict( - bbox_head=dict( - bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rcnn=dict( - assigner=dict( - pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65), - sampler=dict(num=256))), - test_cfg=dict(rcnn=dict(score_thr=1e-3))) -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadProposals', num_max_proposals=300), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadProposals', num_max_proposals=300), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='ToTensor', keys=['proposals']), - dict( - type='ToDataContainer', - fields=[dict(key='proposals', stack=False)]), - dict(type='Collect', keys=['img', 'proposals']), - ]) -] -data = dict( - train=dict( - proposal_file=data_root + - 'proposals/crpn_r50_caffe_fpn_1x_train2017.pkl', - pipeline=train_pipeline), - val=dict( - proposal_file=data_root + - 'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl', - pipeline=test_pipeline), - test=dict( - proposal_file=data_root + - 'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl', - pipeline=test_pipeline)) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index bad86e6ddf084b5b7e145463c88a8d2d887d6a53..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,92 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py' -rpn_weight = 0.7 -model = dict( - rpn_head=dict( - _delete_=True, - type='CascadeRPNHead', - num_stages=2, - stages=[ - dict( - type='StageCascadeRPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[1.0], - strides=[4, 8, 16, 32, 64]), - adapt_cfg=dict(type='dilation', dilation=3), - bridged_feature=True, - sampling=False, - with_cls=False, - reg_decoded_bbox=True, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=(.0, .0, .0, .0), - target_stds=(0.1, 0.1, 0.5, 0.5)), - loss_bbox=dict( - type='IoULoss', linear=True, - loss_weight=10.0 * rpn_weight)), - dict( - type='StageCascadeRPNHead', - in_channels=256, - feat_channels=256, - adapt_cfg=dict(type='offset'), - bridged_feature=False, - sampling=True, - with_cls=True, - reg_decoded_bbox=True, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=(.0, .0, .0, .0), - target_stds=(0.05, 0.05, 0.1, 0.1)), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0 * rpn_weight), - loss_bbox=dict( - type='IoULoss', linear=True, - loss_weight=10.0 * rpn_weight)) - ]), - roi_head=dict( - bbox_head=dict( - bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=[ - dict( - assigner=dict( - type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5), - allowed_border=-1, - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.7, - min_pos_iou=0.3, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False) - ], - rpn_proposal=dict(max_per_img=300, nms=dict(iou_threshold=0.8)), - rcnn=dict( - assigner=dict( - pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65), - sampler=dict(type='RandomSampler', num=256))), - test_cfg=dict( - rpn=dict(max_per_img=300, nms=dict(iou_threshold=0.8)), - rcnn=dict(score_thr=1e-3))) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 5562e696a8d16514fc2139874799ab2ef1df74a1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,77 +0,0 @@ -_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py' -model = dict( - rpn_head=dict( - _delete_=True, - type='CascadeRPNHead', - num_stages=2, - stages=[ - dict( - type='StageCascadeRPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[1.0], - strides=[4, 8, 16, 32, 64]), - adapt_cfg=dict(type='dilation', dilation=3), - bridged_feature=True, - sampling=False, - with_cls=False, - reg_decoded_bbox=True, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=(.0, .0, .0, .0), - target_stds=(0.1, 0.1, 0.5, 0.5)), - loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0)), - dict( - 
type='StageCascadeRPNHead', - in_channels=256, - feat_channels=256, - adapt_cfg=dict(type='offset'), - bridged_feature=False, - sampling=True, - with_cls=True, - reg_decoded_bbox=True, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=(.0, .0, .0, .0), - target_stds=(0.05, 0.05, 0.1, 0.1)), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, - loss_weight=1.0), - loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0)) - ]), - train_cfg=dict(rpn=[ - dict( - assigner=dict( - type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5), - allowed_border=-1, - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.7, - min_pos_iou=0.3, - ignore_iof_thr=-1, - iou_calculator=dict(type='BboxOverlaps2D')), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False) - ]), - test_cfg=dict( - rpn=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.8), - min_bbox_size=0))) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/cascade_rpn/metafile.yml b/cv/detection/co-detr/pytorch/configs/cascade_rpn/metafile.yml deleted file mode 100644 index 335b2bc7ef40873bfc30f206c6ca0bcbae5ca175..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cascade_rpn/metafile.yml +++ /dev/null @@ -1,44 +0,0 @@ -Collections: - - Name: Cascade RPN - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Cascade RPN - - FPN - - ResNet - Paper: - URL: https://arxiv.org/abs/1909.06720 - Title: 'Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution' - README: configs/cascade_rpn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.8.0/mmdet/models/dense_heads/cascade_rpn_head.py#L538 - Version: v2.8.0 - -Models: - - Name: crpn_fast_rcnn_r50_caffe_fpn_1x_coco - In Collection: Cascade RPN - Config: configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco/crpn_fast_rcnn_r50_caffe_fpn_1x_coco-cb486e66.pth - - - Name: crpn_faster_rcnn_r50_caffe_fpn_1x_coco - In Collection: Cascade RPN - Config: configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco/crpn_faster_rcnn_r50_caffe_fpn_1x_coco-c8283cca.pth diff --git a/cv/detection/co-detr/pytorch/configs/centernet/README.md b/cv/detection/co-detr/pytorch/configs/centernet/README.md deleted file mode 100644 index 0f951a0280addd3fab88aab32995ca669ac96cad..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/centernet/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# CenterNet - -> [Objects as Points](https://arxiv.org/abs/1904.07850) - - - -## Abstract - -Detection identifies objects as axis-aligned boxes in an image. Most successful object detectors enumerate a nearly exhaustive list of potential object locations and classify each. 
This is wasteful, inefficient, and requires additional post-processing. In this paper, we take a different approach. We model an object as a single point --- the center point of its bounding box. Our detector uses keypoint estimation to find center points and regresses to all other object properties, such as size, 3D location, orientation, and even pose. Our center point based approach, CenterNet, is end-to-end differentiable, simpler, faster, and more accurate than corresponding bounding box based detectors. CenterNet achieves the best speed-accuracy trade-off on the MS COCO dataset, with 28.1% AP at 142 FPS, 37.4% AP at 52 FPS, and 45.1% AP with multi-scale testing at 1.4 FPS. We use the same approach to estimate 3D bounding box in the KITTI benchmark and human pose on the COCO keypoint dataset. Our method performs competitively with sophisticated multi-stage methods and runs in real-time. - -
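The deleted CenterNet configs below decode this center-point representation with `test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100)`. The following is a minimal sketch of that style of decoding, assuming sigmoid heatmaps and feature-map-scale `wh`/`offset` maps; it illustrates the idea only and is not MMDetection's `CenterNetHead` implementation.

```python
import torch
import torch.nn.functional as F

def decode_center_heatmap(heatmap, wh, offset, k=100, kernel=3):
    """Minimal sketch of CenterNet-style decoding (illustrative only).

    heatmap: (B, C, H, W) class-wise center heatmap after sigmoid
    wh:      (B, 2, H, W) predicted box width/height per location
    offset:  (B, 2, H, W) sub-pixel offset of the center
    Returns boxes (B, k, 4) in feature-map coordinates, scores (B, k), labels (B, k).
    """
    B, C, H, W = heatmap.shape
    # Keep only local maxima: a kernel-sized max-pool plays the role of
    # `local_maximum_kernel` (3 in the deleted config).
    pad = (kernel - 1) // 2
    peaks = F.max_pool2d(heatmap, kernel, stride=1, padding=pad)
    heatmap = heatmap * (peaks == heatmap).float()

    # Top-k peaks over all classes and positions (topk=100 in the config).
    scores, inds = heatmap.view(B, -1).topk(k)
    labels = inds // (H * W)
    pix = inds % (H * W)
    ys, xs = (pix // W).float(), (pix % W).float()

    # Read wh / offset at the peak locations and assemble boxes.
    wh = wh.view(B, 2, -1).gather(2, pix.unsqueeze(1).expand(-1, 2, -1))
    off = offset.view(B, 2, -1).gather(2, pix.unsqueeze(1).expand(-1, 2, -1))
    xs = xs + off[:, 0]
    ys = ys + off[:, 1]
    w, h = wh[:, 0], wh[:, 1]

    boxes = torch.stack([xs - w / 2, ys - h / 2, xs + w / 2, ys + h / 2], dim=2)
    return boxes, scores, labels
```

In a full pipeline the resulting boxes would still have to be mapped from feature-map coordinates back to image coordinates using the downsampling stride and any padding or border applied during preprocessing.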
- -
- -## Results and Models - -| Backbone | DCN | Mem (GB) | Box AP | Flip box AP | Config | Download | -| :-------: | :-: | :------: | :----: | :---------: | :---------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| ResNet-18 | N | 3.45 | 25.9 | 27.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/centernet/centernet_resnet18_140e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_140e_coco/centernet_resnet18_140e_coco_20210705_093630-bb5b3bf7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_140e_coco/centernet_resnet18_140e_coco_20210705_093630.log.json) | -| ResNet-18 | Y | 3.47 | 29.5 | 30.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/centernet/centernet_resnet18_dcnv2_140e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131.log.json) | - -Note: - -- Flip box AP setting is single-scale and `flip=True`. -- Due to complex data enhancement, we find that the performance is unstable and may fluctuate by about 0.4 mAP. mAP 29.4 ~ 29.8 is acceptable in ResNet-18-DCNv2. -- Compared to the source code, we refer to [CenterNet-Better](https://github.com/FateScript/CenterNet-better), and make the following changes - - fix wrong image mean and variance in image normalization to be compatible with the pre-trained backbone. - - Use SGD rather than ADAM optimizer and add warmup and grad clip. - - Use DistributedDataParallel as other models in MMDetection rather than using DataParallel. 
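The warmup and gradient clipping mentioned in the last note appear further down in the deleted `centernet_resnet18_dcnv2_140e_coco.py` (`warmup='linear'`, `warmup_iters=1000`, `warmup_ratio=1.0 / 1000`, `step=[18, 24]`, `grad_clip=dict(max_norm=35, norm_type=2)`). A toy sketch of how such a schedule evaluates is given below; the base LR of 0.02 and decay factor 0.1 are assumed example values inherited from the base schedule and are not stated in this diff.

```python
def lr_at(iteration, epoch, base_lr=0.02, warmup_iters=1000,
          warmup_ratio=1.0 / 1000, steps=(18, 24), gamma=0.1):
    """Toy evaluation of a 'step' LR policy with linear warmup.

    warmup_iters / warmup_ratio / steps mirror the deleted CenterNet config;
    base_lr=0.02 and gamma=0.1 are assumed example values.
    """
    if iteration < warmup_iters:
        # Linear warmup: ramp from base_lr * warmup_ratio up to base_lr.
        k = (1 - iteration / warmup_iters) * (1 - warmup_ratio)
        return base_lr * (1 - k)
    # Step decay: multiply by gamma once per milestone epoch already passed.
    decays = sum(epoch >= s for s in steps)
    return base_lr * gamma ** decays

print(lr_at(0, 0))       # 2e-05:  base_lr * warmup_ratio at the first iteration
print(lr_at(1000, 10))   # 0.02:   warmup finished, before any milestone
print(lr_at(50000, 25))  # 0.0002: after the epoch-18 and epoch-24 milestones
```

Because the training set is wrapped in `RepeatDataset(times=5)`, the milestones at epochs 18 and 24 correspond to 90 and 120 passes over the raw data, which is what the `# the real step is [18*5, 24*5]` comment in that config refers to.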
- -## Citation - -```latex -@article{zhou2019objects, - title={Objects as Points}, - author={Zhou, Xingyi and Wang, Dequan and Kr{\"a}henb{\"u}hl, Philipp}, - booktitle={arXiv preprint arXiv:1904.07850}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/centernet/centernet_resnet18_140e_coco.py b/cv/detection/co-detr/pytorch/configs/centernet/centernet_resnet18_140e_coco.py deleted file mode 100644 index 52c86a5eca27086dbc5ee2449aca749c550e852f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/centernet/centernet_resnet18_140e_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './centernet_resnet18_dcnv2_140e_coco.py' - -model = dict(neck=dict(use_dcn=False)) diff --git a/cv/detection/co-detr/pytorch/configs/centernet/centernet_resnet18_dcnv2_140e_coco.py b/cv/detection/co-detr/pytorch/configs/centernet/centernet_resnet18_dcnv2_140e_coco.py deleted file mode 100644 index b8a0bb10ddcf598b3031926d1198d2a22f92dea7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/centernet/centernet_resnet18_dcnv2_140e_coco.py +++ /dev/null @@ -1,127 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - type='CenterNet', - backbone=dict( - type='ResNet', - depth=18, - norm_eval=False, - norm_cfg=dict(type='BN'), - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), - neck=dict( - type='CTResNetNeck', - in_channel=512, - num_deconv_filters=(256, 128, 64), - num_deconv_kernels=(4, 4, 4), - use_dcn=True), - bbox_head=dict( - type='CenterNetHead', - num_classes=80, - in_channel=64, - feat_channel=64, - loss_center_heatmap=dict(type='GaussianFocalLoss', loss_weight=1.0), - loss_wh=dict(type='L1Loss', loss_weight=0.1), - loss_offset=dict(type='L1Loss', loss_weight=1.0)), - train_cfg=None, - test_cfg=dict(topk=100, local_maximum_kernel=3, max_per_img=100)) - -# We fixed the incorrect img_norm_cfg problem in the source code. 
-img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True, color_type='color'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict( - type='RandomCenterCropPad', - crop_size=(512, 512), - ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), - mean=[0, 0, 0], - std=[1, 1, 1], - to_rgb=True, - test_pad_mode=None), - dict(type='Resize', img_scale=(512, 512), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict( - type='MultiScaleFlipAug', - scale_factor=1.0, - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict( - type='RandomCenterCropPad', - ratios=None, - border=None, - mean=[0, 0, 0], - std=[1, 1, 1], - to_rgb=True, - test_mode=True, - test_pad_mode=['logical_or', 31], - test_pad_add_pix=1), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - meta_keys=('filename', 'ori_filename', 'ori_shape', - 'img_shape', 'pad_shape', 'scale_factor', 'flip', - 'flip_direction', 'img_norm_cfg', 'border'), - keys=['img']) - ]) -] - -dataset_type = 'CocoDataset' -data_root = 'data/coco/' - -# Use RepeatDataset to speed up training -data = dict( - samples_per_gpu=16, - workers_per_gpu=4, - train=dict( - _delete_=True, - type='RepeatDataset', - times=5, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -# Based on the default settings of modern detectors, the SGD effect is better -# than the Adam in the source code, so we use SGD default settings and -# if you use adam+lr5e-4, the map is 29.1. -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) - -# learning policy -# Based on the default settings of modern detectors, we added warmup settings. -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=1.0 / 1000, - step=[18, 24]) # the real step is [18*5, 24*5] -runner = dict(max_epochs=28) # the real epoch is 28*5=140 - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (8 GPUs) x (16 samples per GPU) -auto_scale_lr = dict(base_batch_size=128) diff --git a/cv/detection/co-detr/pytorch/configs/centernet/metafile.yml b/cv/detection/co-detr/pytorch/configs/centernet/metafile.yml deleted file mode 100644 index e86e57b54e51bff24f0f582f84711fad91e75a62..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/centernet/metafile.yml +++ /dev/null @@ -1,46 +0,0 @@ -Collections: - - Name: CenterNet - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x TITANXP GPUs - Architecture: - - ResNet - Paper: - URL: https://arxiv.org/abs/1904.07850 - Title: 'Objects as Points' - README: configs/centernet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.13.0/mmdet/models/detectors/centernet.py#L10 - Version: v2.13.0 - -Models: - - Name: centernet_resnet18_dcnv2_140e_coco - In Collection: CenterNet - Config: configs/centernet/centernet_resnet18_dcnv2_140e_coco.py - Metadata: - Batch Size: 128 - Training Memory (GB): 3.47 - Epochs: 140 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 29.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_dcnv2_140e_coco/centernet_resnet18_dcnv2_140e_coco_20210702_155131-c8cd631f.pth - - - Name: centernet_resnet18_140e_coco - In Collection: CenterNet - Config: configs/centernet/centernet_resnet18_140e_coco.py - Metadata: - Batch Size: 128 - Training Memory (GB): 3.45 - Epochs: 140 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 25.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/centernet/centernet_resnet18_140e_coco/centernet_resnet18_140e_coco_20210705_093630-bb5b3bf7.pth diff --git a/cv/detection/co-detr/pytorch/configs/centripetalnet/README.md b/cv/detection/co-detr/pytorch/configs/centripetalnet/README.md deleted file mode 100644 index b01b00a88ddd6051e9b0d7073775e2bf55f96c2e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/centripetalnet/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# CentripetalNet - -> [CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection](https://arxiv.org/abs/2003.09119) - - - -## Abstract - -Keypoint-based detectors have achieved pretty-well performance. However, incorrect keypoint matching is still widespread and greatly affects the performance of the detector. In this paper, we propose CentripetalNet which uses centripetal shift to pair corner keypoints from the same instance. CentripetalNet predicts the position and the centripetal shift of the corner points and matches corners whose shifted results are aligned. Combining position information, our approach matches corner points more accurately than the conventional embedding approaches do. Corner pooling extracts information inside the bounding boxes onto the border. To make this information more aware at the corners, we design a cross-star deformable convolution network to conduct feature adaption. Furthermore, we explore instance segmentation on anchor-free detectors by equipping our CentripetalNet with a mask prediction module. On MS-COCO test-dev, our CentripetalNet not only outperforms all existing anchor-free detectors with an AP of 48.0% but also achieves comparable performance to the state-of-the-art instance segmentation approaches with a 40.2% MaskAP. - -
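The deleted `centripetalnet_hourglass104_mstest_16x6_210e_coco.py` below keeps corner pairs using `distance_threshold=0.5` in its `test_cfg`. The sketch that follows is a simplified illustration of centripetal-shift matching, pairing a top-left and a bottom-right corner when their shift-predicted centers agree relative to the box diagonal; the exact criterion used by `CentripetalHead` may differ.

```python
import torch

def match_corners(tl_xy, tl_shift, br_xy, br_shift, distance_threshold=0.5):
    """Simplified centripetal-shift corner pairing (illustrative only).

    tl_xy, br_xy:       (N, 2) / (M, 2) float corner coordinates
    tl_shift, br_shift: centripetal shifts pointing toward the box center
    Returns an (N, M) boolean matrix of accepted (top-left, bottom-right) pairs.
    """
    tl_ctr = tl_xy + tl_shift   # center predicted by each top-left corner
    br_ctr = br_xy + br_shift   # center predicted by each bottom-right corner

    # Pairwise geometry: every top-left corner against every bottom-right corner.
    diag = (br_xy[None, :, :] - tl_xy[:, None, :]).norm(dim=-1)        # (N, M)
    ctr_dist = (br_ctr[None, :, :] - tl_ctr[:, None, :]).norm(dim=-1)  # (N, M)

    # A valid box needs the bottom-right corner below and to the right of the
    # top-left one, and the two shifted centers must (nearly) coincide.
    valid_box = (br_xy[None, :, 0] > tl_xy[:, None, 0]) & \
                (br_xy[None, :, 1] > tl_xy[:, None, 1])
    aligned = ctr_dist < distance_threshold * diag
    return valid_box & aligned

# Tiny example: both corners point at the same center (30, 35), so they pair up.
tl = torch.tensor([[10., 10.]]); br = torch.tensor([[50., 60.]])
tl_s = torch.tensor([[20., 25.]]); br_s = torch.tensor([[-20., -25.]])
print(match_corners(tl, tl_s, br, br_s))  # tensor([[True]])
```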
- -## Results and Models - -| Backbone | Batch Size | Step/Total Epochs | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :--------------: | :--------------------------------------------------------------: | :---------------: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| HourglassNet-104 | [16 x 6](./centripetalnet_hourglass104_mstest_16x6_210e_coco.py) | 190/210 | 16.7 | 3.7 | 44.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804.log.json) | - -Note: - -- TTA setting is single-scale and `flip=True`. -- The model we released is the best checkpoint rather than the latest checkpoint (box AP 44.8 vs 44.6 in our experiment). - -## Citation - -```latex -@InProceedings{Dong_2020_CVPR, -author = {Dong, Zhiwei and Li, Guoxuan and Liao, Yue and Wang, Fei and Ren, Pengju and Qian, Chen}, -title = {CentripetalNet: Pursuing High-Quality Keypoint Pairs for Object Detection}, -booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, -month = {June}, -year = {2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py b/cv/detection/co-detr/pytorch/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py deleted file mode 100644 index 5281c5bf885d298df15240c9b6f43782d9809fd5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py +++ /dev/null @@ -1,110 +0,0 @@ -_base_ = [ - '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' -] - -# model settings -model = dict( - type='CornerNet', - backbone=dict( - type='HourglassNet', - downsample_times=5, - num_stacks=2, - stage_channels=[256, 256, 384, 384, 384, 512], - stage_blocks=[2, 2, 2, 2, 2, 4], - norm_cfg=dict(type='BN', requires_grad=True)), - neck=None, - bbox_head=dict( - type='CentripetalHead', - num_classes=80, - in_channels=256, - num_feat_levels=2, - corner_emb_channels=0, - loss_heatmap=dict( - type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), - loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1), - loss_guiding_shift=dict( - type='SmoothL1Loss', beta=1.0, loss_weight=0.05), - loss_centripetal_shift=dict( - type='SmoothL1Loss', beta=1.0, loss_weight=1)), - # training and testing settings - train_cfg=None, - test_cfg=dict( - corner_topk=100, - local_maximum_kernel=3, - distance_threshold=0.5, - score_thr=0.05, - max_per_img=100, - nms=dict(type='soft_nms', 
iou_threshold=0.5, method='gaussian'))) -# data settings -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict( - type='RandomCenterCropPad', - crop_size=(511, 511), - ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), - test_mode=False, - test_pad_mode=None, - **img_norm_cfg), - dict(type='Resize', img_scale=(511, 511), keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict( - type='MultiScaleFlipAug', - scale_factor=1.0, - flip=True, - transforms=[ - dict(type='Resize'), - dict( - type='RandomCenterCropPad', - crop_size=None, - ratios=None, - border=None, - test_mode=True, - test_pad_mode=['logical_or', 127], - **img_norm_cfg), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict( - type='Collect', - keys=['img'], - meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', - 'scale_factor', 'flip', 'img_norm_cfg', 'border')), - ]) -] -data = dict( - samples_per_gpu=6, - workers_per_gpu=3, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='Adam', lr=0.0005) -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=1.0 / 3, - step=[190]) -runner = dict(type='EpochBasedRunner', max_epochs=210) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (16 GPUs) x (6 samples per GPU) -auto_scale_lr = dict(base_batch_size=96) diff --git a/cv/detection/co-detr/pytorch/configs/centripetalnet/metafile.yml b/cv/detection/co-detr/pytorch/configs/centripetalnet/metafile.yml deleted file mode 100644 index 61aed3e58571e78fe2d604dd6a4abc69f19a3988..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/centripetalnet/metafile.yml +++ /dev/null @@ -1,39 +0,0 @@ -Collections: - - Name: CentripetalNet - Metadata: - Training Data: COCO - Training Techniques: - - Adam - Training Resources: 16x V100 GPUs - Architecture: - - Corner Pooling - - Stacked Hourglass Network - Paper: - URL: https://arxiv.org/abs/2003.09119 - Title: 'CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection' - README: configs/centripetalnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.5.0/mmdet/models/detectors/cornernet.py#L9 - Version: v2.5.0 - -Models: - - Name: centripetalnet_hourglass104_mstest_16x6_210e_coco - In Collection: CentripetalNet - Config: configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py - Metadata: - Batch Size: 96 - Training Memory (GB): 16.7 - inference time (ms/im): - - value: 270.27 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 210 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth diff --git a/cv/detection/co-detr/pytorch/configs/cityscapes/README.md b/cv/detection/co-detr/pytorch/configs/cityscapes/README.md deleted file mode 100644 index c52a79f468e33ab8e4e8f6f1e29165eac8311186..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cityscapes/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# Cityscapes - -> [The Cityscapes Dataset for Semantic Urban Scene Understanding](https://arxiv.org/abs/1604.01685) - - - -## Abstract - -Visual understanding of complex urban street scenes is an enabling factor for a wide range of applications. Object detection has benefited enormously from large-scale datasets, especially in the context of deep learning. For semantic urban scene understanding, however, no current dataset adequately captures the complexity of real-world urban scenes. -To address this, we introduce Cityscapes, a benchmark suite and large-scale dataset to train and test approaches for pixel-level and instance-level semantic labeling. Cityscapes is comprised of a large, diverse set of stereo video sequences recorded in streets from 50 different cities. 5000 of these images have high quality pixel-level annotations; 20000 additional images have coarse annotations to enable methods that leverage large volumes of weakly-labeled data. Crucially, our effort exceeds previous attempts in terms of dataset size, annotation richness, scene variability, and complexity. Our accompanying empirical study provides an in-depth analysis of the dataset characteristics, as well as a performance evaluation of several state-of-the-art approaches based on our benchmark. - -
- -## Common settings - -- All baselines were trained using 8 GPU with a batch size of 8 (1 images per GPU) using the [linear scaling rule](https://arxiv.org/abs/1706.02677) to scale the learning rate. -- All models were trained on `cityscapes_train`, and tested on `cityscapes_val`. -- 1x training schedule indicates 64 epochs which corresponds to slightly less than the 24k iterations reported in the original schedule from the [Mask R-CNN paper](https://arxiv.org/abs/1703.06870) -- COCO pre-trained weights are used to initialize. -- A conversion [script](../../tools/dataset_converters/cityscapes.py) is provided to convert Cityscapes into COCO format. Please refer to [install.md](../../docs/1_exist_data_model.md#prepare-datasets) for details. -- `CityscapesDataset` implemented three evaluation methods. `bbox` and `segm` are standard COCO bbox/mask AP. `cityscapes` is the cityscapes dataset official evaluation, which may be slightly higher than COCO. - -### Faster R-CNN - -| Backbone | Style | Lr schd | Scale | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :------: | :-----: | :-----: | :------: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | pytorch | 1x | 800-1024 | 5.2 | - | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes_20200502-829424c0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes_20200502_114915.log.json) | - -### Mask R-CNN - -| Backbone | Style | Lr schd | Scale | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :------: | :-----: | :-----: | :------: | :------: | :------------: | :----: | :-----: | :-------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | pytorch | 1x | 800-1024 | 5.3 | - | 40.9 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes/mask_rcnn_r50_fpn_1x_cityscapes_20201211_133733-d2858245.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes/mask_rcnn_r50_fpn_1x_cityscapes_20201211_133733.log.json) | - -## Citation - -```latex -@inproceedings{Cordts2016Cityscapes, - title={The Cityscapes Dataset for Semantic Urban Scene Understanding}, - author={Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler, Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt}, - booktitle={Proc. 
of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year={2016} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py b/cv/detection/co-detr/pytorch/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py deleted file mode 100644 index ca636bdad8850c6db359b78ca2f01fa86811dedf..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py +++ /dev/null @@ -1,44 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/cityscapes_detection.py', - '../_base_/default_runtime.py' -] -model = dict( - backbone=dict(init_cfg=None), - roi_head=dict( - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=8, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))) -# optimizer -# lr is set for a batch size of 8 -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - # [7] yields higher performance than [6] - step=[7]) -runner = dict( - type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64 -log_config = dict(interval=100) -# For better, more stable performance initialize from COCO -load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (8 GPUs) x (1 samples per GPU) -auto_scale_lr = dict(base_batch_size=8) diff --git a/cv/detection/co-detr/pytorch/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py b/cv/detection/co-detr/pytorch/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py deleted file mode 100644 index 83ea058d15efa7361ffeeb8c6ba5811133113c82..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py +++ /dev/null @@ -1,51 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py' -] -model = dict( - backbone=dict(init_cfg=None), - roi_head=dict( - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=8, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), - mask_head=dict( - type='FCNMaskHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=8, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))) -# optimizer -# lr is set for a batch size of 8 -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - # [7] yields higher performance than [6] - step=[7]) -runner = dict( - type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64 -log_config = dict(interval=100) -# For better, more stable performance initialize from COCO -load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (8 GPUs) x (1 samples per GPU) -auto_scale_lr = dict(base_batch_size=8) diff --git a/cv/detection/co-detr/pytorch/configs/common/lsj_100e_coco_instance.py b/cv/detection/co-detr/pytorch/configs/common/lsj_100e_coco_instance.py deleted file mode 100644 index cacf23d74c344c39ed511c5d7183cee490d20ee7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/common/lsj_100e_coco_instance.py +++ /dev/null @@ -1,90 +0,0 @@ -_base_ = '../_base_/default_runtime.py' -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -image_size = (1024, 1024) - -file_client_args = dict(backend='disk') -# comment out the code below to use different file client -# file_client_args = dict( -# backend='petrel', -# path_mapping=dict({ -# './data/': 's3://openmmlab/datasets/detection/', -# 'data/': 's3://openmmlab/datasets/detection/' -# })) - -train_pipeline = [ - dict(type='LoadImageFromFile', file_client_args=file_client_args), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=image_size, - ratio_range=(0.1, 2.0), - multiscale_mode='range', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=image_size, - recompute_bbox=True, - allow_negative_crop=True), - dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=image_size), # padding to image_size leads 0.5+ mAP - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile', file_client_args=file_client_args), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -# Use RepeatDataset to speed up training -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='RepeatDataset', - times=4, # simply change this from 2 to 16 for 50e - 400e training. 
- dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=5, metric=['bbox', 'segm']) - -# optimizer assumes bs=64 -optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004) -optimizer_config = dict(grad_clip=None) - -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.067, - step=[22, 24]) -runner = dict(type='EpochBasedRunner', max_epochs=25) diff --git a/cv/detection/co-detr/pytorch/configs/common/mstrain-poly_3x_coco_instance.py b/cv/detection/co-detr/pytorch/configs/common/mstrain-poly_3x_coco_instance.py deleted file mode 100644 index c22ed9457197be61ec76117568f2351575573d43..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/common/mstrain-poly_3x_coco_instance.py +++ /dev/null @@ -1,80 +0,0 @@ -_base_ = '../_base_/default_runtime.py' -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], -# multiscale_mode='range' -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -# Use RepeatDataset to speed up training -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='RepeatDataset', - times=3, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric=['bbox', 'segm']) - -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) - -# learning policy -# Experiments show that using step=[9, 11] has higher performance -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[9, 11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git 
a/cv/detection/co-detr/pytorch/configs/common/mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/common/mstrain_3x_coco.py deleted file mode 100644 index 80ec8b8dbf0f76a99395bf615b6f2a60cafdd7e5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/common/mstrain_3x_coco.py +++ /dev/null @@ -1,76 +0,0 @@ -_base_ = '../_base_/default_runtime.py' -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], -# multiscale_mode='range' -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -# Use RepeatDataset to speed up training -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='RepeatDataset', - times=3, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='bbox') - -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) - -# learning policy -# Experiments show that using step=[9, 11] has higher performance -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[9, 11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/cv/detection/co-detr/pytorch/configs/common/mstrain_3x_coco_instance.py b/cv/detection/co-detr/pytorch/configs/common/mstrain_3x_coco_instance.py deleted file mode 100644 index 50f39bef3fe3c6e0f99259135745e89e000745ea..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/common/mstrain_3x_coco_instance.py +++ /dev/null @@ -1,76 +0,0 @@ -_base_ = '../_base_/default_runtime.py' -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], -# multiscale_mode='range' -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - 
dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -# Use RepeatDataset to speed up training -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='RepeatDataset', - times=3, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric=['bbox', 'segm']) - -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) - -# learning policy -# Experiments show that using step=[9, 11] has higher performance -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[9, 11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/cv/detection/co-detr/pytorch/configs/common/ssj_270k_coco_instance.py b/cv/detection/co-detr/pytorch/configs/common/ssj_270k_coco_instance.py deleted file mode 100644 index 851098f8decda0690cd793737e5d7d05722a2559..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/common/ssj_270k_coco_instance.py +++ /dev/null @@ -1,91 +0,0 @@ -_base_ = '../_base_/default_runtime.py' -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -image_size = (1024, 1024) - -file_client_args = dict(backend='disk') - -# Standard Scale Jittering (SSJ) resizes and crops an image -# with a resize range of 0.8 to 1.25 of the original image size. 
-train_pipeline = [ - dict(type='LoadImageFromFile', file_client_args=file_client_args), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=image_size, - ratio_range=(0.8, 1.25), - multiscale_mode='range', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=image_size, - recompute_bbox=True, - allow_negative_crop=True), - dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=image_size), # padding to image_size leads 0.5+ mAP - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile', file_client_args=file_client_args), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) - -evaluation = dict(interval=6000, metric=['bbox', 'segm']) - -# optimizer assumes batch_size = (32 GPUs) x (2 samples per GPU) -optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004) -optimizer_config = dict(grad_clip=None) - -# lr steps at [0.9, 0.95, 0.975] of the maximum iterations -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=0.001, - step=[243000, 256500, 263250]) -checkpoint_config = dict(interval=6000) -# The model is trained by 270k iterations with batch_size 64, -# which is roughly equivalent to 144 epochs. -runner = dict(type='IterBasedRunner', max_iters=270000) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (32 GPUs) x (2 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/common/ssj_scp_270k_coco_instance.py b/cv/detection/co-detr/pytorch/configs/common/ssj_scp_270k_coco_instance.py deleted file mode 100644 index 540839ff2f2054c7f5ba661b92eab9a667674b5b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/common/ssj_scp_270k_coco_instance.py +++ /dev/null @@ -1,97 +0,0 @@ -_base_ = '../_base_/default_runtime.py' -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -image_size = (1024, 1024) - -file_client_args = dict(backend='disk') - -# Standard Scale Jittering (SSJ) resizes and crops an image -# with a resize range of 0.8 to 1.25 of the original image size. 
-load_pipeline = [ - dict(type='LoadImageFromFile', file_client_args=file_client_args), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=image_size, - ratio_range=(0.8, 1.25), - multiscale_mode='range', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=image_size, - recompute_bbox=True, - allow_negative_crop=True), - dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Pad', size=image_size), -] -train_pipeline = [ - dict(type='CopyPaste', max_num_pasted=100), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile', file_client_args=file_client_args), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='MultiImageMixDataset', - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=load_pipeline), - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) - -evaluation = dict(interval=6000, metric=['bbox', 'segm']) - -# optimizer assumes batch_size = (32 GPUs) x (2 samples per GPU) -optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004) -optimizer_config = dict(grad_clip=None) - -# lr steps at [0.9, 0.95, 0.975] of the maximum iterations -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=0.001, - step=[243000, 256500, 263250]) -checkpoint_config = dict(interval=6000) -# The model is trained by 270k iterations with batch_size 64, -# which is roughly equivalent to 144 epochs. -runner = dict(type='IterBasedRunner', max_iters=270000) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (32 GPUs) x (2 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/convnext/README.md b/cv/detection/co-detr/pytorch/configs/convnext/README.md deleted file mode 100644 index edf72e83a8cb38af99ec1cf717eb18d4699b8ac7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/convnext/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# ConvNeXt - -> [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) - -## Abstract - -The "Roaring 20s" of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. 
It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually "modernize" a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets. - -
- -## Results and models - -| Method | Backbone | Pretrain | Lr schd | Multi-scale crop | FP16 | Mem (GB) | box AP | mask AP | Config | Download | -| :----------------: | :--------: | :---------: | :-----: | :--------------: | :--: | :------: | :----: | :-----: | :-------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Mask R-CNN | ConvNeXt-T | ImageNet-1K | 3x | yes | yes | 7.3 | 46.2 | 41.7 | [config](./mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco_20220426_154953-050731f4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco_20220426_154953.log.json) | -| Cascade Mask R-CNN | ConvNeXt-T | ImageNet-1K | 3x | yes | yes | 9.0 | 50.3 | 43.6 | [config](./cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220509_204200-8f07c40b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220509_204200.log.json) | -| Cascade Mask R-CNN | ConvNeXt-S | ImageNet-1K | 3x | yes | yes | 12.3 | 51.8 | 44.8 | [config](./cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004.log.json) | - -**Note**: - -- ConvNeXt backbone needs to install [MMClassification](https://github.com/open-mmlab/mmclassification) first, which has abundant backbones for downstream tasks. - -```shell -pip install mmcls>=0.22.0 -``` - -- The performance is unstable. `Cascade Mask R-CNN` may fluctuate about 0.2 mAP. 
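-
-For reference, the key lines that wire the MMClassification backbone into these detection configs are the `custom_imports` and `init_cfg` entries. A minimal sketch, mirroring the ConvNeXt-T configs in this directory (the checkpoint URL and hyper-parameters below are taken from those configs):
-
-```python
-# Sketch of how these configs swap in the mmcls ConvNeXt backbone.
-# `custom_imports` imports mmcls.models so its backbones register with mmdet.
-custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
-
-# ImageNet-1K pre-trained ConvNeXt-T weights (same checkpoint as the configs).
-checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth'  # noqa
-
-model = dict(
-    backbone=dict(
-        _delete_=True,                 # drop the ResNet backbone from _base_
-        type='mmcls.ConvNeXt',         # resolved through the mmcls registry
-        arch='tiny',
-        out_indices=[0, 1, 2, 3],      # feed all four stages to the FPN
-        drop_path_rate=0.4,
-        layer_scale_init_value=1.0,
-        gap_before_final_norm=False,
-        init_cfg=dict(
-            type='Pretrained', checkpoint=checkpoint_file,
-            prefix='backbone.')),
-    neck=dict(in_channels=[96, 192, 384, 768]))  # ConvNeXt-T stage widths
-```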
- -## Citation - -```bibtex -@article{liu2022convnet, - title={A ConvNet for the 2020s}, - author={Liu, Zhuang and Mao, Hanzi and Wu, Chao-Yuan and Feichtenhofer, Christoph and Darrell, Trevor and Xie, Saining}, - journal={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - year={2022} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py b/cv/detection/co-detr/pytorch/configs/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py deleted file mode 100644 index 0ccc31d248872a79a8ab20ce8e5c17863f013b71..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py +++ /dev/null @@ -1,32 +0,0 @@ -_base_ = './cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py' # noqa - -# please install mmcls>=0.22.0 -# import mmcls.models to trigger register_module in mmcls -custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) -checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa - -model = dict( - backbone=dict( - _delete_=True, - type='mmcls.ConvNeXt', - arch='small', - out_indices=[0, 1, 2, 3], - drop_path_rate=0.6, - layer_scale_init_value=1.0, - gap_before_final_norm=False, - init_cfg=dict( - type='Pretrained', checkpoint=checkpoint_file, - prefix='backbone.'))) - -optimizer = dict( - _delete_=True, - constructor='LearningRateDecayOptimizerConstructor', - type='AdamW', - lr=0.0002, - betas=(0.9, 0.999), - weight_decay=0.05, - paramwise_cfg={ - 'decay_rate': 0.7, - 'decay_type': 'layer_wise', - 'num_layers': 12 - }) diff --git a/cv/detection/co-detr/pytorch/configs/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py b/cv/detection/co-detr/pytorch/configs/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py deleted file mode 100644 index 93304c001da6a1de4888e6cdce7db1b766960fd7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py +++ /dev/null @@ -1,149 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -# please install mmcls>=0.22.0 -# import mmcls.models to trigger register_module in mmcls -custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) -checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa - -model = dict( - backbone=dict( - _delete_=True, - type='mmcls.ConvNeXt', - arch='tiny', - out_indices=[0, 1, 2, 3], - drop_path_rate=0.4, - layer_scale_init_value=1.0, - gap_before_final_norm=False, - init_cfg=dict( - type='Pretrained', checkpoint=checkpoint_file, - prefix='backbone.')), - neck=dict(in_channels=[96, 192, 384, 768]), - roi_head=dict(bbox_head=[ - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 
0.2]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)), - dict( - type='ConvFCBBoxHead', - num_shared_convs=4, - num_shared_fcs=1, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - norm_cfg=dict(type='SyncBN', requires_grad=True), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)) - ])) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# augmentation strategy originates from DETR / Sparse RCNN -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='AutoAugment', - policies=[[ - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict( - type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ]]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline), persistent_workers=True) - -optimizer = dict( - _delete_=True, - constructor='LearningRateDecayOptimizerConstructor', - type='AdamW', - lr=0.0002, - betas=(0.9, 0.999), - weight_decay=0.05, - paramwise_cfg={ - 'decay_rate': 0.7, - 'decay_type': 'layer_wise', - 'num_layers': 6 - }) - -lr_config = dict(warmup_iters=1000, step=[27, 33]) -runner = dict(max_epochs=36) - -# you need to set mode='dynamic' if you are using pytorch<=1.5.0 -fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/cv/detection/co-detr/pytorch/configs/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py b/cv/detection/co-detr/pytorch/configs/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py deleted file mode 100644 index e8a283f54834d1abda47030e695b4136f603d745..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/configs/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py +++ /dev/null @@ -1,90 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -# please install mmcls>=0.22.0 -# import mmcls.models to trigger register_module in mmcls -custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) -checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa - -model = dict( - backbone=dict( - _delete_=True, - type='mmcls.ConvNeXt', - arch='tiny', - out_indices=[0, 1, 2, 3], - drop_path_rate=0.4, - layer_scale_init_value=1.0, - gap_before_final_norm=False, - init_cfg=dict( - type='Pretrained', checkpoint=checkpoint_file, - prefix='backbone.')), - neck=dict(in_channels=[96, 192, 384, 768])) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# augmentation strategy originates from DETR / Sparse RCNN -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='AutoAugment', - policies=[[ - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict( - type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ]]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline), persistent_workers=True) - -optimizer = dict( - _delete_=True, - constructor='LearningRateDecayOptimizerConstructor', - type='AdamW', - lr=0.0001, - betas=(0.9, 0.999), - weight_decay=0.05, - paramwise_cfg={ - 'decay_rate': 0.95, - 'decay_type': 'layer_wise', - 'num_layers': 6 - }) - -lr_config = dict(warmup_iters=1000, step=[27, 33]) -runner = dict(max_epochs=36) - -# you need to set mode='dynamic' if you are using pytorch<=1.5.0 -fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/cv/detection/co-detr/pytorch/configs/convnext/metafile.yml b/cv/detection/co-detr/pytorch/configs/convnext/metafile.yml deleted file mode 100644 index 84e50e8b7028f911416659f9e746c32c4e226655..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/convnext/metafile.yml +++ /dev/null @@ -1,93 +0,0 @@ -Models: - - Name: mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco - In Collection: Mask R-CNN - Config: configs/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py - Metadata: - Training Memory (GB): 7.3 - Epochs: 36 - Training Data: COCO - Training Techniques: - - AdamW - - Mixed Precision Training - Training Resources: 8x A100 GPUs - Architecture: - - ConvNeXt - Results: - - Task: 
Object Detection - Dataset: COCO - Metrics: - box AP: 46.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 41.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco_20220426_154953-050731f4.pth - Paper: - URL: https://arxiv.org/abs/2201.03545 - Title: 'A ConvNet for the 2020s' - README: configs/convnext/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 - Version: v2.16.0 - - - Name: cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco - In Collection: Cascade Mask R-CNN - Config: configs/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py - Metadata: - Training Memory (GB): 9.0 - Epochs: 36 - Training Data: COCO - Training Techniques: - - AdamW - - Mixed Precision Training - Training Resources: 8x A100 GPUs - Architecture: - - ConvNeXt - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 50.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 43.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220509_204200-8f07c40b.pth - Paper: - URL: https://arxiv.org/abs/2201.03545 - Title: 'A ConvNet for the 2020s' - README: configs/convnext/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 - Version: v2.25.0 - - - Name: cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco - In Collection: Cascade Mask R-CNN - Config: configs/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py - Metadata: - Training Memory (GB): 12.3 - Epochs: 36 - Training Data: COCO - Training Techniques: - - AdamW - - Mixed Precision Training - Training Resources: 8x A100 GPUs - Architecture: - - ConvNeXt - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 51.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 44.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco_20220510_201004-3d24f5a4.pth - Paper: - URL: https://arxiv.org/abs/2201.03545 - Title: 'A ConvNet for the 2020s' - README: configs/convnext/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 - Version: v2.25.0 diff --git a/cv/detection/co-detr/pytorch/configs/cornernet/README.md b/cv/detection/co-detr/pytorch/configs/cornernet/README.md deleted file mode 100644 index d0b9e98645f9924d43c8dd63b66e43c1726c197b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cornernet/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# CornerNet - -> [Cornernet: Detecting objects as paired keypoints](https://arxiv.org/abs/1808.01244) - - - -## Abstract - -We propose CornerNet, a new approach to object detection where we detect an object bounding box as a pair of keypoints, the top-left corner and the bottom-right corner, using a single convolution neural network. By detecting objects as paired keypoints, we eliminate the need for designing a set of anchor boxes commonly used in prior single-stage detectors. 
In addition to our novel formulation, we introduce corner pooling, a new type of pooling layer that helps the network better localize corners. Experiments show that CornerNet achieves a 42.2% AP on MS COCO, outperforming all existing one-stage detectors. - -
- -## Results and Models - -| Backbone | Batch Size | Step/Total Epochs | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :--------------: | :---------------------------------------------------------: | :---------------: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| HourglassNet-104 | [10 x 5](./cornernet_hourglass104_mstest_10x5_210e_coco.py) | 180/210 | 13.9 | 4.2 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco/cornernet_hourglass104_mstest_10x5_210e_coco_20200824_185720-5fefbf1c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco/cornernet_hourglass104_mstest_10x5_210e_coco_20200824_185720.log.json) | -| HourglassNet-104 | [8 x 6](./cornernet_hourglass104_mstest_8x6_210e_coco.py) | 180/210 | 15.9 | 4.2 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618.log.json) | -| HourglassNet-104 | [32 x 3](./cornernet_hourglass104_mstest_32x3_210e_coco.py) | 180/210 | 9.5 | 3.9 | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco/cornernet_hourglass104_mstest_32x3_210e_coco_20200819_203110-1efaea91.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco/cornernet_hourglass104_mstest_32x3_210e_coco_20200819_203110.log.json) | - -Note: - -- TTA setting is single-scale and `flip=True`. -- Experiments with `images_per_gpu=6` are conducted on Tesla V100-SXM2-32GB, `images_per_gpu=3` are conducted on GeForce GTX 1080 Ti. -- Here are the descriptions of each experiment setting: - - 10 x 5: 10 GPUs with 5 images per gpu. This is the same setting as that reported in the original paper. - - 8 x 6: 8 GPUs with 6 images per gpu. The total batchsize is similar to paper and only need 1 node to train. - - 32 x 3: 32 GPUs with 3 images per gpu. The default setting for 1080TI and need 4 nodes to train. 
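-
-The descriptions above pin down the effective batch size (GPUs x images per GPU) for each recipe, and the corresponding configs record that total in `auto_scale_lr` as `base_batch_size`. As a rough illustration (not the mmdet implementation itself, just the linear scaling rule it applies when automatic LR scaling is enabled), reproducing one recipe on different hardware would rescale the Adam learning rate like this:
-
-```python
-def linear_scale_lr(base_lr, num_gpus, samples_per_gpu, base_batch_size):
-    """Linear scaling rule: LR grows in proportion to the effective batch size."""
-    effective_batch_size = num_gpus * samples_per_gpu
-    return base_lr * effective_batch_size / base_batch_size
-
-# Hypothetical example: the 10 x 5 recipe (base_batch_size=50, Adam lr=5e-4)
-# reproduced on 8 GPUs with 5 images each (effective batch size 40).
-print(linear_scale_lr(5e-4, 8, 5, 50))  # -> 0.0004
-```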
- -## Citation - -```latex -@inproceedings{law2018cornernet, - title={Cornernet: Detecting objects as paired keypoints}, - author={Law, Hei and Deng, Jia}, - booktitle={15th European Conference on Computer Vision, ECCV 2018}, - pages={765--781}, - year={2018}, - organization={Springer Verlag} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py b/cv/detection/co-detr/pytorch/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py deleted file mode 100644 index 6cb05a78cbbc38c10428407a5496ac877d69e780..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py +++ /dev/null @@ -1,110 +0,0 @@ -_base_ = [ - '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' -] - -# model settings -model = dict( - type='CornerNet', - backbone=dict( - type='HourglassNet', - downsample_times=5, - num_stacks=2, - stage_channels=[256, 256, 384, 384, 384, 512], - stage_blocks=[2, 2, 2, 2, 2, 4], - norm_cfg=dict(type='BN', requires_grad=True)), - neck=None, - bbox_head=dict( - type='CornerHead', - num_classes=80, - in_channels=256, - num_feat_levels=2, - corner_emb_channels=1, - loss_heatmap=dict( - type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), - loss_embedding=dict( - type='AssociativeEmbeddingLoss', - pull_weight=0.10, - push_weight=0.10), - loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)), - # training and testing settings - train_cfg=None, - test_cfg=dict( - corner_topk=100, - local_maximum_kernel=3, - distance_threshold=0.5, - score_thr=0.05, - max_per_img=100, - nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))) -# data settings -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict( - type='RandomCenterCropPad', - crop_size=(511, 511), - ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), - test_mode=False, - test_pad_mode=None, - **img_norm_cfg), - dict(type='Resize', img_scale=(511, 511), keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict( - type='MultiScaleFlipAug', - scale_factor=1.0, - flip=True, - transforms=[ - dict(type='Resize'), - dict( - type='RandomCenterCropPad', - crop_size=None, - ratios=None, - border=None, - test_mode=True, - test_pad_mode=['logical_or', 127], - **img_norm_cfg), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict( - type='Collect', - keys=['img'], - meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', - 'scale_factor', 'flip', 'img_norm_cfg', 'border')), - ]) -] -data = dict( - samples_per_gpu=5, - workers_per_gpu=3, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='Adam', lr=0.0005) -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - 
warmup_ratio=1.0 / 3, - step=[180]) -runner = dict(type='EpochBasedRunner', max_epochs=210) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (10 GPUs) x (5 samples per GPU) -auto_scale_lr = dict(base_batch_size=50) diff --git a/cv/detection/co-detr/pytorch/configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py b/cv/detection/co-detr/pytorch/configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py deleted file mode 100644 index f539cdb81a15f1d14d11c6f62bb51e0f82ec1544..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py +++ /dev/null @@ -1,110 +0,0 @@ -_base_ = [ - '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' -] - -# model settings -model = dict( - type='CornerNet', - backbone=dict( - type='HourglassNet', - downsample_times=5, - num_stacks=2, - stage_channels=[256, 256, 384, 384, 384, 512], - stage_blocks=[2, 2, 2, 2, 2, 4], - norm_cfg=dict(type='BN', requires_grad=True)), - neck=None, - bbox_head=dict( - type='CornerHead', - num_classes=80, - in_channels=256, - num_feat_levels=2, - corner_emb_channels=1, - loss_heatmap=dict( - type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), - loss_embedding=dict( - type='AssociativeEmbeddingLoss', - pull_weight=0.10, - push_weight=0.10), - loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)), - # training and testing settings - train_cfg=None, - test_cfg=dict( - corner_topk=100, - local_maximum_kernel=3, - distance_threshold=0.5, - score_thr=0.05, - max_per_img=100, - nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))) -# data settings -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict( - type='RandomCenterCropPad', - crop_size=(511, 511), - ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), - test_mode=False, - test_pad_mode=None, - **img_norm_cfg), - dict(type='Resize', img_scale=(511, 511), keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict( - type='MultiScaleFlipAug', - scale_factor=1.0, - flip=True, - transforms=[ - dict(type='Resize'), - dict( - type='RandomCenterCropPad', - crop_size=None, - ratios=None, - border=None, - test_mode=True, - test_pad_mode=['logical_or', 127], - **img_norm_cfg), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict( - type='Collect', - keys=['img'], - meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', - 'scale_factor', 'flip', 'img_norm_cfg', 'border')), - ]) -] -data = dict( - samples_per_gpu=3, - workers_per_gpu=3, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='Adam', lr=0.0005) -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=1.0 / 3, - 
step=[180]) -runner = dict(type='EpochBasedRunner', max_epochs=210) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (32 GPUs) x (3 samples per GPU) -auto_scale_lr = dict(base_batch_size=96) diff --git a/cv/detection/co-detr/pytorch/configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py b/cv/detection/co-detr/pytorch/configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py deleted file mode 100644 index 9b115d78165e05d40c5b718f1b944a85b08a02bf..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py +++ /dev/null @@ -1,110 +0,0 @@ -_base_ = [ - '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' -] - -# model settings -model = dict( - type='CornerNet', - backbone=dict( - type='HourglassNet', - downsample_times=5, - num_stacks=2, - stage_channels=[256, 256, 384, 384, 384, 512], - stage_blocks=[2, 2, 2, 2, 2, 4], - norm_cfg=dict(type='BN', requires_grad=True)), - neck=None, - bbox_head=dict( - type='CornerHead', - num_classes=80, - in_channels=256, - num_feat_levels=2, - corner_emb_channels=1, - loss_heatmap=dict( - type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), - loss_embedding=dict( - type='AssociativeEmbeddingLoss', - pull_weight=0.10, - push_weight=0.10), - loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)), - # training and testing settings - train_cfg=None, - test_cfg=dict( - corner_topk=100, - local_maximum_kernel=3, - distance_threshold=0.5, - score_thr=0.05, - max_per_img=100, - nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))) -# data settings -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict( - type='RandomCenterCropPad', - crop_size=(511, 511), - ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), - test_mode=False, - test_pad_mode=None, - **img_norm_cfg), - dict(type='Resize', img_scale=(511, 511), keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict( - type='MultiScaleFlipAug', - scale_factor=1.0, - flip=True, - transforms=[ - dict(type='Resize'), - dict( - type='RandomCenterCropPad', - crop_size=None, - ratios=None, - border=None, - test_mode=True, - test_pad_mode=['logical_or', 127], - **img_norm_cfg), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict( - type='Collect', - keys=['img'], - meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', - 'scale_factor', 'flip', 'img_norm_cfg', 'border')), - ]) -] -data = dict( - samples_per_gpu=6, - workers_per_gpu=3, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='Adam', lr=0.0005) -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=1.0 / 3, - step=[180]) -runner = 
dict(type='EpochBasedRunner', max_epochs=210) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (6 samples per GPU) -auto_scale_lr = dict(base_batch_size=48) diff --git a/cv/detection/co-detr/pytorch/configs/cornernet/metafile.yml b/cv/detection/co-detr/pytorch/configs/cornernet/metafile.yml deleted file mode 100644 index c2f6143a74a36a59c7b54531212cfc51b79636bc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/cornernet/metafile.yml +++ /dev/null @@ -1,83 +0,0 @@ -Collections: - - Name: CornerNet - Metadata: - Training Data: COCO - Training Techniques: - - Adam - Training Resources: 8x V100 GPUs - Architecture: - - Corner Pooling - - Stacked Hourglass Network - Paper: - URL: https://arxiv.org/abs/1808.01244 - Title: 'CornerNet: Detecting Objects as Paired Keypoints' - README: configs/cornernet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.3.0/mmdet/models/detectors/cornernet.py#L9 - Version: v2.3.0 - -Models: - - Name: cornernet_hourglass104_mstest_10x5_210e_coco - In Collection: CornerNet - Config: configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py - Metadata: - Training Resources: 10x V100 GPUs - Batch Size: 50 - Training Memory (GB): 13.9 - inference time (ms/im): - - value: 238.1 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 210 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco/cornernet_hourglass104_mstest_10x5_210e_coco_20200824_185720-5fefbf1c.pth - - - Name: cornernet_hourglass104_mstest_8x6_210e_coco - In Collection: CornerNet - Config: configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py - Metadata: - Batch Size: 48 - Training Memory (GB): 15.9 - inference time (ms/im): - - value: 238.1 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 210 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth - - - Name: cornernet_hourglass104_mstest_32x3_210e_coco - In Collection: CornerNet - Config: configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py - Metadata: - Training Resources: 32x V100 GPUs - Batch Size: 96 - Training Memory (GB): 9.5 - inference time (ms/im): - - value: 256.41 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 210 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco/cornernet_hourglass104_mstest_32x3_210e_coco_20200819_203110-1efaea91.pth diff --git a/cv/detection/co-detr/pytorch/configs/dcn/README.md b/cv/detection/co-detr/pytorch/configs/dcn/README.md deleted file mode 100644 index 745b01cde2ddbb2757db6e2667cf1df2517c6a51..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcn/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# DCN - -> [Deformable Convolutional Networks](https://arxiv.org/abs/1703.06211) - - - -## Abstract - -Convolutional neural networks (CNNs) are inherently limited to model geometric transformations 
due to the fixed geometric structures in its building modules. In this work, we introduce two new modules to enhance the transformation modeling capacity of CNNs, namely, deformable convolution and deformable RoI pooling. Both are based on the idea of augmenting the spatial sampling locations in the modules with additional offsets and learning the offsets from target tasks, without additional supervision. The new modules can readily replace their plain counterparts in existing CNNs and can be easily trained end-to-end by standard back-propagation, giving rise to deformable convolutional networks. Extensive experiments validate the effectiveness of our approach on sophisticated vision tasks of object detection and semantic segmentation. - -
- -
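To make the offset idea above concrete, here is a minimal, self-contained sketch built on `torchvision.ops.DeformConv2d` rather than the mmcv `DCN` op that the configs below register; the block and its name (`SimpleDeformBlock`) are illustrative and are not the implementation behind the results below.

```python
# Minimal sketch of deformable convolution using torchvision's op, shown for
# illustration only; the configs below use the mmcv `DCN` op instead.
import torch
import torch.nn as nn
from torchvision.ops import DeformConv2d

class SimpleDeformBlock(nn.Module):
    """3x3 deformable conv whose 2-D sampling offsets are predicted from the input."""

    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        kh, kw = 3, 3
        # One (dy, dx) pair per kernel location -> 2 * kh * kw offset channels.
        self.offset_conv = nn.Conv2d(in_channels, 2 * kh * kw, kernel_size=3, padding=1)
        # Zero-init so the layer starts out behaving like a regular 3x3 convolution.
        nn.init.zeros_(self.offset_conv.weight)
        nn.init.zeros_(self.offset_conv.bias)
        self.deform_conv = DeformConv2d(in_channels, out_channels, kernel_size=3, padding=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        offsets = self.offset_conv(x)        # offsets are learned from the task, no extra supervision
        return self.deform_conv(x, offsets)  # sample the input at the shifted locations

feat = torch.randn(1, 64, 32, 32)
print(SimpleDeformBlock(64, 128)(feat).shape)  # torch.Size([1, 128, 32, 32])
```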
- -## Results and Models - -| Backbone | Model | Style | Conv | Pool | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------------: | :----------: | :-----: | :----------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | Faster | pytorch | dconv(c3-c5) | - | 1x | 4.0 | 17.8 | 41.3 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130_212941.log.json) | -| R-50-FPN | Faster | pytorch | - | dpool | 1x | 5.0 | 17.2 | 38.9 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dpool_1x_coco/faster_rcnn_r50_fpn_dpool_1x_coco_20200307-90d3c01d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dpool_1x_coco/faster_rcnn_r50_fpn_dpool_1x_coco_20200307_203250.log.json) | -| R-101-FPN | Faster | pytorch | dconv(c3-c5) | - | 1x | 6.0 | 12.5 | 42.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-1377f13d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203_230019.log.json) | -| X-101-32x4d-FPN | Faster | pytorch | dconv(c3-c5) | - | 1x | 7.3 | 10.0 | 44.5 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco_20200203-4f85c69c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco_20200203_001325.log.json) | -| R-50-FPN | Mask | pytorch | dconv(c3-c5) | - | 1x | 4.5 | 15.4 | 41.8 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200203-4d9ad43b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200203_061339.log.json) | -| R-101-FPN | Mask | pytorch | dconv(c3-c5) | - | 1x | 6.5 | 11.7 | 43.5 | 38.9 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200216-a71f5bce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200216_191601.log.json) | -| R-50-FPN | Cascade | pytorch | dconv(c3-c5) | - | 1x | 4.5 | 14.6 | 43.8 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-2f1fca44.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130_220843.log.json) | -| R-101-FPN | Cascade | pytorch | dconv(c3-c5) | - | 1x | 6.4 | 11.0 | 45.0 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-3b2f0594.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203_224829.log.json) | -| R-50-FPN | Cascade Mask | pytorch | dconv(c3-c5) | - | 1x | 6.0 | 10.0 | 44.4 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202-42e767a2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202_010309.log.json) | -| R-101-FPN | Cascade Mask | pytorch | dconv(c3-c5) | - | 1x | 8.0 | 8.6 | 45.8 | 39.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204-df0c5f10.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204_134006.log.json) | -| X-101-32x4d-FPN | Cascade Mask | pytorch | dconv(c3-c5) | - | 1x | 9.2 | | 47.3 | 41.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-e75f90c8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-20200606_183737.log.json) | -| R-50-FPN (FP16) | Mask | pytorch | dconv(c3-c5) | - | 1x | 3.0 | | 41.9 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247-c06429d2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247.log.json) | - -**Notes:** - -- `dconv` denotes deformable convolution, `c3-c5` means adding dconv in resnet stage 3 to 5. `dpool` denotes deformable roi pooling. -- The dcn ops are modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch, which should be more memory efficient and slightly faster. -- (\*) For R-50-FPN (dg=4), dg is short for deformable_group. This model is trained and tested on Amazon EC2 p3dn.24xlarge instance. -- **Memory, Train/Inf time is outdated.** - -## Citation - -```latex -@inproceedings{dai2017deformable, - title={Deformable Convolutional Networks}, - author={Dai, Jifeng and Qi, Haozhi and Xiong, Yuwen and Li, Yi and Zhang, Guodong and Hu, Han and Wei, Yichen}, - booktitle={Proceedings of the IEEE international conference on computer vision}, - year={2017} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py deleted file mode 100644 index 081b998f6f54d3d805dbab38b26750a378c0d93f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py deleted file mode 100644 index 3b3683af235f46df36d8793e52c2b9c52e0defeb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py deleted file mode 100644 index daaa4729c8280107b19107607ec399230713cf93..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py deleted file mode 100644 index a01df33c94e1f8b5f51a51a780b30a77ce99b2c0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ 
-_base_ = '../cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py deleted file mode 100644 index aa664bd61c78873a74af229caa8f62feca8daa5e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py deleted file mode 100644 index f5fee7e13cdfd531bf24d7c261e843855124f762..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py deleted file mode 100644 index 8787088f27a09a3f8fd0d05a1144c0abdedd0a21..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py deleted file mode 100644 index 1b695f0e19049dc91b7656d7684df151896b7727..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py +++ /dev/null @@ -1,12 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - roi_head=dict( - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict( - _delete_=True, - type='DeformRoIPoolPack', - output_size=7, - output_channels=256), - out_channels=256, - featmap_strides=[4, 8, 16, 32]))) diff --git a/cv/detection/co-detr/pytorch/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py deleted file mode 100644 index e3bea1950ac8b1227b97d9eacafb208c4724f8eb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - dcn=dict(type='DCN', deform_groups=1, 
fallback_on_stride=False), - stage_with_dcn=(False, True, True, True), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py deleted file mode 100644 index cb340022ea27f563b8c4a570cf89b5f09e6434cd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py deleted file mode 100644 index ababe58dc3fdfbbc6c366f48271db31bf6e2e9e2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py deleted file mode 100644 index ee5cca7d535bc0a3e181f690a46ab42c42f1b9b1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) - -fp16 = dict(loss_scale=512.) 
diff --git a/cv/detection/co-detr/pytorch/configs/dcn/metafile.yml b/cv/detection/co-detr/pytorch/configs/dcn/metafile.yml deleted file mode 100644 index 36f388714468f756c1ae11da7307e35ce3f87d1e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcn/metafile.yml +++ /dev/null @@ -1,272 +0,0 @@ -Collections: - - Name: Deformable Convolutional Networks - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Deformable Convolution - Paper: - URL: https://arxiv.org/abs/1703.06211 - Title: "Deformable Convolutional Networks" - README: configs/dcn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/dcn/deform_conv.py#L15 - Version: v2.0.0 - -Models: - - Name: faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco - In Collection: Deformable Convolutional Networks - Config: configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 4.0 - inference time (ms/im): - - value: 56.18 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth - - - Name: faster_rcnn_r50_fpn_dpool_1x_coco - In Collection: Deformable Convolutional Networks - Config: configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py - Metadata: - Training Memory (GB): 5.0 - inference time (ms/im): - - value: 58.14 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dpool_1x_coco/faster_rcnn_r50_fpn_dpool_1x_coco_20200307-90d3c01d.pth - - - Name: faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco - In Collection: Deformable Convolutional Networks - Config: configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 6.0 - inference time (ms/im): - - value: 80 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-1377f13d.pth - - - Name: faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco - In Collection: Deformable Convolutional Networks - Config: configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 7.3 - inference time (ms/im): - - value: 100 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco_20200203-4f85c69c.pth - - - Name: mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco - In Collection: Deformable Convolutional Networks - Config: configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 4.5 - inference time (ms/im): - - value: 64.94 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 
1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200203-4d9ad43b.pth - - - Name: mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco - In Collection: Deformable Convolutional Networks - Config: configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py - Metadata: - Training Techniques: - - SGD with Momentum - - Weight Decay - - Mixed Precision Training - Training Memory (GB): 3.0 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.9 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247-c06429d2.pth - - - Name: mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco - In Collection: Deformable Convolutional Networks - Config: configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 6.5 - inference time (ms/im): - - value: 85.47 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200216-a71f5bce.pth - - - Name: cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco - In Collection: Deformable Convolutional Networks - Config: configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 4.5 - inference time (ms/im): - - value: 68.49 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-2f1fca44.pth - - - Name: cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco - In Collection: Deformable Convolutional Networks - Config: configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 6.4 - inference time (ms/im): - - value: 90.91 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-3b2f0594.pth - - - Name: cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco - In Collection: Deformable Convolutional Networks - Config: configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 6.0 - inference time (ms/im): - - value: 100 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.6 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202-42e767a2.pth - - - Name: cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco - In Collection: Deformable Convolutional Networks - Config: configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 8.0 - inference time (ms/im): - - value: 116.28 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204-df0c5f10.pth - - - Name: cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco - In Collection: Deformable Convolutional Networks - Config: configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 9.2 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 47.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 41.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-e75f90c8.pth diff --git a/cv/detection/co-detr/pytorch/configs/dcnv2/README.md b/cv/detection/co-detr/pytorch/configs/dcnv2/README.md deleted file mode 100644 index d230f202c778735274d70ea8421049ac6489d8b6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcnv2/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# DCNv2 - -> [Deformable ConvNets v2: More Deformable, Better Results](https://arxiv.org/abs/1811.11168) - - - -## Abstract - -The superior performance of Deformable Convolutional Networks arises from its ability to adapt to the geometric variations of objects. Through an examination of its adaptive behavior, we observe that while the spatial support for its neural features conforms more closely than regular ConvNets to object structure, this support may nevertheless extend well beyond the region of interest, causing features to be influenced by irrelevant image content. To address this problem, we present a reformulation of Deformable ConvNets that improves its ability to focus on pertinent image regions, through increased modeling power and stronger training. The modeling power is enhanced through a more comprehensive integration of deformable convolution within the network, and by introducing a modulation mechanism that expands the scope of deformation modeling. To effectively harness this enriched modeling capability, we guide network training via a proposed feature mimicking scheme that helps the network to learn features that reflect the object focus and classification power of RCNN features. With the proposed contributions, this new version of Deformable ConvNets yields significant performance gains over the original model and produces leading results on the COCO benchmark for object detection and instance segmentation. 
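The v2 change described above, a per-sample modulation scalar on top of the learned offsets, can be sketched with `torchvision.ops.deform_conv2d` and its `mask` argument. This is an illustrative stand-in, not the mmcv `DCNv2` op the configs below use, and the class name is invented for the example.

```python
# Illustrative DCNv2-style block: offsets plus a sigmoid modulation mask per
# kernel sample; the actual configs use mmcv's `DCNv2` op, not this sketch.
import torch
import torch.nn as nn
from torchvision.ops import deform_conv2d

class ModulatedDeformBlock(nn.Module):
    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        kh, kw = 3, 3
        # Predict 2 offset channels and 1 modulation channel per kernel location.
        self.offset_conv = nn.Conv2d(in_channels, 3 * kh * kw, kernel_size=3, padding=1)
        nn.init.zeros_(self.offset_conv.weight)
        nn.init.zeros_(self.offset_conv.bias)
        self.weight = nn.Parameter(torch.empty(out_channels, in_channels, kh, kw))
        nn.init.kaiming_uniform_(self.weight, a=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        o1, o2, mask = torch.chunk(self.offset_conv(x), 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)   # (N, 2*kh*kw, H, W) sampling offsets
        mask = torch.sigmoid(mask)            # (N, kh*kw, H, W) modulation in [0, 1]
        return deform_conv2d(x, offset, self.weight, padding=1, mask=mask)

x = torch.randn(2, 64, 32, 32)
print(ModulatedDeformBlock(64, 64)(x).shape)  # torch.Size([2, 64, 32, 32])
```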
- -## Results and Models - -| Backbone | Model | Style | Conv | Pool | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :---------------: | :----: | :-----: | :-----------: | :----: | :-----: | :------: | :------------: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | Faster | pytorch | mdconv(c3-c5) | - | 1x | 4.1 | 17.6 | 41.4 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130-d099253b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130_222144.log.json) | -| \*R-50-FPN (dg=4) | Faster | pytorch | mdconv(c3-c5) | - | 1x | 4.2 | 17.4 | 41.5 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130-01262257.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130_222058.log.json) | -| R-50-FPN | Faster | pytorch | - | mdpool | 1x | 5.8 | 16.6 | 38.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcnv2/faster_rcnn_r50_fpn_mdpool_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307_203304.log.json) | -| R-50-FPN | Mask | pytorch | mdconv(c3-c5) | - | 1x | 4.5 | 15.1 | 41.5 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcnv2/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203-ad97591f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203_063443.log.json) | -| R-50-FPN (FP16) | Mask | pytorch | mdconv(c3-c5) | - | 1x | 3.1 | | 42.0 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434-cf8fefa5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434.log.json) | - -**Notes:** - -- `mdconv` denotes modulated deformable 
convolution, `c3-c5` means adding dconv in resnet stage 3 to 5. `mdpool` denotes modulated deformable roi pooling. -- The dcn ops are modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch, which should be more memory efficient and slightly faster. -- (\*) For R-50-FPN (dg=4), dg is short for deformable_group. This model is trained and tested on Amazon EC2 p3dn.24xlarge instance. -- **Memory, Train/Inf time is outdated.** - -## Citation - -```latex -@article{zhu2018deformable, - title={Deformable ConvNets v2: More Deformable, Better Results}, - author={Zhu, Xizhou and Hu, Han and Lin, Stephen and Dai, Jifeng}, - journal={arXiv preprint arXiv:1811.11168}, - year={2018} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py deleted file mode 100644 index d1bcf3c102fb660641eda2a1398db3df520caa3a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py deleted file mode 100644 index d0ab89c261f970e16a9c4407620bd16a0df9e9e9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCNv2', deform_groups=4, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/dcnv2/faster_rcnn_r50_fpn_mdpool_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcnv2/faster_rcnn_r50_fpn_mdpool_1x_coco.py deleted file mode 100644 index ad7b0346a63dfa3c3ca246b624155fc4fd331a3f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcnv2/faster_rcnn_r50_fpn_mdpool_1x_coco.py +++ /dev/null @@ -1,12 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - roi_head=dict( - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict( - _delete_=True, - type='ModulatedDeformRoIPoolPack', - output_size=7, - output_channels=256), - out_channels=256, - featmap_strides=[4, 8, 16, 32]))) diff --git a/cv/detection/co-detr/pytorch/configs/dcnv2/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcnv2/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py deleted file mode 100644 index 7e21454bd96e4accdf0693d5fc805622f605be7c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcnv2/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) - -fp16 = dict(loss_scale=512.) 
diff --git a/cv/detection/co-detr/pytorch/configs/dcnv2/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dcnv2/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py deleted file mode 100644 index 5ca2a67cde62bff078b7c4c0d696a585265e4c3a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcnv2/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/dcnv2/metafile.yml b/cv/detection/co-detr/pytorch/configs/dcnv2/metafile.yml deleted file mode 100644 index 90494215d64132ac5c1f55e5f85da4941d15a712..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dcnv2/metafile.yml +++ /dev/null @@ -1,123 +0,0 @@ -Collections: - - Name: Deformable Convolutional Networks v2 - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Deformable Convolution - Paper: - URL: https://arxiv.org/abs/1811.11168 - Title: "Deformable ConvNets v2: More Deformable, Better Results" - README: configs/dcnv2/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/dcn/deform_conv.py#L15 - Version: v2.0.0 - -Models: - - Name: faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco - In Collection: Deformable Convolutional Networks v2 - Config: configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 4.1 - inference time (ms/im): - - value: 56.82 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130-d099253b.pth - - - Name: faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco - In Collection: Deformable Convolutional Networks v2 - Config: configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py - Metadata: - Training Memory (GB): 4.2 - inference time (ms/im): - - value: 57.47 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130-01262257.pth - - - Name: faster_rcnn_r50_fpn_mdpool_1x_coco - In Collection: Deformable Convolutional Networks v2 - Config: configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py - Metadata: - Training Memory (GB): 5.8 - inference time (ms/im): - - value: 60.24 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth - - - Name: mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco - In Collection: Deformable Convolutional Networks v2 - Config: configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 4.5 - inference time (ms/im): - - value: 66.23 - 
hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203-ad97591f.pth - - - Name: mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco - In Collection: Deformable Convolutional Networks v2 - Config: configs/dcn/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 3.1 - Training Techniques: - - SGD with Momentum - - Weight Decay - - Mixed Precision Training - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434-cf8fefa5.pth diff --git a/cv/detection/co-detr/pytorch/configs/ddod/README.md b/cv/detection/co-detr/pytorch/configs/ddod/README.md deleted file mode 100644 index 9ab1f4869a3af9a9573dab94d13861fe42b9ddc7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ddod/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# DDOD - -> [Disentangle Your Dense Object Detector](https://arxiv.org/pdf/2107.02963.pdf) - - - -## Abstract - -Deep learning-based dense object detectors have achieved great success in the past few years and have been applied to numerous multimedia applications such as video understanding. However, the current training pipeline for dense detectors is compromised to lots of conjunctions that may not hold. In this paper, we investigate three such important conjunctions: 1) only samples assigned as positive in classification head are used to train the regression head; 2) classification and regression share the same input feature and computational fields defined by the parallel head architecture; and 3) samples distributed in different feature pyramid layers are treated equally when computing the loss. We first carry out a series of pilot experiments to show disentangling such conjunctions can lead to persistent performance improvement. Then, based on these findings, we propose Disentangled Dense Object Detector(DDOD), in which simple and effective disentanglement mechanisms are designed and integrated into the current state-of-the-art dense object detectors. Extensive experiments on MS COCO benchmark show that our approach can lead to 2.0 mAP, 2.4 mAP and 2.2 mAP absolute improvements on RetinaNet, FCOS, and ATSS baselines with negligible extra overhead. Notably, our best model reaches 55.0 mAP on the COCO test-dev set and 93.5 AP on the hard subset of WIDER FACE, achieving new state-of-the-art performance on these two competitive benchmarks. Code is available at https://github.com/zehuichen123/DDOD. - -
- -
- -## Results and Models - -| Model | Backbone | Style | Lr schd | Mem (GB) | box AP | Config | Download | -| :-------: | :------: | :-----: | :-----: | :------: | :----: | :--------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| DDOD-ATSS | R-50 | pytorch | 1x | 3.4 | 41.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ddod/ddod_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737.log.json) | - -## Citation - -```latex -@inproceedings{chen2021disentangle, -title={Disentangle Your Dense Object Detector}, -author={Chen, Zehui and Yang, Chenhongyi and Li, Qiaofei and Zhao, Feng and Zha, Zheng-Jun and Wu, Feng}, -booktitle={Proceedings of the 29th ACM International Conference on Multimedia}, -pages={4939--4948}, -year={2021} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/ddod/ddod_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/ddod/ddod_r50_fpn_1x_coco.py deleted file mode 100644 index 02dd2fe891168f1003393611fc96c72d56874aff..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ddod/ddod_r50_fpn_1x_coco.py +++ /dev/null @@ -1,67 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - type='DDOD', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5), - bbox_head=dict( - type='DDODHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=2.0), - loss_iou=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), - train_cfg=dict( - # assigner is mean cls_assigner - assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8), - reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) - -# This `persistent_workers` is only valid when PyTorch>=1.7.0 -data = dict(persistent_workers=True) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/ddod/metafile.yml b/cv/detection/co-detr/pytorch/configs/ddod/metafile.yml deleted file mode 100644 
index c22395002bd614cd0e75d753320c3f9e7ce54bd1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ddod/metafile.yml +++ /dev/null @@ -1,33 +0,0 @@ -Collections: - - Name: DDOD - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - DDOD - - FPN - - ResNet - Paper: - URL: https://arxiv.org/pdf/2107.02963.pdf - Title: 'Disentangle Your Dense Object Detector' - README: configs/ddod/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.25.0/mmdet/models/detectors/ddod.py#L6 - Version: v2.25.0 - -Models: - - Name: ddod_r50_fpn_1x_coco - In Collection: DDOD - Config: configs/ddod/ddod_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 3.4 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth diff --git a/cv/detection/co-detr/pytorch/configs/deepfashion/README.md b/cv/detection/co-detr/pytorch/configs/deepfashion/README.md deleted file mode 100644 index 45daec0badfc20d9827ebd822bccdec7991f3e40..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/deepfashion/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# DeepFashion - -> [DeepFashion: Powering Robust Clothes Recognition and Retrieval With Rich Annotations](https://openaccess.thecvf.com/content_cvpr_2016/html/Liu_DeepFashion_Powering_Robust_CVPR_2016_paper.html) - - - -## Abstract - -Recent advances in clothes recognition have been driven by the construction of clothes datasets. Existing datasets are limited in the amount of annotations and are difficult to cope with the various challenges in real-world applications. In this work, we introduce DeepFashion, a large-scale clothes dataset with comprehensive annotations. It contains over 800,000 images, which are richly annotated with massive attributes, clothing landmarks, and correspondence of images taken under different scenarios including store, street snapshot, and consumer. Such rich annotations enable the development of powerful algorithms in clothes recognition and facilitating future researches. To demonstrate the advantages of DeepFashion, we propose a new deep model, namely FashionNet, which learns clothing features by jointly predicting clothing attributes and landmarks. The estimated landmarks are then employed to pool or gate the learned features. It is optimized in an iterative manner. Extensive experiments demonstrate the effectiveness of FashionNet and the usefulness of DeepFashion. - -
- -
- -## Introduction - -[MMFashion](https://github.com/open-mmlab/mmfashion) develops "fashion parsing and segmentation" module -based on the dataset -[DeepFashion-Inshop](https://drive.google.com/drive/folders/0B7EVK8r0v71pVDZFQXRsMDZCX1E?usp=sharing). -Its annotation follows COCO style. -To use it, you need to first download the data. Note that we only use "img_highres" in this task. -The file tree should be like this: - -```sh -mmdetection -├── mmdet -├── tools -├── configs -├── data -│ ├── DeepFashion -│ │ ├── In-shop -│ │ ├── Anno -│ │ │   ├── segmentation -│ │ │   | ├── DeepFashion_segmentation_train.json -│ │ │   | ├── DeepFashion_segmentation_query.json -│ │ │   | ├── DeepFashion_segmentation_gallery.json -│ │ │   ├── list_bbox_inshop.txt -│ │ │   ├── list_description_inshop.json -│ │ │   ├── list_item_inshop.txt -│ │ │   └── list_landmarks_inshop.txt -│ │ ├── Eval -│ │ │ └── list_eval_partition.txt -│ │ ├── Img -│ │ │ ├── img -│ │ │ │ ├──XXX.jpg -│ │ │ ├── img_highres -│ │ │ └── ├──XXX.jpg - -``` - -After that you can train the Mask RCNN r50 on DeepFashion-In-shop dataset by launching training with the `mask_rcnn_r50_fpn_1x.py` config -or creating your own config file. - -## Results and Models - -| Backbone | Model type | Dataset | bbox detection Average Precision | segmentation Average Precision | Config | Download (Google) | -| :------: | :--------: | :-----------------: | :------------------------------: | :----------------------------: | :----------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| ResNet50 | Mask RCNN | DeepFashion-In-shop | 0.599 | 0.584 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion/mask_rcnn_r50_fpn_15e_deepfashion_20200329_192752.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion/20200329_192752.log.json) | - -## Citation - -```latex -@inproceedings{liuLQWTcvpr16DeepFashion, - author = {Liu, Ziwei and Luo, Ping and Qiu, Shi and Wang, Xiaogang and Tang, Xiaoou}, - title = {DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations}, - booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2016} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py b/cv/detection/co-detr/pytorch/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py deleted file mode 100644 index c4e86387e3ce4aad3dd68d7613160fced4d3785b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py', - '../_base_/default_runtime.py' -] -model = dict( - roi_head=dict( - bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15))) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=15) diff --git 
a/cv/detection/co-detr/pytorch/configs/deformable_detr/README.md b/cv/detection/co-detr/pytorch/configs/deformable_detr/README.md deleted file mode 100644 index 378e1f26a2d1e2e355751ff453912960ca536924..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/deformable_detr/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# Deformable DETR - -> [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) - - - -## Abstract - -DETR has been recently proposed to eliminate the need for many hand-designed components in object detection while demonstrating good performance. However, it suffers from slow convergence and limited feature spatial resolution, due to the limitation of Transformer attention modules in processing image feature maps. To mitigate these issues, we proposed Deformable DETR, whose attention modules only attend to a small set of key sampling points around a reference. Deformable DETR can achieve better performance than DETR (especially on small objects) with 10 times less training epochs. Extensive experiments on the COCO benchmark demonstrate the effectiveness of our approach. - -
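To make the sampling idea above concrete, here is a rough single-scale sketch of deformable-style attention: each query predicts a few sampling offsets around its reference point, features are gathered at those locations, and the samples are combined with learned weights. All names, shapes, and the single-level simplification are assumptions for illustration; the real `MultiScaleDeformableAttention` in mmcv is more involved.

```python
# Toy single-scale sketch of deformable attention sampling (illustrative only).
import torch
import torch.nn as nn
import torch.nn.functional as F


class TinyDeformableSampling(nn.Module):
    def __init__(self, embed_dims=256, num_points=4):
        super().__init__()
        self.num_points = num_points
        self.offset_proj = nn.Linear(embed_dims, num_points * 2)
        self.weight_proj = nn.Linear(embed_dims, num_points)
        self.value_proj = nn.Conv2d(embed_dims, embed_dims, 1)

    def forward(self, query, reference_points, feat):
        # query:            (B, N, C)    one feature per object query
        # reference_points: (B, N, 2)    normalized (x, y) in [0, 1]
        # feat:             (B, C, H, W) image feature map
        B, N, _ = query.shape
        value = self.value_proj(feat)
        # Predict K sampling offsets and K attention weights per query.
        offsets = self.offset_proj(query).view(B, N, self.num_points, 2)
        weights = self.weight_proj(query).softmax(-1)               # (B, N, K)
        # Sampling locations, mapped to [-1, 1] for grid_sample.
        locs = (reference_points.unsqueeze(2) + 0.05 * offsets).clamp(0, 1)
        grid = locs * 2 - 1                                          # (B, N, K, 2)
        sampled = F.grid_sample(value, grid, align_corners=False)   # (B, C, N, K)
        # Attention-weighted sum over the K sampled points.
        return torch.einsum('bcnk,bnk->bnc', sampled, weights)


# e.g. TinyDeformableSampling()(torch.randn(2, 300, 256),
#                               torch.rand(2, 300, 2),
#                               torch.randn(2, 256, 32, 32)).shape  # (2, 300, 256)
```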
- -
- -## Results and Models - -| Backbone | Model | Lr schd | box AP | Config | Download | -| :------: | :---------------------------------: | :-----: | :----: | :------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | Deformable DETR | 50e | 44.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_r50_16x2_50e_coco/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_r50_16x2_50e_coco/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.log.json) | -| R-50 | + iterative bounding box refinement | 50e | 46.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco/deformable_detr_refine_r50_16x2_50e_coco_20210419_220503-5f5dff21.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco/deformable_detr_refine_r50_16x2_50e_coco_20210419_220503-5f5dff21.log.json) | -| R-50 | ++ two-stage Deformable DETR | 50e | 46.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco/deformable_detr_twostage_refine_r50_16x2_50e_coco_20210419_220613-9d28ab72.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco/deformable_detr_twostage_refine_r50_16x2_50e_coco_20210419_220613-9d28ab72.log.json) | - -# NOTE - -1. All models are trained with batch size 32. -2. The performance is unstable. `Deformable DETR` and `iterative bounding box refinement` may fluctuate about 0.3 mAP. `two-stage Deformable DETR` may fluctuate about 0.2 mAP. - -## Citation - -We provide the config files for Deformable DETR: [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159). 
- -```latex -@inproceedings{ -zhu2021deformable, -title={Deformable DETR: Deformable Transformers for End-to-End Object Detection}, -author={Xizhou Zhu and Weijie Su and Lewei Lu and Bin Li and Xiaogang Wang and Jifeng Dai}, -booktitle={International Conference on Learning Representations}, -year={2021}, -url={https://openreview.net/forum?id=gZ9hCDWe6ke} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py b/cv/detection/co-detr/pytorch/configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py deleted file mode 100644 index c64d09fe031a0b194a68263cab571294d4eb81cf..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py +++ /dev/null @@ -1,177 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' -] -model = dict( - type='DeformableDETR', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='ChannelMapper', - in_channels=[512, 1024, 2048], - kernel_size=1, - out_channels=256, - act_cfg=None, - norm_cfg=dict(type='GN', num_groups=32), - num_outs=4), - bbox_head=dict( - type='DeformableDETRHead', - num_query=300, - num_classes=80, - in_channels=2048, - sync_cls_avg_factor=True, - as_two_stage=False, - transformer=dict( - type='DeformableDetrTransformer', - encoder=dict( - type='DetrTransformerEncoder', - num_layers=6, - transformerlayers=dict( - type='BaseTransformerLayer', - attn_cfgs=dict( - type='MultiScaleDeformableAttention', embed_dims=256), - feedforward_channels=1024, - ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'ffn', 'norm'))), - decoder=dict( - type='DeformableDetrTransformerDecoder', - num_layers=6, - return_intermediate=True, - transformerlayers=dict( - type='DetrTransformerDecoderLayer', - attn_cfgs=[ - dict( - type='MultiheadAttention', - embed_dims=256, - num_heads=8, - dropout=0.1), - dict( - type='MultiScaleDeformableAttention', - embed_dims=256) - ], - feedforward_channels=1024, - ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm')))), - positional_encoding=dict( - type='SinePositionalEncoding', - num_feats=128, - normalize=True, - offset=-0.5), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=5.0), - loss_iou=dict(type='GIoULoss', loss_weight=2.0)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='HungarianAssigner', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), - iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))), - test_cfg=dict(max_per_img=100)) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different -# from the default setting in mmdet. 
-train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='AutoAugment', - policies=[ - [ - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict( - type='Resize', - # The radio of all image in train dataset < 7 - # follow the original impl - img_scale=[(400, 4200), (500, 4200), (600, 4200)], - multiscale_mode='value', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -# test_pipeline, NOTE the Pad's size_divisor is different from the default -# setting (size_divisor=32). While there is little effect on the performance -# whether we use the default setting or use size_divisor=1. -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(filter_empty_gt=False, pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict( - type='AdamW', - lr=2e-4, - weight_decay=0.0001, - paramwise_cfg=dict( - custom_keys={ - 'backbone': dict(lr_mult=0.1), - 'sampling_offsets': dict(lr_mult=0.1), - 'reference_points': dict(lr_mult=0.1) - })) -optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) -# learning policy -lr_config = dict(policy='step', step=[40]) -runner = dict(type='EpochBasedRunner', max_epochs=50) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (16 GPUs) x (2 samples per GPU) -auto_scale_lr = dict(base_batch_size=32) diff --git a/cv/detection/co-detr/pytorch/configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py b/cv/detection/co-detr/pytorch/configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py deleted file mode 100644 index 01f13df4886558366625bc4f3a367cb8a5154462..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = 'deformable_detr_r50_16x2_50e_coco.py' -model = dict(bbox_head=dict(with_box_refine=True)) diff --git a/cv/detection/co-detr/pytorch/configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py b/cv/detection/co-detr/pytorch/configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py deleted file mode 100644 index 2aa840d9e961f62307f05e8dde2d8520edef8cad..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = 'deformable_detr_refine_r50_16x2_50e_coco.py' -model = dict(bbox_head=dict(as_two_stage=True)) diff --git a/cv/detection/co-detr/pytorch/configs/deformable_detr/metafile.yml b/cv/detection/co-detr/pytorch/configs/deformable_detr/metafile.yml deleted file mode 100644 index 873292db7bc7da32fa6acab9fa8beef7fe1b2266..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/deformable_detr/metafile.yml +++ /dev/null @@ -1,56 +0,0 @@ -Collections: - - Name: Deformable DETR - Metadata: - Training Data: COCO - Training Techniques: - - AdamW - - Multi Scale Train - - Gradient Clip - Training Resources: 8x V100 GPUs - Architecture: - - ResNet - - Transformer - Paper: - URL: https://openreview.net/forum?id=gZ9hCDWe6ke - Title: 'Deformable DETR: Deformable Transformers for End-to-End Object Detection' - README: configs/deformable_detr/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/deformable_detr.py#L6 - Version: v2.12.0 - -Models: - - Name: deformable_detr_r50_16x2_50e_coco - In Collection: Deformable DETR - Config: configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py - Metadata: - Epochs: 50 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_r50_16x2_50e_coco/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth - - - Name: deformable_detr_refine_r50_16x2_50e_coco - In Collection: Deformable DETR - Config: configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py - Metadata: - Epochs: 50 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco/deformable_detr_refine_r50_16x2_50e_coco_20210419_220503-5f5dff21.pth - - - Name: deformable_detr_twostage_refine_r50_16x2_50e_coco - In Collection: Deformable DETR - Config: configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py - Metadata: - Epochs: 50 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco/deformable_detr_twostage_refine_r50_16x2_50e_coco_20210419_220613-9d28ab72.pth diff --git 
a/cv/detection/co-detr/pytorch/configs/detectors/README.md b/cv/detection/co-detr/pytorch/configs/detectors/README.md deleted file mode 100644 index baa245fe986a2703825c55ba241a6cc96f00719e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/detectors/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# DetectoRS - -> [DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution](https://arxiv.org/abs/2006.02334) - - - -## Abstract - -Many modern object detectors demonstrate outstanding performances by using the mechanism of looking and thinking twice. In this paper, we explore this mechanism in the backbone design for object detection. At the macro level, we propose Recursive Feature Pyramid, which incorporates extra feedback connections from Feature Pyramid Networks into the bottom-up backbone layers. At the micro level, we propose Switchable Atrous Convolution, which convolves the features with different atrous rates and gathers the results using switch functions. Combining them results in DetectoRS, which significantly improves the performances of object detection. On COCO test-dev, DetectoRS achieves state-of-the-art 55.7% box AP for object detection, 48.5% mask AP for instance segmentation, and 50.0% PQ for panoptic segmentation. - -
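As a rough illustration of the Switchable Atrous Convolution idea mentioned in the abstract, the sketch below runs the same convolution weights at two atrous rates and blends the outputs with a per-pixel switch. The module name, the gate design, and the two-rate simplification are assumptions for the example; mmdet's actual SAC (with `ConvAWS` and pre/post context modules) differs.

```python
# Toy sketch of switchable atrous convolution (illustrative only).
import torch
import torch.nn as nn
import torch.nn.functional as F


class ToySAC(nn.Module):
    def __init__(self, channels=256):
        super().__init__()
        # Shared 3x3 weights, applied at two dilation rates.
        self.weight = nn.Parameter(torch.empty(channels, channels, 3, 3))
        nn.init.kaiming_normal_(self.weight)
        # Switch: lightweight conv producing a per-pixel gate in (0, 1).
        self.switch = nn.Conv2d(channels, 1, kernel_size=1)

    def forward(self, x):
        s = torch.sigmoid(self.switch(x))
        # Same weights, dilation 1 vs. dilation 3; padding keeps spatial size.
        out_small = F.conv2d(x, self.weight, padding=1, dilation=1)
        out_large = F.conv2d(x, self.weight, padding=3, dilation=3)
        return s * out_small + (1 - s) * out_large


# e.g. ToySAC()(torch.randn(1, 256, 64, 64)).shape  # (1, 256, 64, 64)
```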
- -
- -## Introduction - -DetectoRS requires COCO and [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) dataset for training. You need to download and extract it in the COCO dataset path. -The directory should be like this. - -```none -mmdetection -├── mmdet -├── tools -├── configs -├── data -│ ├── coco -│ │ ├── annotations -│ │ ├── train2017 -│ │ ├── val2017 -│ │ ├── test2017 -| | ├── stuffthingmaps -``` - -## Results and Models - -DetectoRS includes two major components: - -- Recursive Feature Pyramid (RFP). -- Switchable Atrous Convolution (SAC). - -They can be used independently. -Combining them together results in DetectoRS. -The results on COCO 2017 val are shown in the below table. - -| Method | Detector | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------: | :-----------------: | :-----: | :------: | :------------: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| RFP | Cascade + ResNet-50 | 1x | 7.5 | - | 44.8 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_rfp_1x_coco/cascade_rcnn_r50_rfp_1x_coco-8cf51bfd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_rfp_1x_coco/cascade_rcnn_r50_rfp_1x_coco_20200624_104126.log.json) | -| SAC | Cascade + ResNet-50 | 1x | 5.6 | - | 45.0 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/cascade_rcnn_r50_sac_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_sac_1x_coco/cascade_rcnn_r50_sac_1x_coco-24bfda62.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_sac_1x_coco/cascade_rcnn_r50_sac_1x_coco_20200624_104402.log.json) | -| DetectoRS | Cascade + ResNet-50 | 1x | 9.9 | - | 47.4 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_cascade_rcnn_r50_1x_coco/detectors_cascade_rcnn_r50_1x_coco-32a10ba0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_cascade_rcnn_r50_1x_coco/detectors_cascade_rcnn_r50_1x_coco_20200706_001203.log.json) | -| RFP | HTC + ResNet-50 | 1x | 11.2 | - | 46.6 | 40.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/htc_r50_rfp_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_rfp_1x_coco/htc_r50_rfp_1x_coco-8ff87c51.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_rfp_1x_coco/htc_r50_rfp_1x_coco_20200624_103053.log.json) | -| SAC | HTC + ResNet-50 | 1x | 9.3 | - | 46.4 | 40.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/htc_r50_sac_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_sac_1x_coco/htc_r50_sac_1x_coco-bfa60c54.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_sac_1x_coco/htc_r50_sac_1x_coco_20200624_103111.log.json) | -| DetectoRS | HTC + ResNet-50 | 1x | 13.6 | - | 49.1 | 42.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/detectors_htc_r50_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco-329b1453.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco_20200624_103659.log.json) | -| DetectoRS | HTC + ResNet-101 | 20e | 19.6 | | 50.5 | 43.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/detectors_htc_r101_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r101_20e_coco/detectors_htc_r101_20e_coco_20210419_203638-348d533b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r101_20e_coco/detectors_htc_r101_20e_coco_20210419_203638.log.json) | - -*Note*: This is a re-implementation based on MMDetection-V2. -The original implementation is based on MMDetection-V1. - -## Citation - -We provide the config files for [DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution](https://arxiv.org/pdf/2006.02334.pdf). - -```latex -@article{qiao2020detectors, - title={DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution}, - author={Qiao, Siyuan and Chen, Liang-Chieh and Yuille, Alan}, - journal={arXiv preprint arXiv:2006.02334}, - year={2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py b/cv/detection/co-detr/pytorch/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py deleted file mode 100644 index 4430d8a677e48f84552eb23403bc874c56bda506..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py +++ /dev/null @@ -1,28 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - type='DetectoRS_ResNet', - conv_cfg=dict(type='ConvAWS'), - output_img=True), - neck=dict( - type='RFP', - rfp_steps=2, - aspp_out_channels=64, - aspp_dilations=(1, 3, 6, 1), - rfp_backbone=dict( - rfp_inplanes=256, - type='DetectoRS_ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - conv_cfg=dict(type='ConvAWS'), - pretrained='torchvision://resnet50', - style='pytorch'))) diff --git a/cv/detection/co-detr/pytorch/configs/detectors/cascade_rcnn_r50_sac_1x_coco.py b/cv/detection/co-detr/pytorch/configs/detectors/cascade_rcnn_r50_sac_1x_coco.py deleted file mode 100644 index ccd9319b2d1badebf3b891c8e3bdd55a435a4b7c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/detectors/cascade_rcnn_r50_sac_1x_coco.py +++ /dev/null @@ -1,12 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - type='DetectoRS_ResNet', - conv_cfg=dict(type='ConvAWS'), - sac=dict(type='SAC', use_deform=True), - stage_with_sac=(False, True, True, True))) diff --git 
a/cv/detection/co-detr/pytorch/configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py b/cv/detection/co-detr/pytorch/configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py deleted file mode 100644 index f76040434f1ff07608c83202f779dfacfe91c323..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py +++ /dev/null @@ -1,32 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - type='DetectoRS_ResNet', - conv_cfg=dict(type='ConvAWS'), - sac=dict(type='SAC', use_deform=True), - stage_with_sac=(False, True, True, True), - output_img=True), - neck=dict( - type='RFP', - rfp_steps=2, - aspp_out_channels=64, - aspp_dilations=(1, 3, 6, 1), - rfp_backbone=dict( - rfp_inplanes=256, - type='DetectoRS_ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - conv_cfg=dict(type='ConvAWS'), - sac=dict(type='SAC', use_deform=True), - stage_with_sac=(False, True, True, True), - pretrained='torchvision://resnet50', - style='pytorch'))) diff --git a/cv/detection/co-detr/pytorch/configs/detectors/detectors_htc_r101_20e_coco.py b/cv/detection/co-detr/pytorch/configs/detectors/detectors_htc_r101_20e_coco.py deleted file mode 100644 index 93d7d2b1adeb3fbdb7bac0107edf4433669e8015..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/detectors/detectors_htc_r101_20e_coco.py +++ /dev/null @@ -1,28 +0,0 @@ -_base_ = '../htc/htc_r101_fpn_20e_coco.py' - -model = dict( - backbone=dict( - type='DetectoRS_ResNet', - conv_cfg=dict(type='ConvAWS'), - sac=dict(type='SAC', use_deform=True), - stage_with_sac=(False, True, True, True), - output_img=True), - neck=dict( - type='RFP', - rfp_steps=2, - aspp_out_channels=64, - aspp_dilations=(1, 3, 6, 1), - rfp_backbone=dict( - rfp_inplanes=256, - type='DetectoRS_ResNet', - depth=101, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - conv_cfg=dict(type='ConvAWS'), - sac=dict(type='SAC', use_deform=True), - stage_with_sac=(False, True, True, True), - pretrained='torchvision://resnet101', - style='pytorch'))) diff --git a/cv/detection/co-detr/pytorch/configs/detectors/detectors_htc_r50_1x_coco.py b/cv/detection/co-detr/pytorch/configs/detectors/detectors_htc_r50_1x_coco.py deleted file mode 100644 index 0d2fc4f77fcca715c1dfb613306d214b636aa0c0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/detectors/detectors_htc_r50_1x_coco.py +++ /dev/null @@ -1,28 +0,0 @@ -_base_ = '../htc/htc_r50_fpn_1x_coco.py' - -model = dict( - backbone=dict( - type='DetectoRS_ResNet', - conv_cfg=dict(type='ConvAWS'), - sac=dict(type='SAC', use_deform=True), - stage_with_sac=(False, True, True, True), - output_img=True), - neck=dict( - type='RFP', - rfp_steps=2, - aspp_out_channels=64, - aspp_dilations=(1, 3, 6, 1), - rfp_backbone=dict( - rfp_inplanes=256, - type='DetectoRS_ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - conv_cfg=dict(type='ConvAWS'), - sac=dict(type='SAC', use_deform=True), - stage_with_sac=(False, True, True, True), - pretrained='torchvision://resnet50', - style='pytorch'))) diff --git 
a/cv/detection/co-detr/pytorch/configs/detectors/htc_r50_rfp_1x_coco.py b/cv/detection/co-detr/pytorch/configs/detectors/htc_r50_rfp_1x_coco.py deleted file mode 100644 index 496104e12550a1985f9c9e3748a343f69d7df6d8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/detectors/htc_r50_rfp_1x_coco.py +++ /dev/null @@ -1,24 +0,0 @@ -_base_ = '../htc/htc_r50_fpn_1x_coco.py' - -model = dict( - backbone=dict( - type='DetectoRS_ResNet', - conv_cfg=dict(type='ConvAWS'), - output_img=True), - neck=dict( - type='RFP', - rfp_steps=2, - aspp_out_channels=64, - aspp_dilations=(1, 3, 6, 1), - rfp_backbone=dict( - rfp_inplanes=256, - type='DetectoRS_ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - conv_cfg=dict(type='ConvAWS'), - pretrained='torchvision://resnet50', - style='pytorch'))) diff --git a/cv/detection/co-detr/pytorch/configs/detectors/htc_r50_sac_1x_coco.py b/cv/detection/co-detr/pytorch/configs/detectors/htc_r50_sac_1x_coco.py deleted file mode 100644 index 72d4db963ffd95851b945911b3db9941426583ab..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/detectors/htc_r50_sac_1x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = '../htc/htc_r50_fpn_1x_coco.py' - -model = dict( - backbone=dict( - type='DetectoRS_ResNet', - conv_cfg=dict(type='ConvAWS'), - sac=dict(type='SAC', use_deform=True), - stage_with_sac=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/detectors/metafile.yml b/cv/detection/co-detr/pytorch/configs/detectors/metafile.yml deleted file mode 100644 index 4bed56949e738108b7f3479ee1e4a5447d45701f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/detectors/metafile.yml +++ /dev/null @@ -1,114 +0,0 @@ -Collections: - - Name: DetectoRS - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ASPP - - FPN - - RFP - - RPN - - ResNet - - RoIAlign - - SAC - Paper: - URL: https://arxiv.org/abs/2006.02334 - Title: 'DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution' - README: configs/detectors/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/backbones/detectors_resnet.py#L205 - Version: v2.2.0 - -Models: - - Name: cascade_rcnn_r50_rfp_1x_coco - In Collection: DetectoRS - Config: configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py - Metadata: - Training Memory (GB): 7.5 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_rfp_1x_coco/cascade_rcnn_r50_rfp_1x_coco-8cf51bfd.pth - - - Name: cascade_rcnn_r50_sac_1x_coco - In Collection: DetectoRS - Config: configs/detectors/cascade_rcnn_r50_sac_1x_coco.py - Metadata: - Training Memory (GB): 5.6 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_sac_1x_coco/cascade_rcnn_r50_sac_1x_coco-24bfda62.pth - - - Name: detectors_cascade_rcnn_r50_1x_coco - In Collection: DetectoRS - Config: configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py - Metadata: - Training Memory (GB): 9.9 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 47.4 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_cascade_rcnn_r50_1x_coco/detectors_cascade_rcnn_r50_1x_coco-32a10ba0.pth - - - Name: htc_r50_rfp_1x_coco - In Collection: DetectoRS - Config: configs/detectors/htc_r50_rfp_1x_coco.py - Metadata: - Training Memory (GB): 11.2 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.6 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 40.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_rfp_1x_coco/htc_r50_rfp_1x_coco-8ff87c51.pth - - - Name: htc_r50_sac_1x_coco - In Collection: DetectoRS - Config: configs/detectors/htc_r50_sac_1x_coco.py - Metadata: - Training Memory (GB): 9.3 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 40.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_sac_1x_coco/htc_r50_sac_1x_coco-bfa60c54.pth - - - Name: detectors_htc_r50_1x_coco - In Collection: DetectoRS - Config: configs/detectors/detectors_htc_r50_1x_coco.py - Metadata: - Training Memory (GB): 13.6 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 49.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 42.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco-329b1453.pth diff --git a/cv/detection/co-detr/pytorch/configs/detr/README.md b/cv/detection/co-detr/pytorch/configs/detr/README.md deleted file mode 100644 index 9f2485d0f42771575b21bdca89f72d4cd99de0ce..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/detr/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# DETR - -> [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) - - - -## Abstract - -We present a new method that views object detection as a direct set prediction problem. Our approach streamlines the detection pipeline, effectively removing the need for many hand-designed components like a non-maximum suppression procedure or anchor generation that explicitly encode our prior knowledge about the task. The main ingredients of the new framework, called DEtection TRansformer or DETR, are a set-based global loss that forces unique predictions via bipartite matching, and a transformer encoder-decoder architecture. Given a fixed small set of learned object queries, DETR reasons about the relations of the objects and the global image context to directly output the final set of predictions in parallel. The new model is conceptually simple and does not require a specialized library, unlike many other modern detectors. DETR demonstrates accuracy and run-time performance on par with the well-established and highly-optimized Faster RCNN baseline on the challenging COCO object detection dataset. Moreover, DETR can be easily generalized to produce panoptic segmentation in a unified manner. We show that it significantly outperforms competitive baselines. - -
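To ground the "direct set prediction via bipartite matching" described above, here is a minimal sketch of the matching step: build a cost between predictions and ground-truth boxes, then solve the assignment with the Hungarian algorithm. The cost weights and function names are illustrative assumptions, not mmdet's `HungarianAssigner`.

```python
# Toy sketch of bipartite matching for set prediction (illustrative only).
import torch
from scipy.optimize import linear_sum_assignment


def hungarian_match(pred_logits, pred_boxes, gt_labels, gt_boxes,
                    cls_weight=1.0, l1_weight=5.0):
    # pred_logits: (num_queries, num_classes); boxes in normalized cxcywh (*, 4)
    # gt_labels:   LongTensor of shape (num_gt,)
    prob = pred_logits.softmax(-1)
    cls_cost = -prob[:, gt_labels]                    # (num_queries, num_gt)
    l1_cost = torch.cdist(pred_boxes, gt_boxes, p=1)  # (num_queries, num_gt)
    cost = cls_weight * cls_cost + l1_weight * l1_cost
    rows, cols = linear_sum_assignment(cost.detach().cpu().numpy())
    # Query rows[i] is matched to ground-truth cols[i]; unmatched queries
    # would be supervised as "no object" in the actual loss.
    return rows, cols
```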
- -
- -## Results and Models - -| Backbone | Model | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :------: | :---: | :-----: | :------: | :------------: | :----: | :----------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | DETR | 150e | 7.9 | | 40.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detr/detr_r50_8x2_150e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835.log.json) | - -## Citation - -We provide the config files for DETR: [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872). - -```latex -@inproceedings{detr, - author = {Nicolas Carion and - Francisco Massa and - Gabriel Synnaeve and - Nicolas Usunier and - Alexander Kirillov and - Sergey Zagoruyko}, - title = {End-to-End Object Detection with Transformers}, - booktitle = {ECCV}, - year = {2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/detr/detr_r50_8x2_150e_coco.py b/cv/detection/co-detr/pytorch/configs/detr/detr_r50_8x2_150e_coco.py deleted file mode 100644 index 892447dec15f3ac0411c5b8d36725b84a40ecfec..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/detr/detr_r50_8x2_150e_coco.py +++ /dev/null @@ -1,150 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' -] -model = dict( - type='DETR', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(3, ), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - bbox_head=dict( - type='DETRHead', - num_classes=80, - in_channels=2048, - transformer=dict( - type='Transformer', - encoder=dict( - type='DetrTransformerEncoder', - num_layers=6, - transformerlayers=dict( - type='BaseTransformerLayer', - attn_cfgs=[ - dict( - type='MultiheadAttention', - embed_dims=256, - num_heads=8, - dropout=0.1) - ], - feedforward_channels=2048, - ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'ffn', 'norm'))), - decoder=dict( - type='DetrTransformerDecoder', - return_intermediate=True, - num_layers=6, - transformerlayers=dict( - type='DetrTransformerDecoderLayer', - attn_cfgs=dict( - type='MultiheadAttention', - embed_dims=256, - num_heads=8, - dropout=0.1), - feedforward_channels=2048, - ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm')), - )), - positional_encoding=dict( - type='SinePositionalEncoding', num_feats=128, normalize=True), - loss_cls=dict( - type='CrossEntropyLoss', - bg_cls_weight=0.1, - use_sigmoid=False, - loss_weight=1.0, - class_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=5.0), - loss_iou=dict(type='GIoULoss', loss_weight=2.0)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='HungarianAssigner', - cls_cost=dict(type='ClassificationCost', weight=1.), - reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), - 
iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))), - test_cfg=dict(max_per_img=100)) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different -# from the default setting in mmdet. -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='AutoAugment', - policies=[[ - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict( - type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ]]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -# test_pipeline, NOTE the Pad's size_divisor is different from the default -# setting (size_divisor=32). While there is little effect on the performance -# whether we use the default setting or use size_divisor=1. -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict( - type='AdamW', - lr=0.0001, - weight_decay=0.0001, - paramwise_cfg=dict( - custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)})) -optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) -# learning policy -lr_config = dict(policy='step', step=[100]) -runner = dict(type='EpochBasedRunner', max_epochs=150) diff --git a/cv/detection/co-detr/pytorch/configs/detr/metafile.yml b/cv/detection/co-detr/pytorch/configs/detr/metafile.yml deleted file mode 100644 index 45622cf9152c6461cd5924605de87c763009f491..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/detr/metafile.yml +++ /dev/null @@ -1,33 +0,0 @@ -Collections: - - Name: DETR - Metadata: - Training Data: COCO - Training Techniques: - - AdamW - - Multi Scale Train - - Gradient Clip - Training Resources: 8x V100 GPUs - Architecture: - - ResNet - - Transformer - Paper: - URL: https://arxiv.org/abs/2005.12872 - Title: 'End-to-End Object Detection with Transformers' - README: configs/detr/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/detectors/detr.py#L7 - Version: v2.7.0 - -Models: - - Name: detr_r50_8x2_150e_coco - In Collection: DETR - Config: configs/detr/detr_r50_8x2_150e_coco.py - Metadata: - Training Memory (GB): 7.9 - Epochs: 150 - Results: - - Task: Object Detection - 
Dataset: COCO - Metrics: - box AP: 40.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth diff --git a/cv/detection/co-detr/pytorch/configs/double_heads/README.md b/cv/detection/co-detr/pytorch/configs/double_heads/README.md deleted file mode 100644 index 4a149b5fc336d1c10d6e58b2c446b8fb36b0bdae..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/double_heads/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# Double Heads - -> [Rethinking Classification and Localization for Object Detection](https://arxiv.org/abs/1904.06493) - - - -## Abstract - -Two head structures (i.e. fully connected head and convolution head) have been widely used in R-CNN based detectors for classification and localization tasks. However, there is a lack of understanding of how does these two head structures work for these two tasks. To address this issue, we perform a thorough analysis and find an interesting fact that the two head structures have opposite preferences towards the two tasks. Specifically, the fully connected head (fc-head) is more suitable for the classification task, while the convolution head (conv-head) is more suitable for the localization task. Furthermore, we examine the output feature maps of both heads and find that fc-head has more spatial sensitivity than conv-head. Thus, fc-head has more capability to distinguish a complete object from part of an object, but is not robust to regress the whole object. Based upon these findings, we propose a Double-Head method, which has a fully connected head focusing on classification and a convolution head for bounding box regression. Without bells and whistles, our method gains +3.5 and +2.8 AP on MS COCO dataset from Feature Pyramid Network (FPN) baselines with ResNet-50 and ResNet-101 backbones, respectively. - -
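The abstract's split between an fc-head for classification and a conv-head for localization can be sketched in a few lines; layer sizes and names below are assumptions for illustration and do not mirror mmdet's `DoubleConvFCBBoxHead`.

```python
# Toy sketch of a double-head RoI head (illustrative only).
import torch
import torch.nn as nn


class ToyDoubleHead(nn.Module):
    def __init__(self, in_channels=256, roi_size=7, num_classes=80):
        super().__init__()
        # fc branch: used for classification.
        self.fc_branch = nn.Sequential(
            nn.Flatten(),
            nn.Linear(in_channels * roi_size * roi_size, 1024), nn.ReLU(),
            nn.Linear(1024, 1024), nn.ReLU())
        self.cls_score = nn.Linear(1024, num_classes + 1)  # +1 for background
        # conv branch: used for box regression.
        self.conv_branch = nn.Sequential(
            nn.Conv2d(in_channels, 1024, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten())
        self.bbox_pred = nn.Linear(1024, 4 * num_classes)

    def forward(self, roi_feat):            # roi_feat: (num_rois, C, 7, 7)
        cls = self.cls_score(self.fc_branch(roi_feat))
        bbox = self.bbox_pred(self.conv_branch(roi_feat))
        return cls, bbox


# e.g. ToyDoubleHead()(torch.randn(8, 256, 7, 7))  # -> (8, 81), (8, 320)
```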
- -
- -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :------: | :-----: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | pytorch | 1x | 6.8 | 9.5 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130_220238.log.json) | - -## Citation - -```latex -@article{wu2019rethinking, - title={Rethinking Classification and Localization for Object Detection}, - author={Yue Wu and Yinpeng Chen and Lu Yuan and Zicheng Liu and Lijuan Wang and Hongzhi Li and Yun Fu}, - year={2019}, - eprint={1904.06493}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index 9b8118b4b633c78120c370f877f47e951c2fdb38..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,23 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - roi_head=dict( - type='DoubleHeadRoIHead', - reg_roi_scale_factor=1.3, - bbox_head=dict( - _delete_=True, - type='DoubleConvFCBBoxHead', - num_convs=4, - num_fcs=2, - in_channels=256, - conv_out_channels=1024, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=2.0)))) diff --git a/cv/detection/co-detr/pytorch/configs/double_heads/metafile.yml b/cv/detection/co-detr/pytorch/configs/double_heads/metafile.yml deleted file mode 100644 index 6fe9b7af952d8d5e1d221862ee3f1098a547355e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/double_heads/metafile.yml +++ /dev/null @@ -1,41 +0,0 @@ -Collections: - - Name: Rethinking Classification and Localization for Object Detection - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - RPN - - ResNet - - RoIAlign - Paper: - URL: https://arxiv.org/pdf/1904.06493 - Title: 'Rethinking Classification and Localization for Object Detection' - README: configs/double_heads/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/roi_heads/double_roi_head.py#L6 - Version: v2.0.0 - -Models: - - Name: dh_faster_rcnn_r50_fpn_1x_coco - In Collection: Rethinking Classification and Localization for Object Detection 
- Config: configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.8 - inference time (ms/im): - - value: 105.26 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth diff --git a/cv/detection/co-detr/pytorch/configs/dyhead/README.md b/cv/detection/co-detr/pytorch/configs/dyhead/README.md deleted file mode 100644 index 8e6aed3619ba81861b3a9ddf42f69a24508c2003..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dyhead/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# DyHead - -> [Dynamic Head: Unifying Object Detection Heads with Attentions](https://arxiv.org/abs/2106.08322) - - - -## Abstract - -The complex nature of combining localization and classification in object detection has resulted in the flourished development of methods. Previous works tried to improve the performance in various object detection heads but failed to present a unified view. In this paper, we present a novel dynamic head framework to unify object detection heads with attentions. By coherently combining multiple self-attention mechanisms between feature levels for scale-awareness, among spatial locations for spatial-awareness, and within output channels for task-awareness, the proposed approach significantly improves the representation ability of object detection heads without any computational overhead. Further experiments demonstrate that the effectiveness and efficiency of the proposed dynamic head on the COCO benchmark. With a standard ResNeXt-101-DCN backbone, we largely improve the performance over popular object detectors and achieve a new state-of-the-art at 54.0 AP. Furthermore, with latest transformer backbone and extra data, we can push current best COCO result to a new record at 60.6 AP. - -
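Of the three attentions named in the abstract, the scale-aware one is the simplest to sketch: each pyramid level is re-weighted by a gate computed from its globally pooled features. This is an illustrative simplification under assumed names, not mmdet's `DyHead` block (which also applies spatial- and task-aware attention).

```python
# Toy sketch of scale-aware attention over FPN levels (illustrative only).
import torch
import torch.nn as nn


class ToyScaleAttention(nn.Module):
    def __init__(self, channels=256):
        super().__init__()
        self.gate = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),              # global context per level
            nn.Conv2d(channels, 1, kernel_size=1),
            nn.ReLU(),
            nn.Hardsigmoid())                     # gate in [0, 1]

    def forward(self, feats):
        # feats: list of pyramid levels, each (B, C, H_l, W_l)
        return [f * self.gate(f) for f in feats]


# e.g. ToyScaleAttention()([torch.randn(1, 256, s, s) for s in (64, 32, 16)])
```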
- -
 - -## Results and Models - -| Method | Backbone | Style | Setting | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :----: | :------: | :-----: | :----------: | :-----: | :------: | :------------: | :----: | :----------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| ATSS | R-50 | caffe | reproduction | 1x | 5.4 | 13.2 | 42.5 | [config](./atss_r50_caffe_fpn_dyhead_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_for_reproduction_1x_coco/atss_r50_fpn_dyhead_for_reproduction_4x4_1x_coco_20220107_213939-162888e6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_for_reproduction_1x_coco/atss_r50_fpn_dyhead_for_reproduction_4x4_1x_coco_20220107_213939.log.json) | -| ATSS | R-50 | pytorch | simple | 1x | 4.9 | 13.7 | 43.3 | [config](./atss_r50_fpn_dyhead_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314.log.json) | - -- We trained the above models with 4 GPUs and 4 `samples_per_gpu`. -- The `reproduction` setting aims to reproduce the official implementation based on Detectron2. -- The `simple` setting serves as a minimal example of using DyHead in MMDetection. Specifically, - - it adds `DyHead` to `neck` after `FPN` - - it sets `stacked_convs=0` in `bbox_head` -- The `simple` setting achieves higher AP than the original implementation. - We have not conducted an ablation study between the two settings. - `dict(type='Pad', size_divisor=128)` may further improve AP by preferring spatial alignment across pyramid levels, although large padding reduces efficiency. - -We also trained the model with a Swin-L backbone. Results are shown below. 
- -| Method | Backbone | Style | Setting | Lr schd | mstrain | box AP | Config | Download | -| :----: | :------: | :---: | :----------: | :-----: | :------: | :----: | :----------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| ATSS | Swin-L | caffe | reproduction | 2x | 480~1200 | 56.2 | [config](./atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco_20220509_100315-bc5b6516.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco_20220509_100315.log.json) | - -## Relation to Other Methods - -- DyHead can be regarded as an improved [SEPC](https://arxiv.org/abs/2005.03101) with [DyReLU modules](https://arxiv.org/abs/2003.10027) and simplified [SE blocks](https://arxiv.org/abs/1709.01507). -- Xiyang Dai et al., the author team of DyHead, adopt it for [Dynamic DETR](https://openaccess.thecvf.com/content/ICCV2021/html/Dai_Dynamic_DETR_End-to-End_Object_Detection_With_Dynamic_Attention_ICCV_2021_paper.html). - The description of Dynamic Encoder in Sec. 3.2 will help you understand DyHead. - -## Citation - -```latex -@inproceedings{DyHead_CVPR2021, - author = {Dai, Xiyang and Chen, Yinpeng and Xiao, Bin and Chen, Dongdong and Liu, Mengchen and Yuan, Lu and Zhang, Lei}, - title = {Dynamic Head: Unifying Object Detection Heads With Attentions}, - booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2021} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/dyhead/atss_r50_caffe_fpn_dyhead_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dyhead/atss_r50_caffe_fpn_dyhead_1x_coco.py deleted file mode 100644 index 223b6532607e65672e86587737592115b6a3aa01..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dyhead/atss_r50_caffe_fpn_dyhead_1x_coco.py +++ /dev/null @@ -1,112 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - type='ATSS', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - neck=[ - dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5), - dict( - type='DyHead', - in_channels=256, - out_channels=256, - num_blocks=6, - # disable zero_init_offset to follow official implementation - zero_init_offset=False) - ], - bbox_head=dict( - type='ATSSHead', - num_classes=80, - in_channels=256, - pred_kernel_size=1, # follow DyHead official implementation - stacked_convs=0, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128], - center_offset=0.5), # follow 
DyHead official implementation - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=2.0), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), - # training and testing settings - train_cfg=dict( - assigner=dict(type='ATSSAssigner', topk=9), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) - -# use caffe img_norm, size_divisor=128, pillow resize -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=(1333, 800), - keep_ratio=True, - backend='pillow'), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=128), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True, backend='pillow'), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=128), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py deleted file mode 100644 index 8c5109d0aff16ad57fc00fadf3a832c3b7e08d89..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py +++ /dev/null @@ -1,65 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - type='ATSS', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=[ - dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5), - dict(type='DyHead', in_channels=256, out_channels=256, num_blocks=6) - ], - bbox_head=dict( - type='ATSSHead', - num_classes=80, - in_channels=256, - stacked_convs=0, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=2.0), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), - # training and testing 
settings - train_cfg=dict( - assigner=dict(type='ATSSAssigner', topk=9), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py deleted file mode 100644 index dc9c328266af57b3310771724196d51778d0e018..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py +++ /dev/null @@ -1,164 +0,0 @@ -_base_ = '../_base_/default_runtime.py' - -pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa -model = dict( - type='ATSS', - backbone=dict( - type='SwinTransformer', - pretrain_img_size=384, - embed_dims=192, - depths=[2, 2, 18, 2], - num_heads=[6, 12, 24, 48], - window_size=12, - mlp_ratio=4, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.2, - patch_norm=True, - out_indices=(1, 2, 3), - # Please only add indices that would be used - # in FPN, otherwise some parameter will not be used - with_cp=False, - convert_weights=True, - init_cfg=dict(type='Pretrained', checkpoint=pretrained)), - neck=[ - dict( - type='FPN', - in_channels=[384, 768, 1536], - out_channels=256, - start_level=0, - add_extra_convs='on_output', - num_outs=5), - dict( - type='DyHead', - in_channels=256, - out_channels=256, - num_blocks=6, - # disable zero_init_offset to follow official implementation - zero_init_offset=False) - ], - bbox_head=dict( - type='ATSSHead', - num_classes=80, - in_channels=256, - pred_kernel_size=1, # follow DyHead official implementation - stacked_convs=0, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128], - center_offset=0.5), # follow DyHead official implementation - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=2.0), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), - # training and testing settings - train_cfg=dict( - assigner=dict(type='ATSSAssigner', topk=9), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) - -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(2000, 480), (2000, 1200)], - multiscale_mode='range', - keep_ratio=True, - backend='pillow'), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=128), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - 
dict( - type='MultiScaleFlipAug', - img_scale=(2000, 1200), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True, backend='pillow'), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=128), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -# Use RepeatDataset to speed up training -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='RepeatDataset', - times=2, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='bbox') - -# optimizer -optimizer_config = dict(grad_clip=None) -optimizer = dict( - type='AdamW', - lr=0.00005, - betas=(0.9, 0.999), - weight_decay=0.05, - paramwise_cfg=dict( - custom_keys={ - 'absolute_pos_embed': dict(decay_mult=0.), - 'relative_position_bias_table': dict(decay_mult=0.), - 'norm': dict(decay_mult=0.) - })) - -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[8, 11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/cv/detection/co-detr/pytorch/configs/dyhead/metafile.yml b/cv/detection/co-detr/pytorch/configs/dyhead/metafile.yml deleted file mode 100644 index 3fb73707f1814320adeb1c235d1ad92e41bbbd48..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dyhead/metafile.yml +++ /dev/null @@ -1,76 +0,0 @@ -Collections: - - Name: DyHead - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 4x T4 GPUs - Architecture: - - ATSS - - DyHead - - FPN - - ResNet - - Deformable Convolution - - Pyramid Convolution - Paper: - URL: https://arxiv.org/abs/2106.08322 - Title: 'Dynamic Head: Unifying Object Detection Heads with Attentions' - README: configs/dyhead/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/mmdet/models/necks/dyhead.py#L130 - Version: v2.22.0 - -Models: - - Name: atss_r50_caffe_fpn_dyhead_1x_coco - In Collection: DyHead - Config: configs/dyhead/atss_r50_caffe_fpn_dyhead_1x_coco.py - Metadata: - Training Memory (GB): 5.4 - inference time (ms/im): - - value: 75.7 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_for_reproduction_1x_coco/atss_r50_fpn_dyhead_for_reproduction_4x4_1x_coco_20220107_213939-162888e6.pth - - - Name: atss_r50_fpn_dyhead_1x_coco - In Collection: DyHead - Config: configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py - Metadata: - Training Memory (GB): 4.9 - inference time (ms/im): - - value: 73.1 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.3 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth - - - Name: atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco - In Collection: DyHead - Config: configs/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py - Metadata: - Training Memory (GB): 58.4 - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 56.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco_20220509_100315-bc5b6516.pth diff --git a/cv/detection/co-detr/pytorch/configs/dynamic_rcnn/README.md b/cv/detection/co-detr/pytorch/configs/dynamic_rcnn/README.md deleted file mode 100644 index 0045df7b4a678063b4bdd03f4b235306721cbb0f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dynamic_rcnn/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Dynamic R-CNN - -> [Dynamic R-CNN: Towards High Quality Object Detection via Dynamic Training](https://arxiv.org/abs/2004.06002) - - - -## Abstract - -Although two-stage object detectors have continuously advanced the state-of-the-art performance in recent years, the training process itself is far from crystal. In this work, we first point out the inconsistency problem between the fixed network settings and the dynamic training procedure, which greatly affects the performance. For example, the fixed label assignment strategy and regression loss function cannot fit the distribution change of proposals and thus are harmful to training high quality detectors. Consequently, we propose Dynamic R-CNN to adjust the label assignment criteria (IoU threshold) and the shape of regression loss function (parameters of SmoothL1 Loss) automatically based on the statistics of proposals during training. This dynamic design makes better use of the training samples and pushes the detector to fit more high quality samples. Specifically, our method improves upon ResNet-50-FPN baseline with 1.9% AP and 5.5% AP90 on the MS COCO dataset with no extra overhead. - -
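The update rule sketched in the abstract can be made concrete in a few lines. The helper below is an illustrative sketch only (assumed function and argument names, not mmdetection's `DynamicRoIHead`): it tightens the label-assignment IoU threshold and shrinks the SmoothL1 beta from recent proposal statistics, which is what the `iou_topk`, `beta_topk` and `update_iter_interval` fields in the config further down control.

```python
# Illustrative sketch of Dynamic R-CNN's hyper-parameter update (assumed names).
import numpy as np

def update_dynamic_hyperparams(per_image_ious, per_image_reg_errors,
                               iou_topk=75, beta_topk=10):
    """per_image_ious / per_image_reg_errors: one 1-D array per image."""
    # Dynamic Label Assignment: average the iou_topk-th highest proposal IoU
    # over the batch, so the positive/negative criterion tightens as proposals
    # improve during training.
    iou_samples = [np.sort(ious)[-min(iou_topk, len(ious))]
                   for ious in per_image_ious]
    new_iou_thr = float(np.mean(iou_samples))
    # Dynamic SmoothL1 Loss: use the batch median of the beta_topk-th smallest
    # regression error as the new beta, focusing the loss on high-quality boxes.
    beta_samples = [np.sort(errs)[min(beta_topk, len(errs)) - 1]
                    for errs in per_image_reg_errors]
    new_beta = float(np.median(beta_samples))
    return new_iou_thr, new_beta
```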
- -
- -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :------: | :-----: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | pytorch | 1x | 3.8 | | 38.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x_20200618_095048.log.json) | - -## Citation - -```latex -@article{DynamicRCNN, - author = {Hongkai Zhang and Hong Chang and Bingpeng Ma and Naiyan Wang and Xilin Chen}, - title = {Dynamic {R-CNN}: Towards High Quality Object Detection via Dynamic Training}, - journal = {arXiv preprint arXiv:2004.06002}, - year = {2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index f2deb99e44cba92fd79d0a2cd258ddf6927703c0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,28 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - roi_head=dict( - type='DynamicRoIHead', - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), - train_cfg=dict( - rpn_proposal=dict(nms=dict(iou_threshold=0.85)), - rcnn=dict( - dynamic_rcnn=dict( - iou_topk=75, - beta_topk=10, - update_iter_interval=100, - initial_iou=0.4, - initial_beta=1.0))), - test_cfg=dict(rpn=dict(nms=dict(iou_threshold=0.85)))) diff --git a/cv/detection/co-detr/pytorch/configs/dynamic_rcnn/metafile.yml b/cv/detection/co-detr/pytorch/configs/dynamic_rcnn/metafile.yml deleted file mode 100644 index fec43db44da50bacdfe5aac29518f87194dd4e75..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/dynamic_rcnn/metafile.yml +++ /dev/null @@ -1,35 +0,0 @@ -Collections: - - Name: Dynamic R-CNN - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Dynamic R-CNN - - FPN - - RPN - - ResNet - - RoIAlign - Paper: - URL: https://arxiv.org/pdf/2004.06002 - Title: 'Dynamic R-CNN: Towards High Quality Object Detection via Dynamic Training' - README: configs/dynamic_rcnn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/roi_heads/dynamic_roi_head.py#L11 - Version: v2.2.0 - -Models: - - Name: dynamic_rcnn_r50_fpn_1x_coco - In Collection: Dynamic R-CNN - Config: 
configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 3.8 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth diff --git a/cv/detection/co-detr/pytorch/configs/efficientnet/README.md b/cv/detection/co-detr/pytorch/configs/efficientnet/README.md deleted file mode 100644 index 99b05722b7f7ef59b6399c09290f80b256ed794f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/efficientnet/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# EfficientNet - -> [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946v5) - - - -## Introduction - -Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet. - -To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3% top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters. 
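The compound scaling rule described above has a simple closed form: depth, width and input resolution are all scaled by a single coefficient phi, using base factors found by grid search in the EfficientNet paper (alpha = 1.2, beta = 1.1, gamma = 1.15, constrained so that alpha * beta^2 * gamma^2 is roughly 2). A minimal sketch, with an assumed helper name:

```python
# Compound scaling sketch: one coefficient phi grows depth, width and
# resolution together (base factors from the EfficientNet paper).
ALPHA, BETA, GAMMA = 1.2, 1.1, 1.15  # alpha * beta**2 * gamma**2 ~= 2

def compound_scale(phi, base_resolution=224):
    depth_mult = ALPHA ** phi                            # more layers
    width_mult = BETA ** phi                             # wider channels
    resolution = round(base_resolution * GAMMA ** phi)   # larger inputs
    return depth_mult, width_mult, resolution

# Each step of phi multiplies FLOPs by roughly 2, since cost scales with
# depth * width^2 * resolution^2.
```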
- -## Results and Models - -### RetinaNet - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Efficientnet-b3 | pytorch | 1x | - | - | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806.log.json) | - -## Citation - -```latex -@article{tan2019efficientnet, - title={Efficientnet: Rethinking model scaling for convolutional neural networks}, - author={Tan, Mingxing and Le, Quoc V}, - journal={arXiv preprint arXiv:1905.11946}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/efficientnet/metafile.yml b/cv/detection/co-detr/pytorch/configs/efficientnet/metafile.yml deleted file mode 100644 index de40b953dfa0d84dae1e1ba553bfa4aebaed23e6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/efficientnet/metafile.yml +++ /dev/null @@ -1,19 +0,0 @@ -Models: - - Name: retinanet_effb3_fpn_crop896_8x4_1x_coco - In Collection: RetinaNet - Config: configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth - Paper: - URL: https://arxiv.org/abs/1905.11946v5 - Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks' - README: configs/efficientnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.23.0/mmdet/models/backbones/efficientnet.py#L159 - Version: v2.23.0 diff --git a/cv/detection/co-detr/pytorch/configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py b/cv/detection/co-detr/pytorch/configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py deleted file mode 100644 index c90bc1678627a6da163ad19efd6f7d613bb7fb17..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py +++ /dev/null @@ -1,94 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' -] - -cudnn_benchmark = True -norm_cfg = dict(type='BN', requires_grad=True) -checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa -model = dict( - backbone=dict( - _delete_=True, - type='EfficientNet', - arch='b3', - drop_path_rate=0.2, - out_indices=(3, 4, 5), - frozen_stages=0, - 
norm_cfg=dict( - type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01), - norm_eval=False, - init_cfg=dict( - type='Pretrained', prefix='backbone', checkpoint=checkpoint)), - neck=dict( - in_channels=[48, 136, 384], - start_level=0, - out_channels=256, - relu_before_extra_convs=True, - no_norm_on_lateral=True, - norm_cfg=norm_cfg), - bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg), - # training and testing settings - train_cfg=dict(assigner=dict(neg_iou_thr=0.5))) - -# dataset settings -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -img_size = (896, 896) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=img_size, - ratio_range=(0.8, 1.2), - keep_ratio=True), - dict(type='RandomCrop', crop_size=img_size), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=img_size), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=img_size, - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=img_size), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=4, - workers_per_gpu=4, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer_config = dict(grad_clip=None) -optimizer = dict( - type='SGD', - lr=0.04, - momentum=0.9, - weight_decay=0.0001, - paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=0.1, - step=[8, 11]) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=12) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (4 samples per GPU) -auto_scale_lr = dict(base_batch_size=32) diff --git a/cv/detection/co-detr/pytorch/configs/empirical_attention/README.md b/cv/detection/co-detr/pytorch/configs/empirical_attention/README.md deleted file mode 100644 index fc2620a52d55ca1a01ff6acd3fb7fa3c0e4e376c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/empirical_attention/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# Empirical Attention - -> [An Empirical Study of Spatial Attention Mechanisms in Deep Networks](https://arxiv.org/abs/1904.05873) - - - -## Abstract - -Attention mechanisms have become a popular component in deep neural networks, yet there has been little examination of how different influencing factors and methods for computing attention from these factors affect performance. Toward a better general understanding of attention mechanisms, we present an empirical study that ablates various spatial attention elements within a generalized attention formulation, encompassing the dominant Transformer attention as well as the prevalent deformable convolution and dynamic convolution modules. Conducted on a variety of applications, the study yields significant findings about spatial attention in deep networks, some of which run counter to conventional understanding. 
For example, we find that the query and key content comparison in Transformer attention is negligible for self-attention, but vital for encoder-decoder attention. A proper combination of deformable convolution with key content only saliency achieves the best accuracy-efficiency tradeoff in self-attention. Our results suggest that there exists much room for improvement in the design of attention mechanisms. - -
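In the configs that follow, the 4-character `attention_type` string (`'1111'`, `'0010'`, ...) switches the four terms of this generalized formulation on or off: query-and-key content, query content with relative position, key content only (the saliency term), and relative position only. Below is a minimal sketch of that gated sum, with assumed tensor shapes and names (this is not mmcv's `GeneralizedAttention` implementation):

```python
# Sketch of the generalized attention factorization from the paper.
import torch

def generalized_attention_weights(q, k, rel_pos, u, v, attention_type='1111'):
    """q: (Nq, D) query content, k: (Nk, D) key content,
    rel_pos: (Nq, Nk, D) relative-position embeddings,
    u, v: (D,) learned vectors for the key-only and position-only terms."""
    use = [c == '1' for c in attention_type]
    logits = q.new_zeros(q.size(0), k.size(0))
    if use[0]:  # query-and-key content
        logits = logits + q @ k.t()
    if use[1]:  # query content and relative position
        logits = logits + torch.einsum('qd,qkd->qk', q, rel_pos)
    if use[2]:  # key content only (saliency)
        logits = logits + (k @ u).unsqueeze(0)
    if use[3]:  # relative position only
        logits = logits + torch.einsum('d,qkd->qk', v, rel_pos)
    return logits.softmax(dim=-1)
```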
- -
- -## Results and Models - -| Backbone | Attention Component | DCN | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :------: | :-----------------: | :-: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | 1111 | N | 1x | 8.0 | 13.8 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130_210344.log.json) | -| R-50 | 0010 | N | 1x | 4.2 | 18.4 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130-7cb0c14d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130_210125.log.json) | -| R-50 | 1111 | Y | 1x | 8.0 | 12.7 | 42.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130-8b2523a6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130_204442.log.json) | -| R-50 | 0010 | Y | 1x | 4.2 | 17.1 | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130-1a2e831d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130_210410.log.json) | - -## Citation - -```latex -@article{zhu2019empirical, - title={An Empirical Study of Spatial Attention Mechanisms in Deep Networks}, - author={Zhu, Xizhou and Cheng, Dazhi and Zhang, Zheng and Lin, Stephen and Dai, Jifeng}, - journal={arXiv preprint arXiv:1904.05873}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py 
b/cv/detection/co-detr/pytorch/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py deleted file mode 100644 index a544e3ab636aea0efe56007a0ea40608b6e71ad4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict(plugins=[ - dict( - cfg=dict( - type='GeneralizedAttention', - spatial_range=-1, - num_heads=8, - attention_type='0010', - kv_stride=2), - stages=(False, False, True, True), - position='after_conv2') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py deleted file mode 100644 index bbefd27aa02f427e27068b37ecf4d30fbd49b519..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - plugins=[ - dict( - cfg=dict( - type='GeneralizedAttention', - spatial_range=-1, - num_heads=8, - attention_type='0010', - kv_stride=2), - stages=(False, False, True, True), - position='after_conv2') - ], - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py b/cv/detection/co-detr/pytorch/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py deleted file mode 100644 index 13a4645bfdb50d5a2f04cee49ecc5f7647d10acf..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict(plugins=[ - dict( - cfg=dict( - type='GeneralizedAttention', - spatial_range=-1, - num_heads=8, - attention_type='1111', - kv_stride=2), - stages=(False, False, True, True), - position='after_conv2') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py deleted file mode 100644 index b1f26c081da27811f856fe9973eb444c82604727..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - plugins=[ - dict( - cfg=dict( - type='GeneralizedAttention', - spatial_range=-1, - num_heads=8, - attention_type='1111', - kv_stride=2), - stages=(False, False, True, True), - position='after_conv2') - ], - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/cv/detection/co-detr/pytorch/configs/empirical_attention/metafile.yml b/cv/detection/co-detr/pytorch/configs/empirical_attention/metafile.yml deleted file mode 100644 index 923bcb20d926353d5aedae9e08f5a099c19313f0..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/configs/empirical_attention/metafile.yml +++ /dev/null @@ -1,103 +0,0 @@ -Collections: - - Name: Empirical Attention - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Deformable Convolution - - FPN - - RPN - - ResNet - - RoIAlign - - Spatial Attention - Paper: - URL: https://arxiv.org/pdf/1904.05873 - Title: 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks' - README: configs/empirical_attention/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/generalized_attention.py#L10 - Version: v2.0.0 - -Models: - - Name: faster_rcnn_r50_fpn_attention_1111_1x_coco - In Collection: Empirical Attention - Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py - Metadata: - Training Memory (GB): 8.0 - inference time (ms/im): - - value: 72.46 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth - - - Name: faster_rcnn_r50_fpn_attention_0010_1x_coco - In Collection: Empirical Attention - Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py - Metadata: - Training Memory (GB): 4.2 - inference time (ms/im): - - value: 54.35 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130-7cb0c14d.pth - - - Name: faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco - In Collection: Empirical Attention - Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py - Metadata: - Training Memory (GB): 8.0 - inference time (ms/im): - - value: 78.74 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130-8b2523a6.pth - - - Name: faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco - In Collection: Empirical Attention - Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py - Metadata: - Training Memory (GB): 4.2 - inference time (ms/im): - - value: 58.48 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130-1a2e831d.pth diff --git a/cv/detection/co-detr/pytorch/configs/fast_rcnn/README.md b/cv/detection/co-detr/pytorch/configs/fast_rcnn/README.md deleted file mode 100644 index 767f76ca7d8e1dccb60c57582b1515cb4c76ab3b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fast_rcnn/README.md 
+++ /dev/null @@ -1,73 +0,0 @@ -# Fast R-CNN - -> [Fast R-CNN](https://arxiv.org/abs/1504.08083) - - - -## Abstract - -This paper proposes a Fast Region-based Convolutional Network method (Fast R-CNN) for object detection. Fast R-CNN builds on previous work to efficiently classify object proposals using deep convolutional networks. Compared to previous work, Fast R-CNN employs several innovations to improve training and testing speed while also increasing detection accuracy. Fast R-CNN trains the very deep VGG16 network 9x faster than R-CNN, is 213x faster at test-time, and achieves a higher mAP on PASCAL VOC 2012. Compared to SPPnet, Fast R-CNN trains VGG16 3x faster, tests 10x faster, and is more accurate. - -
- -
- -## Introduction - -Before training the Fast R-CNN, users should first train an [RPN](../rpn/README.md), and use the RPN to extract the region proposals. - -- Firstly, extract the region proposals of the val set by this command as below: - -```bash -./tools/dist_test.sh \ - configs/rpn_r50_fpn_1x_coco.py \ - checkpoints/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth \ - 8 \ - --out proposals/rpn_r50_fpn_1x_val2017.pkl -``` - -- Then, change the `ann_file` and `img_prefix` of `data.test` in the RPN config to train set as below: - -```python -data = dict( - test=dict( - ann_file='data/coco/annotations/instances_train2017.json', - img_prefix='data/coco/train2017/')) -``` - -- Extract the region proposals of the train set by this command as below: - -```bash -./tools/dist_test.sh \ - configs/rpn_r50_fpn_1x_coco.py \ - checkpoints/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth \ - 8 \ - --out proposals/rpn_r50_fpn_1x_train2017.pkl -``` - -- Modify the path of `proposal_file` in Fast R-CNN config as below: - -```python -data = dict( - train=dict( - proposal_file='proposals/rpn_r50_fpn_1x_train2017.pkl'), - val=dict( - proposal_file='proposals/rpn_r50_fpn_1x_val2017.pkl'), - test=dict( - proposal_file='proposals/rpn_r50_fpn_1x_val2017.pkl')) -``` - -Finally, users can start training the Fast R-CNN. - -## Results and Models - -## Citation - -```latex -@inproceedings{girshick2015fast, - title={Fast r-cnn}, - author={Girshick, Ross}, - booktitle={Proceedings of the IEEE international conference on computer vision}, - year={2015} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r101_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r101_caffe_fpn_1x_coco.py deleted file mode 100644 index 3ab8e98104de5af7b5b99e7cb03995736e9ac5a4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r101_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './fast_rcnn_r50_caffe_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r101_fpn_1x_coco.py deleted file mode 100644 index 83852b24e7c8d23f812733f7b2fd24fc0d0f38f8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r101_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './fast_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py deleted file mode 100644 index c22088579ea4a5b2d8e32a8349da63d2dc8b5f7f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './fast_rcnn_r50_fpn_2x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 
f1b29ef30c7662d821921851c994d7ea78aeca34..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,48 +0,0 @@ -_base_ = './fast_rcnn_r50_fpn_1x_coco.py' - -model = dict( - backbone=dict( - norm_cfg=dict(type='BN', requires_grad=False), - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe'))) - -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadProposals', num_max_proposals=2000), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadProposals', num_max_proposals=None), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='ToTensor', keys=['proposals']), - dict( - type='ToDataContainer', - fields=[dict(key='proposals', stack=False)]), - dict(type='Collect', keys=['img', 'proposals']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index d2f080e9d3b1ddade22341aa38c6258eaee78a50..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,52 +0,0 @@ -_base_ = [ - '../_base_/models/fast_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadProposals', num_max_proposals=2000), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadProposals', num_max_proposals=None), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='ToTensor', keys=['proposals']), - dict( - type='ToDataContainer', - fields=[dict(key='proposals', stack=False)]), - dict(type='Collect', keys=['img', 'proposals']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - 
proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_train2017.pkl', - pipeline=train_pipeline), - val=dict( - proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl', - pipeline=test_pipeline), - test=dict( - proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl', - pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r50_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r50_fpn_2x_coco.py deleted file mode 100644 index 228e85645c1c7d1556810d209679d49abcd86f8f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fast_rcnn/fast_rcnn_r50_fpn_2x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = './fast_rcnn_r50_fpn_1x_coco.py' - -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/README.md b/cv/detection/co-detr/pytorch/configs/faster_rcnn/README.md deleted file mode 100644 index ec53440e38fd0dfa06043b3046e5bdcb2420c766..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# Faster R-CNN - -> [Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks](https://arxiv.org/abs/1506.01497) - - - -## Abstract - -State-of-the-art object detection networks depend on region proposal algorithms to hypothesize object locations. Advances like SPPnet and Fast R-CNN have reduced the running time of these detection networks, exposing region proposal computation as a bottleneck. In this work, we introduce a Region Proposal Network (RPN) that shares full-image convolutional features with the detection network, thus enabling nearly cost-free region proposals. An RPN is a fully convolutional network that simultaneously predicts object bounds and objectness scores at each position. The RPN is trained end-to-end to generate high-quality region proposals, which are used by Fast R-CNN for detection. We further merge RPN and Fast R-CNN into a single network by sharing their convolutional features---using the recently popular terminology of neural networks with 'attention' mechanisms, the RPN component tells the unified network where to look. For the very deep VGG-16 model, our detection system has a frame rate of 5fps (including all steps) on a GPU, while achieving state-of-the-art object detection accuracy on PASCAL VOC 2007, 2012, and MS COCO datasets with only 300 proposals per image. In ILSVRC and COCO 2015 competitions, Faster R-CNN and RPN are the foundations of the 1st-place winning entries in several tracks. - -
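As a rough picture of the Region Proposal Network described above: a shared convolution slides over the backbone feature map, and at every spatial position it predicts an objectness score and four box deltas per anchor. The class below is an illustrative sketch with assumed names, not mmdetection's `RPNHead`:

```python
import torch
import torch.nn as nn

class TinyRPNHead(nn.Module):
    """Minimal RPN head sketch: shared 3x3 conv, then per-anchor objectness
    scores and box deltas at every feature-map position."""

    def __init__(self, in_channels=256, num_anchors=3):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, in_channels, 3, padding=1)
        self.cls = nn.Conv2d(in_channels, num_anchors, 1)      # objectness
        self.reg = nn.Conv2d(in_channels, num_anchors * 4, 1)  # box deltas

    def forward(self, feat):
        x = torch.relu(self.conv(feat))
        return self.cls(x), self.reg(x)
```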
- -
- -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-C4 | caffe | 1x | - | - | 35.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco/faster_rcnn_r50_caffe_c4_1x_coco_20220316_150152-3f885b85.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco/faster_rcnn_r50_caffe_c4_1x_coco_20220316_150152.log.json) | -| R-50-DC5 | caffe | 1x | - | - | 37.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco/faster_rcnn_r50_caffe_dc5_1x_coco_20201030_151909-531f0f43.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco/faster_rcnn_r50_caffe_dc5_1x_coco_20201030_151909.log.json) | -| R-50-FPN | caffe | 1x | 3.8 | | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco/faster_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.378_20200504_180032-c5925ee5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco/faster_rcnn_r50_caffe_fpn_1x_coco_20200504_180032.log.json) | -| R-50-FPN | pytorch | 1x | 4.0 | 21.4 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | -| R-50-FPN (FP16) | pytorch | 1x | 3.4 | 28.8 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204_143530.log.json) | -| R-50-FPN | pytorch | 2x | - | - | 38.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_20200504_210434.log.json) | -| R-101-FPN | caffe | 1x | 5.7 | | 39.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco/faster_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.398_20200504_180057-b269e9dd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco/faster_rcnn_r101_caffe_fpn_1x_coco_20200504_180057.log.json) | -| R-101-FPN | pytorch | 1x | 6.0 | 15.6 | 39.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130-f513f705.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130_204655.log.json) | -| R-101-FPN | pytorch | 2x | - | - | 39.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_2x_coco/faster_rcnn_r101_fpn_2x_coco_bbox_mAP-0.398_20200504_210455-1d2dac9c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_2x_coco/faster_rcnn_r101_fpn_2x_coco_20200504_210455.log.json) | -| X-101-32x4d-FPN | pytorch | 1x | 7.2 | 13.8 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco/faster_rcnn_x101_32x4d_fpn_1x_coco_20200203-cff10310.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco/faster_rcnn_x101_32x4d_fpn_1x_coco_20200203_000520.log.json) | -| X-101-32x4d-FPN | pytorch | 2x | - | - | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco/faster_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.412_20200506_041400-64a12c0b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco/faster_rcnn_x101_32x4d_fpn_2x_coco_20200506_041400.log.json) | -| X-101-64x4d-FPN | pytorch | 1x | 10.3 | 9.4 | 42.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204_134340.log.json) | -| X-101-64x4d-FPN | pytorch | 2x | - | - | 41.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco/faster_rcnn_x101_64x4d_fpn_2x_coco_20200512_161033-5961fa95.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco/faster_rcnn_x101_64x4d_fpn_2x_coco_20200512_161033.log.json) | - -## Different regression loss - -We trained with R-50-FPN pytorch style backbone for 1x schedule. - -| Backbone | Loss type | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :------: | :------------: | :------: | :------------: | :----: | :----------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | L1Loss | 4.0 | 21.4 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | -| R-50-FPN | IoULoss | | | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco/faster_rcnn_r50_fpn_iou_1x_coco_20200506_095954-938e81f0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco/faster_rcnn_r50_fpn_iou_1x_coco_20200506_095954.log.json) | -| R-50-FPN | GIoULoss | | | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_giou_1x_coco-0eada910.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_giou_1x_coco_20200505_161120.log.json) | -| R-50-FPN | BoundedIoULoss | | | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_bounded_iou_1x_coco-98ad993b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_bounded_iou_1x_coco_20200505_160738.log.json) | - -## Pre-trained Models - -We also train some models with longer schedules and multi-scale training. The users could finetune them for downstream tasks. 
- -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :----------------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| [R-50-C4](./faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py) | caffe | 1x | - | | 35.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco/faster_rcnn_r50_caffe_c4_mstrain_1x_coco_20220316_150527-db276fed.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco/faster_rcnn_r50_caffe_c4_mstrain_1x_coco_20220316_150527.log.json) | -| [R-50-DC5](./faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py) | caffe | 1x | - | | 37.4 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco_20201028_233851-b33d21b9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco_20201028_233851.log.json) | -| [R-50-DC5](./faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py) | caffe | 3x | - | | 38.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco_20201028_002107-34a53b2c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco_20201028_002107.log.json) | -| [R-50-FPN](./faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py) | caffe | 2x | 3.7 | | 39.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco_bbox_mAP-0.397_20200504_231813-10b2de58.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco_20200504_231813.log.json) | -| [R-50-FPN](./faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py) | caffe | 3x | 3.7 | | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210526_095054-1f77628b.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210526_095054.log.json) | -| [R-50-FPN](./faster_rcnn_r50_fpn_mstrain_3x_coco.py) | pytorch | 3x | 3.9 | | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco/faster_rcnn_r50_fpn_mstrain_3x_coco_20210524_110822-e10bd31c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco/faster_rcnn_r50_fpn_mstrain_3x_coco_20210524_110822.log.json) | -| [R-101-FPN](./faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py) | caffe | 3x | 5.6 | | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210526_095742-a7ae426d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210526_095742.log.json) | -| [R-101-FPN](./faster_rcnn_r101_fpn_mstrain_3x_coco.py) | pytorch | 3x | 5.8 | | 41.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco/faster_rcnn_r101_fpn_mstrain_3x_coco_20210524_110822-4d4d2ca8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco/faster_rcnn_r101_fpn_mstrain_3x_coco_20210524_110822.log.json) | -| [X-101-32x4d-FPN](./faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py) | pytorch | 3x | 7.0 | | 42.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210524_124151-16b9b260.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210524_124151.log.json) | -| [X-101-32x8d-FPN](./faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py) | pytorch | 3x | 10.1 | | 42.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210604_182954-002e082a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210604_182954.log.json) | -| [X-101-64x4d-FPN](./faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py) | pytorch | 3x | 10.0 | | 43.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210524_124528-26c63de6.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210524_124528.log.json) | - -We further fine-tune some pre-trained models on COCO subsets, which contain only a few of the 80 categories. - -| Backbone | Style | Class name | Pre-trained model | Mem (GB) | box AP | Config | Download | -| ----------------------------------------------------------------------------- | ----- | ------------------ | ------------------------------------------------------------------- | -------- | ------ | --------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -| [R-50-FPN](./faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py) | caffe | person | [R-50-FPN-Caffe-3x](./faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py) | 3.7 | 55.8 | [config](./faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person/faster_rcnn_r50_fpn_1x_coco-person_20201216_175929-d022e227.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person/faster_rcnn_r50_fpn_1x_coco-person_20201216_175929.log.json) | -| [R-50-FPN](./faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py) | caffe | person-bicycle-car | [R-50-FPN-Caffe-3x](./faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py) | 3.7 | 44.1 | [config](./faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car_20201216_173117-6eda6d92.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car_20201216_173117.log.json) | - -## Torchvision New Recipe (TNR) - -Torchvision released its high-precision ResNet models. The training details can be found on the [PyTorch website](https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/). Here, we grid-searched the learning rate and weight decay and report the optimal hyper-parameters for the detection task.
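The searched settings live in [`faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py`](./faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py): the config initializes the backbone from the TNR ResNet-50 checkpoint and replaces the default SGD optimizer with AdamW using the searched learning rate and weight decay. A minimal sketch, abridged from that config:

```python
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

# Initialize the backbone from the high-precision TNR ResNet-50 weights.
checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth'
model = dict(
    backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint)))

# `lr` and `weight_decay` are the grid-searched optima; AdamW replaces the
# default SGD optimizer inherited from schedule_1x.
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.0001,
    weight_decay=0.1,
    paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
```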
- -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| [R-50-TNR](./faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py) | pytorch | 1x | - | | 40.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco_20220320_085147-efedfda4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco_20220320_085147.log.json) | - -## Citation - -```latex -@article{Ren_2017, - title={Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks}, - journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, - publisher={Institute of Electrical and Electronics Engineers (IEEE)}, - author={Ren, Shaoqing and He, Kaiming and Girshick, Ross and Sun, Jian}, - year={2017}, - month={Jun}, -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py deleted file mode 100644 index c6f078c771d7b7188a2d66ae73b56206c3e84a95..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './faster_rcnn_r50_caffe_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py deleted file mode 100644 index 6a13fe9ff692d18927f9ada0604e675b2cd0bea9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,49 +0,0 @@ -_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py' - -model = dict( - backbone=dict( - depth=101, - norm_cfg=dict(requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe'))) - -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 
'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - train=dict(dataset=dict(pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py deleted file mode 100644 index 1de53a6cdfcd64541c2ddf0f4f699b7f8d003029..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py deleted file mode 100644 index 0d41599430ae5ca371969076c6d53706ae92e975..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_2x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py deleted file mode 100644 index 0b498bb687c6d3ac941061584aeba3653df97fe1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py' - -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py deleted file mode 100644 index b071962ef44b4f0411accdcf1caf6eb804ab3959..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_caffe_c4.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - 
dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py deleted file mode 100644 index f4d83e6b00e3cd6f2e2abcf2a462f39690bd8ee9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py +++ /dev/null @@ -1,38 +0,0 @@ -_base_ = './faster_rcnn_r50_caffe_c4_1x_coco.py' -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py deleted file mode 100644 index ee2010c64a4c24e18b81c0be7e002ea474c57a44..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py +++ /dev/null @@ -1,37 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_caffe_dc5.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = 
dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py deleted file mode 100644 index 14eaef2dffea606027001b69d12d11cb46693e1c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py +++ /dev/null @@ -1,42 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_caffe_dc5.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py deleted file mode 100644 index 403747f127e0f7a301771e53e75bf0e83a1736c9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py' -# learning policy -lr_config = dict(step=[28, 34]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 56c01bdcf55cbbb18b7519a46c9b8ce18797011a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,41 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe'))) -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - 
dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_90k_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_90k_coco.py deleted file mode 100644 index b5aea6a7275c651b65654893957a4e3312ceb293..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_90k_coco.py +++ /dev/null @@ -1,15 +0,0 @@ -_base_ = 'faster_rcnn_r50_caffe_fpn_1x_coco.py' - -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[60000, 80000]) - -# Runner type -runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000) - -checkpoint_config = dict(interval=10000) -evaluation = dict(interval=10000, metric='bbox') diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py deleted file mode 100644 index 4f1f376c33a0ad884a8930833c6205339966f82b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' -model = dict(roi_head=dict(bbox_head=dict(num_classes=3))) -classes = ('person', 'bicycle', 'car') -data = dict( - train=dict(classes=classes), - val=dict(classes=classes), - test=dict(classes=classes)) - -load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py deleted file mode 100644 index b5dfb4fe447472b2fabb7d193778dbf2fbf2ce25..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' -model = dict(roi_head=dict(bbox_head=dict(num_classes=1))) -classes = ('person', ) -data = dict( - train=dict(classes=classes), - val=dict(classes=classes), - test=dict(classes=classes)) - -load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py deleted file mode 100644 index 
f807a19abce803dd99f82c5d1c4cec502d16253f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe'))) -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py deleted file mode 100644 index df58973fc009949d37e8a87e4d3ac39e2c313c65..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 23]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py deleted file mode 100644 index 9eeaaceaf5e7533105f83b736ca7ce454159aedb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,47 +0,0 @@ -_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe'))) - -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), 
- flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - train=dict(dataset=dict(pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py deleted file mode 100644 index 74dca24f26422967501e7ba31c3f39ca324e031c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py +++ /dev/null @@ -1,15 +0,0 @@ -_base_ = 'faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' - -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[60000, 80000]) - -# Runner type -runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000) - -checkpoint_config = dict(interval=10000) -evaluation = dict(interval=10000, metric='bbox') diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index 009bd93d06b3284c7b31f33f82d636f774e86b74..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py deleted file mode 100644 index e77a7fa8d6b8c1ad7fe293bc932d621464287e0c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' -] diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py deleted file mode 100644 index 648081f19ca7d3ca9a7362a4a41e514d753ce4e8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - roi_head=dict( - bbox_head=dict( - reg_decoded_bbox=True, - loss_bbox=dict(type='BoundedIoULoss', loss_weight=10.0)))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_ciou_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_ciou_1x_coco.py deleted file mode 100644 index 886d5668c3df24a24baa112a3b0aefb15face892..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_ciou_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - roi_head=dict( - bbox_head=dict( - 
reg_decoded_bbox=True, - loss_bbox=dict(type='CIoULoss', loss_weight=12.0)))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py deleted file mode 100644 index acd4040c979b2b83e456e5b2f58b9f4514af972a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_1x_coco.py' -# fp16 settings -fp16 = dict(loss_scale=512.) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py deleted file mode 100644 index 5556c4977e221182b013b68fef4b73d1b0605bf3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - roi_head=dict( - bbox_head=dict( - reg_decoded_bbox=True, - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py deleted file mode 100644 index ddf663e4f0e1525490a493674b32b3dc4c781bb2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - roi_head=dict( - bbox_head=dict( - reg_decoded_bbox=True, - loss_bbox=dict(type='IoULoss', loss_weight=10.0)))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py deleted file mode 100644 index faf8f92437d839eda456187a29827907a5a9532b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = [ - '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' -] diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py deleted file mode 100644 index f897e7c55c8b8f0ef7a5db92f29ef1c2415965db..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_1x_coco.py' -model = dict(train_cfg=dict(rcnn=dict(sampler=dict(type='OHEMSampler')))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py deleted file mode 100644 index 759ae3a7acec07daa75213835f1bc41d5c6de4a5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py +++ /dev/null @@ -1,12 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - test_cfg=dict( - rcnn=dict( - score_thr=0.05, - nms=dict(type='soft_nms', iou_threshold=0.5), 
- max_per_img=100))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py deleted file mode 100644 index ecbfb928d8a1c9c611b57752f3772c5a15e03436..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth' -model = dict( - backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint))) - -# `lr` and `weight_decay` have been searched to be optimal. -optimizer = dict( - _delete_=True, - type='AdamW', - lr=0.0001, - weight_decay=0.1, - paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index 3808c9f2870d632feae36e521d0537141b7271d5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py deleted file mode 100644 index e93f5d8173dd4b22c1022dadf5258e455d4b3fd5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_2x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py deleted file mode 100644 index f55985d61cec9aff95c78c8e287baad6ba1300d9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = [ - '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' -] -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py 
b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py deleted file mode 100644 index a5d5aebbdebb63b89dcac9e8bf4a4e88f5d980d3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,62 +0,0 @@ -_base_ = [ - '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' -] -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=8, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - style='pytorch', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) - -# ResNeXt-101-32x8d model trained with Caffe2 at FB, -# so the mean and std need to be changed. -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], - std=[57.375, 57.120, 58.395], - to_rgb=False) - -# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], -# multiscale_mode='range' -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -# Use RepeatDataset to speed up training -data = dict( - train=dict(dataset=dict(pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index 8bf2b65a6a97173e2cb563c8f79c501936a2ee09..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py deleted file mode 100644 index 7ea9b2da14da6b86f3497bfc3c56862a5c05730b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_2x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - 
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py deleted file mode 100644 index 80397f4b18acb094f8f6e132ea21050c75b2de48..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = [ - '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' -] -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/faster_rcnn/metafile.yml b/cv/detection/co-detr/pytorch/configs/faster_rcnn/metafile.yml deleted file mode 100644 index 3011b15798017e074769ec5690705a4ae464427e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/faster_rcnn/metafile.yml +++ /dev/null @@ -1,452 +0,0 @@ -Collections: - - Name: Faster R-CNN - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - RPN - - ResNet - - RoIPool - Paper: - URL: https://arxiv.org/abs/1506.01497 - Title: "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks" - README: configs/faster_rcnn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/faster_rcnn.py#L6 - Version: v2.0.0 - -Models: - - Name: faster_rcnn_r50_caffe_c4_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 35.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco/faster_rcnn_r50_caffe_c4_1x_coco_20220316_150152-3f885b85.pth - - - Name: faster_rcnn_r50_caffe_c4_mstrain_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 35.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco/faster_rcnn_r50_caffe_c4_mstrain_1x_coco_20220316_150527-db276fed.pth - - - Name: faster_rcnn_r50_caffe_dc5_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco/faster_rcnn_r50_caffe_dc5_1x_coco_20201030_151909-531f0f43.pth - - - Name: faster_rcnn_r50_caffe_fpn_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 3.8 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco/faster_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.378_20200504_180032-c5925ee5.pth - - - Name: 
faster_rcnn_r50_fpn_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 4.0 - inference time (ms/im): - - value: 46.73 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth - - - Name: faster_rcnn_r50_fpn_fp16_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py - Metadata: - Training Memory (GB): 3.4 - Training Techniques: - - SGD with Momentum - - Weight Decay - - Mixed Precision Training - inference time (ms/im): - - value: 34.72 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP16 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth - - - Name: faster_rcnn_r50_fpn_2x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py - Metadata: - Training Memory (GB): 4.0 - inference time (ms/im): - - value: 46.73 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth - - - Name: faster_rcnn_r101_caffe_fpn_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 5.7 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco/faster_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.398_20200504_180057-b269e9dd.pth - - - Name: faster_rcnn_r101_fpn_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.0 - inference time (ms/im): - - value: 64.1 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130-f513f705.pth - - - Name: faster_rcnn_r101_fpn_2x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py - Metadata: - Training Memory (GB): 6.0 - inference time (ms/im): - - value: 64.1 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_2x_coco/faster_rcnn_r101_fpn_2x_coco_bbox_mAP-0.398_20200504_210455-1d2dac9c.pth - - - Name: faster_rcnn_x101_32x4d_fpn_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.2 - inference time (ms/im): - - 
value: 72.46 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco/faster_rcnn_x101_32x4d_fpn_1x_coco_20200203-cff10310.pth - - - Name: faster_rcnn_x101_32x4d_fpn_2x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py - Metadata: - Training Memory (GB): 7.2 - inference time (ms/im): - - value: 72.46 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco/faster_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.412_20200506_041400-64a12c0b.pth - - - Name: faster_rcnn_x101_64x4d_fpn_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 10.3 - inference time (ms/im): - - value: 106.38 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth - - - Name: faster_rcnn_x101_64x4d_fpn_2x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py - Metadata: - Training Memory (GB): 10.3 - inference time (ms/im): - - value: 106.38 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco/faster_rcnn_x101_64x4d_fpn_2x_coco_20200512_161033-5961fa95.pth - - - Name: faster_rcnn_r50_fpn_iou_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.9 - # re-release - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco/faster_rcnn_r50_fpn_iou_1x_coco_20200506_095954-938e81f0.pth - - - Name: faster_rcnn_r50_fpn_giou_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_giou_1x_coco-0eada910.pth - - - Name: faster_rcnn_r50_fpn_bounded_iou_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_bounded_iou_1x_coco-98ad993b.pth - - - Name: faster_rcnn_r50_caffe_dc5_mstrain_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: 
Object Detection - Dataset: COCO - Metrics: - box AP: 37.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco_20201028_233851-b33d21b9.pth - - - Name: faster_rcnn_r50_caffe_dc5_mstrain_3x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py - Metadata: - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco_20201028_002107-34a53b2c.pth - - - Name: faster_rcnn_r50_caffe_fpn_mstrain_2x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py - Metadata: - Training Memory (GB): 4.3 - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco_bbox_mAP-0.397_20200504_231813-10b2de58.pth - - - Name: faster_rcnn_r50_caffe_fpn_mstrain_3x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 3.7 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210526_095054-1f77628b.pth - - - Name: faster_rcnn_r50_fpn_mstrain_3x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 3.9 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco/faster_rcnn_r50_fpn_mstrain_3x_coco_20210524_110822-e10bd31c.pth - - - Name: faster_rcnn_r101_caffe_fpn_mstrain_3x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 5.6 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210526_095742-a7ae426d.pth - - - Name: faster_rcnn_r101_fpn_mstrain_3x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 5.8 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco/faster_rcnn_r101_fpn_mstrain_3x_coco_20210524_110822-4d4d2ca8.pth - - - Name: faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 7.0 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210524_124151-16b9b260.pth - - - Name: 
faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 10.1 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210604_182954-002e082a.pth - - - Name: faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 10.0 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210524_124528-26c63de6.pth - - - Name: faster_rcnn_r50_fpn_tnr-pretrain_1x_coco - In Collection: Faster R-CNN - Config: configs/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py - Metadata: - Training Memory (GB): 4.0 - inference time (ms/im): - - value: 46.73 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco_20220320_085147-efedfda4.pth diff --git a/cv/detection/co-detr/pytorch/configs/fcos/README.md b/cv/detection/co-detr/pytorch/configs/fcos/README.md deleted file mode 100644 index 76be365e34e27bf6af9d4cee03bd1386ddcce4ef..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fcos/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# FCOS - -> [FCOS: Fully Convolutional One-Stage Object Detection](https://arxiv.org/abs/1904.01355) - - - -## Abstract - -We propose a fully convolutional one-stage object detector (FCOS) to solve object detection in a per-pixel prediction fashion, analogue to semantic segmentation. Almost all state-of-the-art object detectors such as RetinaNet, SSD, YOLOv3, and Faster R-CNN rely on pre-defined anchor boxes. In contrast, our proposed detector FCOS is anchor box free, as well as proposal free. By eliminating the predefined set of anchor boxes, FCOS completely avoids the complicated computation related to anchor boxes such as calculating overlapping during training. More importantly, we also avoid all hyper-parameters related to anchor boxes, which are often very sensitive to the final detection performance. With the only post-processing non-maximum suppression (NMS), FCOS with ResNeXt-64x4d-101 achieves 44.7% in AP with single-model and single-scale testing, surpassing previous one-stage detectors with the advantage of being much simpler. For the first time, we demonstrate a much simpler and flexible detection framework achieving improved detection accuracy. We hope that the proposed FCOS framework can serve as a simple and strong alternative for many other instance-level tasks. - -
- -
- -## Results and Models - -| Backbone | Style | GN | MS train | Tricks | DCN | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :------: | :---: | :-: | :------: | :----: | :-: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | caffe | Y | N | N | N | 1x | 3.6 | 22.7 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/20201227_180009.log.json) | -| R-50 | caffe | Y | N | Y | N | 1x | 3.7 | - | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/20210105_135818.log.json) | -| R-50 | caffe | Y | N | Y | Y | 1x | 3.8 | - | 42.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco-ae4d8b3d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/20210105_224556.log.json) | -| R-101 | caffe | Y | N | N | N | 1x | 5.5 | 17.3 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/fcos_r101_caffe_fpn_gn-head_1x_coco-0e37b982.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/20210103_155046.log.json) | - -| Backbone | Style | GN | MS train | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :------: | :-----: | :-: | :------: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | caffe | Y | Y | 2x | 2.6 | 22.9 | 38.5 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/20201227_161900.log.json) | -| R-101 | caffe | Y | Y | 2x | 5.5 | 17.3 | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/20210103_155046.log.json) | -| X-101 | pytorch | Y | Y | 2x | 10.0 | 9.7 | 42.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco-ede514a8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/20210114_133041.log.json) | - -**Notes:** - -- The X-101 backbone is X-101-64x4d. -- Tricks means setting `norm_on_bbox`, `centerness_on_reg`, `center_sampling` as `True`. -- DCN means using `DCNv2` in both backbone and head. - -## Citation - -```latex -@article{tian2019fcos, - title={FCOS: Fully Convolutional One-Stage Object Detection}, - author={Tian, Zhi and Shen, Chunhua and Chen, Hao and He, Tong}, - journal={arXiv preprint arXiv:1904.01355}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py b/cv/detection/co-detr/pytorch/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py deleted file mode 100644 index 2699bdb979bdf2dce3f4f26946304aa1ed2f4751..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py +++ /dev/null @@ -1,54 +0,0 @@ -_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py' - -model = dict( - backbone=dict( - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - bbox_head=dict( - norm_on_bbox=True, - centerness_on_reg=True, - dcn_on_last_conv=False, - center_sampling=True, - conv_bias=True, - loss_bbox=dict(type='GIoULoss', loss_weight=1.0)), - # training and testing settings - test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6))) - -# dataset settings -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', 
keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -optimizer_config = dict(_delete_=True, grad_clip=None) - -lr_config = dict(warmup='linear') diff --git a/cv/detection/co-detr/pytorch/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py deleted file mode 100644 index cf93c91e7128c277d1263b680beb108cfadbbc49..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py +++ /dev/null @@ -1,56 +0,0 @@ -_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py' - -model = dict( - backbone=dict( - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True), - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - bbox_head=dict( - norm_on_bbox=True, - centerness_on_reg=True, - dcn_on_last_conv=True, - center_sampling=True, - conv_bias=True, - loss_bbox=dict(type='GIoULoss', loss_weight=1.0)), - # training and testing settings - test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6))) - -# dataset settings -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -optimizer_config = dict(_delete_=True, grad_clip=None) - -lr_config = dict(warmup='linear') diff --git a/cv/detection/co-detr/pytorch/configs/fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py b/cv/detection/co-detr/pytorch/configs/fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py deleted file mode 100644 index 9f502e7b465f789a90100d96e881c60c84d9bf91..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' -model = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5)) diff --git a/cv/detection/co-detr/pytorch/configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py b/cv/detection/co-detr/pytorch/configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py deleted file mode 100644 index 45bea48dc38881d0a0f41ef820723a1ac854c854..0000000000000000000000000000000000000000 
--- a/cv/detection/co-detr/pytorch/configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron/resnet101_caffe'))) diff --git a/cv/detection/co-detr/pytorch/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py b/cv/detection/co-detr/pytorch/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py deleted file mode 100644 index f4d36f1eeed8fe152c2c4cad702d3736bc56172c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py +++ /dev/null @@ -1,47 +0,0 @@ -_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron/resnet101_caffe'))) -img_norm_cfg = dict( - mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py b/cv/detection/co-detr/pytorch/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py deleted file mode 100644 index 955787bab9413f93908cc4542da89f1bdd31c492..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py +++ /dev/null @@ -1,106 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# model settings -model = dict( - type='FCOS', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron/resnet50_caffe')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', # use P5 - num_outs=5, - relu_before_extra_convs=True), - bbox_head=dict( - type='FCOSHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - strides=[8, 16, 32, 64, 128], - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='IoULoss', loss_weight=1.0), - loss_centerness=dict( - 
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)) -img_norm_cfg = dict( - mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict( - lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='step', - warmup='constant', - warmup_iters=500, - warmup_ratio=1.0 / 3, - step=[8, 11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/cv/detection/co-detr/pytorch/configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py b/cv/detection/co-detr/pytorch/configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py deleted file mode 100644 index 2816b16f64dbcbfecd779650aaae0ca6cee0d810..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -# TODO: Remove this config after benchmarking all related configs -_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py' - -data = dict(samples_per_gpu=4, workers_per_gpu=4) diff --git a/cv/detection/co-detr/pytorch/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py b/cv/detection/co-detr/pytorch/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py deleted file mode 100644 index 497d03f6f702ecb47cccbe0089089b5a002ebcca..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' -img_norm_cfg = dict( - mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - 
img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py b/cv/detection/co-detr/pytorch/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py deleted file mode 100644 index e70e4651230cbf58129b139d30de68c35e9c0e2d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py +++ /dev/null @@ -1,60 +0,0 @@ -_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict( - lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/fcos/metafile.yml b/cv/detection/co-detr/pytorch/configs/fcos/metafile.yml deleted file mode 100644 index ae922eb9faf9b9705f8ac66083b466a540f1ff40..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fcos/metafile.yml +++ /dev/null @@ -1,146 +0,0 @@ -Collections: - - Name: FCOS - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - Group Normalization - - ResNet - Paper: - URL: https://arxiv.org/abs/1904.01355 - Title: 'FCOS: Fully Convolutional One-Stage Object Detection' - README: configs/fcos/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/fcos.py#L6 - Version: v2.0.0 - 
-Models: - - Name: fcos_r50_caffe_fpn_gn-head_1x_coco - In Collection: FCOS - Config: configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py - Metadata: - Training Memory (GB): 3.6 - inference time (ms/im): - - value: 44.05 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 36.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco/fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth - - - Name: fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco - In Collection: FCOS - Config: configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py - Metadata: - Training Memory (GB): 3.7 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco-0a0d75a8.pth - - - Name: fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco - In Collection: FCOS - Config: configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py - Metadata: - Training Memory (GB): 3.8 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco-ae4d8b3d.pth - - - Name: fcos_r101_caffe_fpn_gn-head_1x_coco - In Collection: FCOS - Config: configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py - Metadata: - Training Memory (GB): 5.5 - inference time (ms/im): - - value: 57.8 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco/fcos_r101_caffe_fpn_gn-head_1x_coco-0e37b982.pth - - - Name: fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco - In Collection: FCOS - Config: configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py - Metadata: - Training Memory (GB): 2.6 - inference time (ms/im): - - value: 43.67 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco-d92ceeea.pth - - - Name: fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco - In Collection: FCOS - Config: configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py - Metadata: - Training Memory (GB): 5.5 - inference time (ms/im): - - value: 57.8 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco-511424d6.pth - - - Name: fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco - In Collection: FCOS - Config: 
configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py - Metadata: - Training Memory (GB): 10.0 - inference time (ms/im): - - value: 103.09 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco-ede514a8.pth diff --git a/cv/detection/co-detr/pytorch/configs/foveabox/README.md b/cv/detection/co-detr/pytorch/configs/foveabox/README.md deleted file mode 100644 index 7fcd094da6b7b52266fd97de5b989f3e006c04bf..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/foveabox/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# FoveaBox - -> [FoveaBox: Beyond Anchor-based Object Detector](https://arxiv.org/abs/1904.03797) - - - -## Abstract - -We present FoveaBox, an accurate, flexible, and completely anchor-free framework for object detection. While almost all state-of-the-art object detectors utilize predefined anchors to enumerate possible locations, scales and aspect ratios for the search of the objects, their performance and generalization ability are also limited to the design of anchors. Instead, FoveaBox directly learns the object existing possibility and the bounding box coordinates without anchor reference. This is achieved by: (a) predicting category-sensitive semantic maps for the object existing possibility, and (b) producing category-agnostic bounding box for each position that potentially contains an object. The scales of target boxes are naturally associated with feature pyramid representations. In FoveaBox, an instance is assigned to adjacent feature levels to make the model more accurate.We demonstrate its effectiveness on standard benchmarks and report extensive experimental analysis. Without bells and whistles, FoveaBox achieves state-of-the-art single model performance on the standard COCO and Pascal VOC object detection benchmark. More importantly, FoveaBox avoids all computation and hyper-parameters related to anchor boxes, which are often sensitive to the final detection performance. We believe the simple and effective approach will serve as a solid baseline and help ease future research for object detection. - -
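The `scale_ranges` in `fovea_r50_fpn_4x4_1x_coco.py` below overlap on purpose, which is how an instance can be assigned to adjacent feature levels as described in the abstract. The snippet below is only a rough sketch of that assignment rule using those config values; the actual `FoveaHead` target computation in mmdetection differs in detail.

```python
import math

# scale_ranges taken from fovea_r50_fpn_4x4_1x_coco.py (levels P3..P7);
# the ranges overlap, so one instance can land on adjacent pyramid levels.
SCALE_RANGES = ((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048))

def assign_levels(box):
    """Return pyramid-level indices whose scale range covers sqrt(w * h) of `box`.

    `box` is (x1, y1, x2, y2) in image pixels. This sketches the assignment rule
    described in the paper, not mmdetection's FoveaHead target code.
    """
    x1, y1, x2, y2 = box
    scale = math.sqrt(max(x2 - x1, 1e-6) * max(y2 - y1, 1e-6))
    return [i for i, (lo, hi) in enumerate(SCALE_RANGES) if lo <= scale <= hi]

print(assign_levels((100, 100, 180, 160)))  # sqrt(80 * 60) ~= 69 -> levels [1, 2]
```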
- -
- -## Introduction - -FoveaBox is an accurate, flexible and completely anchor-free object detection system for object detection framework, as presented in our paper [https://arxiv.org/abs/1904.03797](https://arxiv.org/abs/1904.03797): -Different from previous anchor-based methods, FoveaBox directly learns the object existing possibility and the bounding box coordinates without anchor reference. This is achieved by: (a) predicting category-sensitive semantic maps for the object existing possibility, and (b) producing category-agnostic bounding box for each position that potentially contains an object. - -## Results and Models - -### Results on R50/101-FPN - -| Backbone | Style | align | ms-train | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :------: | :-----: | :---: | :------: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | pytorch | N | N | 1x | 5.6 | 24.1 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_1x_coco/fovea_r50_fpn_4x4_1x_coco_20200219-ee4d5303.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_1x_coco/fovea_r50_fpn_4x4_1x_coco_20200219_223025.log.json) | -| R-50 | pytorch | N | N | 2x | 5.6 | - | 37.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_2x_coco/fovea_r50_fpn_4x4_2x_coco_20200203-2df792b1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_2x_coco/fovea_r50_fpn_4x4_2x_coco_20200203_112043.log.json) | -| R-50 | pytorch | Y | N | 2x | 8.1 | 19.4 | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203_134252.log.json) | -| R-50 | pytorch | Y | Y | 2x | 8.1 | 18.3 | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200205-85ce26cb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200205_112557.log.json) | -| R-101 | pytorch | N | N | 1x | 9.2 | 17.4 | 38.6 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_1x_coco/fovea_r101_fpn_4x4_1x_coco_20200219-05e38f1c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_1x_coco/fovea_r101_fpn_4x4_1x_coco_20200219_011740.log.json) | -| R-101 | pytorch | N | N | 2x | 11.7 | - | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_2x_coco/fovea_r101_fpn_4x4_2x_coco_20200208-02320ea4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_2x_coco/fovea_r101_fpn_4x4_2x_coco_20200208_202059.log.json) | -| R-101 | pytorch | Y | N | 2x | 11.7 | 14.7 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco/fovea_align_r101_fpn_gn-head_4x4_2x_coco_20200208-c39a027a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco/fovea_align_r101_fpn_gn-head_4x4_2x_coco_20200208_203337.log.json) | -| R-101 | pytorch | Y | Y | 2x | 11.7 | 14.7 | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200208-649c5eb6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200208_202124.log.json) | - -\[1\] *1x and 2x mean the model is trained for 12 and 24 epochs, respectively.* \ -\[2\] *Align means utilizing deformable convolution to align the cls branch.* \ -\[3\] *All results are obtained with a single model and without any test time data augmentation.*\ -\[4\] *We use 4 GPUs for training.* - -Any pull requests or issues are welcome. - -## Citation - -Please consider citing our paper in your publications if the project helps your research. BibTeX reference is as follows. 
- -```latex -@article{kong2019foveabox, - title={FoveaBox: Beyond Anchor-based Object Detector}, - author={Kong, Tao and Sun, Fuchun and Liu, Huaping and Jiang, Yuning and Shi, Jianbo}, - journal={arXiv preprint arXiv:1904.03797}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py b/cv/detection/co-detr/pytorch/configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py deleted file mode 100644 index c5d178492d1031f03915e5a8e273f2b4b12a7e97..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py +++ /dev/null @@ -1,12 +0,0 @@ -_base_ = './fovea_r50_fpn_4x4_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101')), - bbox_head=dict( - with_deform=True, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py b/cv/detection/co-detr/pytorch/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py deleted file mode 100644 index cc5affefe85150d8e8d372920221e00c27646375..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py +++ /dev/null @@ -1,29 +0,0 @@ -_base_ = './fovea_r50_fpn_4x4_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101')), - bbox_head=dict( - with_deform=True, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -data = dict(train=dict(pipeline=train_pipeline)) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py b/cv/detection/co-detr/pytorch/configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py deleted file mode 100644 index e7265bcdbef2a7ab5e8ba6b3fe13f02cb718b40a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = './fovea_r50_fpn_4x4_1x_coco.py' -model = dict( - bbox_head=dict( - with_deform=True, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py b/cv/detection/co-detr/pytorch/configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py deleted file mode 100644 index 
8fc39beaac540a8d3e00bf968f1af08450f9d4cc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py +++ /dev/null @@ -1,25 +0,0 @@ -_base_ = './fovea_r50_fpn_4x4_1x_coco.py' -model = dict( - bbox_head=dict( - with_deform=True, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -data = dict(train=dict(pipeline=train_pipeline)) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py b/cv/detection/co-detr/pytorch/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py deleted file mode 100644 index 9201af11b88f4c161730f43e957c4d9c53828262..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './fovea_r50_fpn_4x4_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py b/cv/detection/co-detr/pytorch/configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py deleted file mode 100644 index 1ef5243f93f5df47d9f1dab318655ea757e6c676..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './fovea_r50_fpn_4x4_2x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py b/cv/detection/co-detr/pytorch/configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py deleted file mode 100644 index 7e986ebcd59f0fe59c760739d291a693f9b7a02e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py +++ /dev/null @@ -1,52 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# model settings -model = dict( - type='FOVEA', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - num_outs=5, - add_extra_convs='on_input'), - bbox_head=dict( - type='FoveaHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - strides=[8, 16, 32, 64, 128], - base_edge_list=[16, 32, 64, 128, 256], - scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)), - sigma=0.4, - with_deform=False, - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=1.50, - 
alpha=0.4, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)), - # training and testing settings - train_cfg=dict(), - test_cfg=dict( - nms_pre=1000, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)) -data = dict(samples_per_gpu=4, workers_per_gpu=4) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py b/cv/detection/co-detr/pytorch/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py deleted file mode 100644 index 68ce4d250ac673a274d1458963eb02614e4f5f98..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './fovea_r50_fpn_4x4_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/foveabox/metafile.yml b/cv/detection/co-detr/pytorch/configs/foveabox/metafile.yml deleted file mode 100644 index fe9a2834643d215a9ff7e6a200d06a362a8a1a4b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/foveabox/metafile.yml +++ /dev/null @@ -1,172 +0,0 @@ -Collections: - - Name: FoveaBox - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 4x V100 GPUs - Architecture: - - FPN - - ResNet - Paper: - URL: https://arxiv.org/abs/1904.03797 - Title: 'FoveaBox: Beyond Anchor-based Object Detector' - README: configs/foveabox/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/fovea.py#L6 - Version: v2.0.0 - -Models: - - Name: fovea_r50_fpn_4x4_1x_coco - In Collection: FoveaBox - Config: configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py - Metadata: - Training Memory (GB): 5.6 - inference time (ms/im): - - value: 41.49 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 36.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_1x_coco/fovea_r50_fpn_4x4_1x_coco_20200219-ee4d5303.pth - - - Name: fovea_r50_fpn_4x4_2x_coco - In Collection: FoveaBox - Config: configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py - Metadata: - Training Memory (GB): 5.6 - inference time (ms/im): - - value: 41.49 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r50_fpn_4x4_2x_coco/fovea_r50_fpn_4x4_2x_coco_20200203-2df792b1.pth - - - Name: fovea_align_r50_fpn_gn-head_4x4_2x_coco - In Collection: FoveaBox - Config: configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py - Metadata: - Training Memory (GB): 8.1 - inference time (ms/im): - - value: 51.55 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco/fovea_align_r50_fpn_gn-head_4x4_2x_coco_20200203-8987880d.pth - - - Name: fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco - In Collection: FoveaBox - Config: 
configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py - Metadata: - Training Memory (GB): 8.1 - inference time (ms/im): - - value: 54.64 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200205-85ce26cb.pth - - - Name: fovea_r101_fpn_4x4_1x_coco - In Collection: FoveaBox - Config: configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py - Metadata: - Training Memory (GB): 9.2 - inference time (ms/im): - - value: 57.47 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_1x_coco/fovea_r101_fpn_4x4_1x_coco_20200219-05e38f1c.pth - - - Name: fovea_r101_fpn_4x4_2x_coco - In Collection: FoveaBox - Config: configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py - Metadata: - Training Memory (GB): 11.7 - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_r101_fpn_4x4_2x_coco/fovea_r101_fpn_4x4_2x_coco_20200208-02320ea4.pth - - - Name: fovea_align_r101_fpn_gn-head_4x4_2x_coco - In Collection: FoveaBox - Config: configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py - Metadata: - Training Memory (GB): 11.7 - inference time (ms/im): - - value: 68.03 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco/fovea_align_r101_fpn_gn-head_4x4_2x_coco_20200208-c39a027a.pth - - - Name: fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco - In Collection: FoveaBox - Config: configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py - Metadata: - Training Memory (GB): 11.7 - inference time (ms/im): - - value: 68.03 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco_20200208-649c5eb6.pth diff --git a/cv/detection/co-detr/pytorch/configs/fpg/README.md b/cv/detection/co-detr/pytorch/configs/fpg/README.md deleted file mode 100644 index 0ffd2e72af110adbb61946f6556cb7523a027ab2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fpg/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# FPG - -> [Feature Pyramid Grids](https://arxiv.org/abs/2004.03580) - - - -## Abstract - -Feature pyramid networks have been widely adopted in the object detection literature to improve feature representations for better handling of variations in scale. In this paper, we present Feature Pyramid Grids (FPG), a deep multi-pathway feature pyramid, that represents the feature scale-space as a regular grid of parallel bottom-up pathways which are fused by multi-directional lateral connections. 
FPG can improve single-pathway feature pyramid networks by significantly increasing their performance at a similar computation cost, highlighting the importance of deep pyramid representations. In addition to its general and uniform structure, and in contrast to the overly complicated structures found by neural architecture search, it also compares favorably against such approaches without relying on search. We hope that FPG, with its uniform and effective nature, can serve as a strong component for future work in object recognition. - 
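The FPG variants below are plain mmdetection configs, so the resolved neck settings can be inspected like any other config. A minimal sketch, assuming an mmdetection v2.x environment with these config files on disk (paths are illustrative):

```python
# Resolve config inheritance and build the detector, mirroring what
# mmdetection v2.x tools/train.py does (no weights are loaded here).
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py')
# The chn128 child config only overrides the channel widths; 'FPG' comes from its base.
print(cfg.model.neck.type, cfg.model.neck.out_channels)  # FPG 128

model = build_detector(
    cfg.model,
    train_cfg=cfg.get('train_cfg'),
    test_cfg=cfg.get('test_cfg'))
print(sum(p.numel() for p in model.parameters()) / 1e6, 'M parameters')
```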
- -
- -## Results and Models - -We benchmark the new training schedule (crop training, large batch, unfrozen BN, 50 epochs) introduced in NAS-FPN. -All backbones are Resnet-50 in pytorch style. - -| Method | Neck | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :----------: | :--------: | :-----: | :------: | :------------: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Faster R-CNN | FPG | 50e | 20.0 | - | 42.3 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg_crop640_50e_coco/faster_rcnn_r50_fpg_crop640_50e_coco_20220311_011856-74109f42.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg_crop640_50e_coco/faster_rcnn_r50_fpg_crop640_50e_coco_20220311_011856.log.json) | -| Faster R-CNN | FPG-chn128 | 50e | 11.9 | - | 41.2 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco/faster_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011857-9376aa9d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco/faster_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011857.log.json) | -| Faster R-CNN | FPN | 50e | 20.0 | - | 38.9 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/faster_rcnn_r50_fpn_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpn_crop640_50e_coco/faster_rcnn_r50_fpn_crop640_50e_coco_20220311_011857-be7c9f42.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpn_crop640_50e_coco/faster_rcnn_r50_fpn_crop640_50e_coco_20220311_011857.log.json) | -| Mask R-CNN | FPG | 50e | 23.2 | - | 43.0 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg_crop640_50e_coco/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg_crop640_50e_coco/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857.log.json) | -| Mask R-CNN | FPG-chn128 | 50e | 15.3 | - | 41.7 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco/mask_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011859-043c9b4e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco/mask_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011859.log.json) | -| Mask R-CNN | FPN | 50e | 23.2 | - | 39.6 | 35.6 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/mask_rcnn_r50_fpn_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpn_crop640_50e_coco/mask_rcnn_r50_fpn_crop640_50e_coco_20220311_011855-a756664a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpn_crop640_50e_coco/mask_rcnn_r50_fpn_crop640_50e_coco_20220311_011855.log.json) | -| RetinaNet | FPG | 50e | 20.8 | - | 40.5 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg_crop640_50e_coco/retinanet_r50_fpg_crop640_50e_coco_20220311_110809-b0bcf5f4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg_crop640_50e_coco/retinanet_r50_fpg_crop640_50e_coco_20220311_110809.log.json) | -| RetinaNet | FPG-chn128 | 50e | 19.9 | - | 39.9 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco/retinanet_r50_fpg-chn128_crop640_50e_coco_20220313_104829-ee99a686.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco/retinanet_r50_fpg-chn128_crop640_50e_coco_20220313_104829.log.json) | - -**Note**: Chn128 means to decrease the number of channels of features and convs from 256 (default) to 128 in -Neck and BBox Head, which can greatly decrease memory consumption without sacrificing much precision. - -## Citation - -```latex -@article{chen2020feature, - title={Feature pyramid grids}, - author={Chen, Kai and Cao, Yuhang and Loy, Chen Change and Lin, Dahua and Feichtenhofer, Christoph}, - journal={arXiv preprint arXiv:2004.03580}, - year={2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py b/cv/detection/co-detr/pytorch/configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py deleted file mode 100644 index 4535034efa3f4c4a09064a753a2bbde68b6cd2f2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = 'faster_rcnn_r50_fpg_crop640_50e_coco.py' - -norm_cfg = dict(type='BN', requires_grad=True) -model = dict( - neck=dict(out_channels=128, inter_channels=128), - rpn_head=dict(in_channels=128), - roi_head=dict( - bbox_roi_extractor=dict(out_channels=128), - bbox_head=dict(in_channels=128))) diff --git a/cv/detection/co-detr/pytorch/configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py b/cv/detection/co-detr/pytorch/configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py deleted file mode 100644 index 3ab2a2c5ef04fc38a686065167df62eb3d67266d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py +++ /dev/null @@ -1,48 +0,0 @@ -_base_ = 'faster_rcnn_r50_fpn_crop640_50e_coco.py' - -norm_cfg = dict(type='BN', requires_grad=True) -model = dict( - neck=dict( - type='FPG', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - inter_channels=256, - num_outs=5, - stack_times=9, - paths=['bu'] * 9, - same_down_trans=None, - same_up_trans=dict( - type='conv', - kernel_size=3, - stride=2, - padding=1, - norm_cfg=norm_cfg, - inplace=False, - order=('act', 'conv', 'norm')), - across_lateral_trans=dict( - type='conv', - 
kernel_size=1, - norm_cfg=norm_cfg, - inplace=False, - order=('act', 'conv', 'norm')), - across_down_trans=dict( - type='interpolation_conv', - mode='nearest', - kernel_size=3, - norm_cfg=norm_cfg, - order=('act', 'conv', 'norm'), - inplace=False), - across_up_trans=None, - across_skip_trans=dict( - type='conv', - kernel_size=1, - norm_cfg=norm_cfg, - inplace=False, - order=('act', 'conv', 'norm')), - output_trans=dict( - type='last_conv', - kernel_size=3, - order=('act', 'conv', 'norm'), - inplace=False), - norm_cfg=norm_cfg, - skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])) diff --git a/cv/detection/co-detr/pytorch/configs/fpg/faster_rcnn_r50_fpn_crop640_50e_coco.py b/cv/detection/co-detr/pytorch/configs/fpg/faster_rcnn_r50_fpn_crop640_50e_coco.py deleted file mode 100644 index e4ec940a0dd6a4441fa0256cde045722aae7707c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fpg/faster_rcnn_r50_fpn_crop640_50e_coco.py +++ /dev/null @@ -1,73 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -norm_cfg = dict(type='BN', requires_grad=True) -model = dict( - backbone=dict(norm_cfg=norm_cfg, norm_eval=False), - neck=dict(norm_cfg=norm_cfg), - roi_head=dict(bbox_head=dict(norm_cfg=norm_cfg))) -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=(640, 640), - ratio_range=(0.8, 1.2), - keep_ratio=True), - dict(type='RandomCrop', crop_size=(640, 640)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=(640, 640)), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(640, 640), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=64), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# learning policy -optimizer = dict( - type='SGD', - lr=0.08, - momentum=0.9, - weight_decay=0.0001, - paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=0.1, - step=[30, 40]) -# runtime settings -runner = dict(max_epochs=50) -evaluation = dict(interval=2) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py b/cv/detection/co-detr/pytorch/configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py deleted file mode 100644 index baa4a5affc9b3ead0080d993b14f0d00392c2de5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = 'mask_rcnn_r50_fpg_crop640_50e_coco.py' - -model = dict( - neck=dict(out_channels=128, inter_channels=128), - rpn_head=dict(in_channels=128), - roi_head=dict( - bbox_roi_extractor=dict(out_channels=128), - bbox_head=dict(in_channels=128), - mask_roi_extractor=dict(out_channels=128), - mask_head=dict(in_channels=128))) diff --git a/cv/detection/co-detr/pytorch/configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py b/cv/detection/co-detr/pytorch/configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py deleted file mode 100644 index 3c9ea27617c85c54309ac454fff253a6d0462735..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py +++ /dev/null @@ -1,48 +0,0 @@ -_base_ = 'mask_rcnn_r50_fpn_crop640_50e_coco.py' - -norm_cfg = dict(type='BN', requires_grad=True) -model = dict( - neck=dict( - type='FPG', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - inter_channels=256, - num_outs=5, - stack_times=9, - paths=['bu'] * 9, - same_down_trans=None, - same_up_trans=dict( - type='conv', - kernel_size=3, - stride=2, - padding=1, - norm_cfg=norm_cfg, - inplace=False, - order=('act', 'conv', 'norm')), - across_lateral_trans=dict( - type='conv', - kernel_size=1, - norm_cfg=norm_cfg, - inplace=False, - order=('act', 'conv', 'norm')), - across_down_trans=dict( - type='interpolation_conv', - mode='nearest', - kernel_size=3, - norm_cfg=norm_cfg, - order=('act', 'conv', 'norm'), - inplace=False), - across_up_trans=None, - across_skip_trans=dict( - type='conv', - kernel_size=1, - norm_cfg=norm_cfg, - inplace=False, - order=('act', 'conv', 'norm')), - output_trans=dict( - type='last_conv', - kernel_size=3, - order=('act', 'conv', 'norm'), - inplace=False), - norm_cfg=norm_cfg, - skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])) diff --git a/cv/detection/co-detr/pytorch/configs/fpg/mask_rcnn_r50_fpn_crop640_50e_coco.py b/cv/detection/co-detr/pytorch/configs/fpg/mask_rcnn_r50_fpn_crop640_50e_coco.py deleted file mode 100644 index c6bcc242bf54c56cf69c9c00c6e2b825a3c9e456..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fpg/mask_rcnn_r50_fpn_crop640_50e_coco.py +++ /dev/null @@ -1,79 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -norm_cfg = dict(type='BN', requires_grad=True) -model = dict( - backbone=dict(norm_cfg=norm_cfg, norm_eval=False), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - norm_cfg=norm_cfg, - num_outs=5), - roi_head=dict( - bbox_head=dict(norm_cfg=norm_cfg), mask_head=dict(norm_cfg=norm_cfg))) -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=(640, 
640), - ratio_range=(0.8, 1.2), - keep_ratio=True), - dict(type='RandomCrop', crop_size=(640, 640)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=(640, 640)), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(640, 640), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=64), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# learning policy -optimizer = dict( - type='SGD', - lr=0.08, - momentum=0.9, - weight_decay=0.0001, - paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=0.1, - step=[30, 40]) -# runtime settings -runner = dict(max_epochs=50) -evaluation = dict(interval=2) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/fpg/metafile.yml b/cv/detection/co-detr/pytorch/configs/fpg/metafile.yml deleted file mode 100644 index 6b0a6a796d3a3eafbc4d6a558903f2f16ab39319..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fpg/metafile.yml +++ /dev/null @@ -1,104 +0,0 @@ -Collections: - - Name: Feature Pyramid Grids - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Feature Pyramid Grids - Paper: - URL: https://arxiv.org/abs/2004.03580 - Title: 'Feature Pyramid Grids' - README: configs/fpg/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.10.0/mmdet/models/necks/fpg.py#L101 - Version: v2.10.0 - -Models: - - Name: faster_rcnn_r50_fpg_crop640_50e_coco - In Collection: Feature Pyramid Grids - Config: configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py - Metadata: - Training Memory (GB): 20.0 - Epochs: 50 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg_crop640_50e_coco/faster_rcnn_r50_fpg_crop640_50e_coco_20220311_011856-74109f42.pth - - - Name: faster_rcnn_r50_fpg-chn128_crop640_50e_coco - In Collection: Feature Pyramid Grids - Config: configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py - Metadata: - Training Memory (GB): 11.9 - Epochs: 50 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco/faster_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011857-9376aa9d.pth - - - Name: mask_rcnn_r50_fpg_crop640_50e_coco - In Collection: Feature Pyramid Grids - Config: configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py - Metadata: - Training Memory (GB): 23.2 - Epochs: 50 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: 
- mask AP: 38.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg_crop640_50e_coco/mask_rcnn_r50_fpg_crop640_50e_coco_20220311_011857-233b8334.pth - - - Name: mask_rcnn_r50_fpg-chn128_crop640_50e_coco - In Collection: Feature Pyramid Grids - Config: configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py - Metadata: - Training Memory (GB): 15.3 - Epochs: 50 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco/mask_rcnn_r50_fpg-chn128_crop640_50e_coco_20220311_011859-043c9b4e.pth - - - Name: retinanet_r50_fpg_crop640_50e_coco - In Collection: Feature Pyramid Grids - Config: configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py - Metadata: - Training Memory (GB): 20.8 - Epochs: 50 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg_crop640_50e_coco/retinanet_r50_fpg_crop640_50e_coco_20220311_110809-b0bcf5f4.pth - - - Name: retinanet_r50_fpg-chn128_crop640_50e_coco - In Collection: Feature Pyramid Grids - Config: configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py - Metadata: - Training Memory (GB): 19.9 - Epochs: 50 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco/retinanet_r50_fpg-chn128_crop640_50e_coco_20220313_104829-ee99a686.pth diff --git a/cv/detection/co-detr/pytorch/configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py b/cv/detection/co-detr/pytorch/configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py deleted file mode 100644 index 9a6cf7e56a4f23a42d3905560a9b8035d6d935ff..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = 'retinanet_r50_fpg_crop640_50e_coco.py' - -model = dict( - neck=dict(out_channels=128, inter_channels=128), - bbox_head=dict(in_channels=128)) diff --git a/cv/detection/co-detr/pytorch/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py b/cv/detection/co-detr/pytorch/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py deleted file mode 100644 index 504ed5ec5040559b3d10f7caf8a970005a1a92d7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py +++ /dev/null @@ -1,53 +0,0 @@ -_base_ = '../nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py' - -norm_cfg = dict(type='BN', requires_grad=True) -model = dict( - neck=dict( - _delete_=True, - type='FPG', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - inter_channels=256, - num_outs=5, - add_extra_convs=True, - start_level=1, - stack_times=9, - paths=['bu'] * 9, - same_down_trans=None, - same_up_trans=dict( - type='conv', - kernel_size=3, - stride=2, - padding=1, - norm_cfg=norm_cfg, - inplace=False, - order=('act', 'conv', 'norm')), - across_lateral_trans=dict( - type='conv', - kernel_size=1, - norm_cfg=norm_cfg, - inplace=False, - order=('act', 'conv', 'norm')), - across_down_trans=dict( - type='interpolation_conv', - mode='nearest', - kernel_size=3, - norm_cfg=norm_cfg, - order=('act', 'conv', 'norm'), - inplace=False), - across_up_trans=None, - across_skip_trans=dict( - type='conv', - kernel_size=1, - norm_cfg=norm_cfg, - inplace=False, - 
order=('act', 'conv', 'norm')), - output_trans=dict( - type='last_conv', - kernel_size=3, - order=('act', 'conv', 'norm'), - inplace=False), - norm_cfg=norm_cfg, - skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])) - -evaluation = dict(interval=2) diff --git a/cv/detection/co-detr/pytorch/configs/free_anchor/README.md b/cv/detection/co-detr/pytorch/configs/free_anchor/README.md deleted file mode 100644 index d24c34050f9a75ebe337c9cf59d4317edfc5d635..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/free_anchor/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# FreeAnchor - -> [FreeAnchor: Learning to Match Anchors for Visual Object Detection](https://arxiv.org/abs/1909.02466) - - - -## Abstract - -Modern CNN-based object detectors assign anchors for ground-truth objects under the restriction of object-anchor Intersection-over-Unit (IoU). In this study, we propose a learning-to-match approach to break IoU restriction, allowing objects to match anchors in a flexible manner. Our approach, referred to as FreeAnchor, updates hand-crafted anchor assignment to "free" anchor matching by formulating detector training as a maximum likelihood estimation (MLE) procedure. FreeAnchor targets at learning features which best explain a class of objects in terms of both classification and localization. FreeAnchor is implemented by optimizing detection customized likelihood and can be fused with CNN-based detectors in a plug-and-play manner. Experiments on COCO demonstrate that FreeAnchor consistently outperforms their counterparts with significant margins. - -
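The learning-to-match formulation above can be summarized by the paper's "mean-max" aggregation: during training each object keeps a bag of candidate anchors, and the positive likelihood is a saturated maximum over the bag's joint classification-localization confidences. The snippet below is only a schematic sketch of that aggregation with toy confidence values; the implementation actually used by these configs is the `FreeAnchorRetinaHead` defined further down.

```python
# Schematic sketch (not the mmdet implementation) of FreeAnchor's
# learning-to-match idea: each object keeps a "bag" of candidate anchors,
# and the positive likelihood is a soft maximum over the bag's joint
# classification x localization confidences.
import torch


def mean_max(x, eps=1e-6):
    # Saturated mean-max from the paper: behaves like a mean early in
    # training (all confidences small) and approaches a hard max as one
    # anchor in the bag becomes confident.
    weight = 1.0 / (1.0 - x).clamp(min=eps)
    return (weight * x).sum() / weight.sum()


# Toy joint confidences P(cls) * P(loc) for one object's anchor bag.
bag_conf = torch.tensor([0.05, 0.20, 0.60, 0.10])
positive_loss = -torch.log(mean_max(bag_conf).clamp(min=1e-6))
print(float(positive_loss))
```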
- -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :---------: | :-----: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | pytorch | 1x | 4.9 | 18.4 | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130_095625.log.json) | -| R-101 | pytorch | 1x | 6.8 | 14.9 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco/retinanet_free_anchor_r101_fpn_1x_coco_20200130-358324e6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco/retinanet_free_anchor_r101_fpn_1x_coco_20200130_100723.log.json) | -| X-101-32x4d | pytorch | 1x | 8.1 | 11.1 | 41.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco/retinanet_free_anchor_x101_32x4d_fpn_1x_coco_20200130-d4846968.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco/retinanet_free_anchor_x101_32x4d_fpn_1x_coco_20200130_095627.log.json) | - -**Notes:** - -- We use 8 GPUs with 2 images/GPU. -- For more settings and models, please refer to the [official repo](https://github.com/zhangxiaosong18/FreeAnchor). 
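Since each config here only overrides the RetinaNet baseline through `_base_` inheritance, a quick sanity check before launching a run is to resolve the merged config. The sketch below assumes an mmdetection 2.x checkout with mmcv installed and the config file on disk; with the standard launcher (`tools/dist_train.sh <config> 8`) the setup matches the 8 GPUs noted above.

```python
# Minimal sketch: resolve a FreeAnchor config after its `_base_` inheritance
# and `_delete_=True` overrides are applied. Assumes an mmdetection 2.x
# checkout with mmcv installed and the config path below present.
from mmcv import Config

cfg = Config.fromfile(
    'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py')

# The whole bbox_head is replaced (it is declared with _delete_=True),
# while the backbone and neck still come from the RetinaNet base config.
print(cfg.model.bbox_head.type)        # 'FreeAnchorRetinaHead'
print(cfg.optimizer_config.grad_clip)  # grad clipping overridden in this config
```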
- -## Citation - -```latex -@inproceedings{zhang2019freeanchor, - title = {{FreeAnchor}: Learning to Match Anchors for Visual Object Detection}, - author = {Zhang, Xiaosong and Wan, Fang and Liu, Chang and Ji, Rongrong and Ye, Qixiang}, - booktitle = {Neural Information Processing Systems}, - year = {2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/free_anchor/metafile.yml b/cv/detection/co-detr/pytorch/configs/free_anchor/metafile.yml deleted file mode 100644 index 170fb5c07a7277f5bf4f0a563284b6504dacebfe..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/free_anchor/metafile.yml +++ /dev/null @@ -1,79 +0,0 @@ -Collections: - - Name: FreeAnchor - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FreeAnchor - - ResNet - Paper: - URL: https://arxiv.org/abs/1909.02466 - Title: 'FreeAnchor: Learning to Match Anchors for Visual Object Detection' - README: configs/free_anchor/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/dense_heads/free_anchor_retina_head.py#L10 - Version: v2.0.0 - -Models: - - Name: retinanet_free_anchor_r50_fpn_1x_coco - In Collection: FreeAnchor - Config: configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 4.9 - inference time (ms/im): - - value: 54.35 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco/retinanet_free_anchor_r50_fpn_1x_coco_20200130-0f67375f.pth - - - Name: retinanet_free_anchor_r101_fpn_1x_coco - In Collection: FreeAnchor - Config: configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.8 - inference time (ms/im): - - value: 67.11 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco/retinanet_free_anchor_r101_fpn_1x_coco_20200130-358324e6.pth - - - Name: retinanet_free_anchor_x101_32x4d_fpn_1x_coco - In Collection: FreeAnchor - Config: configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 8.1 - inference time (ms/im): - - value: 90.09 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco/retinanet_free_anchor_x101_32x4d_fpn_1x_coco_20200130-d4846968.pth diff --git a/cv/detection/co-detr/pytorch/configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py deleted file mode 100644 index f4aea53cc39f4fd441ae9c9f3a6f541b2fa36929..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', 
- checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py deleted file mode 100644 index 28f983c29edd071b32a50f18ac7b3f5c1bfdda88..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py +++ /dev/null @@ -1,22 +0,0 @@ -_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' -model = dict( - bbox_head=dict( - _delete_=True, - type='FreeAnchorRetinaHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.75))) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index 65f8a9e2a4d221732dcf55a4a4d4b07041271668..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/fsaf/README.md b/cv/detection/co-detr/pytorch/configs/fsaf/README.md deleted file mode 100644 index 4392a6e46e2a3371d3e09643d4990142cac4aac5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fsaf/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# FSAF - -> [Feature Selective Anchor-Free Module for Single-Shot Object Detection](https://arxiv.org/abs/1903.00621) - - - -## Abstract - -We motivate and present feature selective anchor-free (FSAF) module, a simple and effective building block for single-shot object detectors. It can be plugged into single-shot detectors with feature pyramid structure. The FSAF module addresses two limitations brought up by the conventional anchor-based detection: 1) heuristic-guided feature selection; 2) overlap-based anchor sampling. The general concept of the FSAF module is online feature selection applied to the training of multi-level anchor-free branches. Specifically, an anchor-free branch is attached to each level of the feature pyramid, allowing box encoding and decoding in the anchor-free manner at an arbitrary level. During training, we dynamically assign each instance to the most suitable feature level. At the time of inference, the FSAF module can work jointly with anchor-based branches by outputting predictions in parallel. We instantiate this concept with simple implementations of anchor-free branches and online feature selection strategy. Experimental results on the COCO detection track show that our FSAF module performs better than anchor-based counterparts while being faster. 
When working jointly with anchor-based branches, the FSAF module robustly improves the baseline RetinaNet by a large margin under various settings, while introducing nearly free inference overhead. And the resulting best model can achieve a state-of-the-art 44.6% mAP, outperforming all existing single-shot detectors on COCO. - -
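The online feature selection described above boils down to a per-instance argmin over pyramid levels: at every iteration each ground-truth box is re-assigned to the level whose anchor-free branch currently incurs the lowest combined classification and regression loss for it. The snippet below is a toy sketch of that selection step with placeholder losses, not the mmdet implementation.

```python
# Schematic sketch of FSAF's online feature selection (not the mmdet
# implementation): each ground-truth instance is assigned to the pyramid
# level whose anchor-free branch currently gives it the lowest loss.
import torch

num_instances, num_levels = 3, 5  # e.g. levels P3-P7

# Placeholder per-instance, per-level losses (focal classification + IoU
# regression, summed over each instance's effective region); in training
# these come from a forward pass of every anchor-free branch.
level_losses = torch.rand(num_instances, num_levels)

assigned_level = level_losses.argmin(dim=1)
print(assigned_level)  # e.g. tensor([2, 0, 4])
```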
- -## Introduction - -FSAF is an anchor-free method published in CVPR2019 ([https://arxiv.org/pdf/1903.00621.pdf](https://arxiv.org/pdf/1903.00621.pdf)). -Actually it is equivalent to the anchor-based method with only one anchor at each feature map position in each FPN level. -And this is how we implemented it. -Only the anchor-free branch is released for its better compatibility with the current framework and less computational budget. - -In the original paper, feature maps within the central 0.2-0.5 area of a gt box are tagged as ignored. However, -it is empirically found that a hard threshold (0.2-0.2) gives a further gain on the performance. (see the table below) - -## Results and Models - -### Results on R50/R101/X101-FPN - -| Backbone | ignore range | ms-train | Lr schd | Train Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP | Config | Download | -| :------: | :----------: | :------: | :-----: | :------------: | :-----------------: | :------------: | :---------: | :---------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | 0.2-0.5 | N | 1x | 3.15 | 0.43 | 12.3 | 36.0 (35.9) | | [model](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco_20200715-b555b0e0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco/fsaf_pscale0.2_nscale0.5_r50_fpn_1x_coco_20200715_094657.log.json) | -| R-50 | 0.2-0.2 | N | 1x | 3.15 | 0.43 | 13.0 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fsaf/fsaf_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco-94ccc51f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco_20200428_072327.log.json) | -| R-101 | 0.2-0.2 | N | 1x | 5.08 | 0.58 | 10.8 | 39.3 (37.9) | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fsaf/fsaf_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r101_fpn_1x_coco/fsaf_r101_fpn_1x_coco-9e71098f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r101_fpn_1x_coco/fsaf_r101_fpn_1x_coco_20200428_160348.log.json) | -| X-101 | 0.2-0.2 | N | 1x | 9.38 | 1.23 | 5.6 | 42.4 (41.0) | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_x101_64x4d_fpn_1x_coco/fsaf_x101_64x4d_fpn_1x_coco-e3f6e6fd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_x101_64x4d_fpn_1x_coco/fsaf_x101_64x4d_fpn_1x_coco_20200428_160424.log.json) | - -**Notes:** - -- *1x means the model is trained for 12 epochs.* -- *AP values in the brackets represent those reported in the original paper.* -- *All results are obtained with a single model and single-scale test.* -- *X-101 backbone represents ResNext-101-64x4d.* -- *All pretrained backbones use pytorch style.* -- *All models are trained on 8 Titan-XP gpus and tested on a single gpu.* - -## Citation - -BibTeX reference 
is as follows. - -```latex -@inproceedings{zhu2019feature, - title={Feature Selective Anchor-Free Module for Single-Shot Object Detection}, - author={Zhu, Chenchen and He, Yihui and Savvides, Marios}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={840--849}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/fsaf/fsaf_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/fsaf/fsaf_r101_fpn_1x_coco.py deleted file mode 100644 index 12b49fed5b6cd617aa9c05d76ed737d755992a34..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fsaf/fsaf_r101_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './fsaf_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/fsaf/fsaf_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/fsaf/fsaf_r50_fpn_1x_coco.py deleted file mode 100644 index 67f3ec1c4c16fb9bd041dbb3a24d269a83145f26..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fsaf/fsaf_r50_fpn_1x_coco.py +++ /dev/null @@ -1,48 +0,0 @@ -_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' -# model settings -model = dict( - type='FSAF', - bbox_head=dict( - type='FSAFHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - reg_decoded_bbox=True, - # Only anchor-free branch is implemented. The anchor generator only - # generates 1 anchor at each feature point, as a substitute of the - # grid of features. - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=1, - scales_per_octave=1, - ratios=[1.0], - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict(_delete_=True, type='TBLRBBoxCoder', normalizer=4.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0, - reduction='none'), - loss_bbox=dict( - _delete_=True, - type='IoULoss', - eps=1e-6, - loss_weight=1.0, - reduction='none')), - # training and testing settings - train_cfg=dict( - assigner=dict( - _delete_=True, - type='CenterRegionAssigner', - pos_scale=0.2, - neg_scale=0.2, - min_pos_iof=0.01), - allowed_border=-1, - pos_weight=-1, - debug=False)) -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=10, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index 89c0c6344aba6e6eae5657eff60745645dd1e8dc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './fsaf_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/fsaf/metafile.yml b/cv/detection/co-detr/pytorch/configs/fsaf/metafile.yml deleted file mode 100644 index 5434e9adfa620598c5454de1874371d9d2545981..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/fsaf/metafile.yml +++ /dev/null @@ -1,80 +0,0 @@ -Collections: - - Name: FSAF - 
Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x Titan-XP GPUs - Architecture: - - FPN - - FSAF - - ResNet - Paper: - URL: https://arxiv.org/abs/1903.00621 - Title: 'Feature Selective Anchor-Free Module for Single-Shot Object Detection' - README: configs/fsaf/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/detectors/fsaf.py#L6 - Version: v2.1.0 - -Models: - - Name: fsaf_r50_fpn_1x_coco - In Collection: FSAF - Config: configs/fsaf/fsaf_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 3.15 - inference time (ms/im): - - value: 76.92 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r50_fpn_1x_coco/fsaf_r50_fpn_1x_coco-94ccc51f.pth - - - Name: fsaf_r101_fpn_1x_coco - In Collection: FSAF - Config: configs/fsaf/fsaf_r101_fpn_1x_coco.py - Metadata: - Training Memory (GB): 5.08 - inference time (ms/im): - - value: 92.59 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.3 (37.9) - Weights: https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_r101_fpn_1x_coco/fsaf_r101_fpn_1x_coco-9e71098f.pth - - - Name: fsaf_x101_64x4d_fpn_1x_coco - In Collection: FSAF - Config: configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 9.38 - inference time (ms/im): - - value: 178.57 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.4 (41.0) - Weights: https://download.openmmlab.com/mmdetection/v2.0/fsaf/fsaf_x101_64x4d_fpn_1x_coco/fsaf_x101_64x4d_fpn_1x_coco-e3f6e6fd.pth diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/README.md b/cv/detection/co-detr/pytorch/configs/gcnet/README.md deleted file mode 100644 index 403e0861569e885c9df3baf9514a976b8a451390..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# GCNet - -> [GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond](https://arxiv.org/abs/1904.11492) - - - -## Abstract - -The Non-Local Network (NLNet) presents a pioneering approach for capturing long-range dependencies, via aggregating query-specific global context to each query position. However, through a rigorous empirical analysis, we have found that the global contexts modeled by non-local network are almost the same for different query positions within an image. In this paper, we take advantage of this finding to create a simplified network based on a query-independent formulation, which maintains the accuracy of NLNet but with significantly less computation. We further observe that this simplified design shares similar structure with Squeeze-Excitation Network (SENet). Hence we unify them into a three-step general framework for global context modeling. Within the general framework, we design a better instantiation, called the global context (GC) block, which is lightweight and can effectively model the global context. 
The lightweight property allows us to apply it for multiple layers in a backbone network to construct a global context network (GCNet), which generally outperforms both simplified NLNet and SENet on major benchmarks for various recognition tasks. - -
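The GC block used by the configs below is inserted into the backbone as the `ContextBlock` plugin (see the `plugins=[dict(cfg=dict(type='ContextBlock', ...))]` entries further down). As a rough illustration of its three steps (query-independent attention pooling, a bottleneck transform, and broadcast fusion back onto every position), here is a simplified PyTorch sketch; it is not the exact plugin implementation and assumes additive fusion with ratio 1/16.

```python
# Simplified GC block sketch; the configs below use the ContextBlock plugin,
# this is only an illustration of the three-step design.
import torch
import torch.nn as nn


class GCBlock(nn.Module):
    def __init__(self, channels, ratio=1. / 16):
        super().__init__()
        hidden = max(int(channels * ratio), 1)
        # 1) context modeling: one query-independent attention map over all positions
        self.attn = nn.Conv2d(channels, 1, kernel_size=1)
        # 2) bottleneck transform (SE-style, with LayerNorm as in the paper)
        self.transform = nn.Sequential(
            nn.Conv2d(channels, hidden, kernel_size=1),
            nn.LayerNorm([hidden, 1, 1]),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden, channels, kernel_size=1))

    def forward(self, x):
        n, c, h, w = x.shape
        weights = self.attn(x).view(n, 1, h * w).softmax(dim=-1)          # (N, 1, HW)
        context = torch.bmm(weights, x.view(n, c, h * w).transpose(1, 2))  # (N, 1, C)
        context = context.transpose(1, 2).view(n, c, 1, 1)                 # (N, C, 1, 1)
        # 3) fusion: broadcast the transformed global context to every position
        return x + self.transform(context)


out = GCBlock(256)(torch.randn(2, 256, 32, 32))
print(out.shape)  # torch.Size([2, 256, 32, 32])
```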
- -## Introduction - -By [Yue Cao](http://yue-cao.me), [Jiarui Xu](http://jerryxu.net), [Stephen Lin](https://scholar.google.com/citations?user=c3PYmxUAAAAJ&hl=en), Fangyun Wei, [Han Hu](https://sites.google.com/site/hanhushomepage/). - -We provide config files to reproduce the results in the paper for -["GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond"](https://arxiv.org/abs/1904.11492) on COCO object detection. - -**GCNet** is initially described in [arxiv](https://arxiv.org/abs/1904.11492). Via absorbing advantages of Non-Local Networks (NLNet) and Squeeze-Excitation Networks (SENet), GCNet provides a simple, fast and effective approach for global context modeling, which generally outperforms both NLNet and SENet on major benchmarks for various recognition tasks. - -## Results and Models - -The results on COCO 2017val are shown in the below table. - -| Backbone | Model | Context | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------: | :---: | :------------: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | Mask | GC(c3-c5, r16) | 1x | 5.0 | | 39.7 | 35.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco_20200515_211915-187da160.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco_20200515_211915.log.json) | -| R-50-FPN | Mask | GC(c3-c5, r4) | 1x | 5.1 | 15.0 | 39.9 | 36.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco_20200204-17235656.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco_20200204_024626.log.json) | -| R-101-FPN | Mask | GC(c3-c5, r16) | 1x | 7.6 | 11.4 | 41.3 | 37.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco_20200205-e58ae947.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco_20200205_192835.log.json) | -| R-101-FPN | Mask | GC(c3-c5, r4) | 1x | 7.8 | 11.6 | 42.2 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco_20200206-af22dc9d.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco_20200206_112128.log.json) | - -| Backbone | Model | Context | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------: | :--------------: | :------------: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | Mask | - | 1x | 4.4 | 16.6 | 38.4 | 34.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco_20200202-bb3eb55c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco_20200202_214122.log.json) | -| R-50-FPN | Mask | GC(c3-c5, r16) | 1x | 5.0 | 15.5 | 40.4 | 36.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202_174907.log.json) | -| R-50-FPN | Mask | GC(c3-c5, r4) | 1x | 5.1 | 15.1 | 40.7 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202-50b90e5c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202_085547.log.json) | -| R-101-FPN | Mask | - | 1x | 6.4 | 13.3 | 40.5 | 36.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco_20200210-81658c8a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco_20200210_220422.log.json) | -| R-101-FPN | Mask | GC(c3-c5, r16) | 1x | 7.6 | 12.0 | 42.2 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200207-945e77ca.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200207_015330.log.json) | -| R-101-FPN | Mask | GC(c3-c5, r4) | 1x | 7.8 | 11.8 | 42.2 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206-8407a3f0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206_142508.log.json) | -| X-101-FPN | Mask | - | 1x | 7.6 | 11.3 | 42.4 | 37.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200211-7584841c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200211_054326.log.json) | -| X-101-FPN | Mask | GC(c3-c5, r16) | 1x | 8.8 | 9.8 | 43.5 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-cbed3d2c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211_164715.log.json) | -| X-101-FPN | Mask | GC(c3-c5, r4) | 1x | 9.0 | 9.7 | 43.9 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200212-68164964.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200212_070942.log.json) | -| X-101-FPN | Cascade Mask | - | 1x | 9.2 | 8.4 | 44.7 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200310-d5ad2a5e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200310_115217.log.json) | -| X-101-FPN | Cascade Mask | GC(c3-c5, r16) | 1x | 10.3 | 7.7 | 46.2 | 39.7 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-10bf2463.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211_184154.log.json) | -| X-101-FPN | Cascade Mask | GC(c3-c5, r4) | 1x | 10.6 | | 46.4 | 40.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200703_180653-ed035291.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200703_180653.log.json) | -| X-101-FPN | DCN Cascade Mask | - | 1x | | | 47.5 | 40.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco_20210615_211019-abbc39ea.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco_20210615_211019.log.json) | -| X-101-FPN | DCN Cascade Mask | GC(c3-c5, r16) | 1x | | | 48.0 | 41.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco_20210615_215648-44aa598a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco_20210615_215648.log.json) | -| X-101-FPN | DCN Cascade Mask | GC(c3-c5, r4) | 1x | | | 47.9 | 41.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco_20210615_161851-720338ec.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco_20210615_161851.log.json) | - -**Notes:** - -- The `SyncBN` is added in the backbone for all models in 
**Table 2**. -- `GC` denotes Global Context (GC) block is inserted after 1x1 conv of backbone. -- `DCN` denotes replace 3x3 conv with 3x3 Deformable Convolution in `c3-c5` stages of backbone. -- `r4` and `r16` denote ratio 4 and ratio 16 in GC block respectively. - -## Citation - -```latex -@article{cao2019GCNet, - title={GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond}, - author={Cao, Yue and Xu, Jiarui and Lin, Stephen and Wei, Fangyun and Hu, Han}, - journal={arXiv preprint arXiv:1904.11492}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py deleted file mode 100644 index 5118895f00345a42fdbc6d2edba084ccd3f1a3c8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py deleted file mode 100644 index 413499dd6d3fe88e91e357a62461f47f037fcedf..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = '../dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 50689aadf6cab9414aab1a7a9e72ef8231355e4f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 13672312a8f5c57c5799ca6df4d52fed103287b4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. 
/ 4), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 50883ffeb16369ea6210f2ece8fc2d7e084b0134..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 31fdd070595ac0512a39075bb045dd18035d3f14..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 4), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py deleted file mode 100644 index ad6ad47696e6aeb2b3505abab0bd2d49d3b7aa83..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict(plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 29f91674c6d54bfa6fdcfcb5b7e2ec2a2bbf81fa..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict(plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. 
/ 4), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py deleted file mode 100644 index 6e1c5d0cadfb9fb3a4f8645e28a8e67fc499e900..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 781dba78d68e77fa7eee15f5bbcc539731f8378d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 32972de857b3c4f43170dcd3e7fbce76425f094d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 4), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py deleted file mode 100644 index d299b69f576a2547de1f7d9edd171d56ab002d0a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict(plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 5ac908e60c1f964bdd6c3e61933a37c04d487bfb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict(plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. 
/ 4), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py deleted file mode 100644 index 0308a567c147413688c9da679d06f93b0e154d88..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py deleted file mode 100644 index e04780c50f96929997c279b23fe5fa427657039b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 16), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 980f8191d4c07eb35e338bd87e3b73b06b3214ad..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 4), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py deleted file mode 100644 index f0c96e58b6131f2958f28c56b9d8384d5b4746f7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False)) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 7fb8e82ece225ab6f88f1f4f83bea56a42cf1a57..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. 
/ 16), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py deleted file mode 100644 index b1ddbee3b4b79e79bb2a3faf30604f2465612728..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 4), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/cv/detection/co-detr/pytorch/configs/gcnet/metafile.yml b/cv/detection/co-detr/pytorch/configs/gcnet/metafile.yml deleted file mode 100644 index 1281122a776e56c8bfc93aad3efc44df60996ec0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gcnet/metafile.yml +++ /dev/null @@ -1,440 +0,0 @@ -Collections: - - Name: GCNet - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Global Context Block - - FPN - - RPN - - ResNet - - ResNeXt - Paper: - URL: https://arxiv.org/abs/1904.11492 - Title: 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond' - README: configs/gcnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/context_block.py#L13 - Version: v2.0.0 - -Models: - - Name: mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 5.0 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 35.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco_20200515_211915-187da160.pth - - - Name: mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 5.1 - inference time (ms/im): - - value: 66.67 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.9 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco_20200204-17235656.pth - - - Name: mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 7.6 - inference time (ms/im): - - value: 87.72 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.2 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco_20200205-e58ae947.pth - - - Name: mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 7.8 - inference time (ms/im): - - value: 86.21 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco_20200206-af22dc9d.pth - - - Name: mask_rcnn_r50_fpn_syncbn-backbone_1x_coco - In Collection: GCNet - Config: configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py - Metadata: - Training Memory (GB): 4.4 - inference time (ms/im): - - value: 60.24 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 34.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_1x_coco_20200202-bb3eb55c.pth - - - Name: mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 5.0 - inference time (ms/im): - - value: 64.52 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200202-587b99aa.pth - - - Name: mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 5.1 - inference time (ms/im): - - value: 66.23 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202-50b90e5c.pth - - - Name: mask_rcnn_r101_fpn_syncbn-backbone_1x_coco - In Collection: GCNet - Config: configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py - Metadata: - Training Memory (GB): 6.4 - inference time (ms/im): - - value: 75.19 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.3 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_1x_coco_20200210-81658c8a.pth - - - Name: mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 7.6 - inference time (ms/im): - - value: 83.33 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200207-945e77ca.pth - - - Name: mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 7.8 - inference time (ms/im): - - value: 84.75 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206-8407a3f0.pth - - - Name: mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco - In Collection: GCNet - Config: configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py - Metadata: - Training Memory (GB): 7.6 - inference time (ms/im): - - value: 88.5 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200211-7584841c.pth - - - Name: mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 8.8 - inference time (ms/im): - - value: 102.04 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-cbed3d2c.pth - - - Name: mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 9.0 - inference time (ms/im): - - value: 103.09 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.9 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.0 
- Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200212-68164964.pth - - - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco - In Collection: GCNet - Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py - Metadata: - Training Memory (GB): 9.2 - inference time (ms/im): - - value: 119.05 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco_20200310-d5ad2a5e.pth - - - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 10.3 - inference time (ms/im): - - value: 129.87 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco_20200211-10bf2463.pth - - - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 10.6 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 40.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200703_180653-ed035291.pth - - - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 47.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 40.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco_20210615_211019-abbc39ea.pth - - - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 48.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 41.3 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco_20210615_215648-44aa598a.pth - - - Name: cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco - In Collection: GCNet - Config: configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 47.9 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 41.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco_20210615_161851-720338ec.pth diff --git a/cv/detection/co-detr/pytorch/configs/gfl/README.md b/cv/detection/co-detr/pytorch/configs/gfl/README.md deleted file mode 100644 index 703936b3691622c20670584cbc205a48ccdd5c65..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gfl/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# GFL - -> [Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection](https://arxiv.org/abs/2006.04388) - - - -## Abstract - -One-stage detector basically formulates object detection as dense classification and localization. The classification is usually optimized by Focal Loss and the box location is commonly learned under Dirac delta distribution. A recent trend for one-stage detectors is to introduce an individual prediction branch to estimate the quality of localization, where the predicted quality facilitates the classification to improve detection performance. This paper delves into the representations of the above three fundamental elements: quality estimation, classification and localization. Two problems are discovered in existing practices, including (1) the inconsistent usage of the quality estimation and classification between training and inference and (2) the inflexible Dirac delta distribution for localization when there is ambiguity and uncertainty in complex scenes. To address the problems, we design new representations for these elements. Specifically, we merge the quality estimation into the class prediction vector to form a joint representation of localization quality and classification, and use a vector to represent arbitrary distribution of box locations. The improved representations eliminate the inconsistency risk and accurately depict the flexible distribution in real data, but contain continuous labels, which is beyond the scope of Focal Loss. We then propose Generalized Focal Loss (GFL) that generalizes Focal Loss from its discrete form to the continuous version for successful optimization. On COCO test-dev, GFL achieves 45.0% AP using ResNet-101 backbone, surpassing state-of-the-art SAPD (43.5%) and ATSS (43.6%) with higher or comparable inference speed, under the same backbone and training settings. Notably, our best model can achieve a single-model single-scale AP of 48.2%, at 10 FPS on a single 2080Ti GPU. - -
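For reference, a minimal PyTorch sketch of the two losses described in the deleted GFL README above: Quality Focal Loss over a joint classification–quality target, and Distribution Focal Loss over a discretized box-offset distribution. This is an illustrative approximation, not the `QualityFocalLoss`/`DistributionFocalLoss` implementations that `gfl_r50_fpn_1x_coco.py` actually configures; the function names and the assumption that offset targets lie in `[0, reg_max)` are ours.

```python
import torch.nn.functional as F

def quality_focal_loss(pred_logits, quality_targets, beta=2.0):
    # quality_targets are soft labels in [0, 1]: the IoU of the predicted box for
    # positives, 0 for negatives, so classification and quality share one target.
    pred_sigmoid = pred_logits.sigmoid()
    # |y - sigma(x)|^beta generalizes the focal modulating factor to continuous labels.
    modulating = (quality_targets - pred_sigmoid).abs().pow(beta)
    bce = F.binary_cross_entropy_with_logits(
        pred_logits, quality_targets, reduction='none')
    return (modulating * bce).sum()

def distribution_focal_loss(dist_logits, target):
    # dist_logits: (N, reg_max + 1) scores over discretized box offsets;
    # target: float offsets in [0, reg_max), supervised through the two nearest bins.
    left = target.long()
    right = left + 1
    weight_left = right.float() - target
    weight_right = target - left.float()
    return (F.cross_entropy(dist_logits, left, reduction='none') * weight_left +
            F.cross_entropy(dist_logits, right, reduction='none') * weight_right).sum()
```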
- -## Results and Models - -| Backbone | Style | Lr schd | Multi-scale Training | Inf time (fps) | box AP | Config | Download | -| :---------------: | :-----: | :-----: | :------------------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | pytorch | 1x | No | 19.5 | 40.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244.log.json) | -| R-50 | pytorch | 2x | Yes | 19.5 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802-37bb1edc.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802.log.json) | -| R-101 | pytorch | 2x | Yes | 14.7 | 44.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126.log.json) | -| R-101-dcnv2 | pytorch | 2x | Yes | 12.9 | 47.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002.log.json) | -| X-101-32x4d | pytorch | 2x | Yes | 12.1 | 45.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002-50c1ffdb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002.log.json) | -| X-101-32x4d-dcnv2 | pytorch | 2x | Yes | 10.7 | 48.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002-14a2bf25.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002.log.json) | - -\[1\] *1x and 2x mean the model is trained for 90K and 180K iterations, respectively.* \ -\[2\] *All results are obtained with a single model and without any test time data augmentation such as multi-scale, flipping and etc..* \ -\[3\] *`dcnv2` denotes deformable convolutional networks v2.* \ -\[4\] *FPS is tested with a single GeForce RTX 2080Ti GPU, using a batch size of 1.* - -## Citation - -We provide config files to reproduce the object detection results in the paper [Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection](https://arxiv.org/abs/2006.04388) - -```latex -@article{li2020generalized, - title={Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection}, - author={Li, Xiang and Wang, Wenhai and Wu, Lijun and Chen, Shuo and Hu, Xiaolin and Li, Jun and Tang, Jinhui and Yang, Jian}, - journal={arXiv preprint arXiv:2006.04388}, - year={2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py deleted file mode 100644 index b72c2b6eddfb51a0a61610826e00296e2b76f827..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py +++ /dev/null @@ -1,15 +0,0 @@ -_base_ = './gfl_r50_fpn_mstrain_2x_coco.py' -model = dict( - backbone=dict( - type='ResNet', - depth=101, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py deleted file mode 100644 index e33b5c0d27883d5b495c4dae88f550ffbb26a318..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './gfl_r50_fpn_mstrain_2x_coco.py' -model = dict( - backbone=dict( - type='ResNet', - depth=101, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/gfl/gfl_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gfl/gfl_r50_fpn_1x_coco.py deleted file mode 100644 index cfd4b02391a3d4cae0c060990be1f99b3edebabe..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gfl/gfl_r50_fpn_1x_coco.py +++ /dev/null @@ -1,57 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - type='GFL', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - 
in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5), - bbox_head=dict( - type='GFLHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128]), - loss_cls=dict( - type='QualityFocalLoss', - use_sigmoid=True, - beta=2.0, - loss_weight=1.0), - loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), - reg_max=16, - loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), - # training and testing settings - train_cfg=dict( - assigner=dict(type='ATSSAssigner', topk=9), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py deleted file mode 100644 index b8be60145758c191543ef0683234e63f02d8fe60..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,22 +0,0 @@ -_base_ = './gfl_r50_fpn_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) -# multi-scale training -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 480), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -data = dict(train=dict(pipeline=train_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py deleted file mode 100644 index 25398075cb866db8dd49d0bbd48cad19566e77e5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = './gfl_r50_fpn_mstrain_2x_coco.py' -model = dict( - type='GFL', - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, False, True, True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py deleted file mode 100644 index effda195cb0f18b3137c2b923d59f8cba025ba8e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = './gfl_r50_fpn_mstrain_2x_coco.py' -model = dict( - type='GFL', - backbone=dict( - 
type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/gfl/metafile.yml b/cv/detection/co-detr/pytorch/configs/gfl/metafile.yml deleted file mode 100644 index 8f049c6bc9209120c5b9526552b3d7b6f157cc93..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gfl/metafile.yml +++ /dev/null @@ -1,134 +0,0 @@ -Collections: - - Name: Generalized Focal Loss - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Generalized Focal Loss - - FPN - - ResNet - Paper: - URL: https://arxiv.org/abs/2006.04388 - Title: 'Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection' - README: configs/gfl/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/detectors/gfl.py#L6 - Version: v2.2.0 - -Models: - - Name: gfl_r50_fpn_1x_coco - In Collection: Generalized Focal Loss - Config: configs/gfl/gfl_r50_fpn_1x_coco.py - Metadata: - inference time (ms/im): - - value: 51.28 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth - - - Name: gfl_r50_fpn_mstrain_2x_coco - In Collection: Generalized Focal Loss - Config: configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py - Metadata: - inference time (ms/im): - - value: 51.28 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802-37bb1edc.pth - - - Name: gfl_r101_fpn_mstrain_2x_coco - In Collection: Generalized Focal Loss - Config: configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py - Metadata: - inference time (ms/im): - - value: 68.03 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth - - - Name: gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco - In Collection: Generalized Focal Loss - Config: configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py - Metadata: - inference time (ms/im): - - value: 77.52 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 47.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth - - - Name: gfl_x101_32x4d_fpn_mstrain_2x_coco - In Collection: Generalized Focal Loss - Config: configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py - Metadata: - inference time (ms/im): - - value: 82.64 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - 
resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002-50c1ffdb.pth - - - Name: gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco - In Collection: Generalized Focal Loss - Config: configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py - Metadata: - inference time (ms/im): - - value: 93.46 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 48.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002-14a2bf25.pth diff --git a/cv/detection/co-detr/pytorch/configs/ghm/README.md b/cv/detection/co-detr/pytorch/configs/ghm/README.md deleted file mode 100644 index cf9fb7370c7ba6d92553e1e0b896e5f9e7516ec2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ghm/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# GHM - -> [Gradient Harmonized Single-stage Detector](https://arxiv.org/abs/1811.05181) - - - -## Abstract - -Despite the great success of two-stage detectors, single-stage detector is still a more elegant and efficient way, yet suffers from the two well-known disharmonies during training, i.e. the huge difference in quantity between positive and negative examples as well as between easy and hard examples. In this work, we first point out that the essential effect of the two disharmonies can be summarized in term of the gradient. Further, we propose a novel gradient harmonizing mechanism (GHM) to be a hedging for the disharmonies. The philosophy behind GHM can be easily embedded into both classification loss function like cross-entropy (CE) and regression loss function like smooth-L1 (SL1) loss. To this end, two novel loss functions called GHM-C and GHM-R are designed to balancing the gradient flow for anchor classification and bounding box refinement, respectively. Ablation study on MS COCO demonstrates that without laborious hyper-parameter tuning, both GHM-C and GHM-R can bring substantial improvement for single-stage detector. Without any whistles and bells, our model achieves 41.6 mAP on COCO test-dev set which surpasses the state-of-the-art method, Focal Loss (FL) + SL1, by 0.8. - -
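A minimal sketch of the gradient harmonizing idea behind the GHM-C loss described in the deleted README above, assuming sigmoid outputs and binary float targets and omitting the moving-average bin statistics used in practice. It is not the `ghm_loss.py` implementation referenced in the metafile; the function name and normalization details are illustrative.

```python
import torch
import torch.nn.functional as F

def ghm_c_loss(pred_logits, targets, bins=30):
    # Gradient norm of sigmoid BCE w.r.t. the logit is |sigmoid(x) - y|.
    g = (pred_logits.detach().sigmoid() - targets).abs()
    edges = torch.linspace(0, 1, bins + 1, device=pred_logits.device)
    edges[-1] += 1e-6
    weights = torch.zeros_like(pred_logits)
    total = pred_logits.numel()
    nonempty = 0
    for i in range(bins):
        in_bin = (g >= edges[i]) & (g < edges[i + 1])
        count = in_bin.sum().item()
        if count > 0:
            # Down-weight densely populated gradient-norm regions (the very easy
            # and very hard examples); this is the harmonizing step.
            weights[in_bin] = total / count
            nonempty += 1
    if nonempty > 0:
        weights = weights / nonempty
    bce = F.binary_cross_entropy_with_logits(pred_logits, targets, reduction='none')
    return (bce * weights).sum() / total
```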
- -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | pytorch | 1x | 4.0 | 3.3 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r50_fpn_1x_coco/retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r50_fpn_1x_coco/retinanet_ghm_r50_fpn_1x_coco_20200130_004213.log.json) | -| R-101-FPN | pytorch | 1x | 6.0 | 4.4 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r101_fpn_1x_coco/retinanet_ghm_r101_fpn_1x_coco_20200130-c148ee8f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r101_fpn_1x_coco/retinanet_ghm_r101_fpn_1x_coco_20200130_145259.log.json) | -| X-101-32x4d-FPN | pytorch | 1x | 7.2 | 5.1 | 40.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco/retinanet_ghm_x101_32x4d_fpn_1x_coco_20200131-e4333bd0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco/retinanet_ghm_x101_32x4d_fpn_1x_coco_20200131_113653.log.json) | -| X-101-64x4d-FPN | pytorch | 1x | 10.3 | 5.2 | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco/retinanet_ghm_x101_64x4d_fpn_1x_coco_20200131-dd381cef.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco/retinanet_ghm_x101_64x4d_fpn_1x_coco_20200131_113723.log.json) | - -## Citation - -```latex -@inproceedings{li2019gradient, - title={Gradient Harmonized Single-stage Detector}, - author={Li, Buyu and Liu, Yu and Wang, Xiaogang}, - booktitle={AAAI Conference on Artificial Intelligence}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/ghm/metafile.yml b/cv/detection/co-detr/pytorch/configs/ghm/metafile.yml deleted file mode 100644 index b4f488c43659eb25f81ea0e573524ffff3738b4c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ghm/metafile.yml +++ /dev/null @@ -1,101 +0,0 @@ -Collections: - - Name: GHM - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - GHM-C - - GHM-R - - FPN - - ResNet - Paper: - URL: https://arxiv.org/abs/1811.05181 - Title: 'Gradient Harmonized Single-stage Detector' - README: configs/ghm/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/losses/ghm_loss.py#L21 - 
Version: v2.0.0 - -Models: - - Name: retinanet_ghm_r50_fpn_1x_coco - In Collection: GHM - Config: configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 4.0 - inference time (ms/im): - - value: 303.03 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r50_fpn_1x_coco/retinanet_ghm_r50_fpn_1x_coco_20200130-a437fda3.pth - - - Name: retinanet_ghm_r101_fpn_1x_coco - In Collection: GHM - Config: configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.0 - inference time (ms/im): - - value: 227.27 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_r101_fpn_1x_coco/retinanet_ghm_r101_fpn_1x_coco_20200130-c148ee8f.pth - - - Name: retinanet_ghm_x101_32x4d_fpn_1x_coco - In Collection: GHM - Config: configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.2 - inference time (ms/im): - - value: 196.08 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco/retinanet_ghm_x101_32x4d_fpn_1x_coco_20200131-e4333bd0.pth - - - Name: retinanet_ghm_x101_64x4d_fpn_1x_coco - In Collection: GHM - Config: configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 10.3 - inference time (ms/im): - - value: 192.31 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco/retinanet_ghm_x101_64x4d_fpn_1x_coco_20200131-dd381cef.pth diff --git a/cv/detection/co-detr/pytorch/configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py deleted file mode 100644 index aaf6fc26d323a99a92b0ce266c7c7dc8a919d6f3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ghm/retinanet_ghm_r101_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './retinanet_ghm_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py deleted file mode 100644 index 61b9751057f10f2173b8e7edde12cca53ebbd2d0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py +++ /dev/null @@ -1,19 +0,0 @@ -_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' -model = dict( - bbox_head=dict( - loss_cls=dict( - _delete_=True, - type='GHMC', - bins=30, - momentum=0.75, - use_sigmoid=True, - loss_weight=1.0), - loss_bbox=dict( - _delete_=True, - type='GHMR', - mu=0.02, - bins=10, - momentum=0.7, - loss_weight=10.0))) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, 
norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index cd2e4cc34b4526ff32d193c30d5884b16c6adf5c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ghm/retinanet_ghm_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './retinanet_ghm_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index b6107d8c31bd64dee3a70a1ea5e0167247af6b73..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ghm/retinanet_ghm_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './retinanet_ghm_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/gn+ws/README.md b/cv/detection/co-detr/pytorch/configs/gn+ws/README.md deleted file mode 100644 index 184bed36e1fba6c5c1a244a9674e402a92cfd8d4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn+ws/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# GN + WS - -> [Weight Standardization](https://arxiv.org/abs/1903.10520) - - - -## Abstract - -Batch Normalization (BN) has become an out-of-box technique to improve deep network training. However, its effectiveness is limited for micro-batch training, i.e., each GPU typically has only 1-2 images for training, which is inevitable for many computer vision tasks, e.g., object detection and semantic segmentation, constrained by memory consumption. To address this issue, we propose Weight Standardization (WS) and Batch-Channel Normalization (BCN) to bring two success factors of BN into micro-batch training: 1) the smoothing effects on the loss landscape and 2) the ability to avoid harmful elimination singularities along the training trajectory. WS standardizes the weights in convolutional layers to smooth the loss landscape by reducing the Lipschitz constants of the loss and the gradients; BCN combines batch and channel normalizations and leverages estimated statistics of the activations in convolutional layers to keep networks away from elimination singularities. We validate WS and BCN on comprehensive computer vision tasks, including image classification, object detection, instance segmentation, video recognition and semantic segmentation. All experimental results consistently show that WS and BCN improve micro-batch training significantly. Moreover, using WS and BCN with micro-batch training is even able to match or outperform the performances of BN with large-batch training. - -
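A minimal sketch of Weight Standardization as described in the deleted README above: each convolution filter is standardized over its fan-in before being applied, and the layer is paired with GroupNorm, which is the "GN+WS" setting these configs enable via `conv_cfg=dict(type='ConvWS')` and `norm_cfg=dict(type='GN', ...)`. This is illustrative only, not the `ConvWS` op the configs select; the class name `WSConv2d` is ours.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class WSConv2d(nn.Conv2d):
    """Conv2d whose weights are standardized per output channel before use."""

    def forward(self, x):
        w = self.weight
        # Zero mean, unit variance over each filter's (C_in, kH, kW) fan-in.
        mean = w.mean(dim=(1, 2, 3), keepdim=True)
        std = w.std(dim=(1, 2, 3), keepdim=True) + 1e-5
        w = (w - mean) / std
        return F.conv2d(x, w, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)

# Weight Standardization is typically combined with GroupNorm for micro-batch training.
block = nn.Sequential(WSConv2d(64, 64, 3, padding=1), nn.GroupNorm(32, 64), nn.ReLU())
out = block(torch.randn(2, 64, 56, 56))
```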
- -## Results and Models - -Faster R-CNN - -| Backbone | Style | Normalization | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------------: | :-----: | :-----------: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | pytorch | GN+WS | 1x | 5.9 | 11.7 | 39.7 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130_210936.log.json) | -| R-101-FPN | pytorch | GN+WS | 1x | 8.9 | 9.0 | 41.7 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco/faster_rcnn_r101_fpn_gn_ws-all_1x_coco_20200205-a93b0d75.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco/faster_rcnn_r101_fpn_gn_ws-all_1x_coco_20200205_232146.log.json) | -| X-50-32x4d-FPN | pytorch | GN+WS | 1x | 7.0 | 10.3 | 40.7 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco_20200203-839c5d9d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco_20200203_220113.log.json) | -| X-101-32x4d-FPN | pytorch | GN+WS | 1x | 10.8 | 7.6 | 42.1 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco_20200212-27da1bc2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco_20200212_195302.log.json) | - -Mask R-CNN - -| Backbone | Style | Normalization | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------------: | :-----: | :-----------: | :-------: | :------: | :------------: | :----: | :-----: | :----------------------------------------------------------------------------------------------------------------------------------: | 
:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | pytorch | GN+WS | 2x | 7.3 | 10.5 | 40.6 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco/mask_rcnn_r50_fpn_gn_ws-all_2x_coco_20200226-16acb762.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco/mask_rcnn_r50_fpn_gn_ws-all_2x_coco_20200226_062128.log.json) | -| R-101-FPN | pytorch | GN+WS | 2x | 10.3 | 8.6 | 42.0 | 37.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco/mask_rcnn_r101_fpn_gn_ws-all_2x_coco_20200212-ea357cd9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco/mask_rcnn_r101_fpn_gn_ws-all_2x_coco_20200212_213627.log.json) | -| X-50-32x4d-FPN | pytorch | GN+WS | 2x | 8.4 | 9.3 | 41.1 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco_20200216-649fdb6f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco_20200216_201500.log.json) | -| X-101-32x4d-FPN | pytorch | GN+WS | 2x | 12.2 | 7.1 | 42.1 | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco_20200319-33fb95b5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco_20200319_104101.log.json) | -| R-50-FPN | pytorch | GN+WS | 20-23-24e | 7.3 | - | 41.1 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco_20200213-487d1283.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco_20200213_035123.log.json) | -| R-101-FPN | pytorch | GN+WS | 20-23-24e | 10.3 | - | 43.1 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco_20200213-57b5a50f.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco_20200213_130142.log.json) | -| X-50-32x4d-FPN | pytorch | GN+WS | 20-23-24e | 8.4 | - | 42.1 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200226-969bcb2c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200226_093732.log.json) | -| X-101-32x4d-FPN | pytorch | GN+WS | 20-23-24e | 12.2 | - | 42.7 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200316-e6cd35ef.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200316_013741.log.json) | - -Note: - -- GN+WS requires about 5% more memory than GN, and it is only 5% slower than GN. -- In the paper, a 20-23-24e lr schedule is used instead of 2x. -- The X-50-GN and X-101-GN pretrained models are also shared by the authors. - -## Citation - -```latex -@article{weightstandardization, - author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille}, - title = {Weight Standardization}, - journal = {arXiv preprint arXiv:1903.10520}, - year = {2019}, -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py deleted file mode 100644 index cd2cb2b6348a9555b8c80c3f1398d8989ef3f7a0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://jhu/resnet101_gn_ws'))) diff --git a/cv/detection/co-detr/pytorch/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py deleted file mode 100644 index 1b326b88e7309ee217646b5550a23a6796ad5c0b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn+ws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -conv_cfg = dict(type='ConvWS') -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - backbone=dict( - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')), - neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg))) diff --git a/cv/detection/co-detr/pytorch/configs/gn+ws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py 
b/cv/detection/co-detr/pytorch/configs/gn+ws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py deleted file mode 100644 index f64ae89178ed351dbe4be80318b9a1da385853c2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn+ws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' -conv_cfg = dict(type='ConvWS') -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - style='pytorch', - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws'))) diff --git a/cv/detection/co-detr/pytorch/configs/gn+ws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py b/cv/detection/co-detr/pytorch/configs/gn+ws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py deleted file mode 100644 index 246851b9f2be4d0e0f129d20692d22acf194308a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn+ws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py' -conv_cfg = dict(type='ConvWS') -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - backbone=dict( - type='ResNeXt', - depth=50, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - style='pytorch', - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws'))) diff --git a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py b/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py deleted file mode 100644 index a790d932152420f5be0a05b21ac122087d315398..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py' -# learning policy -lr_config = dict(step=[20, 23]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py b/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py deleted file mode 100644 index a9fa6a2445020979a217ee3b648d49e5577d2357..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://jhu/resnet101_gn_ws'))) diff --git a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py b/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py deleted file mode 100644 index 55168085cd085c241bfbb85a76bb230241378faa..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' -# learning policy -lr_config = dict(step=[20, 23]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git 
a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py b/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py deleted file mode 100644 index 63be60ff8c117402aa46811ef86ba16aebc76a45..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py +++ /dev/null @@ -1,20 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -conv_cfg = dict(type='ConvWS') -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - backbone=dict( - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')), - neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg), - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg), - mask_head=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg))) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py b/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py deleted file mode 100644 index cfa14c99543382328b2cb4ac7c2d0dbb2a562017..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py' -# learning policy -lr_config = dict(step=[20, 23]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py b/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py deleted file mode 100644 index 6498b03fb4fda52a995b5b76da8b02385697ebc1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py +++ /dev/null @@ -1,19 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' -# model settings -conv_cfg = dict(type='ConvWS') -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - style='pytorch', - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://jhu/resnext101_32x4d_gn_ws'))) diff --git a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py b/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py deleted file mode 100644 index 79ce0adf1bf760c371bd1a1c3a9b028cef51c4b4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py' -# learning policy -lr_config = dict(step=[20, 23]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py b/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py deleted file mode 100644 index 7fac3175e3a4e900f5051bd0385a6dd828cef9c7..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py +++ /dev/null @@ -1,19 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py' -# model settings -conv_cfg = dict(type='ConvWS') -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - backbone=dict( - type='ResNeXt', - depth=50, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - style='pytorch', - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws'))) diff --git a/cv/detection/co-detr/pytorch/configs/gn+ws/metafile.yml b/cv/detection/co-detr/pytorch/configs/gn+ws/metafile.yml deleted file mode 100644 index bc89359cec36e124ff3f31b21c981968f2e21206..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn+ws/metafile.yml +++ /dev/null @@ -1,263 +0,0 @@ -Collections: - - Name: Weight Standardization - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Group Normalization - - Weight Standardization - Paper: - URL: https://arxiv.org/abs/1903.10520 - Title: 'Weight Standardization' - README: configs/gn+ws/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py - Version: v2.0.0 - -Models: - - Name: faster_rcnn_r50_fpn_gn_ws-all_1x_coco - In Collection: Weight Standardization - Config: configs/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py - Metadata: - Training Memory (GB): 5.9 - inference time (ms/im): - - value: 85.47 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth - - - Name: faster_rcnn_r101_fpn_gn_ws-all_1x_coco - In Collection: Weight Standardization - Config: configs/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py - Metadata: - Training Memory (GB): 8.9 - inference time (ms/im): - - value: 111.11 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco/faster_rcnn_r101_fpn_gn_ws-all_1x_coco_20200205-a93b0d75.pth - - - Name: faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco - In Collection: Weight Standardization - Config: configs/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py - Metadata: - Training Memory (GB): 7.0 - inference time (ms/im): - - value: 97.09 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco_20200203-839c5d9d.pth - - - Name: faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco - In Collection: Weight Standardization - Config: configs/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py - Metadata: - Training Memory (GB): 10.8 - inference time (ms/im): - - value: 131.58 - hardware: V100 - backend: PyTorch - batch size: 1 - 
mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco_20200212-27da1bc2.pth - - - Name: mask_rcnn_r50_fpn_gn_ws-all_2x_coco - In Collection: Weight Standardization - Config: configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py - Metadata: - Training Memory (GB): 7.3 - inference time (ms/im): - - value: 95.24 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.6 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco/mask_rcnn_r50_fpn_gn_ws-all_2x_coco_20200226-16acb762.pth - - - Name: mask_rcnn_r101_fpn_gn_ws-all_2x_coco - In Collection: Weight Standardization - Config: configs/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py - Metadata: - Training Memory (GB): 10.3 - inference time (ms/im): - - value: 116.28 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco/mask_rcnn_r101_fpn_gn_ws-all_2x_coco_20200212-ea357cd9.pth - - - Name: mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco - In Collection: Weight Standardization - Config: configs/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py - Metadata: - Training Memory (GB): 8.4 - inference time (ms/im): - - value: 107.53 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco_20200216-649fdb6f.pth - - - Name: mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco - In Collection: Weight Standardization - Config: configs/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py - Metadata: - Training Memory (GB): 12.2 - inference time (ms/im): - - value: 140.85 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco_20200319-33fb95b5.pth - - - Name: mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco - In Collection: Weight Standardization - Config: configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py - Metadata: - Training Memory (GB): 7.3 - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco_20200213-487d1283.pth - - - 
Name: mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco - In Collection: Weight Standardization - Config: configs/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py - Metadata: - Training Memory (GB): 10.3 - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco_20200213-57b5a50f.pth - - - Name: mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco - In Collection: Weight Standardization - Config: configs/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py - Metadata: - Training Memory (GB): 8.4 - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200226-969bcb2c.pth - - - Name: mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco - In Collection: Weight Standardization - Config: configs/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py - Metadata: - Training Memory (GB): 12.2 - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200316-e6cd35ef.pth diff --git a/cv/detection/co-detr/pytorch/configs/gn/README.md b/cv/detection/co-detr/pytorch/configs/gn/README.md deleted file mode 100644 index 9bb28883e8ec6b77c60598f2a6b4ead68055e3a4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# GN - -> [Group Normalization](https://arxiv.org/abs/1803.08494) - - - -## Abstract - -Batch Normalization (BN) is a milestone technique in the development of deep learning, enabling various networks to train. However, normalizing along the batch dimension introduces problems --- BN's error increases rapidly when the batch size becomes smaller, caused by inaccurate batch statistics estimation. This limits BN's usage for training larger models and transferring features to computer vision tasks including detection, segmentation, and video, which require small batches constrained by memory consumption. In this paper, we present Group Normalization (GN) as a simple alternative to BN. GN divides the channels into groups and computes within each group the mean and variance for normalization. GN's computation is independent of batch sizes, and its accuracy is stable in a wide range of batch sizes. On ResNet-50 trained in ImageNet, GN has 10.6% lower error than its BN counterpart when using a batch size of 2; when using typical batch sizes, GN is comparably good with BN and outperforms other normalization variants. Moreover, GN can be naturally transferred from pre-training to fine-tuning. GN can outperform its BN-based counterparts for object detection and segmentation in COCO, and for video classification in Kinetics, showing that GN can effectively replace the powerful BN in a variety of tasks. GN can be easily implemented by a few lines of code in modern libraries. - -
- -
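The abstract above notes that GN takes only a few lines in modern libraries. Below is a minimal PyTorch sketch of the per-group statistics, for illustration only (it is not part of the deleted configs); the built-in `torch.nn.GroupNorm`, which these configs select via `norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)`, additionally carries learnable scale/shift parameters.

```python
import torch

def group_norm(x: torch.Tensor, num_groups: int = 32, eps: float = 1e-5) -> torch.Tensor:
    # x: (N, C, H, W). Channels are split into `num_groups` groups and the
    # mean/variance are computed per sample and per group, so the statistics
    # are independent of the batch size.
    n, c, h, w = x.shape
    g = x.view(n, num_groups, c // num_groups, h, w)
    mean = g.mean(dim=(2, 3, 4), keepdim=True)
    var = ((g - mean) ** 2).mean(dim=(2, 3, 4), keepdim=True)
    g = (g - mean) / torch.sqrt(var + eps)
    return g.view(n, c, h, w)

# Built-in layer (with learnable affine parameters), as used by these configs:
gn = torch.nn.GroupNorm(num_groups=32, num_channels=256)
y = gn(torch.randn(2, 256, 14, 14))
```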
- -## Results and Models - -| Backbone | model | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-----------: | :--------: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN (d) | Mask R-CNN | 2x | 7.1 | 11.0 | 40.2 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206_050355.log.json) | -| R-50-FPN (d) | Mask R-CNN | 3x | 7.1 | - | 40.5 | 36.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_3x_coco/mask_rcnn_r50_fpn_gn-all_3x_coco_20200214-8b23b1e5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_3x_coco/mask_rcnn_r50_fpn_gn-all_3x_coco_20200214_063512.log.json) | -| R-101-FPN (d) | Mask R-CNN | 2x | 9.9 | 9.0 | 41.9 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_2x_coco/mask_rcnn_r101_fpn_gn-all_2x_coco_20200205-d96b1b50.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_2x_coco/mask_rcnn_r101_fpn_gn-all_2x_coco_20200205_234402.log.json) | -| R-101-FPN (d) | Mask R-CNN | 3x | 9.9 | | 42.1 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_3x_coco/mask_rcnn_r101_fpn_gn-all_3x_coco_20200513_181609-0df864f4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_3x_coco/mask_rcnn_r101_fpn_gn-all_3x_coco_20200513_181609.log.json) | -| R-50-FPN (c) | Mask R-CNN | 2x | 7.1 | 10.9 | 40.0 | 36.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco_20200207-20d3e849.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco_20200207_225832.log.json) | -| R-50-FPN (c) | Mask R-CNN | 3x | 7.1 | - | 40.1 | 36.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco_20200225-542aefbc.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco_20200225_235135.log.json) | - -**Notes:** - -- (d) means pretrained model converted from Detectron, and (c) means the contributed model pretrained by [@thangvubk](https://github.com/thangvubk). -- The `3x` schedule is epoch \[28, 34, 36\]. -- **Memory, Train/Inf time is outdated.** - -## Citation - -```latex -@inproceedings{wu2018group, - title={Group Normalization}, - author={Wu, Yuxin and He, Kaiming}, - booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, - year={2018} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py b/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py deleted file mode 100644 index a505ba0e26246772c9d18874a5552831e2efe33f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron/resnet101_gn'))) diff --git a/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py b/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py deleted file mode 100644 index 12a9d17e5592ade405605e3ffb2d4d2fa632d03e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = './mask_rcnn_r101_fpn_gn-all_2x_coco.py' - -# learning policy -lr_config = dict(step=[28, 34]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py b/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py deleted file mode 100644 index 1de7d98e1034f7330552958cae5ef3ad402caed7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py +++ /dev/null @@ -1,49 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - backbone=dict( - norm_cfg=norm_cfg, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron/resnet50_gn')), - neck=dict(norm_cfg=norm_cfg), - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - norm_cfg=norm_cfg), - mask_head=dict(norm_cfg=norm_cfg))) -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - 
train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py b/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py deleted file mode 100644 index f9177196cb91c6bbc6dd4383837819f053b334bb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_gn-all_2x_coco.py' - -# learning policy -lr_config = dict(step=[28, 34]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py b/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py deleted file mode 100644 index 2f430fdab1a825211582b48b0eacab98b55c2167..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - backbone=dict( - norm_cfg=norm_cfg, - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://contrib/resnet50_gn')), - neck=dict(norm_cfg=norm_cfg), - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - norm_cfg=norm_cfg), - mask_head=dict(norm_cfg=norm_cfg))) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py b/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py deleted file mode 100644 index 66834f08ba398e7621aa8c5a3bfe12a646aecde2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py' - -# learning policy -lr_config = dict(step=[28, 34]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/gn/metafile.yml b/cv/detection/co-detr/pytorch/configs/gn/metafile.yml deleted file mode 100644 index 4a1ecae09a7fd5b4ff51dcde677632ad10a2e0d7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/gn/metafile.yml +++ /dev/null @@ -1,162 +0,0 @@ -Collections: - - Name: Group Normalization - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Group Normalization - Paper: - URL: https://arxiv.org/abs/1803.08494 - Title: 'Group Normalization' - README: configs/gn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py - Version: v2.0.0 - -Models: - - Name: mask_rcnn_r50_fpn_gn-all_2x_coco - In Collection: Group Normalization - Config: configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py - Metadata: - Training Memory (GB): 7.1 - inference time (ms/im): - - value: 90.91 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.2 - - Task: Instance Segmentation - 
Dataset: COCO - Metrics: - mask AP: 36.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_2x_coco/mask_rcnn_r50_fpn_gn-all_2x_coco_20200206-8eee02a6.pth - - - Name: mask_rcnn_r50_fpn_gn-all_3x_coco - In Collection: Group Normalization - Config: configs/gn/mask_rcnn_r50_fpn_gn-all_3x_coco.py - Metadata: - Training Memory (GB): 7.1 - inference time (ms/im): - - value: 90.91 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_3x_coco/mask_rcnn_r50_fpn_gn-all_3x_coco_20200214-8b23b1e5.pth - - - Name: mask_rcnn_r101_fpn_gn-all_2x_coco - In Collection: Group Normalization - Config: configs/gn/mask_rcnn_r101_fpn_gn-all_2x_coco.py - Metadata: - Training Memory (GB): 9.9 - inference time (ms/im): - - value: 111.11 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.9 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_2x_coco/mask_rcnn_r101_fpn_gn-all_2x_coco_20200205-d96b1b50.pth - - - Name: mask_rcnn_r101_fpn_gn-all_3x_coco - In Collection: Group Normalization - Config: configs/gn/mask_rcnn_r101_fpn_gn-all_3x_coco.py - Metadata: - Training Memory (GB): 9.9 - inference time (ms/im): - - value: 111.11 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r101_fpn_gn-all_3x_coco/mask_rcnn_r101_fpn_gn-all_3x_coco_20200513_181609-0df864f4.pth - - - Name: mask_rcnn_r50_fpn_gn-all_contrib_2x_coco - In Collection: Group Normalization - Config: configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py - Metadata: - Training Memory (GB): 7.1 - inference time (ms/im): - - value: 91.74 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco/mask_rcnn_r50_fpn_gn-all_contrib_2x_coco_20200207-20d3e849.pth - - - Name: mask_rcnn_r50_fpn_gn-all_contrib_3x_coco - In Collection: Group Normalization - Config: configs/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py - Metadata: - Training Memory (GB): 7.1 - inference time (ms/im): - - value: 91.74 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/gn/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco/mask_rcnn_r50_fpn_gn-all_contrib_3x_coco_20200225-542aefbc.pth diff --git a/cv/detection/co-detr/pytorch/configs/grid_rcnn/README.md 
b/cv/detection/co-detr/pytorch/configs/grid_rcnn/README.md deleted file mode 100644 index e844021782b5d176b43913e61bf46d76f82363d8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/grid_rcnn/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# Grid R-CNN - -> [Grid R-CNN](https://arxiv.org/abs/1811.12030) - - - -## Abstract - -This paper proposes a novel object detection framework named Grid R-CNN, which adopts a grid guided localization mechanism for accurate object detection. Different from the traditional regression based methods, the Grid R-CNN captures the spatial information explicitly and enjoys the position sensitive property of fully convolutional architecture. Instead of using only two independent points, we design a multi-point supervision formulation to encode more clues in order to reduce the impact of inaccurate prediction of specific points. To take the full advantage of the correlation of points in a grid, we propose a two-stage information fusion strategy to fuse feature maps of neighbor grid points. The grid guided localization approach is easy to be extended to different state-of-the-art detection frameworks. Grid R-CNN leads to high quality object localization, and experiments demonstrate that it achieves a 4.1% AP gain at IoU=0.8 and a 10.0% AP gain at IoU=0.9 on COCO benchmark compared to Faster R-CNN with Res50 backbone and FPN architecture. - -Grid R-CNN is a well-performed objection detection framework. It transforms the traditional box offset regression problem into a grid point estimation problem. With the guidance of the grid points, it can obtain high-quality localization results. However, the speed of Grid R-CNN is not so satisfactory. In this technical report we present Grid R-CNN Plus, a better and faster version of Grid R-CNN. We have made several updates that significantly speed up the framework and simultaneously improve the accuracy. On COCO dataset, the Res50-FPN based Grid R-CNN Plus detector achieves an mAP of 40.4%, outperforming the baseline on the same model by 3.0 points with similar inference time. - -
- -
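For illustration, a small sketch of the grid-point formulation described above. This is an assumed simplification: the actual `GridHead` predicts a heatmap per grid point and fuses features of neighbouring points, which is omitted here; only the box-to-grid correspondence is shown.

```python
import numpy as np

def box_to_grid_points(box, grid_points=9):
    # A box (x1, y1, x2, y2) is represented by a uniform k x k grid of points
    # (9 points -> 3 x 3) instead of two independent corner coordinates.
    k = int(np.sqrt(grid_points))
    x1, y1, x2, y2 = box
    xs = np.linspace(x1, x2, k)
    ys = np.linspace(y1, y2, k)
    return np.stack(np.meshgrid(xs, ys), axis=-1).reshape(-1, 2)

def grid_points_to_box(points):
    # The box extent can be recovered from the full point set, so individual
    # point predictions are redundant and the localisation is more robust
    # than two independent corner regressions.
    xs, ys = points[:, 0], points[:, 1]
    return xs.min(), ys.min(), xs.max(), ys.max()

pts = box_to_grid_points((10, 20, 110, 220))
print(grid_points_to_box(pts))  # -> (10.0, 20.0, 110.0, 220.0)
```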
- -## Results and Models - -| Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :---------: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | 2x | 5.1 | 15.0 | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130_221140.log.json) | -| R-101 | 2x | 7.0 | 12.6 | 41.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco/grid_rcnn_r101_fpn_gn-head_2x_coco_20200309-d6eca030.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco/grid_rcnn_r101_fpn_gn-head_2x_coco_20200309_164224.log.json) | -| X-101-32x4d | 2x | 8.3 | 10.8 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco_20200130-d8f0e3ff.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco_20200130_215413.log.json) | -| X-101-64x4d | 2x | 11.3 | 7.7 | 43.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco_20200204-ec76a754.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco_20200204_080641.log.json) | - -**Notes:** - -- All models are trained with 8 GPUs instead of 32 GPUs in the original paper. -- The warming up lasts for 1 epoch and `2x` here indicates 25 epochs. 
- -## Citation - -```latex -@inproceedings{lu2019grid, - title={Grid r-cnn}, - author={Lu, Xin and Li, Buyu and Yue, Yuxin and Li, Quanquan and Yan, Junjie}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - year={2019} -} - -@article{lu2019grid, - title={Grid R-CNN Plus: Faster and Better}, - author={Lu, Xin and Li, Buyu and Yue, Yuxin and Li, Quanquan and Yan, Junjie}, - journal={arXiv preprint arXiv:1906.05688}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py b/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py deleted file mode 100644 index 1bb5889bc0ce4013ae3e6bf87d04f94417e84ff5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py' - -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py b/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py deleted file mode 100644 index 4aa00ece55280697fc67bd727077a8c9a58cfa44..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = ['grid_rcnn_r50_fpn_gn-head_2x_coco.py'] -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[8, 11]) -checkpoint_config = dict(interval=1) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py b/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py deleted file mode 100644 index df63cd5d82a3c622ffad6d044e80ebe5f7c8c122..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py +++ /dev/null @@ -1,131 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' -] -# model settings -model = dict( - type='GridRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - roi_head=dict( - type='GridRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - with_reg=False, - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - 
bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False), - grid_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - grid_head=dict( - type='GridHead', - grid_points=9, - num_convs=8, - in_channels=256, - point_feat_channels=64, - norm_cfg=dict(type='GN', num_groups=36), - loss_grid=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_radius=1, - pos_weight=-1, - max_num_grid=192, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.03, - nms=dict(type='nms', iou_threshold=0.3), - max_per_img=100))) -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=3665, - warmup_ratio=1.0 / 80, - step=[17, 23]) -runner = dict(type='EpochBasedRunner', max_epochs=25) diff --git a/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py b/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py deleted file mode 100644 index 3bc8516e223e3f74b003b5566876706ee8398fb1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py +++ /dev/null @@ -1,24 +0,0 @@ -_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=3665, - warmup_ratio=1.0 / 80, - step=[17, 23]) -runner = dict(type='EpochBasedRunner', max_epochs=25) diff --git a/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py b/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py deleted file mode 100644 index c78f8f6501130a3e4f76269030b92f7f9e29fe07..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - 
base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/grid_rcnn/metafile.yml b/cv/detection/co-detr/pytorch/configs/grid_rcnn/metafile.yml deleted file mode 100644 index d1aa85137df3c2d03ad98a5ea6f2990b4c78e15d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/grid_rcnn/metafile.yml +++ /dev/null @@ -1,101 +0,0 @@ -Collections: - - Name: Grid R-CNN - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RPN - - Dilated Convolution - - ResNet - - RoIAlign - Paper: - URL: https://arxiv.org/abs/1906.05688 - Title: 'Grid R-CNN' - README: configs/grid_rcnn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/grid_rcnn.py#L6 - Version: v2.0.0 - -Models: - - Name: grid_rcnn_r50_fpn_gn-head_2x_coco - In Collection: Grid R-CNN - Config: configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py - Metadata: - Training Memory (GB): 5.1 - inference time (ms/im): - - value: 66.67 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco/grid_rcnn_r50_fpn_gn-head_2x_coco_20200130-6cca8223.pth - - - Name: grid_rcnn_r101_fpn_gn-head_2x_coco - In Collection: Grid R-CNN - Config: configs/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco.py - Metadata: - Training Memory (GB): 7.0 - inference time (ms/im): - - value: 79.37 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_r101_fpn_gn-head_2x_coco/grid_rcnn_r101_fpn_gn-head_2x_coco_20200309-d6eca030.pth - - - Name: grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco - In Collection: Grid R-CNN - Config: configs/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py - Metadata: - Training Memory (GB): 8.3 - inference time (ms/im): - - value: 92.59 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco_20200130-d8f0e3ff.pth - - - Name: grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco - In Collection: Grid R-CNN - Config: configs/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py - Metadata: - Training Memory (GB): 11.3 - inference time (ms/im): - - value: 129.87 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/grid_rcnn/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco/grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco_20200204-ec76a754.pth diff --git a/cv/detection/co-detr/pytorch/configs/groie/README.md b/cv/detection/co-detr/pytorch/configs/groie/README.md deleted file mode 100644 index 
126773fecc6caad055d5c7dc64fcc47943665d5f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/groie/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# GRoIE - -> [A novel Region of Interest Extraction Layer for Instance Segmentation](https://arxiv.org/abs/2004.13665) - - - -## Abstract - -Given the wide diffusion of deep neural network architectures for computer vision tasks, several new applications are nowadays more and more feasible. Among them, a particular attention has been recently given to instance segmentation, by exploiting the results achievable by two-stage networks (such as Mask R-CNN or Faster R-CNN), derived from R-CNN. In these complex architectures, a crucial role is played by the Region of Interest (RoI) extraction layer, devoted to extracting a coherent subset of features from a single Feature Pyramid Network (FPN) layer attached on top of a backbone. -This paper is motivated by the need to overcome the limitations of existing RoI extractors which select only one (the best) layer from FPN. Our intuition is that all the layers of FPN retain useful information. Therefore, the proposed layer (called Generic RoI Extractor - GRoIE) introduces non-local building blocks and attention mechanisms to boost the performance. -A comprehensive ablation study at component level is conducted to find the best set of algorithms and parameters for the GRoIE layer. Moreover, GRoIE can be integrated seamlessly with every two-stage architecture for both object detection and instance segmentation tasks. Therefore, the improvements brought about by the use of GRoIE in different state-of-the-art architectures are also evaluated. The proposed layer leads up to gain a 1.1% AP improvement on bounding box detection and 1.7% AP improvement on instance segmentation. - -
- -
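A minimal sketch of the aggregation idea behind `GenericRoIExtractor`, as suggested by the config fields in this directory: every RoI is pooled from *all* FPN levels and the results are summed, instead of assigning each RoI to a single level. The pre-processing conv and attention post-processing (`pre_cfg`/`post_cfg`) are deliberately left out, so this is an illustrative approximation rather than the maintained implementation.

```python
import torch
from torchvision.ops import roi_align

def groie_like_pool(fpn_feats, rois, out_size=7, strides=(4, 8, 16, 32)):
    # fpn_feats: list of (N, C, H_l, W_l) feature maps, one per FPN level.
    # rois: (K, 5) tensor in (batch_idx, x1, y1, x2, y2) image-space format.
    pooled = 0
    for feat, stride in zip(fpn_feats, strides):
        pooled = pooled + roi_align(
            feat, rois, output_size=out_size,
            spatial_scale=1.0 / stride, sampling_ratio=2)
    return pooled  # (K, C, out_size, out_size), aggregated over all levels

feats = [torch.randn(1, 256, 200 // s, 200 // s) for s in (4, 8, 16, 32)]
rois = torch.tensor([[0., 10., 10., 100., 100.]])
print(groie_like_pool(feats, rois).shape)  # torch.Size([1, 256, 7, 7])
```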
- -## Introduction - -By Leonardo Rossi, Akbar Karimi and Andrea Prati from -[IMPLab](http://implab.ce.unipr.it/). - -We provide configs to reproduce the results in the paper for -"*A novel Region of Interest Extraction Layer for Instance Segmentation*" -on COCO object detection. - -This paper is motivated by the need to overcome to the limitations of existing -RoI extractors which select only one (the best) layer from FPN. - -Our intuition is that all the layers of FPN retain useful information. - -Therefore, the proposed layer (called Generic RoI Extractor - **GRoIE**) -introduces non-local building blocks and attention mechanisms to boost the -performance. - -## Results and Models - -The results on COCO 2017 minival (5k images) are shown in the below table. - -### Application of GRoIE to different architectures - -| Backbone | Method | Lr schd | box AP | mask AP | Config | Download | -| :-------: | :-------------: | :-----: | :----: | :-----: | :---------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | Faster Original | 1x | 37.4 | | [config](../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | -| R-50-FPN | + GRoIE | 1x | 38.3 | | [config](./faster_rcnn_r50_fpn_groie_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715.log.json) | -| R-50-FPN | Grid R-CNN | 1x | 39.1 | | [config](./grid_rcnn_r50_fpn_gn-head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco/grid_rcnn_r50_fpn_gn-head_groie_1x_coco_20200605_202059-4b75d86f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco/grid_rcnn_r50_fpn_gn-head_groie_1x_coco_20200605_202059.log.json) | -| R-50-FPN | + GRoIE | 1x | | | [config](./grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py) | | -| R-50-FPN | Mask R-CNN | 1x | 38.2 | 34.7 | [config](../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) | -| R-50-FPN | + GRoIE | 1x | 39.0 | 36.0 | [config](./mask_rcnn_r50_fpn_groie_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_groie_1x_coco/mask_rcnn_r50_fpn_groie_1x_coco_20200604_211715-50d90c74.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_groie_1x_coco/mask_rcnn_r50_fpn_groie_1x_coco_20200604_211715.log.json) | -| R-50-FPN | GC-Net | 1x | 40.7 | 36.5 | [config](../gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202-50b90e5c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200202_085547.log.json) | -| R-50-FPN | + GRoIE | 1x | 41.0 | 37.8 | [config](./mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth) | -| R-101-FPN | GC-Net | 1x | 42.2 | 37.8 | [config](../gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206-8407a3f0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco_20200206_142508.log.json) | -| R-101-FPN | + GRoIE | 1x | 42.6 | 38.7 | [config](./mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507-8daae01c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507.log.json) | - -## Citation - -If you use this work or benchmark in your research, please cite this project. - -```latex -@inproceedings{rossi2021novel, - title={A novel region of interest extraction layer for instance segmentation}, - author={Rossi, Leonardo and Karimi, Akbar and Prati, Andrea}, - booktitle={2020 25th International Conference on Pattern Recognition (ICPR)}, - pages={2203--2209}, - year={2021}, - organization={IEEE} -} -``` - -## Contact - -The implementation of GRoIE is currently maintained by -[Leonardo Rossi](https://github.com/hachreak/). 
diff --git a/cv/detection/co-detr/pytorch/configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py b/cv/detection/co-detr/pytorch/configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py deleted file mode 100644 index 0fc528bfd49bfc9a262692db78a5f94b46c285af..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py +++ /dev/null @@ -1,25 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -# model settings -model = dict( - roi_head=dict( - bbox_roi_extractor=dict( - type='GenericRoIExtractor', - aggregation='sum', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - pre_cfg=dict( - type='ConvModule', - in_channels=256, - out_channels=256, - kernel_size=5, - padding=2, - inplace=False, - ), - post_cfg=dict( - type='GeneralizedAttention', - in_channels=256, - spatial_range=-1, - num_heads=6, - attention_type='0100', - kv_stride=2)))) diff --git a/cv/detection/co-detr/pytorch/configs/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py b/cv/detection/co-detr/pytorch/configs/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py deleted file mode 100644 index 8e4b4ab23513a97adf4471ab3b33ca8abdb6dbe5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py +++ /dev/null @@ -1,45 +0,0 @@ -_base_ = '../grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py' -# model settings -model = dict( - roi_head=dict( - bbox_roi_extractor=dict( - type='GenericRoIExtractor', - aggregation='sum', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - pre_cfg=dict( - type='ConvModule', - in_channels=256, - out_channels=256, - kernel_size=5, - padding=2, - inplace=False, - ), - post_cfg=dict( - type='GeneralizedAttention', - in_channels=256, - spatial_range=-1, - num_heads=6, - attention_type='0100', - kv_stride=2)), - grid_roi_extractor=dict( - type='GenericRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - pre_cfg=dict( - type='ConvModule', - in_channels=256, - out_channels=256, - kernel_size=5, - padding=2, - inplace=False, - ), - post_cfg=dict( - type='GeneralizedAttention', - in_channels=256, - spatial_range=-1, - num_heads=6, - attention_type='0100', - kv_stride=2)))) diff --git a/cv/detection/co-detr/pytorch/configs/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py b/cv/detection/co-detr/pytorch/configs/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py deleted file mode 100644 index 8b83722197c69a51907f43bcb05883deedc37f0c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py +++ /dev/null @@ -1,45 +0,0 @@ -_base_ = '../gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py' -# model settings -model = dict( - roi_head=dict( - bbox_roi_extractor=dict( - type='GenericRoIExtractor', - aggregation='sum', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - pre_cfg=dict( - type='ConvModule', - in_channels=256, - out_channels=256, - kernel_size=5, - padding=2, - inplace=False, - ), - post_cfg=dict( - type='GeneralizedAttention', - in_channels=256, - spatial_range=-1, - num_heads=6, - attention_type='0100', - kv_stride=2)), - 
mask_roi_extractor=dict( - type='GenericRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - pre_cfg=dict( - type='ConvModule', - in_channels=256, - out_channels=256, - kernel_size=5, - padding=2, - inplace=False, - ), - post_cfg=dict( - type='GeneralizedAttention', - in_channels=256, - spatial_range=-1, - num_heads=6, - attention_type='0100', - kv_stride=2)))) diff --git a/cv/detection/co-detr/pytorch/configs/groie/mask_rcnn_r50_fpn_groie_1x_coco.py b/cv/detection/co-detr/pytorch/configs/groie/mask_rcnn_r50_fpn_groie_1x_coco.py deleted file mode 100644 index 81dfb4873bdb587626200a3007dc4d57a92c0fd9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/groie/mask_rcnn_r50_fpn_groie_1x_coco.py +++ /dev/null @@ -1,45 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -# model settings -model = dict( - roi_head=dict( - bbox_roi_extractor=dict( - type='GenericRoIExtractor', - aggregation='sum', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - pre_cfg=dict( - type='ConvModule', - in_channels=256, - out_channels=256, - kernel_size=5, - padding=2, - inplace=False, - ), - post_cfg=dict( - type='GeneralizedAttention', - in_channels=256, - spatial_range=-1, - num_heads=6, - attention_type='0100', - kv_stride=2)), - mask_roi_extractor=dict( - type='GenericRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - pre_cfg=dict( - type='ConvModule', - in_channels=256, - out_channels=256, - kernel_size=5, - padding=2, - inplace=False, - ), - post_cfg=dict( - type='GeneralizedAttention', - in_channels=256, - spatial_range=-1, - num_heads=6, - attention_type='0100', - kv_stride=2)))) diff --git a/cv/detection/co-detr/pytorch/configs/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py b/cv/detection/co-detr/pytorch/configs/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py deleted file mode 100644 index 852c5ca7c5c4ba04f6a5f7dd6dbaf6b2c357a2fa..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py +++ /dev/null @@ -1,45 +0,0 @@ -_base_ = '../gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py' -# model settings -model = dict( - roi_head=dict( - bbox_roi_extractor=dict( - type='GenericRoIExtractor', - aggregation='sum', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - pre_cfg=dict( - type='ConvModule', - in_channels=256, - out_channels=256, - kernel_size=5, - padding=2, - inplace=False, - ), - post_cfg=dict( - type='GeneralizedAttention', - in_channels=256, - spatial_range=-1, - num_heads=6, - attention_type='0100', - kv_stride=2)), - mask_roi_extractor=dict( - type='GenericRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32], - pre_cfg=dict( - type='ConvModule', - in_channels=256, - out_channels=256, - kernel_size=5, - padding=2, - inplace=False, - ), - post_cfg=dict( - type='GeneralizedAttention', - in_channels=256, - spatial_range=-1, - num_heads=6, - attention_type='0100', - kv_stride=2)))) diff --git a/cv/detection/co-detr/pytorch/configs/groie/metafile.yml b/cv/detection/co-detr/pytorch/configs/groie/metafile.yml 
deleted file mode 100644 index badf53a7cd6413f66c5a7ac705ec04d38965124e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/groie/metafile.yml +++ /dev/null @@ -1,94 +0,0 @@ -Collections: - - Name: GRoIE - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Generic RoI Extractor - - FPN - - RPN - - ResNet - - RoIAlign - Paper: - URL: https://arxiv.org/abs/2004.13665 - Title: 'A novel Region of Interest Extraction Layer for Instance Segmentation' - README: configs/groie/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/roi_heads/roi_extractors/groie.py#L15 - Version: v2.1.0 - -Models: - - Name: faster_rcnn_r50_fpn_groie_1x_coco - In Collection: GRoIE - Config: configs/groie/faster_rcnn_r50_fpn_groie_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/faster_rcnn_r50_fpn_groie_1x_coco/faster_rcnn_r50_fpn_groie_1x_coco_20200604_211715-66ee9516.pth - - - Name: grid_rcnn_r50_fpn_gn-head_groie_1x_coco - In Collection: GRoIE - Config: configs/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/grid_rcnn_r50_fpn_gn-head_groie_1x_coco/grid_rcnn_r50_fpn_gn-head_groie_1x_coco_20200605_202059-4b75d86f.pth - - - Name: mask_rcnn_r50_fpn_groie_1x_coco - In Collection: GRoIE - Config: configs/groie/mask_rcnn_r50_fpn_groie_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_groie_1x_coco/mask_rcnn_r50_fpn_groie_1x_coco_20200604_211715-50d90c74.pth - - - Name: mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco - In Collection: GRoIE - Config: configs/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200604_211715-42eb79e1.pth - - - Name: mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco - In Collection: GRoIE - Config: configs/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.6 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/groie/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco_20200607_224507-8daae01c.pth diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/README.md b/cv/detection/co-detr/pytorch/configs/guided_anchoring/README.md deleted file mode 100644 index 563e43f0a53f8355036b3fdb2ba4243528ff1cc1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/README.md +++ /dev/null @@ 
-1,59 +0,0 @@ -# Guided Anchoring - -> [Region Proposal by Guided Anchoring](https://arxiv.org/abs/1901.03278) - - - -## Abstract - -Region anchors are the cornerstone of modern object detection techniques. State-of-the-art detectors mostly rely on a dense anchoring scheme, where anchors are sampled uniformly over the spatial domain with a predefined set of scales and aspect ratios. In this paper, we revisit this foundational stage. Our study shows that it can be done much more effectively and efficiently. Specifically, we present an alternative scheme, named Guided Anchoring, which leverages semantic features to guide the anchoring. The proposed method jointly predicts the locations where the centers of objects of interest are likely to exist as well as the scales and aspect ratios at different locations. On top of predicted anchor shapes, we mitigate the feature inconsistency with a feature adaptation module. We also study the use of high-quality proposals to improve detection performance. The anchoring scheme can be seamlessly integrated into proposal methods and detectors. With Guided Anchoring, we achieve 9.1% higher recall on MS COCO with 90% fewer anchors than the RPN baseline. We also adopt Guided Anchoring in Fast R-CNN, Faster R-CNN and RetinaNet, respectively improving the detection mAP by 2.2%, 2.7% and 1.2%. - -
- -
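To make the scheme above concrete, here is a minimal, illustrative PyTorch sketch of the two extra branches Guided Anchoring attaches to each FPN level: a location branch that predicts where anchor centers should be placed, a shape branch that predicts anchor width/height, and a feature adaptation step. This is only a simplified approximation of the `GARPNHead`/`GARetinaHead` implementations configured below; in particular, the real feature adaptation uses a deformable convolution driven by the shape prediction, which is replaced here by a plain 3x3 convolution, and all module and variable names are made up for illustration.

```python
import torch
import torch.nn as nn

class GuidedAnchorBranch(nn.Module):
    """Simplified per-level Guided Anchoring branches (illustrative only)."""

    def __init__(self, in_channels=256, loc_filter_thr=0.01):
        super().__init__()
        self.loc_filter_thr = loc_filter_thr
        # location branch: probability that an object center falls at each cell
        self.conv_loc = nn.Conv2d(in_channels, 1, kernel_size=1)
        # shape branch: predicted (dw, dh) of the anchor at each cell
        self.conv_shape = nn.Conv2d(in_channels, 2, kernel_size=1)
        # stand-in for the feature adaptation module (a plain conv here,
        # a shape-conditioned deformable conv in the actual method)
        self.feature_adaption = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1)

    def forward(self, feat):
        loc_pred = torch.sigmoid(self.conv_loc(feat))   # where to place anchors
        shape_pred = self.conv_shape(feat)               # how large the anchors should be
        adapted_feat = self.feature_adaption(feat)       # features fed to the detection head
        # keep only locations whose predicted probability exceeds loc_filter_thr,
        # which is why far fewer anchors than a dense scheme are produced
        keep_mask = loc_pred.squeeze(1) >= self.loc_filter_thr
        return loc_pred, shape_pred, adapted_feat, keep_mask

# toy usage on a single FPN level (e.g. stride 8 of an 800x1216 input)
feat = torch.randn(1, 256, 100, 152)
loc, shape, adapted, keep = GuidedAnchorBranch()(feat)
print(loc.shape, shape.shape, adapted.shape, int(keep.sum()))
```

The `loc_filter_thr=0.01` default mirrors the value used in the configs below; only the surviving locations are paired with the predicted shapes to form the sparse anchor set.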
- -## Results and Models - -The results on COCO 2017 val is shown in the below table. (results on test-dev are usually slightly higher than val). - -| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | AR 1000 | Config | Download | -| :----: | :-------------: | :-----: | :-----: | :------: | :------------: | :-----: | :-----------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| GA-RPN | R-50-FPN | caffe | 1x | 5.3 | 15.8 | 68.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco/ga_rpn_r50_caffe_fpn_1x_coco_20200531-899008a6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco/ga_rpn_r50_caffe_fpn_1x_coco_20200531_011819.log.json) | -| GA-RPN | R-101-FPN | caffe | 1x | 7.3 | 13.0 | 69.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco/ga_rpn_r101_caffe_fpn_1x_coco_20200531-ca9ba8fb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco/ga_rpn_r101_caffe_fpn_1x_coco_20200531_011812.log.json) | -| GA-RPN | X-101-32x4d-FPN | pytorch | 1x | 8.5 | 10.0 | 70.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco/ga_rpn_x101_32x4d_fpn_1x_coco_20200220-c28d1b18.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco/ga_rpn_x101_32x4d_fpn_1x_coco_20200220_221326.log.json) | -| GA-RPN | X-101-64x4d-FPN | pytorch | 1x | 7.1 | 7.5 | 71.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco/ga_rpn_x101_64x4d_fpn_1x_coco_20200225-3c6e1aa2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco/ga_rpn_x101_64x4d_fpn_1x_coco_20200225_152704.log.json) | - -| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :------------: | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| GA-Faster RCNN | R-50-FPN | caffe | 1x | 5.5 | | 39.6 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718.log.json) | -| GA-Faster RCNN | R-101-FPN | caffe | 1x | 7.5 | | 41.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco/ga_faster_r101_caffe_fpn_1x_coco_bbox_mAP-0.415_20200505_115528-fb82e499.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco/ga_faster_r101_caffe_fpn_1x_coco_20200505_115528.log.json) | -| GA-Faster RCNN | X-101-32x4d-FPN | pytorch | 1x | 8.7 | 9.7 | 43.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco/ga_faster_x101_32x4d_fpn_1x_coco_20200215-1ded9da3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco/ga_faster_x101_32x4d_fpn_1x_coco_20200215_184547.log.json) | -| GA-Faster RCNN | X-101-64x4d-FPN | pytorch | 1x | 11.8 | 7.3 | 43.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco/ga_faster_x101_64x4d_fpn_1x_coco_20200215-0fa7bde7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco/ga_faster_x101_64x4d_fpn_1x_coco_20200215_104455.log.json) | -| GA-RetinaNet | R-50-FPN | caffe | 1x | 3.5 | 16.8 | 36.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020_225450.log.json) | -| GA-RetinaNet | R-101-FPN | caffe | 1x | 5.5 | 12.9 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco/ga_retinanet_r101_caffe_fpn_1x_coco_20200531-6266453c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco/ga_retinanet_r101_caffe_fpn_1x_coco_20200531_012847.log.json) | -| GA-RetinaNet | X-101-32x4d-FPN | pytorch | 1x | 6.9 | 10.6 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco/ga_retinanet_x101_32x4d_fpn_1x_coco_20200219-40c56caa.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco/ga_retinanet_x101_32x4d_fpn_1x_coco_20200219_223025.log.json) | -| GA-RetinaNet | X-101-64x4d-FPN | pytorch | 1x | 9.9 | 7.7 | 41.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco/ga_retinanet_x101_64x4d_fpn_1x_coco_20200226-ef9f7f1f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco/ga_retinanet_x101_64x4d_fpn_1x_coco_20200226_221123.log.json) | - -- In the Guided Anchoring paper, `score_thr` is set to 0.001 in Fast/Faster RCNN and 0.05 in RetinaNet for both baselines and Guided Anchoring. - -- Performance on COCO test-dev benchmark are shown as follows. - -| Method | Backbone | Style | Lr schd | Aug Train | Score thr | AP | AP_50 | AP_75 | AP_small | AP_medium | AP_large | Download | -| :------------: | :-------: | :---: | :-----: | :-------: | :-------: | :-: | :---: | :---: | :------: | :-------: | :------: | :------: | -| GA-Faster RCNN | R-101-FPN | caffe | 1x | F | 0.05 | | | | | | | | -| GA-Faster RCNN | R-101-FPN | caffe | 1x | F | 0.001 | | | | | | | | -| GA-RetinaNet | R-101-FPN | caffe | 1x | F | 0.05 | | | | | | | | -| GA-RetinaNet | R-101-FPN | caffe | 2x | T | 0.05 | | | | | | | | - -## Citation - -We provide config files to reproduce the results in the CVPR 2019 paper for [Region Proposal by Guided Anchoring](https://arxiv.org/abs/1901.03278). - -```latex -@inproceedings{wang2019region, - title={Region Proposal by Guided Anchoring}, - author={Jiaqi Wang and Kai Chen and Shuo Yang and Chen Change Loy and Dahua Lin}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 8fc203c6ed2b31b4672ae4525c65afbcdc6579ed..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_fast_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,65 +0,0 @@ -_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - roi_head=dict( - bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), - # model training and testing settings - train_cfg=dict( - rcnn=dict( - assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), - sampler=dict(num=256))), - test_cfg=dict(rcnn=dict(score_thr=1e-3))) -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadProposals', num_max_proposals=300), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', 
keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadProposals', num_max_proposals=None), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img', 'proposals']), - ]) -] -data = dict( - train=dict( - proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl', - pipeline=train_pipeline), - val=dict( - proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl', - pipeline=test_pipeline), - test=dict( - proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl', - pipeline=test_pipeline)) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py deleted file mode 100644 index a40e7c6fd7e2355081e7a31b40a893314e4eb303..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './ga_faster_r50_caffe_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index b0add92c398b62aa8fd2141f595cf0941f55d421..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,65 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py' -model = dict( - rpn_head=dict( - _delete_=True, - type='GARPNHead', - in_channels=256, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=8, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[8], - strides=[4, 8, 16, 32, 64]), - anchor_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.07, 0.07, 0.14, 0.14]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.07, 0.07, 0.11, 0.11]), - loc_filter_thr=0.01, - loss_loc=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), - roi_head=dict( - bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - ga_assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - ignore_iof_thr=-1), - ga_sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - center_ratio=0.2, - ignore_ratio=0.5), - rpn_proposal=dict(nms_post=1000, 
max_per_img=300), - rcnn=dict( - assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), - sampler=dict(type='RandomSampler', num=256))), - test_cfg=dict( - rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3))) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_r50_fpn_1x_coco.py deleted file mode 100644 index e3d8238956f4d4874de1fde662a1a3ded1918189..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_r50_fpn_1x_coco.py +++ /dev/null @@ -1,65 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - rpn_head=dict( - _delete_=True, - type='GARPNHead', - in_channels=256, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=8, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[8], - strides=[4, 8, 16, 32, 64]), - anchor_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.07, 0.07, 0.14, 0.14]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.07, 0.07, 0.11, 0.11]), - loc_filter_thr=0.01, - loss_loc=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), - roi_head=dict( - bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - ga_assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - ignore_iof_thr=-1), - ga_sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - center_ratio=0.2, - ignore_ratio=0.5), - rpn_proposal=dict(nms_post=1000, max_per_img=300), - rcnn=dict( - assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), - sampler=dict(type='RandomSampler', num=256))), - test_cfg=dict( - rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3))) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index f1dda9495c2595b2743e3056abf65a1795ea5971..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './ga_faster_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py 
b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index fb9e2afc9cff8c8c94b2ace544785a026a61f45e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './ga_faster_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py deleted file mode 100644 index 1b1cccd0dd15123c35044367001e465b691f6f24..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './ga_retinanet_r50_caffe_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_mstrain_2x.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_mstrain_2x.py deleted file mode 100644 index 260895b401106c91a6133a054260ab94e92c75c5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_mstrain_2x.py +++ /dev/null @@ -1,169 +0,0 @@ -_base_ = '../_base_/default_runtime.py' - -# model settings -model = dict( - type='RetinaNet', - backbone=dict( - type='ResNet', - depth=101, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs=True, - num_outs=5), - bbox_head=dict( - type='GARetinaHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[4], - strides=[8, 16, 32, 64, 128]), - anchor_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loc_filter_thr=0.01, - loss_loc=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0))) -# training and testing settings -train_cfg = dict( - ga_assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0.4, - ignore_iof_thr=-1), - ga_sampler=dict( - type='RandomSampler', - num=256, - 
pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - center_ratio=0.2, - ignore_ratio=0.5, - debug=False) -test_cfg = dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100) -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 480), (1333, 960)], - keep_ratio=True, - multiscale_mode='range'), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='bbox') -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=1.0 / 3, - step=[16, 22]) -checkpoint_config = dict(interval=1) -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 33512011abb612ff5c762e75ee4492b382902fa4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,62 +0,0 @@ -_base_ = '../retinanet/retinanet_r50_caffe_fpn_1x_coco.py' -model = dict( - bbox_head=dict( - _delete_=True, - type='GARetinaHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[4], - strides=[8, 16, 32, 64, 128]), - anchor_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], 
- target_stds=[1.0, 1.0, 1.0, 1.0]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loc_filter_thr=0.01, - loss_loc=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)), - # training and testing settings - train_cfg=dict( - ga_assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0.4, - ignore_iof_thr=-1), - ga_sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0), - center_ratio=0.2, - ignore_ratio=0.5)) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py deleted file mode 100644 index 769472352d06a8f2c30d73ae1f57c393f77adfa2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py +++ /dev/null @@ -1,62 +0,0 @@ -_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' -model = dict( - bbox_head=dict( - _delete_=True, - type='GARetinaHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[4], - strides=[8, 16, 32, 64, 128]), - anchor_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loc_filter_thr=0.01, - loss_loc=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)), - # training and testing settings - train_cfg=dict( - ga_assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0.4, - ignore_iof_thr=-1), - ga_sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0), - center_ratio=0.2, - ignore_ratio=0.5)) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index c5eb34f5fa2d1061c7eb4a3adfb8b7e1ede51b55..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './ga_retinanet_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, 
- groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index 5c69a6f848f278b0b81082a8f38b01e154db0e84..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './ga_retinanet_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py deleted file mode 100644 index 039703ec6635f6665be16919baf157511c7b3431..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = './ga_rpn_r50_caffe_fpn_1x_coco.py' -# model settings -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 7830894af1b5824d9ff442f6aa90f6e68c9ef29c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,58 +0,0 @@ -_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py' -model = dict( - rpn_head=dict( - _delete_=True, - type='GARPNHead', - in_channels=256, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=8, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[8], - strides=[4, 8, 16, 32, 64]), - anchor_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.07, 0.07, 0.14, 0.14]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.07, 0.07, 0.11, 0.11]), - loc_filter_thr=0.01, - loss_loc=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), - # model training and testing settings - train_cfg=dict( - rpn=dict( - ga_assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - ignore_iof_thr=-1), - ga_sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - center_ratio=0.2, - ignore_ratio=0.5)), - test_cfg=dict(rpn=dict(nms_post=1000))) 
-optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_r50_fpn_1x_coco.py deleted file mode 100644 index 27ab3e733bda1fb1c7c50cbd0f26597650b4c2e7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,58 +0,0 @@ -_base_ = '../rpn/rpn_r50_fpn_1x_coco.py' -model = dict( - rpn_head=dict( - _delete_=True, - type='GARPNHead', - in_channels=256, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=8, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[8], - strides=[4, 8, 16, 32, 64]), - anchor_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.07, 0.07, 0.14, 0.14]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.07, 0.07, 0.11, 0.11]), - loc_filter_thr=0.01, - loss_loc=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), - # model training and testing settings - train_cfg=dict( - rpn=dict( - ga_assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - ignore_iof_thr=-1), - ga_sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - center_ratio=0.2, - ignore_ratio=0.5)), - test_cfg=dict(rpn=dict(nms_post=1000))) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index cccc985f9eb2c3e9c06f91af6107ec909aefd9d1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './ga_rpn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index 4e134d23ad428eaca19bc8069325a9545683cd8e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './ga_rpn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', 
checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/guided_anchoring/metafile.yml b/cv/detection/co-detr/pytorch/configs/guided_anchoring/metafile.yml deleted file mode 100644 index f39d1838e718b57ae991a954300296af86066eba..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/guided_anchoring/metafile.yml +++ /dev/null @@ -1,246 +0,0 @@ -Collections: - - Name: Guided Anchoring - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - Guided Anchoring - - ResNet - Paper: - URL: https://arxiv.org/abs/1901.03278 - Title: 'Region Proposal by Guided Anchoring' - README: configs/guided_anchoring/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/dense_heads/ga_retina_head.py#L10 - Version: v2.0.0 - -Models: - - Name: ga_rpn_r50_caffe_fpn_1x_coco - In Collection: Guided Anchoring - Config: configs/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 5.3 - inference time (ms/im): - - value: 63.29 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Region Proposal - Dataset: COCO - Metrics: - AR@1000: 68.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r50_caffe_fpn_1x_coco/ga_rpn_r50_caffe_fpn_1x_coco_20200531-899008a6.pth - - - Name: ga_rpn_r101_caffe_fpn_1x_coco - In Collection: Guided Anchoring - Config: configs/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.3 - inference time (ms/im): - - value: 76.92 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Region Proposal - Dataset: COCO - Metrics: - AR@1000: 69.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_r101_caffe_fpn_1x_coco/ga_rpn_r101_caffe_fpn_1x_coco_20200531-ca9ba8fb.pth - - - Name: ga_rpn_x101_32x4d_fpn_1x_coco - In Collection: Guided Anchoring - Config: configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 8.5 - inference time (ms/im): - - value: 100 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Region Proposal - Dataset: COCO - Metrics: - AR@1000: 70.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco/ga_rpn_x101_32x4d_fpn_1x_coco_20200220-c28d1b18.pth - - - Name: ga_rpn_x101_64x4d_fpn_1x_coco - In Collection: Guided Anchoring - Config: configs/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.1 - inference time (ms/im): - - value: 133.33 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Region Proposal - Dataset: COCO - Metrics: - AR@1000: 70.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_rpn_x101_64x4d_fpn_1x_coco/ga_rpn_x101_64x4d_fpn_1x_coco_20200225-3c6e1aa2.pth - - - Name: ga_faster_r50_caffe_fpn_1x_coco - In Collection: Guided Anchoring - Config: configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 5.5 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.6 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco/ga_faster_r50_caffe_fpn_1x_coco_20200702_000718-a11ccfe6.pth - - - Name: ga_faster_r101_caffe_fpn_1x_coco - In Collection: Guided Anchoring - Config: configs/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.5 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_r101_caffe_fpn_1x_coco/ga_faster_r101_caffe_fpn_1x_coco_bbox_mAP-0.415_20200505_115528-fb82e499.pth - - - Name: ga_faster_x101_32x4d_fpn_1x_coco - In Collection: Guided Anchoring - Config: configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 8.7 - inference time (ms/im): - - value: 103.09 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_32x4d_fpn_1x_coco/ga_faster_x101_32x4d_fpn_1x_coco_20200215-1ded9da3.pth - - - Name: ga_faster_x101_64x4d_fpn_1x_coco - In Collection: Guided Anchoring - Config: configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 11.8 - inference time (ms/im): - - value: 136.99 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco/ga_faster_x101_64x4d_fpn_1x_coco_20200215-0fa7bde7.pth - - - Name: ga_retinanet_r50_caffe_fpn_1x_coco - In Collection: Guided Anchoring - Config: configs/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 3.5 - inference time (ms/im): - - value: 59.52 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 36.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r50_caffe_fpn_1x_coco/ga_retinanet_r50_caffe_fpn_1x_coco_20201020-39581c6f.pth - - - Name: ga_retinanet_r101_caffe_fpn_1x_coco - In Collection: Guided Anchoring - Config: configs/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 5.5 - inference time (ms/im): - - value: 77.52 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_r101_caffe_fpn_1x_coco/ga_retinanet_r101_caffe_fpn_1x_coco_20200531-6266453c.pth - - - Name: ga_retinanet_x101_32x4d_fpn_1x_coco - In Collection: Guided Anchoring - Config: configs/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.9 - inference time (ms/im): - - value: 94.34 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.5 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_32x4d_fpn_1x_coco/ga_retinanet_x101_32x4d_fpn_1x_coco_20200219-40c56caa.pth - - - Name: ga_retinanet_x101_64x4d_fpn_1x_coco - In Collection: Guided Anchoring - Config: configs/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 9.9 - inference time (ms/im): - - value: 129.87 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/guided_anchoring/ga_retinanet_x101_64x4d_fpn_1x_coco/ga_retinanet_x101_64x4d_fpn_1x_coco_20200226-ef9f7f1f.pth diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/README.md b/cv/detection/co-detr/pytorch/configs/hrnet/README.md deleted file mode 100644 index e340c784e1c4d4fc0fa318a2f10043d926a3da0d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/README.md +++ /dev/null @@ -1,101 +0,0 @@ -# HRNet - -> [Deep High-Resolution Representation Learning for Human Pose Estimation](https://arxiv.org/abs/1902.09212) - - - -## Abstract - -This is an official pytorch implementation of Deep High-Resolution Representation Learning for Human Pose Estimation. In this work, we are interested in the human pose estimation problem with a focus on learning reliable high-resolution representations. Most existing methods recover high-resolution representations from low-resolution representations produced by a high-to-low resolution network. Instead, our proposed network maintains high-resolution representations through the whole process. We start from a high-resolution subnetwork as the first stage, gradually add high-to-low resolution subnetworks one by one to form more stages, and connect the multi-resolution subnetworks in parallel. We conduct repeated multi-scale fusions such that each of the high-to-low resolution representations receives information from other parallel representations over and over, leading to rich high-resolution representations. As a result, the predicted keypoint heatmap is potentially more accurate and spatially more precise. We empirically demonstrate the effectiveness of our network through the superior pose estimation results over two benchmark datasets: the COCO keypoint detection dataset and the MPII Human Pose dataset. - -High-resolution representation learning plays an essential role in many vision problems, e.g., pose estimation and semantic segmentation. The high-resolution network (HRNet), recently developed for human pose estimation, maintains high-resolution representations through the whole process by connecting high-to-low resolution convolutions in parallel and produces strong high-resolution representations by repeatedly conducting fusions across parallel convolutions. -In this paper, we conduct a further study on high-resolution representations by introducing a simple yet effective modification and apply it to a wide range of vision tasks. We augment the high-resolution representation by aggregating the (upsampled) representations from all the parallel convolutions rather than only the representation from the high-resolution convolution as done in HRNet. This simple modification leads to stronger representations, evidenced by superior results. We show top results in semantic segmentation on Cityscapes, LIP, and PASCAL Context, and facial landmark detection on AFLW, COFW, 300W, and WFLW.
In addition, we build a multi-level representation from the high-resolution representation and apply it to the Faster R-CNN object detection framework and the extended frameworks. The proposed approach achieves superior results to existing single-model networks on COCO object detection. - -
- -
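As a rough illustration of the aggregation step described above (an assumed sketch, not the official HRNet or mmdetection code), the snippet below shows how an HRNetV2p-style neck can turn the parallel multi-resolution branches into an FPN-like set of levels: every branch is upsampled to the highest resolution, concatenated, reduced with a 1x1 convolution, and then pooled down to form the remaining pyramid levels. Channel widths and input shapes are made-up example values.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class HRNetV2pStyleNeck(nn.Module):
    """Illustrative aggregation of parallel HRNet branches into pyramid levels."""

    def __init__(self, branch_channels=(18, 36, 72, 144), out_channels=256, num_levels=5):
        super().__init__()
        # 1x1 conv applied after concatenating all upsampled branches
        self.reduce = nn.Conv2d(sum(branch_channels), out_channels, kernel_size=1)
        self.num_levels = num_levels

    def forward(self, branches):
        # upsample every branch to the resolution of the highest-resolution branch
        h, w = branches[0].shape[-2:]
        upsampled = [
            F.interpolate(b, size=(h, w), mode='bilinear', align_corners=False)
            for b in branches
        ]
        # aggregate all resolutions instead of keeping only the highest one
        fused = self.reduce(torch.cat(upsampled, dim=1))
        # pool the fused map down to produce the remaining pyramid levels
        levels = [fused]
        for _ in range(self.num_levels - 1):
            levels.append(F.avg_pool2d(levels[-1], kernel_size=2, stride=2))
        return levels

# toy usage with HRNetV2p-W18-like branch widths (18, 36, 72, 144) on a small input
feats = [torch.randn(1, c, 200 // s, 304 // s)
         for c, s in zip((18, 36, 72, 144), (1, 2, 4, 8))]
outs = HRNetV2pStyleNeck()(feats)
print([tuple(o.shape) for o in outs])
```

The detection heads benchmarked in the tables below consume multi-level features of this kind in place of the usual ResNet-plus-FPN outputs.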
- -## Results and Models - -### Faster R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| HRNetV2p-W18 | pytorch | 1x | 6.6 | 13.4 | 36.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130_211246.log.json) | -| HRNetV2p-W18 | pytorch | 2x | 6.6 | - | 38.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco/faster_rcnn_hrnetv2p_w18_2x_coco_20200702_085731-a4ec0611.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco/faster_rcnn_hrnetv2p_w18_2x_coco_20200702_085731.log.json) | -| HRNetV2p-W32 | pytorch | 1x | 9.0 | 12.4 | 40.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco/faster_rcnn_hrnetv2p_w32_1x_coco_20200130-6e286425.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco/faster_rcnn_hrnetv2p_w32_1x_coco_20200130_204442.log.json) | -| HRNetV2p-W32 | pytorch | 2x | 9.0 | - | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco/faster_rcnn_hrnetv2p_w32_2x_coco_20200529_015927-976a9c15.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco/faster_rcnn_hrnetv2p_w32_2x_coco_20200529_015927.log.json) | -| HRNetV2p-W40 | pytorch | 1x | 10.4 | 10.5 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco/faster_rcnn_hrnetv2p_w40_1x_coco_20200210-95c1f5ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco/faster_rcnn_hrnetv2p_w40_1x_coco_20200210_125315.log.json) | -| HRNetV2p-W40 | pytorch | 2x | 10.4 | - | 42.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco/faster_rcnn_hrnetv2p_w40_2x_coco_20200512_161033-0f236ef4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco/faster_rcnn_hrnetv2p_w40_2x_coco_20200512_161033.log.json) | - -### Mask R-CNN - -| Backbone | 
Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| HRNetV2p-W18 | pytorch | 1x | 7.0 | 11.7 | 37.7 | 34.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco/mask_rcnn_hrnetv2p_w18_1x_coco_20200205-1c3d78ed.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco/mask_rcnn_hrnetv2p_w18_1x_coco_20200205_232523.log.json) | -| HRNetV2p-W18 | pytorch | 2x | 7.0 | - | 39.8 | 36.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco/mask_rcnn_hrnetv2p_w18_2x_coco_20200212-b3c825b1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco/mask_rcnn_hrnetv2p_w18_2x_coco_20200212_134222.log.json) | -| HRNetV2p-W32 | pytorch | 1x | 9.4 | 11.3 | 41.2 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco/mask_rcnn_hrnetv2p_w32_1x_coco_20200207-b29f616e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco/mask_rcnn_hrnetv2p_w32_1x_coco_20200207_055017.log.json) | -| HRNetV2p-W32 | pytorch | 2x | 9.4 | - | 42.5 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco/mask_rcnn_hrnetv2p_w32_2x_coco_20200213-45b75b4d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco/mask_rcnn_hrnetv2p_w32_2x_coco_20200213_150518.log.json) | -| HRNetV2p-W40 | pytorch | 1x | 10.9 | | 42.1 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco/mask_rcnn_hrnetv2p_w40_1x_coco_20200511_015646-66738b35.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco/mask_rcnn_hrnetv2p_w40_1x_coco_20200511_015646.log.json) | -| HRNetV2p-W40 | pytorch | 2x | 10.9 | | 42.8 | 38.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco/mask_rcnn_hrnetv2p_w40_2x_coco_20200512_163732-aed5e4ab.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco/mask_rcnn_hrnetv2p_w40_2x_coco_20200512_163732.log.json) | - -### Cascade R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| 
:----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| HRNetV2p-W18 | pytorch | 20e | 7.0 | 11.0 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco/cascade_rcnn_hrnetv2p_w18_20e_coco_20200210-434be9d7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco/cascade_rcnn_hrnetv2p_w18_20e_coco_20200210_105632.log.json) | -| HRNetV2p-W32 | pytorch | 20e | 9.4 | 11.0 | 43.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco/cascade_rcnn_hrnetv2p_w32_20e_coco_20200208-928455a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco/cascade_rcnn_hrnetv2p_w32_20e_coco_20200208_160511.log.json) | -| HRNetV2p-W40 | pytorch | 20e | 10.8 | | 43.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco/cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112-75e47b04.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco/cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112.log.json) | - -### Cascade Mask R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| HRNetV2p-W18 | pytorch | 20e | 8.5 | 8.5 | 41.6 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/cascade_mask_rcnn_hrnetv2p_w18_20e_coco_20200210-b543cd2b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/cascade_mask_rcnn_hrnetv2p_w18_20e_coco_20200210_093149.log.json) | -| HRNetV2p-W32 | pytorch | 20e | | 8.3 | 44.3 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco/cascade_mask_rcnn_hrnetv2p_w32_20e_coco_20200512_154043-39d9cf7b.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco/cascade_mask_rcnn_hrnetv2p_w32_20e_coco_20200512_154043.log.json) | -| HRNetV2p-W40 | pytorch | 20e | 12.5 | | 45.1 | 39.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco/cascade_mask_rcnn_hrnetv2p_w40_20e_coco_20200527_204922-969c4610.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco/cascade_mask_rcnn_hrnetv2p_w40_20e_coco_20200527_204922.log.json) | - -### Hybrid Task Cascade (HTC) - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| HRNetV2p-W18 | pytorch | 20e | 10.8 | 4.7 | 42.8 | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w18_20e_coco/htc_hrnetv2p_w18_20e_coco_20200210-b266988c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w18_20e_coco/htc_hrnetv2p_w18_20e_coco_20200210_182735.log.json) | -| HRNetV2p-W32 | pytorch | 20e | 13.1 | 4.9 | 45.4 | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/htc_hrnetv2p_w32_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w32_20e_coco/htc_hrnetv2p_w32_20e_coco_20200207-7639fa12.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w32_20e_coco/htc_hrnetv2p_w32_20e_coco_20200207_193153.log.json) | -| HRNetV2p-W40 | pytorch | 20e | 14.6 | | 46.4 | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w40_20e_coco/htc_hrnetv2p_w40_20e_coco_20200529_183411-417c4d5b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w40_20e_coco/htc_hrnetv2p_w40_20e_coco_20200529_183411.log.json) | - -### FCOS - -| Backbone | Style | GN | MS train | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :----------: | :-----: | :-: | :------: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| HRNetV2p-W18 | pytorch | Y | N | 1x | 13.0 | 12.9 | 35.3 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco_20201212_100710-4ad151de.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco_20201212_100710.log.json) | -| HRNetV2p-W18 | pytorch | Y | N | 2x | 13.0 | - | 38.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco_20201212_101110-5c575fa5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco_20201212_101110.log.json) | -| HRNetV2p-W32 | pytorch | Y | N | 1x | 17.5 | 12.9 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco_20201211_134730-cb8055c0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco_20201211_134730.log.json) | -| HRNetV2p-W32 | pytorch | Y | N | 2x | 17.5 | - | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco_20201212_112133-77b6b9bb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco_20201212_112133.log.json) | -| HRNetV2p-W18 | pytorch | Y | Y | 2x | 13.0 | 12.9 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco_20201212_111651-441e9d9f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco_20201212_111651.log.json) | -| HRNetV2p-W32 | pytorch | Y | Y | 2x | 17.5 | 12.4 | 41.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco_20201212_090846-b6f2b49f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco_20201212_090846.log.json) | -| HRNetV2p-W40 | pytorch | Y | Y | 2x | 20.3 | 10.8 | 42.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco_20201212_124752-f22d2ce5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco_20201212_124752.log.json) | - -**Note:** - -- The `28e` schedule in HTC indicates decreasing the lr at 24 and 27 epochs, with a total of 28 epochs. -- HRNetV2 ImageNet pretrained models are in [HRNets for Image Classification](https://github.com/HRNet/HRNet-Image-Classification). - -## Citation - -```latex -@inproceedings{SunXLW19, - title={Deep High-Resolution Representation Learning for Human Pose Estimation}, - author={Ke Sun and Bin Xiao and Dong Liu and Jingdong Wang}, - booktitle={CVPR}, - year={2019} -} - -@article{SunZJCXLMWLW19, - title={High-Resolution Representations for Labeling Pixels and Regions}, - author={Ke Sun and Yang Zhao and Borui Jiang and Tianheng Cheng and Bin Xiao - and Dong Liu and Yadong Mu and Xinggang Wang and Wenyu Liu and Jingdong Wang}, - journal = {CoRR}, - volume = {abs/1904.04514}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py deleted file mode 100644 index 839cf3eb62590368ab0e99efdadcbdd4ad81eeb5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py' -# model settings -model = dict( - backbone=dict( - extra=dict( - stage2=dict(num_channels=(18, 36)), - stage3=dict(num_channels=(18, 36, 72)), - stage4=dict(num_channels=(18, 36, 72, 144))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), - neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py deleted file mode 100644 index 9942602762d8eba5d4c3ad20f2190fdb9f1df906..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py +++ /dev/null @@ -1,40 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - _delete_=True, - type='HRNet', - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), - neck=dict( - _delete_=True, - type='HRFPN', - in_channels=[32, 64, 128, 256], - out_channels=256)) -# learning policy -lr_config = dict(step=[16, 19]) -runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py 
b/cv/detection/co-detr/pytorch/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py deleted file mode 100644 index 10d5e83c67ebfb7f3017abc164d9559681185268..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py +++ /dev/null @@ -1,12 +0,0 @@ -_base_ = './cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py' -# model settings -model = dict( - backbone=dict( - type='HRNet', - extra=dict( - stage2=dict(num_channels=(40, 80)), - stage3=dict(num_channels=(40, 80, 160)), - stage4=dict(num_channels=(40, 80, 160, 320))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), - neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py deleted file mode 100644 index ebd5e202d955e87870b3cf8efd94683668dd5929..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = './cascade_rcnn_hrnetv2p_w32_20e_coco.py' -# model settings -model = dict( - backbone=dict( - extra=dict( - stage2=dict(num_channels=(18, 36)), - stage3=dict(num_channels=(18, 36, 72)), - stage4=dict(num_channels=(18, 36, 72, 144))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), - neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py deleted file mode 100644 index e7f89a9edae81d02a2229229b1c66cf50a9282e0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py +++ /dev/null @@ -1,40 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - _delete_=True, - type='HRNet', - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), - neck=dict( - _delete_=True, - type='HRFPN', - in_channels=[32, 64, 128, 256], - out_channels=256)) -# learning policy -lr_config = dict(step=[16, 19]) -runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py deleted file mode 100644 index 265e8d636f53f448f59372074c9bbe590cb26d9a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py +++ /dev/null @@ -1,12 +0,0 @@ -_base_ = './cascade_rcnn_hrnetv2p_w32_20e_coco.py' -# model settings -model = dict( - backbone=dict( - type='HRNet', - extra=dict( - stage2=dict(num_channels=(40, 80)), - stage3=dict(num_channels=(40, 80, 160)), - stage4=dict(num_channels=(40, 80, 160, 
320))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), - neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py deleted file mode 100644 index 1df2c3db1c00a6c0c34f96bc71cf35bfc0e0fbe6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' -# model settings -model = dict( - backbone=dict( - extra=dict( - stage2=dict(num_channels=(18, 36)), - stage3=dict(num_channels=(18, 36, 72)), - stage4=dict(num_channels=(18, 36, 72, 144))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), - neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py deleted file mode 100644 index a4b987a19ae32453d524fc2f7a4fb6b6b87f1f32..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = './faster_rcnn_hrnetv2p_w18_1x_coco.py' - -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py deleted file mode 100644 index be058099a4c59b06ec5598ea25d194163e45601a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py +++ /dev/null @@ -1,37 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - _delete_=True, - type='HRNet', - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), - neck=dict( - _delete_=True, - type='HRFPN', - in_channels=[32, 64, 128, 256], - out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py deleted file mode 100644 index 63c8717182f2284ff1062be31bae43b4360c6887..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py deleted file mode 100644 index 
886a7c90a453e684b3c0646b2eb3dea903671358..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py' -model = dict( - backbone=dict( - type='HRNet', - extra=dict( - stage2=dict(num_channels=(40, 80)), - stage3=dict(num_channels=(40, 80, 160)), - stage4=dict(num_channels=(40, 80, 160, 320))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), - neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py deleted file mode 100644 index 585cc2c332fd88a9f0164b14084d45d7a3783b11..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './faster_rcnn_hrnetv2p_w40_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py deleted file mode 100644 index fd662bd10e3eb84fccbda080d9c902084f2fb490..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' -model = dict( - backbone=dict( - extra=dict( - stage2=dict(num_channels=(18, 36)), - stage3=dict(num_channels=(18, 36, 72)), - stage4=dict(num_channels=(18, 36, 72, 144))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), - neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py deleted file mode 100644 index 34975959f27f0ef8b985ab7d2857c7f2d70e47ae..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py deleted file mode 100644 index 37bfdae98f177914cbaa99d5b117c7928b6f84dd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py' -model = dict( - backbone=dict( - extra=dict( - stage2=dict(num_channels=(18, 36)), - stage3=dict(num_channels=(18, 36, 72)), - stage4=dict(num_channels=(18, 36, 72, 144))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), - neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py 
b/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py deleted file mode 100644 index 10617f24c46f8dee164f06babecb00ae5d289466..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py +++ /dev/null @@ -1,70 +0,0 @@ -_base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py' -model = dict( - backbone=dict( - _delete_=True, - type='HRNet', - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), - neck=dict( - _delete_=True, - type='HRFPN', - in_channels=[32, 64, 128, 256], - out_channels=256, - stride=2, - num_outs=5)) -img_norm_cfg = dict( - mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py deleted file mode 100644 index 7b3813071c7591caa72412e5622e4101f7c05920..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py deleted file mode 100644 index 482f88729ff6c08e482a5ca5c6d48b75f14f7ca8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py' -img_norm_cfg = dict( - mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='value', - 
keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py deleted file mode 100644 index 0ae9dbe3aca8d9d6e0af785dd60131909f420a89..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py' -model = dict( - backbone=dict( - type='HRNet', - extra=dict( - stage2=dict(num_channels=(40, 80)), - stage3=dict(num_channels=(40, 80, 160)), - stage4=dict(num_channels=(40, 80, 160, 320))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), - neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py deleted file mode 100644 index 3c2eb1dd4e08830d0e57ecfe321f0353c8bf6cb1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/htc_hrnetv2p_w18_20e_coco.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = './htc_hrnetv2p_w32_20e_coco.py' -model = dict( - backbone=dict( - extra=dict( - stage2=dict(num_channels=(18, 36)), - stage3=dict(num_channels=(18, 36, 72)), - stage4=dict(num_channels=(18, 36, 72, 144))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), - neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/htc_hrnetv2p_w32_20e_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/htc_hrnetv2p_w32_20e_coco.py deleted file mode 100644 index 545cb83eaca50f9d5de1fa6b3f3e569faab7d5f2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/htc_hrnetv2p_w32_20e_coco.py +++ /dev/null @@ -1,37 +0,0 @@ -_base_ = '../htc/htc_r50_fpn_20e_coco.py' -model = dict( - backbone=dict( - _delete_=True, - type='HRNet', - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', 
checkpoint='open-mmlab://msra/hrnetv2_w32')), - neck=dict( - _delete_=True, - type='HRFPN', - in_channels=[32, 64, 128, 256], - out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py deleted file mode 100644 index 94bff1bc01c09a98579f469dcac19df27cfc60b9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/htc_hrnetv2p_w40_20e_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = './htc_hrnetv2p_w32_20e_coco.py' -model = dict( - backbone=dict( - type='HRNet', - extra=dict( - stage2=dict(num_channels=(40, 80)), - stage3=dict(num_channels=(40, 80, 160)), - stage4=dict(num_channels=(40, 80, 160, 320))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), - neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/htc_hrnetv2p_w40_28e_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/htc_hrnetv2p_w40_28e_coco.py deleted file mode 100644 index 7067e8b602efb4f61549d376ec393e89deee8c3e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/htc_hrnetv2p_w40_28e_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './htc_hrnetv2p_w40_20e_coco.py' -# learning policy -lr_config = dict(step=[24, 27]) -runner = dict(type='EpochBasedRunner', max_epochs=28) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py deleted file mode 100644 index 815f2857f99791232664ecc9e82ea860fdcaa268..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/htc_x101_64x4d_fpn_16x1_28e_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = '../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py' -# learning policy -lr_config = dict(step=[24, 27]) -runner = dict(type='EpochBasedRunner', max_epochs=28) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py deleted file mode 100644 index cb12200edb5fe0a31b0cba8966e858ad06024b7c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py' -model = dict( - backbone=dict( - extra=dict( - stage2=dict(num_channels=(18, 36)), - stage3=dict(num_channels=(18, 36, 72)), - stage4=dict(num_channels=(18, 36, 72, 144))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')), - neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py deleted file mode 100644 index ca62682a3b2d328cc9a8fd08887bcc1bac53104d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py deleted file mode 100644 index 
d5f0eb56b7e97bc764b98a2b88a277a69633caa6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py +++ /dev/null @@ -1,37 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - _delete_=True, - type='HRNet', - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(32, 64)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(32, 64, 128)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(32, 64, 128, 256))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')), - neck=dict( - _delete_=True, - type='HRFPN', - in_channels=[32, 64, 128, 256], - out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py deleted file mode 100644 index 63d5d139e7b56843f5dcc85bda48945d56cfc49e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py deleted file mode 100644 index 5a76f4b056367f0cc69b5fc601ae5cdb1ac98cf8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py' -model = dict( - backbone=dict( - type='HRNet', - extra=dict( - stage2=dict(num_channels=(40, 80)), - stage3=dict(num_channels=(40, 80, 160)), - stage4=dict(num_channels=(40, 80, 160, 320))), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')), - neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py b/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py deleted file mode 100644 index 3a2a510689308e556af803968a641dcf2594fe77..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './mask_rcnn_hrnetv2p_w40_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/hrnet/metafile.yml b/cv/detection/co-detr/pytorch/configs/hrnet/metafile.yml deleted file mode 100644 index ac36efa9f039e4ad8cfe65c4f5de88d6caac398e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/hrnet/metafile.yml +++ /dev/null @@ -1,971 +0,0 @@ -Models: - - Name: faster_rcnn_hrnetv2p_w18_1x_coco - In Collection: Faster R-CNN - Config: configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.py - Metadata: - Training Memory (GB): 6.6 - inference time (ms/im): - - value: 74.63 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - 
Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 36.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco/faster_rcnn_hrnetv2p_w18_1x_coco_20200130-56651a6d.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: faster_rcnn_hrnetv2p_w18_2x_coco - In Collection: Faster R-CNN - Config: configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.py - Metadata: - Training Memory (GB): 6.6 - inference time (ms/im): - - value: 74.63 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco/faster_rcnn_hrnetv2p_w18_2x_coco_20200702_085731-a4ec0611.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: faster_rcnn_hrnetv2p_w32_1x_coco - In Collection: Faster R-CNN - Config: configs/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco.py - Metadata: - Training Memory (GB): 9.0 - inference time (ms/im): - - value: 80.65 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_1x_coco/faster_rcnn_hrnetv2p_w32_1x_coco_20200130-6e286425.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: faster_rcnn_hrnetv2p_w32_2x_coco - In Collection: Faster R-CNN - Config: configs/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco.py - Metadata: - Training Memory (GB): 9.0 - inference time (ms/im): - - value: 80.65 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w32_2x_coco/faster_rcnn_hrnetv2p_w32_2x_coco_20200529_015927-976a9c15.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: 
https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: faster_rcnn_hrnetv2p_w40_1x_coco - In Collection: Faster R-CNN - Config: configs/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco.py - Metadata: - Training Memory (GB): 10.4 - inference time (ms/im): - - value: 95.24 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_1x_coco/faster_rcnn_hrnetv2p_w40_1x_coco_20200210-95c1f5ce.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: faster_rcnn_hrnetv2p_w40_2x_coco - In Collection: Faster R-CNN - Config: configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py - Metadata: - Training Memory (GB): 10.4 - inference time (ms/im): - - value: 95.24 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco/faster_rcnn_hrnetv2p_w40_2x_coco_20200512_161033-0f236ef4.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: mask_rcnn_hrnetv2p_w18_1x_coco - In Collection: Mask R-CNN - Config: configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py - Metadata: - Training Memory (GB): 7.0 - inference time (ms/im): - - value: 85.47 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 34.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco/mask_rcnn_hrnetv2p_w18_1x_coco_20200205-1c3d78ed.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: mask_rcnn_hrnetv2p_w18_2x_coco - In Collection: Mask R-CNN - Config: configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py - Metadata: - Training Memory (GB): 7.0 - inference time (ms/im): - - value: 85.47 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training 
Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco/mask_rcnn_hrnetv2p_w18_2x_coco_20200212-b3c825b1.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: mask_rcnn_hrnetv2p_w32_1x_coco - In Collection: Mask R-CNN - Config: configs/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco.py - Metadata: - Training Memory (GB): 9.4 - inference time (ms/im): - - value: 88.5 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_1x_coco/mask_rcnn_hrnetv2p_w32_1x_coco_20200207-b29f616e.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: mask_rcnn_hrnetv2p_w32_2x_coco - In Collection: Mask R-CNN - Config: configs/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco.py - Metadata: - Training Memory (GB): 9.4 - inference time (ms/im): - - value: 88.5 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w32_2x_coco/mask_rcnn_hrnetv2p_w32_2x_coco_20200213-45b75b4d.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: mask_rcnn_hrnetv2p_w40_1x_coco - In Collection: Mask R-CNN - Config: configs/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco.py - Metadata: - Training Memory (GB): 10.9 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_1x_coco/mask_rcnn_hrnetv2p_w40_1x_coco_20200511_015646-66738b35.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - 
URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: mask_rcnn_hrnetv2p_w40_2x_coco - In Collection: Mask R-CNN - Config: configs/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco.py - Metadata: - Training Memory (GB): 10.9 - Epochs: 24 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/mask_rcnn_hrnetv2p_w40_2x_coco/mask_rcnn_hrnetv2p_w40_2x_coco_20200512_163732-aed5e4ab.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: cascade_rcnn_hrnetv2p_w18_20e_coco - In Collection: Cascade R-CNN - Config: configs/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco.py - Metadata: - Training Memory (GB): 7.0 - inference time (ms/im): - - value: 90.91 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w18_20e_coco/cascade_rcnn_hrnetv2p_w18_20e_coco_20200210-434be9d7.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: cascade_rcnn_hrnetv2p_w32_20e_coco - In Collection: Cascade R-CNN - Config: configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py - Metadata: - Training Memory (GB): 9.4 - inference time (ms/im): - - value: 90.91 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco/cascade_rcnn_hrnetv2p_w32_20e_coco_20200208-928455a4.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: cascade_rcnn_hrnetv2p_w40_20e_coco - In Collection: Cascade R-CNN - Config: configs/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco.py - Metadata: - Training Memory (GB): 10.8 - Epochs: 20 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.8 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_rcnn_hrnetv2p_w40_20e_coco/cascade_rcnn_hrnetv2p_w40_20e_coco_20200512_161112-75e47b04.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: cascade_mask_rcnn_hrnetv2p_w18_20e_coco - In Collection: Cascade R-CNN - Config: configs/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py - Metadata: - Training Memory (GB): 8.5 - inference time (ms/im): - - value: 117.65 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.6 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w18_20e_coco/cascade_mask_rcnn_hrnetv2p_w18_20e_coco_20200210-b543cd2b.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: cascade_mask_rcnn_hrnetv2p_w32_20e_coco - In Collection: Cascade R-CNN - Config: configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py - Metadata: - inference time (ms/im): - - value: 120.48 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco/cascade_mask_rcnn_hrnetv2p_w32_20e_coco_20200512_154043-39d9cf7b.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: cascade_mask_rcnn_hrnetv2p_w40_20e_coco - In Collection: Cascade R-CNN - Config: configs/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py - Metadata: - Training Memory (GB): 12.5 - Epochs: 20 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/cascade_mask_rcnn_hrnetv2p_w40_20e_coco/cascade_mask_rcnn_hrnetv2p_w40_20e_coco_20200527_204922-969c4610.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - 
Version: v2.0.0 - - - Name: htc_hrnetv2p_w18_20e_coco - In Collection: HTC - Config: configs/hrnet/htc_hrnetv2p_w18_20e_coco.py - Metadata: - Training Memory (GB): 10.8 - inference time (ms/im): - - value: 212.77 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w18_20e_coco/htc_hrnetv2p_w18_20e_coco_20200210-b266988c.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: htc_hrnetv2p_w32_20e_coco - In Collection: HTC - Config: configs/hrnet/htc_hrnetv2p_w32_20e_coco.py - Metadata: - Training Memory (GB): 13.1 - inference time (ms/im): - - value: 204.08 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w32_20e_coco/htc_hrnetv2p_w32_20e_coco_20200207-7639fa12.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: htc_hrnetv2p_w40_20e_coco - In Collection: HTC - Config: configs/hrnet/htc_hrnetv2p_w40_20e_coco.py - Metadata: - Training Memory (GB): 14.6 - Epochs: 20 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 40.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/htc_hrnetv2p_w40_20e_coco/htc_hrnetv2p_w40_20e_coco_20200529_183411-417c4d5b.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: fcos_hrnetv2p_w18_gn-head_4x4_1x_coco - In Collection: FCOS - Config: configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py - Metadata: - Training Resources: 4x V100 GPUs - Batch Size: 16 - Training Memory (GB): 13.0 - inference time (ms/im): - - value: 77.52 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 35.3 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco/fcos_hrnetv2p_w18_gn-head_4x4_1x_coco_20201212_100710-4ad151de.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: fcos_hrnetv2p_w18_gn-head_4x4_2x_coco - In Collection: FCOS - Config: configs/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py - Metadata: - Training Resources: 4x V100 GPUs - Batch Size: 16 - Training Memory (GB): 13.0 - inference time (ms/im): - - value: 77.52 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_4x4_2x_coco_20201212_101110-5c575fa5.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: fcos_hrnetv2p_w32_gn-head_4x4_1x_coco - In Collection: FCOS - Config: configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py - Metadata: - Training Resources: 4x V100 GPUs - Batch Size: 16 - Training Memory (GB): 17.5 - inference time (ms/im): - - value: 77.52 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco_20201211_134730-cb8055c0.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: fcos_hrnetv2p_w32_gn-head_4x4_2x_coco - In Collection: FCOS - Config: configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py - Metadata: - Training Resources: 4x V100 GPUs - Batch Size: 16 - Training Memory (GB): 17.5 - inference time (ms/im): - - value: 77.52 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_4x4_2x_coco_20201212_112133-77b6b9bb.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: 
fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco - In Collection: FCOS - Config: configs/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py - Metadata: - Training Resources: 4x V100 GPUs - Batch Size: 16 - Training Memory (GB): 13.0 - inference time (ms/im): - - value: 77.52 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco_20201212_111651-441e9d9f.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco - In Collection: FCOS - Config: configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py - Metadata: - Training Resources: 4x V100 GPUs - Batch Size: 16 - Training Memory (GB): 17.5 - inference time (ms/im): - - value: 80.65 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco_20201212_090846-b6f2b49f.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 - - - Name: fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco - In Collection: FCOS - Config: configs/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py - Metadata: - Training Resources: 4x V100 GPUs - Batch Size: 16 - Training Memory (GB): 20.3 - inference time (ms/im): - - value: 92.59 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Architecture: - - HRNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/hrnet/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco/fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco_20201212_124752-f22d2ce5.pth - Paper: - URL: https://arxiv.org/abs/1904.04514 - Title: 'Deep High-Resolution Representation Learning for Visual Recognition' - README: configs/hrnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/backbones/hrnet.py#L195 - Version: v2.0.0 diff --git a/cv/detection/co-detr/pytorch/configs/htc/README.md b/cv/detection/co-detr/pytorch/configs/htc/README.md deleted file mode 100644 index 747f8f608303ecdce610f95ad4ecbca9bdedcd44..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/htc/README.md +++ /dev/null @@ 
-1,67 +0,0 @@ -# HTC - -> [Hybrid Task Cascade for Instance Segmentation](https://arxiv.org/abs/1901.07518) - - - -## Abstract - -Cascade is a classic yet powerful architecture that has boosted performance on various tasks. However, how to introduce cascade into instance segmentation remains an open question. A simple combination of Cascade R-CNN and Mask R-CNN brings only limited gain. In exploring a more effective approach, we find that the key to a successful instance segmentation cascade is to fully leverage the reciprocal relationship between detection and segmentation. In this work, we propose a new framework, Hybrid Task Cascade (HTC), which differs in two important aspects: (1) instead of performing cascaded refinement on these two tasks separately, it interweaves them for joint multi-stage processing; (2) it adopts a fully convolutional branch to provide spatial context, which helps distinguish hard foreground from cluttered background. Overall, this framework can progressively learn more discriminative features while integrating complementary features in each stage. Without bells and whistles, a single HTC obtains 38.4 mask AP on the MSCOCO dataset, a 1.5-point improvement over a strong Cascade Mask R-CNN baseline. Moreover, our overall system achieves 48.6 mask AP on the test-challenge split, ranking 1st in the COCO 2018 Challenge Object Detection Task. - -
- -
- -## Introduction - -HTC requires COCO and [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) dataset for training. You need to download and extract it in the COCO dataset path. -The directory should be like this. - -```none -mmdetection -├── mmdet -├── tools -├── configs -├── data -│ ├── coco -│ │ ├── annotations -│ │ ├── train2017 -│ │ ├── val2017 -│ │ ├── test2017 -| | ├── stuffthingmaps -``` - -## Results and Models - -The results on COCO 2017val are shown in the below table. (results on test-dev are usually slightly higher than val) - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | pytorch | 1x | 8.2 | 5.8 | 42.3 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317-7332cf16.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317_070435.log.json) | -| R-50-FPN | pytorch | 20e | 8.2 | - | 43.3 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_r50_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319-fe28c577.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319_070313.log.json) | -| R-101-FPN | pytorch | 20e | 10.2 | 5.5 | 44.8 | 39.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_r101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r101_fpn_20e_coco/htc_r101_fpn_20e_coco_20200317-9b41b48f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r101_fpn_20e_coco/htc_r101_fpn_20e_coco_20200317_153107.log.json) | -| X-101-32x4d-FPN | pytorch | 20e | 11.4 | 5.0 | 46.1 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_32x4d_fpn_16x1_20e_coco/htc_x101_32x4d_fpn_16x1_20e_coco_20200318-de97ae01.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_32x4d_fpn_16x1_20e_coco/htc_x101_32x4d_fpn_16x1_20e_coco_20200318_034519.log.json) | -| X-101-64x4d-FPN | pytorch | 20e | 14.5 | 4.4 | 47.0 | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_16x1_20e_coco/htc_x101_64x4d_fpn_16x1_20e_coco_20200318-b181fd7a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_16x1_20e_coco/htc_x101_64x4d_fpn_16x1_20e_coco_20200318_081711.log.json) | - -- In the HTC paper and COCO 2018 Challenge, `score_thr` is set to 0.001 for both baselines and HTC. 
-- We use 8 GPUs with 2 images/GPU for R-50 and R-101 models, and 16 GPUs with 1 image/GPU for X-101 models. - If you would like to train X-101 HTC with 8 GPUs, you need to change the lr from 0.02 to 0.01. - -We also provide a powerful HTC with DCN and multi-scale training model. No testing augmentation is used. - -| Backbone | Style | DCN | training scales | Lr schd | box AP | mask AP | Config | Download | -| :-------------: | :-----: | :---: | :-------------: | :-----: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| X-101-64x4d-FPN | pytorch | c3-c5 | 400~1400 | 20e | 50.4 | 43.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312-946fd751.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312_203410.log.json) | - -## Citation - -We provide config files to reproduce the results in the CVPR 2019 paper for [Hybrid Task Cascade](https://arxiv.org/abs/1901.07518). 
- -```latex -@inproceedings{chen2019hybrid, - title={Hybrid task cascade for instance segmentation}, - author={Chen, Kai and Pang, Jiangmiao and Wang, Jiaqi and Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and Liu, Ziwei and Shi, Jianping and Ouyang, Wanli and Chen Change Loy and Dahua Lin}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/htc/htc_r101_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/htc/htc_r101_fpn_20e_coco.py deleted file mode 100644 index b42297bf14723f4068ebddaffdeb84a29d2fee44..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/htc/htc_r101_fpn_20e_coco.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './htc_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) -# learning policy -lr_config = dict(step=[16, 19]) -runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/cv/detection/co-detr/pytorch/configs/htc/htc_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/htc/htc_r50_fpn_1x_coco.py deleted file mode 100644 index 1e8e18a0d6ea2f7572add5af1b2bfd1480fd70af..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/htc/htc_r50_fpn_1x_coco.py +++ /dev/null @@ -1,56 +0,0 @@ -_base_ = './htc_without_semantic_r50_fpn_1x_coco.py' -model = dict( - roi_head=dict( - semantic_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[8]), - semantic_head=dict( - type='FusedSemanticHead', - num_ins=5, - fusion_level=1, - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=183, - loss_seg=dict( - type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2)))) -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='SegRescale', scale_factor=1 / 8), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict( - seg_prefix=data_root + 'stuffthingmaps/train2017/', - pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/htc/htc_r50_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/htc/htc_r50_fpn_20e_coco.py deleted file mode 100644 index 7d2e0116e7d3533d3d6e9567f310a0d1d86cdb42..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/htc/htc_r50_fpn_20e_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './htc_r50_fpn_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 19]) 
-runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/cv/detection/co-detr/pytorch/configs/htc/htc_without_semantic_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/htc/htc_without_semantic_r50_fpn_1x_coco.py deleted file mode 100644 index 565104f4aa984eb0685548e3bbdf2497cf72b5e9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/htc/htc_without_semantic_r50_fpn_1x_coco.py +++ /dev/null @@ -1,236 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# model settings -model = dict( - type='HybridTaskCascade', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - roi_head=dict( - type='HybridTaskCascadeRoIHead', - interleaved=True, - mask_info_flow=True, - num_stages=3, - stage_loss_weights=[1, 0.5, 0.25], - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=[ - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ], - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_head=[ - dict( - type='HTCMaskHead', - with_conv_res=False, - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), - dict( - type='HTCMaskHead', - num_convs=4, - in_channels=256, 
- conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), - dict( - type='HTCMaskHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)) - ]), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=[ - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.6, - neg_iou_thr=0.6, - min_pos_iou=0.6, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.7, - min_pos_iou=0.7, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False) - ]), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.001, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py b/cv/detection/co-detr/pytorch/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py deleted file mode 100644 index 0c834f28357a506cdf520b2c23cfe396b5c68709..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py +++ /dev/null @@ -1,19 +0,0 @@ -_base_ = './htc_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) -data = dict(samples_per_gpu=1, workers_per_gpu=1) -# learning policy -lr_config = dict(step=[16, 19]) -runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/cv/detection/co-detr/pytorch/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py 
b/cv/detection/co-detr/pytorch/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py deleted file mode 100644 index 8b0d962b2e920121f6c31df406e8fb6159cbe9f0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py +++ /dev/null @@ -1,19 +0,0 @@ -_base_ = './htc_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) -data = dict(samples_per_gpu=1, workers_per_gpu=1) -# learning policy -lr_config = dict(step=[16, 19]) -runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/cv/detection/co-detr/pytorch/configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py b/cv/detection/co-detr/pytorch/configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py deleted file mode 100644 index c8d870334c31fdbbe16a87b15b34d11b5b90fb81..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py +++ /dev/null @@ -1,43 +0,0 @@ -_base_ = './htc_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) -# dataset settings -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), - dict( - type='Resize', - img_scale=[(1600, 400), (1600, 1400)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='SegRescale', scale_factor=1 / 8), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), -] -data = dict( - samples_per_gpu=1, workers_per_gpu=1, train=dict(pipeline=train_pipeline)) -# learning policy -lr_config = dict(step=[16, 19]) -runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/cv/detection/co-detr/pytorch/configs/htc/metafile.yml b/cv/detection/co-detr/pytorch/configs/htc/metafile.yml deleted file mode 100644 index acd038c707b85ac107beb80b6addfc202e7650d9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/htc/metafile.yml +++ /dev/null @@ -1,165 +0,0 @@ -Collections: - - Name: HTC - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - HTC - - RPN - - ResNet - - ResNeXt - - RoIAlign - Paper: - URL: https://arxiv.org/abs/1901.07518 - Title: 'Hybrid Task Cascade for Instance Segmentation' - README: configs/htc/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/htc.py#L6 - Version: v2.0.0 - -Models: - - Name: htc_r50_fpn_1x_coco - In Collection: HTC - Config: 
configs/htc/htc_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 8.2 - inference time (ms/im): - - value: 172.41 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317-7332cf16.pth - - - Name: htc_r50_fpn_20e_coco - In Collection: HTC - Config: configs/htc/htc_r50_fpn_20e_coco.py - Metadata: - Training Memory (GB): 8.2 - inference time (ms/im): - - value: 172.41 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319-fe28c577.pth - - - Name: htc_r101_fpn_20e_coco - In Collection: HTC - Config: configs/htc/htc_r101_fpn_20e_coco.py - Metadata: - Training Memory (GB): 10.2 - inference time (ms/im): - - value: 181.82 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_r101_fpn_20e_coco/htc_r101_fpn_20e_coco_20200317-9b41b48f.pth - - - Name: htc_x101_32x4d_fpn_16x1_20e_coco - In Collection: HTC - Config: configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py - Metadata: - Training Resources: 16x V100 GPUs - Batch Size: 16 - Training Memory (GB): 11.4 - inference time (ms/im): - - value: 200 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 40.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_32x4d_fpn_16x1_20e_coco/htc_x101_32x4d_fpn_16x1_20e_coco_20200318-de97ae01.pth - - - Name: htc_x101_64x4d_fpn_16x1_20e_coco - In Collection: HTC - Config: configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py - Metadata: - Training Resources: 16x V100 GPUs - Batch Size: 16 - Training Memory (GB): 14.5 - inference time (ms/im): - - value: 227.27 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 47.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 41.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_16x1_20e_coco/htc_x101_64x4d_fpn_16x1_20e_coco_20200318-b181fd7a.pth - - - Name: htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco - In Collection: HTC - Config: configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py - Metadata: - Training Resources: 16x V100 GPUs - Batch Size: 16 - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 50.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 43.8 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312-946fd751.pth diff --git a/cv/detection/co-detr/pytorch/configs/instaboost/README.md b/cv/detection/co-detr/pytorch/configs/instaboost/README.md deleted file mode 100644 index 82ed33422aceb2ad13c6690858356d806ea6be32..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/instaboost/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# Instaboost - -> [Instaboost: Boosting instance segmentation via probability map guided copy-pasting](https://arxiv.org/abs/1908.07801) - - - -## Abstract - -Instance segmentation requires a large number of training samples to achieve satisfactory performance and benefits from proper data augmentation. To enlarge the training set and increase its diversity, previous methods have investigated using data annotations from other domains (e.g. bbox, point) in a weakly supervised manner. In this paper, we present a simple, efficient and effective method to augment the training set using the existing instance mask annotations. Exploiting the pixel redundancy of the background, we are able to improve the performance of Mask R-CNN by 1.7 mAP on the COCO dataset and 3.3 mAP on the Pascal VOC dataset simply by introducing random jittering to objects. Furthermore, we propose a location probability map based approach to explore the feasible locations where objects can be placed according to local appearance similarity. With the guidance of such a map, we boost the performance of R101-Mask R-CNN on instance segmentation from 35.7 mAP to 37.9 mAP without modifying the backbone or network structure. Our method is simple to implement and does not increase the computational complexity. It can be integrated into the training pipeline of any instance segmentation model without affecting training or inference efficiency. - -
- -
- -## Introduction - -Configs in this directory is the implementation for ICCV2019 paper "InstaBoost: Boosting Instance Segmentation Via Probability Map Guided Copy-Pasting" and provided by the authors of the paper. InstaBoost is a data augmentation method for object detection and instance segmentation. The paper has been released on [`arXiv`](https://arxiv.org/abs/1908.07801). - -## Usage - -### Requirements - -You need to install `instaboostfast` before using it. - -```shell -pip install instaboostfast -``` - -The code and more details can be found [here](https://github.com/GothicAi/Instaboost). - -### Integration with MMDetection - -InstaBoost have been already integrated in the data pipeline, thus all you need is to add or change **InstaBoost** configurations after **LoadImageFromFile**. We have provided examples like [this](mask_rcnn_r50_fpn_instaboost_4x#L121). You can refer to [`InstaBoostConfig`](https://github.com/GothicAi/InstaBoost-pypi#instaboostconfig) for more details. - -## Results and Models - -- All models were trained on `coco_2017_train` and tested on `coco_2017_val` for convenience of evaluation and comparison. In the paper, the results are obtained from `test-dev`. -- To balance accuracy and training time when using InstaBoost, models released in this page are all trained for 48 Epochs. Other training and testing configs strictly follow the original framework. -- For results and models in MMDetection V1.x, please refer to [Instaboost](https://github.com/GothicAi/Instaboost). - -| Network | Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-----------: | :-------------: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Mask R-CNN | R-50-FPN | 4x | 4.4 | 17.5 | 40.6 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307_223635.log.json) | -| Mask R-CNN | R-101-FPN | 4x | 6.4 | | 42.5 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco/mask_rcnn_r101_fpn_instaboost_4x_coco_20200703_235738-f23f3a5f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco/mask_rcnn_r101_fpn_instaboost_4x_coco_20200703_235738.log.json) | -| Mask R-CNN | X-101-64x4d-FPN | 4x | 10.7 | | 44.7 | 39.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco_20200515_080947-8ed58c1b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco_20200515_080947.log.json) | -| Cascade R-CNN | R-101-FPN | 4x | 6.0 | 12.0 | 43.7 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-c19d98d9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco_20200307_223646.log.json) | - -## Citation - -```latex -@inproceedings{fang2019instaboost, - title={Instaboost: Boosting instance segmentation via probability map guided copy-pasting}, - author={Fang, Hao-Shu and Sun, Jianhua and Wang, Runzhong and Gou, Minghao and Li, Yong-Lu and Lu, Cewu}, - booktitle={Proceedings of the IEEE International Conference on Computer Vision}, - pages={682--691}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py b/cv/detection/co-detr/pytorch/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py deleted file mode 100644 index 9d0515d73d4276883f495d8b30b793afd9fa2dc5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/instaboost/cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py' - -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py b/cv/detection/co-detr/pytorch/configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py deleted file mode 100644 index a89a81f5c76586d6d1b15abf74f3740e9f439762..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py +++ /dev/null @@ -1,28 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='InstaBoost', - action_candidate=('normal', 'horizontal', 'skip'), - action_prob=(1, 0, 0), - scale=(0.8, 1.2), - dx=15, - dy=15, - theta=(-1, 1), - color_prob=0.5, - hflag=False, - aug_ratio=0.5), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) -# learning policy -lr_config = dict(step=[32, 44]) -runner = dict(type='EpochBasedRunner', max_epochs=48) diff --git a/cv/detection/co-detr/pytorch/configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py 
b/cv/detection/co-detr/pytorch/configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py deleted file mode 100644 index d67b7992ab1fa5d8190ff1a0d0c52a0e832c205d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py b/cv/detection/co-detr/pytorch/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py deleted file mode 100644 index ebbb43e918753e464a8e1e7f9ff1fed702c1b64d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py b/cv/detection/co-detr/pytorch/configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py deleted file mode 100644 index 55ca62b7bc6c9cdc97018bcfbe5b109038470dd3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py +++ /dev/null @@ -1,28 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='InstaBoost', - action_candidate=('normal', 'horizontal', 'skip'), - action_prob=(1, 0, 0), - scale=(0.8, 1.2), - dx=15, - dy=15, - theta=(-1, 1), - color_prob=0.5, - hflag=False, - aug_ratio=0.5), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) -# learning policy -lr_config = dict(step=[32, 44]) -runner = dict(type='EpochBasedRunner', max_epochs=48) diff --git a/cv/detection/co-detr/pytorch/configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py b/cv/detection/co-detr/pytorch/configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py deleted file mode 100644 index 2010f44819f625f7da5196270f3721274a390881..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git 
a/cv/detection/co-detr/pytorch/configs/instaboost/metafile.yml b/cv/detection/co-detr/pytorch/configs/instaboost/metafile.yml deleted file mode 100644 index 325283d369628ab51552dabe2ab05a8a9a5f2d13..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/instaboost/metafile.yml +++ /dev/null @@ -1,99 +0,0 @@ -Collections: - - Name: InstaBoost - Metadata: - Training Data: COCO - Training Techniques: - - InstaBoost - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Paper: - URL: https://arxiv.org/abs/1908.07801 - Title: 'Instaboost: Boosting instance segmentation via probability map guided copy-pasting' - README: configs/instaboost/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/datasets/pipelines/instaboost.py#L7 - Version: v2.0.0 - -Models: - - Name: mask_rcnn_r50_fpn_instaboost_4x_coco - In Collection: InstaBoost - Config: configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py - Metadata: - Training Memory (GB): 4.4 - inference time (ms/im): - - value: 57.14 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 48 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.6 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco/mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-d025f83a.pth - - - Name: mask_rcnn_r101_fpn_instaboost_4x_coco - In Collection: InstaBoost - Config: configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py - Metadata: - Training Memory (GB): 6.4 - Epochs: 48 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco/mask_rcnn_r101_fpn_instaboost_4x_coco_20200703_235738-f23f3a5f.pth - - - Name: mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco - In Collection: InstaBoost - Config: configs/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py - Metadata: - Training Memory (GB): 10.7 - Epochs: 48 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco/mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco_20200515_080947-8ed58c1b.pth - - - Name: cascade_mask_rcnn_r50_fpn_instaboost_4x_coco - In Collection: InstaBoost - Config: configs/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py - Metadata: - Training Memory (GB): 6.0 - inference time (ms/im): - - value: 83.33 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 48 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/instaboost/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco/cascade_mask_rcnn_r50_fpn_instaboost_4x_coco_20200307-c19d98d9.pth diff --git a/cv/detection/co-detr/pytorch/configs/lad/README.md b/cv/detection/co-detr/pytorch/configs/lad/README.md deleted file mode 100644 index f2b7c20650265bfb20dbc5730dbc588db221c5ae..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/lad/README.md 
+++ /dev/null @@ -1,44 +0,0 @@ -# LAD - -> [Improving Object Detection by Label Assignment Distillation](https://arxiv.org/abs/2108.10520) - - - -## Abstract - -Label assignment in object detection aims to assign targets, foreground or background, to sampled regions in an image. Unlike labeling for image classification, this problem is not well defined due to the object's bounding box. In this paper, we investigate the problem from the perspective of distillation, hence the name Label Assignment Distillation (LAD). Our initial motivation is very simple: we use a teacher network to generate labels for the student. This can be achieved in two ways: either using the teacher's predictions as the direct targets (soft label), or through the hard labels dynamically assigned by the teacher (LAD). Our experiments reveal that: (i) LAD is more effective than the soft label, but the two are complementary; (ii) using LAD, a smaller teacher can also improve a larger student significantly, while the soft label cannot. We then introduce Co-learning LAD, in which two networks simultaneously learn from scratch and the roles of teacher and student are dynamically interchanged. Using PAA-ResNet50 as a teacher, our LAD techniques can improve the detectors PAA-ResNet101 and PAA-ResNeXt101 to 46 AP and 47.5 AP on the COCO test-dev set. With a stronger teacher, PAA-SwinB, we improve the student PAA-ResNet50 to 43.7 AP with only a 1x training schedule and standard settings, and PAA-ResNet101 to 47.9 AP, significantly surpassing the current methods. - -
- -
- -## Results and Models - -We provide config files to reproduce the object detection results in the -WACV 2022 paper for Improving Object Detection by Label Assignment -Distillation. - -### PAA with LAD - -| Teacher | Student | Training schedule | AP (val) | Config | Download | -| :-----: | :-----: | :---------------: | :------: | :---------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| -- | R-50 | 1x | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.log.json) | -| -- | R-101 | 1x | 42.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.log.json) | -| R-101 | R-50 | 1x | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lad/lad_r50_paa_r101_fpn_coco_1x.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lad/lad_r50_paa_r101_fpn_coco_1x/lad_r50_paa_r101_fpn_coco_1x_20220708_124246-74c76ff0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lad/lad_r50_paa_r101_fpn_coco_1x/lad_r50_paa_r101_fpn_coco_1x_20220708_124246.log.json) | -| R-50 | R-101 | 1x | 43.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lad/lad_r101_paa_r50_fpn_coco_1x.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lad/lad_r101_paa_r50_fpn_coco_1x/lad_r101_paa_r50_fpn_coco_1x_20220708_124357-9407ac54.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lad/lad_r101_paa_r50_fpn_coco_1x/lad_r101_paa_r50_fpn_coco_1x_20220708_124357.log.json) | - -## Note - -- Meaning of Config name: lad_r50(student model)\_paa(based on paa)\_r101(teacher model)\_fpn(neck)\_coco(dataset)\_1x(12 epoch).py -- Results may fluctuate by about 0.2 mAP. - -## Citation - -```latex -@inproceedings{nguyen2021improving, - title={Improving Object Detection by Label Assignment Distillation}, - author={Chuong H. Nguyen and Thuy C. Nguyen and Tuan N. Tang and Nam L. H. 
Phan}, - booktitle = {WACV}, - year={2022} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/lad/lad_r101_paa_r50_fpn_coco_1x.py b/cv/detection/co-detr/pytorch/configs/lad/lad_r101_paa_r50_fpn_coco_1x.py deleted file mode 100644 index 4877d95ba6ccf906081b7f5f62966eb090b305ad..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/lad/lad_r101_paa_r50_fpn_coco_1x.py +++ /dev/null @@ -1,126 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa -model = dict( - type='LAD', - # student - backbone=dict( - type='ResNet', - depth=101, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5), - bbox_head=dict( - type='LADHead', - reg_decoded_bbox=True, - score_voting=True, - topk=9, - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=1.3), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), - # teacher - teacher_ckpt=teacher_ckpt, - teacher_backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch'), - teacher_neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5), - teacher_bbox_head=dict( - type='LADHead', - reg_decoded_bbox=True, - score_voting=True, - topk=9, - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=1.3), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.1, - neg_iou_thr=0.1, - min_pos_iou=0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - score_voting=True, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) -data = dict(samples_per_gpu=8, workers_per_gpu=4) -optimizer = dict(lr=0.01) -fp16 = dict(loss_scale=512.) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/lad/lad_r50_paa_r101_fpn_coco_1x.py b/cv/detection/co-detr/pytorch/configs/lad/lad_r50_paa_r101_fpn_coco_1x.py deleted file mode 100644 index 29bbe693d3c12094a76d78d588e04fa3fdf9552d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/lad/lad_r50_paa_r101_fpn_coco_1x.py +++ /dev/null @@ -1,125 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa -model = dict( - type='LAD', - # student - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5), - bbox_head=dict( - type='LADHead', - reg_decoded_bbox=True, - score_voting=True, - topk=9, - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=1.3), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), - # teacher - teacher_ckpt=teacher_ckpt, - teacher_backbone=dict( - type='ResNet', - depth=101, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch'), - teacher_neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5), - teacher_bbox_head=dict( - type='LADHead', - reg_decoded_bbox=True, - score_voting=True, - topk=9, - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=1.3), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.1, - neg_iou_thr=0.1, - min_pos_iou=0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - score_voting=True, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) -data = dict(samples_per_gpu=8, workers_per_gpu=4) -optimizer = dict(lr=0.01) -fp16 = dict(loss_scale=512.) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/lad/metafile.yml b/cv/detection/co-detr/pytorch/configs/lad/metafile.yml deleted file mode 100644 index 11a9fa9e7fa2d2215f42a91d56603a9ae2bd0569..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/lad/metafile.yml +++ /dev/null @@ -1,45 +0,0 @@ -Collections: - - Name: Label Assignment Distillation - Metadata: - Training Data: COCO - Training Techniques: - - Label Assignment Distillation - - SGD with Momentum - - Weight Decay - Training Resources: 2x V100 GPUs - Architecture: - - FPN - - ResNet - Paper: - URL: https://arxiv.org/abs/2108.10520 - Title: 'Improving Object Detection by Label Assignment Distillation' - README: configs/lad/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.19.0/mmdet/models/detectors/lad.py#L10 - Version: v2.19.0 - -Models: - - Name: lad_r101_paa_r50_fpn_coco_1x - In Collection: Label Assignment Distillation - Config: configs/lad/lad_r101_paa_r50_fpn_coco_1x.py - Metadata: - Training Memory (GB): 12.4 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/lad/lad_r101_paa_r50_fpn_coco_1x/lad_r101_paa_r50_fpn_coco_1x_20220708_124357-9407ac54.pth - - Name: lad_r50_paa_r101_fpn_coco_1x - In Collection: Label Assignment Distillation - Config: configs/lad/lad_r50_paa_r101_fpn_coco_1x.py - Metadata: - Training Memory (GB): 8.9 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/lad/lad_r50_paa_r101_fpn_coco_1x/lad_r50_paa_r101_fpn_coco_1x_20220708_124246-74c76ff0.pth diff --git a/cv/detection/co-detr/pytorch/configs/ld/README.md b/cv/detection/co-detr/pytorch/configs/ld/README.md deleted file mode 100644 index 01097294af979a77044972d79b85d8947bd8f6f7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ld/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# LD - -> [Localization Distillation for Dense Object Detection](https://arxiv.org/abs/2102.12252) - - - -## Abstract - -Knowledge distillation (KD) has witnessed its powerful capability in learning compact models in object detection. Previous KD methods for object detection mostly focus on imitating deep features within the imitation regions instead of mimicking classification logits due to its inefficiency in distilling localization information. In this paper, by reformulating the knowledge distillation process on localization, we present a novel localization distillation (LD) method which can efficiently transfer the localization knowledge from the teacher to the student. Moreover, we also heuristically introduce the concept of valuable localization region that can aid to selectively distill the semantic and localization knowledge for a certain region. Combining these two new components, for the first time, we show that logit mimicking can outperform feature imitation and localization knowledge distillation is more important and efficient than semantic knowledge for distilling object detectors. Our distillation scheme is simple as well as effective and can be easily applied to different dense object detectors. 
Experiments show that our LD can boost the AP score of GFocal-ResNet-50 with a single-scale 1× training schedule from 40.1 to 42.1 on the COCO benchmark without any sacrifice in inference speed.
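The `loss_ld` entry in the configs that follow realizes this logit mimicking as a temperature-scaled KL divergence between the teacher's and the student's discretized box-edge distributions (`KnowledgeDistillationKLDivLoss` with `T=10`). The snippet below is only a minimal, self-contained sketch of that term, not the mmdet implementation; shapes assume the `reg_max=16` setting used in these configs.

```python
import torch
import torch.nn.functional as F

def ld_kl_loss(student_logits, teacher_logits, T=10.0):
    # Both inputs are (N, reg_max + 1) logits over the discretized box-edge
    # distribution; the teacher branch is detached so only the student learns.
    soft_target = F.softmax(teacher_logits / T, dim=1).detach()
    log_pred = F.log_softmax(student_logits / T, dim=1)
    # The T^2 factor keeps gradient magnitudes comparable when logits are softened.
    return F.kl_div(log_pred, soft_target, reduction='none').sum(dim=1).mean() * (T * T)

# Toy usage: 8 box edges, reg_max=16 -> 17 bins per edge.
student = torch.randn(8, 17, requires_grad=True)
teacher = torch.randn(8, 17)
ld_kl_loss(student, teacher).backward()
```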
- -## Results and Models - -### GFocalV1 with LD - -| Teacher | Student | Training schedule | Mini-batch size | AP (val) | Config | Download | -| :-------: | :-----: | :---------------: | :-------------: | :------: | :-------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| -- | R-18 | 1x | 6 | 35.8 | | | -| R-101 | R-18 | 1x | 6 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r18_gflv1_r101_fpn_coco_1x/ld_r18_gflv1_r101_fpn_coco_1x_20220702_062206-330e6332.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r18_gflv1_r101_fpn_coco_1x/ld_r18_gflv1_r101_fpn_coco_1x_20220702_062206.log.json) | -| -- | R-34 | 1x | 6 | 38.9 | | | -| R-101 | R-34 | 1x | 6 | 39.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ld/ld_r34_gflv1_r101_fpn_coco_1x.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r34_gflv1_r101_fpn_coco_1x/ld_r34_gflv1_r101_fpn_coco_1x_20220630_134007-9bc69413.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r34_gflv1_r101_fpn_coco_1x/ld_r34_gflv1_r101_fpn_coco_1x_20220630_134007.log.json) | -| -- | R-50 | 1x | 6 | 40.1 | | | -| R-101 | R-50 | 1x | 6 | 41.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r50_gflv1_r101_fpn_coco_1x/ld_r50_gflv1_r101_fpn_coco_1x_20220629_145355-8dc5bad8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r50_gflv1_r101_fpn_coco_1x/ld_r50_gflv1_r101_fpn_coco_1x_20220629_145355.log.json) | -| -- | R-101 | 2x | 6 | 44.6 | | | -| R-101-DCN | R-101 | 2x | 6 | 45.5 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x/ld_r101_gflv1_r101dcn_fpn_coco_2x_20220629_185920-9e658426.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x/ld_r101_gflv1_r101dcn_fpn_coco_2x_20220629_185920.log.json) | - -## Note - -- Meaning of Config name: ld_r18(student model)\_gflv1(based on gflv1)\_r101(teacher model)\_fpn(neck)\_coco(dataset)\_1x(12 epoch).py - -## Citation - -```latex -@Inproceedings{zheng2022LD, - title={Localization Distillation for Dense Object Detection}, - author= {Zheng, Zhaohui and Ye, Rongguang and Wang, Ping and Ren, Dongwei and Zuo, Wangmeng and Hou, Qibin and Cheng, Mingming}, - booktitle={CVPR}, - year={2022} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x.py b/cv/detection/co-detr/pytorch/configs/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x.py deleted file mode 100644 index 1cbdb4cf5a5d5afa60327d80b31475500d5f3c6c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x.py +++ /dev/null @@ -1,44 +0,0 @@ -_base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py'] -teacher_ckpt = 
'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa -model = dict( - teacher_config='configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py', - teacher_ckpt=teacher_ckpt, - backbone=dict( - type='ResNet', - depth=101, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5)) - -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) -# multi-scale training -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 480), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -data = dict(train=dict(pipeline=train_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py b/cv/detection/co-detr/pytorch/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py deleted file mode 100644 index 18dce814be9036e6af70389fc60a5b4e42bc8efe..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py +++ /dev/null @@ -1,62 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth' # noqa -model = dict( - type='KnowledgeDistillationSingleStageDetector', - teacher_config='configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py', - teacher_ckpt=teacher_ckpt, - backbone=dict( - type='ResNet', - depth=18, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), - neck=dict( - type='FPN', - in_channels=[64, 128, 256, 512], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5), - bbox_head=dict( - type='LDHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128]), - loss_cls=dict( - type='QualityFocalLoss', - use_sigmoid=True, - beta=2.0, - loss_weight=1.0), - loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), - loss_ld=dict( - type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10), - reg_max=16, - loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), - # training and testing settings - train_cfg=dict( - assigner=dict(type='ATSSAssigner', topk=9), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) - -optimizer = dict(type='SGD', 
lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/ld/ld_r34_gflv1_r101_fpn_coco_1x.py b/cv/detection/co-detr/pytorch/configs/ld/ld_r34_gflv1_r101_fpn_coco_1x.py deleted file mode 100644 index 3b6996d49b06ffcd0803e86cb33f8a35b02911dc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ld/ld_r34_gflv1_r101_fpn_coco_1x.py +++ /dev/null @@ -1,19 +0,0 @@ -_base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py'] -model = dict( - backbone=dict( - type='ResNet', - depth=34, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet34')), - neck=dict( - type='FPN', - in_channels=[64, 128, 256, 512], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py b/cv/detection/co-detr/pytorch/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py deleted file mode 100644 index 2b18785ae41f6fd11a933ca046a34b967306f9b6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py +++ /dev/null @@ -1,19 +0,0 @@ -_base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py'] -model = dict( - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/ld/metafile.yml b/cv/detection/co-detr/pytorch/configs/ld/metafile.yml deleted file mode 100644 index 2055e32500d22e37cf1556487ce8e65dad71c07d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ld/metafile.yml +++ /dev/null @@ -1,69 +0,0 @@ -Collections: - - Name: Localization Distillation - Metadata: - Training Data: COCO - Training Techniques: - - Localization Distillation - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - ResNet - Paper: - URL: https://arxiv.org/abs/2102.12252 - Title: 'Localization Distillation for Dense Object Detection' - README: configs/ld/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.11.0/mmdet/models/dense_heads/ld_head.py#L11 - Version: v2.11.0 - -Models: - - Name: ld_r18_gflv1_r101_fpn_coco_1x - In Collection: Localization Distillation - Config: configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py - Metadata: - Training Memory (GB): 1.8 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 36.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r18_gflv1_r101_fpn_coco_1x/ld_r18_gflv1_r101_fpn_coco_1x_20220702_062206-330e6332.pth - - Name: ld_r34_gflv1_r101_fpn_coco_1x - In Collection: Localization Distillation - Config: configs/ld/ld_r34_gflv1_r101_fpn_coco_1x.py - Metadata: - Training Memory (GB): 2.2 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r34_gflv1_r101_fpn_coco_1x/ld_r34_gflv1_r101_fpn_coco_1x_20220630_134007-9bc69413.pth - - Name: ld_r50_gflv1_r101_fpn_coco_1x - In Collection: Localization Distillation - 
Config: configs/ld/ld_r50_gflv1_r101_fpn_coco_1x.py - Metadata: - Training Memory (GB): 3.6 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r50_gflv1_r101_fpn_coco_1x/ld_r50_gflv1_r101_fpn_coco_1x_20220629_145355-8dc5bad8.pth - - Name: ld_r101_gflv1_r101dcn_fpn_coco_2x - In Collection: Localization Distillation - Config: configs/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x.py - Metadata: - Training Memory (GB): 5.5 - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ld/ld_r101_gflv1_r101dcn_fpn_coco_2x/ld_r101_gflv1_r101dcn_fpn_coco_2x_20220629_185920-9e658426.pth diff --git a/cv/detection/co-detr/pytorch/configs/legacy_1.x/README.md b/cv/detection/co-detr/pytorch/configs/legacy_1.x/README.md deleted file mode 100644 index c48477f035bae7f73c46b8a7a70fe28ff57fbc65..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/legacy_1.x/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# Legacy Configs in MMDetection V1.x - - - -Configs in this directory implement the legacy configs used by MMDetection V1.x and its model zoos. - -To help users convert their models from V1.x to MMDetection V2.0, we provide V1.x configs for running inference with the converted V1.x models. -Due to the BC-breaking changes in MMDetection V2.0 compared with MMDetection V1.x, running inference with the same model weights in these two versions will produce different results. The difference is within 1% absolute AP, as can be seen in the table below. - -## Usage - -To upgrade the model version, users need to take the following steps. - -### 1. Convert model weights - -There are three main differences in the model weights between the V1.x and V2.0 codebases. - -1. Since the class order in every detector's classification branch is reordered, all legacy model weights need to go through the conversion process. -2. The regression and segmentation heads no longer contain the background channel. Weights in these background channels should be removed to match the current codebase. -3. For two-stage detectors, their weights need to be upgraded since MMDetection V2.0 refactors all two-stage detectors with `RoIHead`. -The users can apply the same modifications to their self-implemented -detectors. We provide a script `tools/model_converters/upgrade_model_version.py` to convert the model weights in the V1.x model zoo. -```bash -python tools/model_converters/upgrade_model_version.py ${OLD_MODEL_PATH} ${NEW_MODEL_PATH} --num-classes ${NUM_CLASSES} - -``` - -- OLD_MODEL_PATH: the path to load the V1.x model weights from. -- NEW_MODEL_PATH: the path to save the converted V2.0 model weights to. -- NUM_CLASSES: number of classes of the original model weights. It is usually 81 for the COCO dataset and 21 for the VOC dataset. - The number of classes in the V2.0 models should equal that of the V1.x models - 1. - -### 2. Use configs with legacy settings - -After converting the model weights, check out the v1.2 release to find the corresponding config file that uses the legacy settings. -The V1.x models usually need these three legacy modules: `LegacyAnchorGenerator`, `LegacyDeltaXYWHBBoxCoder`, and `RoIAlign(align=False)`. -For models using ResNet Caffe backbones, the pretrained checkpoint name and the corresponding `img_norm_cfg` also need to be changed.
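Condensed to its essentials, such a legacy config only overrides a few modules on top of the usual `_base_` files. The snippet below is an illustrative sketch assembled from the full `*_v1.py` configs further down, not an additional file in this directory:

```python
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    rpn_head=dict(
        # V1.x anchors are centered differently, hence center_offset=0.5.
        anchor_generator=dict(type='LegacyAnchorGenerator', center_offset=0.5),
        # V1.x box delta encoding/decoding.
        bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder')),
    roi_head=dict(
        bbox_roi_extractor=dict(
            roi_layer=dict(
                # V1.x RoIAlign without the half-pixel "aligned" correction.
                type='RoIAlign', output_size=7, sampling_ratio=2,
                aligned=False))))
```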
-An example is in [`retinanet_r50_caffe_fpn_1x_coco_v1.py`](retinanet_r50_caffe_fpn_1x_coco_v1.py) -Then use the config to test the model weights. For most models, the obtained results should be close to that in V1.x. -We provide configs of some common structures in this directory. - -## Performance - -The performance change after converting the models in this directory are listed as the following. - -| Method | Style | Lr schd | V1.x box AP | V1.x mask AP | V2.0 box AP | V2.0 mask AP | Config | Download | -| :-------------------------: | :-----: | :-----: | :---------: | :----------: | :---------: | :----------: | :------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------: | -| Mask R-CNN R-50-FPN | pytorch | 1x | 37.3 | 34.2 | 36.8 | 33.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/legacy_1.x/mask_rcnn_r50_fpn_1x_coco_v1.py) | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/mask_rcnn_r50_fpn_1x_20181010-069fa190.pth) | -| RetinaNet R-50-FPN | caffe | 1x | 35.8 | - | 35.4 | - | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/legacy_1.x/retinanet_r50_caffe_1x_coco_v1.py) | | -| RetinaNet R-50-FPN | pytorch | 1x | 35.6 | - | 35.2 | - | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py) | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/retinanet_r50_fpn_1x_20181125-7b0c2548.pth) | -| Cascade Mask R-CNN R-50-FPN | pytorch | 1x | 41.2 | 35.7 | 40.8 | 35.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/legacy_1.x/cascade_mask_rcnn_r50_fpn_1x_coco_v1.py) | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_mask_rcnn_r50_fpn_1x_20181123-88b170c9.pth) | -| SSD300-VGG16 | caffe | 120e | 25.7 | - | 25.4 | - | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/legacy_1.x/ssd300_coco_v1.py) | [model](https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/ssd300_coco_vgg16_caffe_120e_20181221-84d7110b.pth) | diff --git a/cv/detection/co-detr/pytorch/configs/legacy_1.x/cascade_mask_rcnn_r50_fpn_1x_coco_v1.py b/cv/detection/co-detr/pytorch/configs/legacy_1.x/cascade_mask_rcnn_r50_fpn_1x_coco_v1.py deleted file mode 100644 index fc9d0048188406348416fe5012af9985f62bbb56..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/legacy_1.x/cascade_mask_rcnn_r50_fpn_1x_coco_v1.py +++ /dev/null @@ -1,79 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - type='CascadeRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - anchor_generator=dict(type='LegacyAnchorGenerator', center_offset=0.5), - bbox_coder=dict( - type='LegacyDeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0])), - roi_head=dict( - 
bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict( - type='RoIAlign', - output_size=7, - sampling_ratio=2, - aligned=False)), - bbox_head=[ - dict( - type='Shared2FCBBoxHead', - reg_class_agnostic=True, - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='LegacyDeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2])), - dict( - type='Shared2FCBBoxHead', - reg_class_agnostic=True, - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='LegacyDeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1])), - dict( - type='Shared2FCBBoxHead', - reg_class_agnostic=True, - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='LegacyDeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067])), - ], - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict( - type='RoIAlign', - output_size=14, - sampling_ratio=2, - aligned=False)))) -dist_params = dict(backend='nccl', port=29515) diff --git a/cv/detection/co-detr/pytorch/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py b/cv/detection/co-detr/pytorch/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py deleted file mode 100644 index 8c573bef34628babaee43183b260cd06e22b7c46..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py +++ /dev/null @@ -1,38 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - type='FasterRCNN', - backbone=dict( - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - rpn_head=dict( - type='RPNHead', - anchor_generator=dict( - type='LegacyAnchorGenerator', - center_offset=0.5, - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict( - type='RoIAlign', - output_size=7, - sampling_ratio=2, - aligned=False), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn_proposal=dict(max_per_img=2000), - rcnn=dict(assigner=dict(match_low_quality=True)))) diff --git a/cv/detection/co-detr/pytorch/configs/legacy_1.x/mask_rcnn_r50_fpn_1x_coco_v1.py b/cv/detection/co-detr/pytorch/configs/legacy_1.x/mask_rcnn_r50_fpn_1x_coco_v1.py deleted file mode 100644 index 04581bbc901d0fda0ec8c6b4a8078ae04f21473a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/legacy_1.x/mask_rcnn_r50_fpn_1x_coco_v1.py +++ /dev/null @@ -1,34 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - rpn_head=dict( - anchor_generator=dict(type='LegacyAnchorGenerator', center_offset=0.5), - bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - 
roi_head=dict( - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict( - type='RoIAlign', - output_size=7, - sampling_ratio=2, - aligned=False)), - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict( - type='RoIAlign', - output_size=14, - sampling_ratio=2, - aligned=False)), - bbox_head=dict( - bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), - - # model training and testing settings - train_cfg=dict( - rpn_proposal=dict(max_per_img=2000), - rcnn=dict(assigner=dict(match_low_quality=True)))) diff --git a/cv/detection/co-detr/pytorch/configs/legacy_1.x/retinanet_r50_caffe_fpn_1x_coco_v1.py b/cv/detection/co-detr/pytorch/configs/legacy_1.x/retinanet_r50_caffe_fpn_1x_coco_v1.py deleted file mode 100644 index a63d248c435c8b7035f00299a6f97f1fc18e3be5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/legacy_1.x/retinanet_r50_caffe_fpn_1x_coco_v1.py +++ /dev/null @@ -1,41 +0,0 @@ -_base_ = './retinanet_r50_fpn_1x_coco_v1.py' -model = dict( - backbone=dict( - norm_cfg=dict(requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron/resnet50_caffe'))) -# use caffe img_norm -img_norm_cfg = dict( - mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py b/cv/detection/co-detr/pytorch/configs/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py deleted file mode 100644 index 6198b9717957374ce734ca74de5f54dda44123b9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - bbox_head=dict( - type='RetinaHead', - anchor_generator=dict( - type='LegacyAnchorGenerator', - center_offset=0.5, - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), - loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0))) diff --git a/cv/detection/co-detr/pytorch/configs/legacy_1.x/ssd300_coco_v1.py b/cv/detection/co-detr/pytorch/configs/legacy_1.x/ssd300_coco_v1.py deleted file mode 100644 index 65ccc1e542cbfadec76f4f0bf300075ab8d3f31d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/legacy_1.x/ssd300_coco_v1.py 
+++ /dev/null @@ -1,84 +0,0 @@ -_base_ = [ - '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' -] -# model settings -input_size = 300 -model = dict( - bbox_head=dict( - type='SSDHead', - anchor_generator=dict( - type='LegacySSDAnchorGenerator', - scale_major=False, - input_size=input_size, - basesize_ratio_range=(0.15, 0.9), - strides=[8, 16, 32, 64, 100, 300], - ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]), - bbox_coder=dict( - type='LegacyDeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]))) -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 4)), - dict( - type='MinIoURandomCrop', - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(300, 300), keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(300, 300), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=3, - train=dict( - _delete_=True, - type='RepeatDataset', - times=5, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) -optimizer_config = dict(_delete_=True) -dist_params = dict(backend='nccl', port=29555) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/libra_rcnn/README.md b/cv/detection/co-detr/pytorch/configs/libra_rcnn/README.md deleted file mode 100644 index 87a128aa2039c0db240c2fa24f416f1f0099860e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/libra_rcnn/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# Libra R-CNN - -> [Libra R-CNN: Towards Balanced Learning for Object Detection](https://arxiv.org/abs/1904.02701) - - - -## Abstract - -Compared with model architectures, the training process, which is also crucial to the success of detectors, has received relatively less attention in object detection. In this work, we carefully revisit the standard training practice of detectors, and find that the detection performance is often limited by the imbalance during the training process, which generally consists in three levels - sample level, feature level, and objective level. 
To mitigate the adverse effects caused thereby, we propose Libra R-CNN, a simple but effective framework towards balanced learning for object detection. It integrates three novel components: IoU-balanced sampling, balanced feature pyramid, and balanced L1 loss, respectively for reducing the imbalance at sample, feature, and objective level. Benefitted from the overall balanced design, Libra R-CNN significantly improves the detection performance. Without bells and whistles, it achieves 2.5 points and 2.0 points higher Average Precision (AP) than FPN Faster R-CNN and RetinaNet respectively on MSCOCO. - -Instance recognition is rapidly advanced along with the developments of various deep convolutional neural networks. Compared to the architectures of networks, the training process, which is also crucial to the success of detectors, has received relatively less attention. In this work, we carefully revisit the standard training practice of detectors, and find that the detection performance is often limited by the imbalance during the training process, which generally consists in three levels - sample level, feature level, and objective level. To mitigate the adverse effects caused thereby, we propose Libra R-CNN, a simple yet effective framework towards balanced learning for instance recognition. It integrates IoU-balanced sampling, balanced feature pyramid, and objective re-weighting, respectively for reducing the imbalance at sample, feature, and objective level. Extensive experiments conducted on MS COCO, LVIS and Pascal VOC datasets prove the effectiveness of the overall balanced design. - -
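Of the three components, the balanced L1 loss is the one that appears directly in the configs below as `loss_bbox=dict(type='BalancedL1Loss', alpha=0.5, gamma=1.5, ...)`. The following is a small PyTorch sketch of that loss with the same parameterization, not the exact mmdet code:

```python
import math
import torch

def balanced_l1_loss(pred, target, alpha=0.5, gamma=1.5, beta=1.0):
    # Up-weights gradients from inliers (|diff| < beta) relative to plain L1,
    # which is the "objective-level" rebalancing described above.
    diff = torch.abs(pred - target)
    b = math.exp(gamma / alpha) - 1  # chosen so both branches meet at diff == beta
    return torch.where(
        diff < beta,
        alpha / b * (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
        gamma * diff + gamma / b - alpha * beta).mean()

# Toy usage on 4 boxes (x, y, w, h regression deltas).
pred = torch.randn(4, 4, requires_grad=True)
target = torch.randn(4, 4)
balanced_l1_loss(pred, target).backward()
```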
- -## Results and Models - -The results on COCO 2017val are shown in the below table. (results on test-dev are usually slightly higher than val) - -| Architecture | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :----------: | :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :----------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Faster R-CNN | R-50-FPN | pytorch | 1x | 4.6 | 19.0 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | -| Fast R-CNN | R-50-FPN | pytorch | 1x | | | | | | -| Faster R-CNN | R-101-FPN | pytorch | 1x | 6.5 | 14.4 | 40.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco/libra_faster_rcnn_r101_fpn_1x_coco_20200203-8dba6a5a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco/libra_faster_rcnn_r101_fpn_1x_coco_20200203_001405.log.json) | -| Faster R-CNN | X-101-64x4d-FPN | pytorch | 1x | 10.8 | 8.5 | 42.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco/libra_faster_rcnn_x101_64x4d_fpn_1x_coco_20200315-3a7d0488.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco/libra_faster_rcnn_x101_64x4d_fpn_1x_coco_20200315_231625.log.json) | -| RetinaNet | R-50-FPN | pytorch | 1x | 4.2 | 17.7 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_retinanet_r50_fpn_1x_coco/libra_retinanet_r50_fpn_1x_coco_20200205-804d94ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_retinanet_r50_fpn_1x_coco/libra_retinanet_r50_fpn_1x_coco_20200205_112757.log.json) | - -## Citation - -We provide config files to reproduce the results in the CVPR 2019 paper [Libra R-CNN](https://arxiv.org/pdf/1904.02701.pdf). - -The extended version of [Libra R-CNN](https://arxiv.org/pdf/2108.10175.pdf) is accpeted by IJCV. 
- -```latex -@inproceedings{pang2019libra, - title={Libra R-CNN: Towards Balanced Learning for Object Detection}, - author={Pang, Jiangmiao and Chen, Kai and Shi, Jianping and Feng, Huajun and Ouyang, Wanli and Dahua Lin}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - year={2019} -} - -@article{pang2021towards, - title={Towards Balanced Learning for Instance Recognition}, - author={Pang, Jiangmiao and Chen, Kai and Li, Qi and Xu, Zhihai and Feng, Huajun and Shi, Jianping and Ouyang, Wanli and Lin, Dahua}, - journal={International Journal of Computer Vision}, - volume={129}, - number={5}, - pages={1376--1393}, - year={2021}, - publisher={Springer} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_fast_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_fast_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index efbedc863c7eeeaef331121416141334906fef3d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_fast_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,50 +0,0 @@ -_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py' -# model settings -model = dict( - neck=[ - dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - dict( - type='BFP', - in_channels=256, - num_levels=5, - refine_level=2, - refine_type='non_local') - ], - roi_head=dict( - bbox_head=dict( - loss_bbox=dict( - _delete_=True, - type='BalancedL1Loss', - alpha=0.5, - gamma=1.5, - beta=1.0, - loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rcnn=dict( - sampler=dict( - _delete_=True, - type='CombinedSampler', - num=512, - pos_fraction=0.25, - add_gt_as_proposals=True, - pos_sampler=dict(type='InstanceBalancedPosSampler'), - neg_sampler=dict( - type='IoUBalancedNegSampler', - floor_thr=-1, - floor_fraction=0, - num_bins=3))))) -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -data = dict( - train=dict(proposal_file=data_root + - 'libra_proposals/rpn_r50_fpn_1x_train2017.pkl'), - val=dict(proposal_file=data_root + - 'libra_proposals/rpn_r50_fpn_1x_val2017.pkl'), - test=dict(proposal_file=data_root + - 'libra_proposals/rpn_r50_fpn_1x_val2017.pkl')) diff --git a/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py deleted file mode 100644 index e899706b8ca7780a95b41de14b85b05b427f9595..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index 89a0d7b2bd83216dfc4db120fe9f610b23376681..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,41 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -# model settings -model = dict( - neck=[ - dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - dict( - type='BFP', - in_channels=256, - num_levels=5, - refine_level=2, - 
refine_type='non_local') - ], - roi_head=dict( - bbox_head=dict( - loss_bbox=dict( - _delete_=True, - type='BalancedL1Loss', - alpha=0.5, - gamma=1.5, - beta=1.0, - loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict(sampler=dict(neg_pos_ub=5), allowed_border=-1), - rcnn=dict( - sampler=dict( - _delete_=True, - type='CombinedSampler', - num=512, - pos_fraction=0.25, - add_gt_as_proposals=True, - pos_sampler=dict(type='InstanceBalancedPosSampler'), - neg_sampler=dict( - type='IoUBalancedNegSampler', - floor_thr=-1, - floor_fraction=0, - num_bins=3))))) diff --git a/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index 06740a778f821d74b5206a9cada969bfee0a84cf..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py deleted file mode 100644 index be2742098fb8f1e46bbb16c9d3e2e20c2e3083aa..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py +++ /dev/null @@ -1,26 +0,0 @@ -_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' -# model settings -model = dict( - neck=[ - dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_input', - num_outs=5), - dict( - type='BFP', - in_channels=256, - num_levels=5, - refine_level=1, - refine_type='non_local') - ], - bbox_head=dict( - loss_bbox=dict( - _delete_=True, - type='BalancedL1Loss', - alpha=0.5, - gamma=1.5, - beta=0.11, - loss_weight=1.0))) diff --git a/cv/detection/co-detr/pytorch/configs/libra_rcnn/metafile.yml b/cv/detection/co-detr/pytorch/configs/libra_rcnn/metafile.yml deleted file mode 100644 index 8c3279595f5743a6c616a9199b4c8c9614b89e80..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/libra_rcnn/metafile.yml +++ /dev/null @@ -1,99 +0,0 @@ -Collections: - - Name: Libra R-CNN - Metadata: - Training Data: COCO - Training Techniques: - - IoU-Balanced Sampling - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Balanced Feature Pyramid - Paper: - URL: https://arxiv.org/abs/1904.02701 - Title: 'Libra R-CNN: Towards Balanced Learning for Object Detection' - README: configs/libra_rcnn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/bfp.py#L10 - Version: v2.0.0 - -Models: - - Name: libra_faster_rcnn_r50_fpn_1x_coco - In Collection: Libra R-CNN - Config: configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 4.6 - inference time (ms/im): - - value: 52.63 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - 
Metrics: - box AP: 38.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth - - - Name: libra_faster_rcnn_r101_fpn_1x_coco - In Collection: Libra R-CNN - Config: configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.5 - inference time (ms/im): - - value: 69.44 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco/libra_faster_rcnn_r101_fpn_1x_coco_20200203-8dba6a5a.pth - - - Name: libra_faster_rcnn_x101_64x4d_fpn_1x_coco - In Collection: Libra R-CNN - Config: configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 10.8 - inference time (ms/im): - - value: 117.65 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco/libra_faster_rcnn_x101_64x4d_fpn_1x_coco_20200315-3a7d0488.pth - - - Name: libra_retinanet_r50_fpn_1x_coco - In Collection: Libra R-CNN - Config: configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 4.2 - inference time (ms/im): - - value: 56.5 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_retinanet_r50_fpn_1x_coco/libra_retinanet_r50_fpn_1x_coco_20200205-804d94ce.pth diff --git a/cv/detection/co-detr/pytorch/configs/lvis/README.md b/cv/detection/co-detr/pytorch/configs/lvis/README.md deleted file mode 100644 index 0c2760e29aedd8315c4914a77c22b0e267797a56..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/lvis/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# LVIS - -> [LVIS: A Dataset for Large Vocabulary Instance Segmentation](https://arxiv.org/abs/1908.03195) - - - -## Abstract - -Progress on object detection is enabled by datasets that focus the research community's attention on open challenges. This process led us from simple images to complex scenes and from bounding boxes to segmentation masks. In this work, we introduce LVIS (pronounced \`el-vis'): a new dataset for Large Vocabulary Instance Segmentation. We plan to collect ~2 million high-quality instance segmentation masks for over 1000 entry-level object categories in 164k images. Due to the Zipfian distribution of categories in natural images, LVIS naturally has a long tail of categories with few training samples. Given that state-of-the-art deep learning methods for object detection perform poorly in the low-sample regime, we believe that our dataset poses an important and exciting new scientific challenge. - -
- -## Common Setting - -- Please follow [install guide](../../docs/get_started.md#install-mmdetection) to install open-mmlab forked cocoapi first. - -- Run following scripts to install our forked lvis-api. - - ```shell - pip install git+https://github.com/lvis-dataset/lvis-api.git - ``` - -- All experiments use oversample strategy [here](../../docs/tutorials/customize_dataset.md#class-balanced-dataset) with oversample threshold `1e-3`. - -- The size of LVIS v0.5 is half of COCO, so schedule `2x` in LVIS is roughly the same iterations as `1x` in COCO. - -## Results and models of LVIS v0.5 - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | pytorch | 2x | - | - | 26.1 | 25.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis-dbd06831.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_20200531_160435.log.json) | -| R-101-FPN | pytorch | 2x | - | - | 27.1 | 27.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis-54582ee2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_20200601_134748.log.json) | -| X-101-32x4d-FPN | pytorch | 2x | - | - | 26.7 | 26.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis-3cf55ea2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_20200531_221749.log.json) | -| X-101-64x4d-FPN | pytorch | 2x | - | - | 26.4 | 26.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis-1c99a5ad.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_20200601_194651.log.json) | - -## Results and models of LVIS v1 - -| Backbone | 
Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | pytorch | 1x | 9.1 | - | 22.5 | 21.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1-aa78ac3d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_061305.log.json) | -| R-101-FPN | pytorch | 1x | 10.8 | - | 24.6 | 23.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1-ec55ce32.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_070959.log.json) | -| X-101-32x4d-FPN | pytorch | 1x | 11.8 | - | 26.7 | 25.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-ebbc5c81.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_071317.log.json) | -| X-101-64x4d-FPN | pytorch | 1x | 14.6 | - | 27.2 | 25.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-43d9edfe.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-20200830_060206.log.json) | - -## Citation - -```latex -@inproceedings{gupta2019lvis, - title={{LVIS}: A Dataset for Large Vocabulary Instance Segmentation}, - author={Gupta, Agrim and Dollar, Piotr and Girshick, Ross}, - booktitle={Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py deleted file mode 100644 index 
0f017f585c78d9d8e1eebaeca0a9a6c518a6295a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py b/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py deleted file mode 100644 index 637f4a63a55d24133a994eacc1e7a6521bfa3b9f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py deleted file mode 100644 index 92ddb526d7ea7a011e10aa82cbd1bd62773b35d6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py +++ /dev/null @@ -1,31 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/lvis_v1_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - roi_head=dict( - bbox_head=dict(num_classes=1203), mask_head=dict(num_classes=1203)), - test_cfg=dict( - rcnn=dict( - score_thr=0.0001, - # LVIS allows up to 300 - max_per_img=300))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(dataset=dict(pipeline=train_pipeline))) diff --git a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py b/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py deleted file mode 100644 index d53c5dc6a1470e4cca209a26c8261dd66c60e9b1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py +++ /dev/null @@ -1,31 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/lvis_v0.5_instance.py', - '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' -] -model = dict( - roi_head=dict( - bbox_head=dict(num_classes=1230), mask_head=dict(num_classes=1230)), - test_cfg=dict( - rcnn=dict( - score_thr=0.0001, - # LVIS allows up to 300 - max_per_img=300))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', 
with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(dataset=dict(pipeline=train_pipeline))) diff --git a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py deleted file mode 100644 index a6115c1ad03317e32915102212cf878101fa671d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py b/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py deleted file mode 100644 index 96b625230f37906e32ad872b6e947285432f60d6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py deleted file mode 100644 index 0f95a7321d9a7b7f9cb98adf31d6238156c21de6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py b/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py deleted file mode 100644 index 986acda589899e49c7d22df6455200e22bc5a940..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = 
'./mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/mask2former/README.md b/cv/detection/co-detr/pytorch/configs/mask2former/README.md deleted file mode 100644 index ebce50d84b5f857938280ed2817ab76f0f63146f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask2former/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Mask2Former - -> [Masked-attention Mask Transformer for Universal Image Segmentation](http://arxiv.org/abs/2112.01527) - - - -## Abstract - -Image segmentation is about grouping pixels with different semantics, e.g., category or instance membership, where each choice of semantics defines a task. While only the semantics of each task differ, current research focuses on designing specialized architectures for each task. We present Masked-attention Mask Transformer (Mask2Former), a new architecture capable of addressing any image segmentation task (panoptic, instance or semantic). Its key components include masked attention, which extracts localized features by constraining cross-attention within predicted mask regions. In addition to reducing the research effort by at least three times, it outperforms the best specialized architectures by a significant margin on four popular datasets. Most notably, Mask2Former sets a new state-of-the-art for panoptic segmentation (57.8 PQ on COCO), instance segmentation (50.1 AP on COCO) and semantic segmentation (57.7 mIoU on ADE20K). - -
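The key mechanism named above — masked attention that constrains each query's cross-attention to its predicted mask region — can be sketched in a few lines. This is an illustrative PyTorch fragment written for this note (tensor names, shapes, and the boolean mask convention are assumptions), not code from the files removed in this diff:

```python
import torch

def masked_cross_attention(q, k, v, allow_mask):
    """Cross-attention restricted to each query's predicted mask region.

    q: (num_queries, dim) query embeddings
    k, v: (num_pixels, dim) per-pixel keys and values
    allow_mask: (num_queries, num_pixels) bool, True where attention is allowed
    """
    allow_mask = allow_mask.clone()
    # If a query's predicted mask is empty, fall back to full attention so the
    # softmax below stays finite (the reference implementation uses a similar guard).
    allow_mask[allow_mask.sum(dim=-1) == 0] = True
    scores = (q @ k.t()) / q.shape[-1] ** 0.5
    scores = scores.masked_fill(~allow_mask, float('-inf'))
    return scores.softmax(dim=-1) @ v
```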
- -## Introduction - -Mask2Former requires COCO and [COCO-panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) dataset for training and evaluation. You need to download and extract it in the COCO dataset path. -The directory should be like this. - -```none -mmdetection -├── mmdet -├── tools -├── configs -├── data -│ ├── coco -│ │ ├── annotations -| | | ├── instances_train2017.json -| | | ├── instances_val2017.json -│ │ │ ├── panoptic_train2017.json -│ │ │ ├── panoptic_train2017 -│ │ │ ├── panoptic_val2017.json -│ │ │ ├── panoptic_val2017 -│ │ ├── train2017 -│ │ ├── val2017 -│ │ ├── test2017 -``` - -## Results and Models - -### Panoptic segmentation - -| Backbone | style | Pretrain | Lr schd | Mem (GB) | Inf time (fps) | PQ | box mAP | mask mAP | Config | Download | -| :------: | :-----: | :----------: | :-----: | :------: | :------------: | :--: | :-----: | :------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | pytorch | ImageNet-1K | 50e | 13.9 | - | 51.9 | 44.8 | 41.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516.log.json) | -| R-101 | pytorch | ImageNet-1K | 50e | 16.1 | - | 52.4 | 45.3 | 42.4 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic/mask2former_r101_lsj_8x2_50e_coco-panoptic_20220329_225104-c54e64c9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic/mask2former_r101_lsj_8x2_50e_coco-panoptic_20220329_225104.log.json) | -| Swin-T | - | ImageNet-1K | 50e | 15.9 | - | 53.4 | 46.3 | 43.4 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220326_224553-fc567107.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220326_224553.log.json) | -| Swin-S | - | ImageNet-1K | 50e | 19.1 | - | 54.5 | 47.8 | 44.5 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220329_225200-c7b94355.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220329_225200.log.json) | -| Swin-B | - | ImageNet-1K | 50e | 26.0 | - | 55.1 | 48.2 | 44.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic_20220331_002244-c149a9e9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic_20220331_002244.log.json) | -| Swin-B | - | ImageNet-21K | 50e | 25.8 | - | 56.3 | 50.0 | 46.3 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic_20220329_230021-3bb8b482.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic_20220329_230021.log.json) | -| Swin-L | - | ImageNet-21K | 100e | 21.1 | - | 57.6 | 52.2 | 48.5 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic_20220407_104949-d4919c44.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic_20220407_104949.log.json) | - -### Instance segmentation - -| Backbone | style | Pretrain | Lr schd | Mem (GB) | Inf time (fps) | box mAP | mask mAP | Config | Download | -| -------- | ------- | ----------- | ------- | -------- | -------------- | ------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| R-50 | pytorch | ImageNet-1K | 50e | 13.7 | - | 45.7 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco/mask2former_r50_lsj_8x2_50e_coco_20220506_191028-8e96e88b.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco/mask2former_r50_lsj_8x2_50e_coco_20220506_191028.log.json) | -| R-101 | pytorch | ImageNet-1K | 50e | 15.5 | - | 46.7 | 44.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco/mask2former_r101_lsj_8x2_50e_coco_20220426_100250-c50b6fa6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco/mask2former_r101_lsj_8x2_50e_coco_20220426_100250.log.json) | -| Swin-T | - | ImageNet-1K | 50e | 15.3 | - | 47.7 | 44.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco_20220508_091649-4a943037.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco_20220508_091649.log.json) | -| Swin-S | - | ImageNet-1K | 50e | 18.8 | - | 49.3 | 46.1 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco_20220504_001756-743b7d99.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco_20220504_001756.log.json) | - -Note: We have trained the instance segmentation models many times (see more details in [PR 7571](https://github.com/open-mmlab/mmdetection/pull/7571)). The results of the trained models are relatively stable (+- 0.2), and have a certain gap (about 0.2 AP) in comparison with the results in the [paper](http://arxiv.org/abs/2112.01527). However, the performance of the model trained with the official code is unstable and may also be slightly lower than the reported results as mentioned in the [issue](https://github.com/facebookresearch/Mask2Former/issues/46). - -## Citation - -```latex -@article{cheng2021mask2former, - title={Masked-attention Mask Transformer for Universal Image Segmentation}, - author={Bowen Cheng and Ishan Misra and Alexander G. 
Schwing and Alexander Kirillov and Rohit Girdhar}, - journal={arXiv}, - year={2021} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py b/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py deleted file mode 100644 index 33fdde6ccc1ff608bf86bad2c7de5429adc24932..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './mask2former_r50_lsj_8x2_50e_coco-panoptic.py' - -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py b/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py deleted file mode 100644 index 5543fb0ebf9aa1609d6d6f14f0fd7c9a9919a62c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py'] - -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py b/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py deleted file mode 100644 index 2c23625e139391f6341bb7a4826b13803c80c6b2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py +++ /dev/null @@ -1,253 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py' -] -num_things_classes = 80 -num_stuff_classes = 53 -num_classes = num_things_classes + num_stuff_classes -model = dict( - type='Mask2Former', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - panoptic_head=dict( - type='Mask2FormerHead', - in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside - strides=[4, 8, 16, 32], - feat_channels=256, - out_channels=256, - num_things_classes=num_things_classes, - num_stuff_classes=num_stuff_classes, - num_queries=100, - num_transformer_feat_level=3, - pixel_decoder=dict( - type='MSDeformAttnPixelDecoder', - num_outs=3, - norm_cfg=dict(type='GN', num_groups=32), - act_cfg=dict(type='ReLU'), - encoder=dict( - type='DetrTransformerEncoder', - num_layers=6, - transformerlayers=dict( - type='BaseTransformerLayer', - attn_cfgs=dict( - type='MultiScaleDeformableAttention', - embed_dims=256, - num_heads=8, - num_levels=3, - num_points=4, - im2col_step=64, - dropout=0.0, - batch_first=False, - norm_cfg=None, - init_cfg=None), - ffn_cfgs=dict( - type='FFN', - embed_dims=256, - feedforward_channels=1024, - num_fcs=2, - ffn_drop=0.0, - act_cfg=dict(type='ReLU', inplace=True)), - operation_order=('self_attn', 'norm', 'ffn', 'norm')), - init_cfg=None), - positional_encoding=dict( - type='SinePositionalEncoding', num_feats=128, normalize=True), - init_cfg=None), - enforce_decoder_input_project=False, - positional_encoding=dict( - type='SinePositionalEncoding', num_feats=128, normalize=True), - 
transformer_decoder=dict( - type='DetrTransformerDecoder', - return_intermediate=True, - num_layers=9, - transformerlayers=dict( - type='DetrTransformerDecoderLayer', - attn_cfgs=dict( - type='MultiheadAttention', - embed_dims=256, - num_heads=8, - attn_drop=0.0, - proj_drop=0.0, - dropout_layer=None, - batch_first=False), - ffn_cfgs=dict( - embed_dims=256, - feedforward_channels=2048, - num_fcs=2, - act_cfg=dict(type='ReLU', inplace=True), - ffn_drop=0.0, - dropout_layer=None, - add_identity=True), - feedforward_channels=2048, - operation_order=('cross_attn', 'norm', 'self_attn', 'norm', - 'ffn', 'norm')), - init_cfg=None), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=2.0, - reduction='mean', - class_weight=[1.0] * num_classes + [0.1]), - loss_mask=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - reduction='mean', - loss_weight=5.0), - loss_dice=dict( - type='DiceLoss', - use_sigmoid=True, - activate=True, - reduction='mean', - naive_dice=True, - eps=1.0, - loss_weight=5.0)), - panoptic_fusion_head=dict( - type='MaskFormerFusionHead', - num_things_classes=num_things_classes, - num_stuff_classes=num_stuff_classes, - loss_panoptic=None, - init_cfg=None), - train_cfg=dict( - num_points=12544, - oversample_ratio=3.0, - importance_sample_ratio=0.75, - assigner=dict( - type='MaskHungarianAssigner', - cls_cost=dict(type='ClassificationCost', weight=2.0), - mask_cost=dict( - type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), - dice_cost=dict( - type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), - sampler=dict(type='MaskPseudoSampler')), - test_cfg=dict( - panoptic_on=True, - # For now, the dataset does not support - # evaluating semantic segmentation metric. - semantic_on=False, - instance_on=True, - # max_per_image is for instance segmentation. - max_per_image=100, - iou_thr=0.8, - # In Mask2Former's panoptic postprocessing, - # it will filter mask area where score is less than 0.5 . 
- filter_low_score=True), - init_cfg=None) - -# dataset settings -image_size = (1024, 1024) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict( - type='LoadPanopticAnnotations', - with_bbox=True, - with_mask=True, - with_seg=True), - dict(type='RandomFlip', flip_ratio=0.5), - # large scale jittering - dict( - type='Resize', - img_scale=image_size, - ratio_range=(0.1, 2.0), - multiscale_mode='range', - keep_ratio=True), - dict( - type='RandomCrop', - crop_size=image_size, - crop_type='absolute', - recompute_bbox=True, - allow_negative_crop=True), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=image_size), - dict(type='DefaultFormatBundle', img_to_float=True), - dict( - type='Collect', - keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data_root = 'data/coco/' -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict( - pipeline=test_pipeline, - ins_ann_file=data_root + 'annotations/instances_val2017.json', - ), - test=dict( - pipeline=test_pipeline, - ins_ann_file=data_root + 'annotations/instances_val2017.json', - )) - -embed_multi = dict(lr_mult=1.0, decay_mult=0.0) -# optimizer -optimizer = dict( - type='AdamW', - lr=0.0001, - weight_decay=0.05, - eps=1e-8, - betas=(0.9, 0.999), - paramwise_cfg=dict( - custom_keys={ - 'backbone': dict(lr_mult=0.1, decay_mult=1.0), - 'query_embed': embed_multi, - 'query_feat': embed_multi, - 'level_embed': embed_multi, - }, - norm_decay_mult=0.0)) -optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2)) - -# learning policy -lr_config = dict( - policy='step', - gamma=0.1, - by_epoch=False, - step=[327778, 355092], - warmup='linear', - warmup_by_epoch=False, - warmup_ratio=1.0, # no warmup - warmup_iters=10) - -max_iters = 368750 -runner = dict(type='IterBasedRunner', max_iters=max_iters) - -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook', by_epoch=False), - dict(type='TensorboardLoggerHook', by_epoch=False) - ]) -interval = 5000 -workflow = [('train', interval)] -checkpoint_config = dict( - by_epoch=False, interval=interval, save_last=True, max_keep_ckpts=3) - -# Before 365001th iteration, we do evaluation every 5000 iterations. -# After 365000th iteration, we do evaluation every 368750 iterations, -# which means that we do evaluation at the end of training. 
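# For reference, with the values set above (interval = 5000, max_iters = 368750):
#   max_iters // interval * interval + 1 = 365001
# so dynamic_intervals below evaluates to [(365001, 368750)]: evaluation runs
# every 5000 iterations up to iteration 365000, then only once more at 368750.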
-dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)] -evaluation = dict( - interval=interval, - dynamic_intervals=dynamic_intervals, - metric=['PQ', 'bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py b/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py deleted file mode 100644 index eca6135ba7cb1c28eeb5bf5f031176dc77952630..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py +++ /dev/null @@ -1,79 +0,0 @@ -_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py'] -num_things_classes = 80 -num_stuff_classes = 0 -num_classes = num_things_classes + num_stuff_classes -model = dict( - panoptic_head=dict( - num_things_classes=num_things_classes, - num_stuff_classes=num_stuff_classes, - loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])), - panoptic_fusion_head=dict( - num_things_classes=num_things_classes, - num_stuff_classes=num_stuff_classes), - test_cfg=dict(panoptic_on=False)) - -# dataset settings -image_size = (1024, 1024) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -pad_cfg = dict(img=(128, 128, 128), masks=0, seg=255) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - # large scale jittering - dict( - type='Resize', - img_scale=image_size, - ratio_range=(0.1, 2.0), - multiscale_mode='range', - keep_ratio=True), - dict( - type='RandomCrop', - crop_size=image_size, - crop_type='absolute', - recompute_bbox=True, - allow_negative_crop=True), - dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True), - dict(type='Pad', size=image_size, pad_val=pad_cfg), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle', img_to_float=True), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Pad', size_divisor=32, pad_val=pad_cfg), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -data = dict( - _delete_=True, - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py b/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py deleted file mode 100644 index f13f5e178438b7a334665be18460da2cde80125c..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py'] -pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth' # noqa - -model = dict( - backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=pretrained))) diff --git a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py b/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py deleted file mode 100644 index 33a805c35eb1aa0adf81b3b69889d3f0a96cf4fa..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py +++ /dev/null @@ -1,42 +0,0 @@ -_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py'] -pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth' # noqa - -depths = [2, 2, 18, 2] -model = dict( - backbone=dict( - pretrain_img_size=384, - embed_dims=128, - depths=depths, - num_heads=[4, 8, 16, 32], - window_size=12, - init_cfg=dict(type='Pretrained', checkpoint=pretrained)), - panoptic_head=dict(in_channels=[128, 256, 512, 1024])) - -# set all layers in backbone to lr_mult=0.1 -# set all norm layers, position_embeding, -# query_embeding, level_embeding to decay_multi=0.0 -backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) -backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) -embed_multi = dict(lr_mult=1.0, decay_mult=0.0) -custom_keys = { - 'backbone': dict(lr_mult=0.1, decay_mult=1.0), - 'backbone.patch_embed.norm': backbone_norm_multi, - 'backbone.norm': backbone_norm_multi, - 'absolute_pos_embed': backbone_embed_multi, - 'relative_position_bias_table': backbone_embed_multi, - 'query_embed': embed_multi, - 'query_feat': embed_multi, - 'level_embed': embed_multi -} -custom_keys.update({ - f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi - for stage_id, num_blocks in enumerate(depths) - for block_id in range(num_blocks) -}) -custom_keys.update({ - f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi - for stage_id in range(len(depths) - 1) -}) -# optimizer -optimizer = dict( - paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py b/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py deleted file mode 100644 index 91a180d4b19f32e14d53df1b9c7ba508304c9ba5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py +++ /dev/null @@ -1,26 +0,0 @@ -_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py'] -pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa - -model = dict( - backbone=dict( - embed_dims=192, - num_heads=[6, 12, 24, 48], - init_cfg=dict(type='Pretrained', checkpoint=pretrained)), - panoptic_head=dict(num_queries=200, in_channels=[192, 384, 768, 1536])) - -data = dict(samples_per_gpu=1, workers_per_gpu=1) - -lr_config = dict(step=[655556, 710184]) - -max_iters = 737500 -runner = 
dict(type='IterBasedRunner', max_iters=max_iters) - -# Before 735001th iteration, we do evaluation every 5000 iterations. -# After 735000th iteration, we do evaluation every 737500 iterations, -# which means that we do evaluation at the end of training.' -interval = 5000 -dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)] -evaluation = dict( - interval=interval, - dynamic_intervals=dynamic_intervals, - metric=['PQ', 'bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py b/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py deleted file mode 100644 index b2b621ce7819bf01cc580f7af0f1589525e7eac2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py +++ /dev/null @@ -1,37 +0,0 @@ -_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py'] -pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa - -depths = [2, 2, 18, 2] -model = dict( - backbone=dict( - depths=depths, init_cfg=dict(type='Pretrained', - checkpoint=pretrained))) - -# set all layers in backbone to lr_mult=0.1 -# set all norm layers, position_embeding, -# query_embeding, level_embeding to decay_multi=0.0 -backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) -backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) -embed_multi = dict(lr_mult=1.0, decay_mult=0.0) -custom_keys = { - 'backbone': dict(lr_mult=0.1, decay_mult=1.0), - 'backbone.patch_embed.norm': backbone_norm_multi, - 'backbone.norm': backbone_norm_multi, - 'absolute_pos_embed': backbone_embed_multi, - 'relative_position_bias_table': backbone_embed_multi, - 'query_embed': embed_multi, - 'query_feat': embed_multi, - 'level_embed': embed_multi -} -custom_keys.update({ - f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi - for stage_id, num_blocks in enumerate(depths) - for block_id in range(num_blocks) -}) -custom_keys.update({ - f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi - for stage_id in range(len(depths) - 1) -}) -# optimizer -optimizer = dict( - paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py b/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py deleted file mode 100644 index 7b1b05abafe6133eb79b1537dad08d9d9f205deb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py +++ /dev/null @@ -1,37 +0,0 @@ -_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py'] -pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa - -depths = [2, 2, 18, 2] -model = dict( - backbone=dict( - depths=depths, init_cfg=dict(type='Pretrained', - checkpoint=pretrained))) - -# set all layers in backbone to lr_mult=0.1 -# set all norm layers, position_embeding, -# query_embeding, level_embeding to decay_multi=0.0 -backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) -backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) -embed_multi = dict(lr_mult=1.0, decay_mult=0.0) -custom_keys = { - 'backbone': dict(lr_mult=0.1, decay_mult=1.0), - 'backbone.patch_embed.norm': 
backbone_norm_multi, - 'backbone.norm': backbone_norm_multi, - 'absolute_pos_embed': backbone_embed_multi, - 'relative_position_bias_table': backbone_embed_multi, - 'query_embed': embed_multi, - 'query_feat': embed_multi, - 'level_embed': embed_multi -} -custom_keys.update({ - f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi - for stage_id, num_blocks in enumerate(depths) - for block_id in range(num_blocks) -}) -custom_keys.update({ - f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi - for stage_id in range(len(depths) - 1) -}) -# optimizer -optimizer = dict( - paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py b/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py deleted file mode 100644 index 04b2f10eddc1eed312da2fb18612f84e87101adc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py +++ /dev/null @@ -1,62 +0,0 @@ -_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py'] -pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa - -depths = [2, 2, 6, 2] -model = dict( - type='Mask2Former', - backbone=dict( - _delete_=True, - type='SwinTransformer', - embed_dims=96, - depths=depths, - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.3, - patch_norm=True, - out_indices=(0, 1, 2, 3), - with_cp=False, - convert_weights=True, - frozen_stages=-1, - init_cfg=dict(type='Pretrained', checkpoint=pretrained)), - panoptic_head=dict( - type='Mask2FormerHead', in_channels=[96, 192, 384, 768]), - init_cfg=None) - -# set all layers in backbone to lr_mult=0.1 -# set all norm layers, position_embeding, -# query_embeding, level_embeding to decay_multi=0.0 -backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) -backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) -embed_multi = dict(lr_mult=1.0, decay_mult=0.0) -custom_keys = { - 'backbone': dict(lr_mult=0.1, decay_mult=1.0), - 'backbone.patch_embed.norm': backbone_norm_multi, - 'backbone.norm': backbone_norm_multi, - 'absolute_pos_embed': backbone_embed_multi, - 'relative_position_bias_table': backbone_embed_multi, - 'query_embed': embed_multi, - 'query_feat': embed_multi, - 'level_embed': embed_multi -} -custom_keys.update({ - f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi - for stage_id, num_blocks in enumerate(depths) - for block_id in range(num_blocks) -}) -custom_keys.update({ - f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi - for stage_id in range(len(depths) - 1) -}) -# optimizer -optimizer = dict( - type='AdamW', - lr=0.0001, - weight_decay=0.05, - eps=1e-8, - betas=(0.9, 0.999), - paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py b/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py deleted file mode 100644 index 0ccbe91c683de4e272125b24f348ffc080134f50..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py +++ /dev/null @@ -1,61 +0,0 @@ 
-_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py'] -pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa -depths = [2, 2, 6, 2] -model = dict( - type='Mask2Former', - backbone=dict( - _delete_=True, - type='SwinTransformer', - embed_dims=96, - depths=depths, - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.3, - patch_norm=True, - out_indices=(0, 1, 2, 3), - with_cp=False, - convert_weights=True, - frozen_stages=-1, - init_cfg=dict(type='Pretrained', checkpoint=pretrained)), - panoptic_head=dict( - type='Mask2FormerHead', in_channels=[96, 192, 384, 768]), - init_cfg=None) - -# set all layers in backbone to lr_mult=0.1 -# set all norm layers, position_embeding, -# query_embeding, level_embeding to decay_multi=0.0 -backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0) -backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0) -embed_multi = dict(lr_mult=1.0, decay_mult=0.0) -custom_keys = { - 'backbone': dict(lr_mult=0.1, decay_mult=1.0), - 'backbone.patch_embed.norm': backbone_norm_multi, - 'backbone.norm': backbone_norm_multi, - 'absolute_pos_embed': backbone_embed_multi, - 'relative_position_bias_table': backbone_embed_multi, - 'query_embed': embed_multi, - 'query_feat': embed_multi, - 'level_embed': embed_multi -} -custom_keys.update({ - f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi - for stage_id, num_blocks in enumerate(depths) - for block_id in range(num_blocks) -}) -custom_keys.update({ - f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi - for stage_id in range(len(depths) - 1) -}) -# optimizer -optimizer = dict( - type='AdamW', - lr=0.0001, - weight_decay=0.05, - eps=1e-8, - betas=(0.9, 0.999), - paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) diff --git a/cv/detection/co-detr/pytorch/configs/mask2former/metafile.yml b/cv/detection/co-detr/pytorch/configs/mask2former/metafile.yml deleted file mode 100644 index d9f469292c28999d2a9ea64abdba87fbf5d87fd1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask2former/metafile.yml +++ /dev/null @@ -1,223 +0,0 @@ -Collections: - - Name: Mask2Former - Metadata: - Training Data: COCO - Training Techniques: - - AdamW - - Weight Decay - Training Resources: 8x A100 GPUs - Architecture: - - Mask2Former - Paper: - URL: https://arxiv.org/pdf/2112.01527 - Title: 'Masked-attention Mask Transformer for Universal Image Segmentation' - README: configs/mask2former/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.23.0/mmdet/models/detectors/mask2former.py#L7 - Version: v2.23.0 - -Models: -- Name: mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic - In Collection: Mask2Former - Config: configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py - Metadata: - Training Memory (GB): 19.1 - Iterations: 368750 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 47.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 44.5 - - Task: Panoptic Segmentation - Dataset: COCO - Metrics: - PQ: 54.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220329_225200-c7b94355.pth -- Name: mask2former_r101_lsj_8x2_50e_coco - In Collection: Mask2Former - Config: 
configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py - Metadata: - Training Memory (GB): 15.5 - Iterations: 368750 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 44.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco/mask2former_r101_lsj_8x2_50e_coco_20220426_100250-c50b6fa6.pth -- Name: mask2former_r101_lsj_8x2_50e_coco-panoptic - In Collection: Mask2Former - Config: configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py - Metadata: - Training Memory (GB): 16.1 - Iterations: 368750 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 42.4 - - Task: Panoptic Segmentation - Dataset: COCO - Metrics: - PQ: 52.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic/mask2former_r101_lsj_8x2_50e_coco-panoptic_20220329_225104-c54e64c9.pth -- Name: mask2former_r50_lsj_8x2_50e_coco-panoptic - In Collection: Mask2Former - Config: configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py - Metadata: - Training Memory (GB): 13.9 - Iterations: 368750 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 41.9 - - Task: Panoptic Segmentation - Dataset: COCO - Metrics: - PQ: 51.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth -- Name: mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic - In Collection: Mask2Former - Config: configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py - Metadata: - Training Memory (GB): 15.9 - Iterations: 368750 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 43.4 - - Task: Panoptic Segmentation - Dataset: COCO - Metrics: - PQ: 53.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220326_224553-fc567107.pth -- Name: mask2former_r50_lsj_8x2_50e_coco - In Collection: Mask2Former - Config: configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py - Metadata: - Training Memory (GB): 13.7 - Iterations: 368750 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 42.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco/mask2former_r50_lsj_8x2_50e_coco_20220506_191028-8e96e88b.pth -- Name: mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic - In Collection: Mask2Former - Config: configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py - Metadata: - Training Memory (GB): 21.1 - Iterations: 737500 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 52.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 48.5 - - Task: Panoptic Segmentation - Dataset: COCO - Metrics: - PQ: 57.6 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic_20220407_104949-d4919c44.pth -- Name: mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic - In Collection: Mask2Former - Config: configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py - Metadata: - Training Memory (GB): 25.8 - Iterations: 368750 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 50.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 46.3 - - Task: Panoptic Segmentation - Dataset: COCO - Metrics: - PQ: 56.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic_20220329_230021-3bb8b482.pth -- Name: mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic - In Collection: Mask2Former - Config: configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py - Metadata: - Training Memory (GB): 26.0 - Iterations: 368750 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 48.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 44.9 - - Task: Panoptic Segmentation - Dataset: COCO - Metrics: - PQ: 55.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic_20220331_002244-c149a9e9.pth -- Name: mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco - In Collection: Mask2Former - Config: configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py - Metadata: - Training Memory (GB): 15.3 - Iterations: 368750 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 47.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 44.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco_20220508_091649-4a943037.pth -- Name: mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco - In Collection: Mask2Former - Config: configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py - Metadata: - Training Memory (GB): 18.8 - Iterations: 368750 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 49.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 46.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco_20220504_001756-743b7d99.pth diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/README.md b/cv/detection/co-detr/pytorch/configs/mask_rcnn/README.md deleted file mode 100644 index 11a39b056675984e50f3625f77f12dc9009794ce..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# Mask R-CNN - -> [Mask R-CNN](https://arxiv.org/abs/1703.06870) - - - -## Abstract - -We present a conceptually simple, flexible, and general framework for object instance segmentation. Our approach efficiently detects objects in an image while simultaneously generating a high-quality segmentation mask for each instance. 
The method, called Mask R-CNN, extends Faster R-CNN by adding a branch for predicting an object mask in parallel with the existing branch for bounding box recognition. Mask R-CNN is simple to train and adds only a small overhead to Faster R-CNN, running at 5 fps. Moreover, Mask R-CNN is easy to generalize to other tasks, e.g., allowing us to estimate human poses in the same framework. We show top results in all three tracks of the COCO suite of challenges, including instance segmentation, bounding-box object detection, and person keypoint detection. Without bells and whistles, Mask R-CNN outperforms all existing, single-model entries on every task, including the COCO 2016 challenge winners. We hope our simple and effective approach will serve as a solid baseline and help ease future research in instance-level recognition. - -
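As a concrete illustration of the parallel branches described above: in the mmdetection-style configs collected in this directory, the box and mask branches simply sit side by side under `roi_head`. The fragment below is a minimal sketch following the `mask_rcnn_r50_fpn` base config that the files in this folder inherit from; it is not one of the deleted files:

```python
model = dict(
    type='MaskRCNN',
    roi_head=dict(
        # box-recognition branch
        bbox_head=dict(type='Shared2FCBBoxHead', num_classes=80),
        # mask-prediction branch, run in parallel on the same RoI features
        mask_head=dict(type='FCNMaskHead', num_classes=80)))
```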
- -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | caffe | 1x | 4.3 | | 38.0 | 34.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.38__segm_mAP-0.344_20200504_231812-0ebd1859.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_20200504_231812.log.json) | -| R-50-FPN | pytorch | 1x | 4.4 | 16.1 | 38.2 | 34.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) | -| R-50-FPN (FP16) | pytorch | 1x | 3.6 | 24.1 | 38.1 | 34.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205_130539.log.json) | -| R-50-FPN | pytorch | 2x | - | - | 39.2 | 35.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_20200505_003907.log.json) | -| R-101-FPN | caffe | 1x | | | 40.4 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758-805e06c1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758.log.json) | -| R-101-FPN | pytorch | 1x | 6.4 | 13.5 | 40.0 | 36.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204_144809.log.json) | -| R-101-FPN | pytorch | 2x | - | - | 40.8 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_2x_coco/mask_rcnn_r101_fpn_2x_coco_bbox_mAP-0.408__segm_mAP-0.366_20200505_071027-14b391c7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_2x_coco/mask_rcnn_r101_fpn_2x_coco_20200505_071027.log.json) | -| X-101-32x4d-FPN | pytorch | 1x | 7.6 | 11.3 | 41.9 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205_034906.log.json) | -| X-101-32x4d-FPN | pytorch | 2x | - | - | 42.2 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco/mask_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.422__segm_mAP-0.378_20200506_004702-faef898c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco/mask_rcnn_x101_32x4d_fpn_2x_coco_20200506_004702.log.json) | -| X-101-64x4d-FPN | pytorch | 1x | 10.7 | 8.0 | 42.8 | 38.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201-9352eb0d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201_124310.log.json) | -| X-101-64x4d-FPN | pytorch | 2x | - | - | 42.7 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208-39d6f70c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208.log.json) | -| X-101-32x8d-FPN | pytorch | 1x | 10.6 | - | 42.8 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco/mask_rcnn_x101_32x8d_fpn_1x_coco_20220630_173841-0aaf329e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco/mask_rcnn_x101_32x8d_fpn_1x_coco_20220630_173841.log.json) | - -## Pre-trained Models - -We also train some models with longer schedules and multi-scale training. The users could finetune them for downstream tasks. 
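For instance, fine-tuning from one of these longer-schedule checkpoints on a downstream dataset can start from a config like the sketch below, which uses mmdetection's standard `load_from` field together with the R-50-FPN 3x checkpoint URL from the table that follows (downstream dataset settings and `num_classes` overrides are omitted):

```python
_base_ = './mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py'
# initialize all weights from the multi-scale 3x COCO checkpoint
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154-21b550bb.pth'  # noqa
```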
- -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------------------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| [R-50-FPN](./mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py) | caffe | 2x | 4.3 | | 40.3 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco_bbox_mAP-0.403__segm_mAP-0.365_20200504_231822-a75c98ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco_20200504_231822.log.json) | -| [R-50-FPN](./mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py) | caffe | 3x | 4.3 | | 40.8 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_20200504_163245.log.json) | -| [R-50-FPN](./mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 4.1 | | 40.9 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154-21b550bb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154.log.json) | -| [R-101-FPN](./mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py) | caffe | 3x | 5.9 | | 42.9 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco_20210526_132339-3c33ce02.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco_20210526_132339.log.json) | -| [R-101-FPN](./mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 6.1 | | 42.7 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_fpn_mstrain-poly_3x_coco_20210524_200244-5675c317.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_fpn_mstrain-poly_3x_coco_20210524_200244.log.json) | -| [x101-32x4d-FPN](./mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 7.3 | | 43.6 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco_20210524_201410-abcd7859.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco_20210524_201410.log.json) | -| [X-101-32x8d-FPN](./mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py) | pytorch | 1x | 10.4 | | 43.4 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco_20220630_170346-b4637974.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco_20220630_170346.log.json) | -| [X-101-32x8d-FPN](./mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 10.3 | | 44.3 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco_20210607_161042-8bd2c639.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco_20210607_161042.log.json) | -| [X-101-64x4d-FPN](./mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 10.4 | | 44.5 | 39.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco_20210526_120447-c376f129.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco_20210526_120447.log.json) | - -## Citation - -```latex -@article{He_2017, - title={Mask R-CNN}, - journal={2017 IEEE International Conference on Computer Vision (ICCV)}, - publisher={IEEE}, - author={He, Kaiming and Gkioxari, Georgia and Dollar, Piotr and Girshick, Ross}, - year={2017}, - month={Oct} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py deleted file mode 100644 index 95b324f59144e6a894ad30e01859af148aa699d6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = 
'./mask_rcnn_r50_caffe_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py deleted file mode 100644 index e39781dcffff82430cbee16e15af8f54c2e44814..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py +++ /dev/null @@ -1,55 +0,0 @@ -_base_ = [ - '../common/mstrain-poly_3x_coco_instance.py', - '../_base_/models/mask_rcnn_r50_fpn.py' -] - -model = dict( - backbone=dict( - depth=101, - norm_cfg=dict(requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe'))) -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - train=dict(dataset=dict(pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py deleted file mode 100644 index b7986e8576642e631cfcdc9b274c49a17671e8b1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py deleted file mode 100644 index c9059d5385a960172dfe01c6d9a25d3089d96649..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_2x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py deleted file mode 100644 index 0696cbe756a1f885e2660adc626ef055542fb54f..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = [ - '../common/mstrain-poly_3x_coco_instance.py', - '../_base_/models/mask_rcnn_r50_fpn.py' -] - -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_c4_1x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_c4_1x_coco.py deleted file mode 100644 index a44c01831b508da0a5e1ca3720bb437bcea086d1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_c4_1x_coco.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_caffe_c4.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 5a23f8c7cd21ef5025def03d4743d03103d821c5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,40 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(requires_grad=False), - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe'))) -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', 
keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py deleted file mode 100644 index 6308e40416a2be6ab5cbcc6826faff8556bc0b16..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py +++ /dev/null @@ -1,49 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(requires_grad=False), - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe'))) -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py deleted file mode 100644 index 4f7150ca718e2ead46eb63e74b6be06f50aa0fce..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 23]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py deleted file mode 100644 index 1b48a2104baf0df935954897ae4a991b38684d78..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' -# learning policy -lr_config = dict(step=[28, 34]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py deleted file mode 100644 index 
bebbaaab05c099f575d94fbb5ae6bef57d4b4177..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py +++ /dev/null @@ -1,45 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(requires_grad=False), - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe'))) -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py deleted file mode 100644 index 3f8079d3629e35c246819136cf2f292865b99d41..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py +++ /dev/null @@ -1,61 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(requires_grad=False), - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - rpn_head=dict( - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - roi_head=dict( - bbox_roi_extractor=dict( - roi_layer=dict( - type='RoIAlign', - output_size=7, - sampling_ratio=2, - aligned=False)), - bbox_head=dict( - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), - mask_roi_extractor=dict( - roi_layer=dict( - type='RoIAlign', - output_size=14, - sampling_ratio=2, - aligned=False)))) -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', 
keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index 6a6c92460f1d58b8e8d361fb56ee123f2668ad9f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_wandb_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_wandb_coco.py deleted file mode 100644 index 88c85767a928ad68cfb1d09b24da740e4ab78227..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_wandb_coco.py +++ /dev/null @@ -1,26 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -# Set evaluation interval -evaluation = dict(interval=2) -# Set checkpoint interval -checkpoint_config = dict(interval=4) - -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='MMDetWandbHook', - init_kwargs={ - 'project': 'mmdetection', - 'group': 'maskrcnn-r50-fpn-1x-coco' - }, - interval=50, - log_checkpoint=True, - log_checkpoint_metadata=True, - num_eval_images=100) - ]) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py deleted file mode 100644 index 932b1f905155a0d3285daefc4891f5194705e30d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' -] diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py deleted file mode 100644 index fb8289b06c40697db5d42f37f80a0e67ff4fb4e7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_1x_coco.py' -# fp16 settings -fp16 = dict(loss_scale=512.) 
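All of the small configs removed above compose through MMDetection's `_base_` inheritance: a child file such as the fp16 one just deleted only states its overrides (here the `fp16` loss scale) and inherits everything else from its base config. A minimal sketch of how such a file is typically loaded, assuming an mmcv 1.x-era environment and a hypothetical local path:

```python
# Minimal sketch, assuming mmcv 1.x (the era these configs target) is installed.
from mmcv import Config

# Hypothetical relative path; child configs list only their overrides.
cfg = Config.fromfile('configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py')

# `_base_` files are merged first, then the child's keys override them:
print(cfg.fp16)                  # expected: {'loss_scale': 512.0}
print(cfg.model.backbone.depth)  # inherited from the _base_ model config
```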
diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py deleted file mode 100644 index b3d9242cd222d9da0bb7cc531130456f1031266f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../common/mstrain-poly_3x_coco_instance.py', - '../_base_/models/mask_rcnn_r50_fpn.py' -] diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_poly_1x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_poly_1x_coco.py deleted file mode 100644 index 9eb6d57e0d25370a59472a4ceb1a3b9da6574608..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_r50_fpn_poly_1x_coco.py +++ /dev/null @@ -1,23 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index a8b3799b3482c840a4fcb5201a7dede23a0e073c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './mask_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py deleted file mode 100644 index 2cd3cee5a102b49750e5b265ec6775907f1a9545..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './mask_rcnn_r101_fpn_2x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py deleted file mode 100644 index 
b698a7d219320ca93e49b8dd5fd807005f469dce..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = [ - '../common/mstrain-poly_3x_coco_instance.py', - '../_base_/models/mask_rcnn_r50_fpn.py' -] - -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py deleted file mode 100644 index 108ea4e34717953be59795b63f4f932f4329468f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py +++ /dev/null @@ -1,65 +0,0 @@ -_base_ = './mask_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=8, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - style='pytorch', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) - -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], - std=[57.375, 57.120, 58.395], - to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py deleted file mode 100644 index 6b912f692b7a833556e6f7ef02b483c4e33781ef..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py +++ /dev/null @@ -1,60 +0,0 @@ -_base_ = './mask_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=8, - num_stages=4, - out_indices=(0, 1, 2, 3), - 
frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - style='pytorch', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) - -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], - std=[57.375, 57.120, 58.395], - to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py deleted file mode 100644 index 8ba0e9c200fdc4ff196184c0b8e2320804037fbb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py +++ /dev/null @@ -1,85 +0,0 @@ -_base_ = [ - '../common/mstrain-poly_3x_coco_instance.py', - '../_base_/models/mask_rcnn_r50_fpn.py' -] - -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=8, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - style='pytorch', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnext101_32x8d'))) - -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], - std=[57.375, 57.120, 58.395], - to_rgb=False) - -# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], -# multiscale_mode='range' -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -# Use RepeatDataset to speed up training -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='RepeatDataset', - 
times=3, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index 2333b03a835a7d1d09df09749ebdc492db499f63..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './mask_rcnn_x101_32x4d_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py deleted file mode 100644 index 6074cca29f462e821206a6509ff8869ec86b5a68..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './mask_rcnn_x101_32x4d_fpn_2x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py b/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py deleted file mode 100644 index 9f9cb1c4393b344fd9c5a25c04047e7e7a3cb54d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = [ - '../common/mstrain-poly_3x_coco_instance.py', - '../_base_/models/mask_rcnn_r50_fpn.py' -] - -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/mask_rcnn/metafile.yml b/cv/detection/co-detr/pytorch/configs/mask_rcnn/metafile.yml deleted file mode 100644 index 30938ea3201598bccc10933617a8b87015c1ae17..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/mask_rcnn/metafile.yml +++ /dev/null @@ -1,443 +0,0 @@ -Collections: - - Name: Mask R-CNN - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Softmax - - RPN - - Convolution - - Dense Connections - - FPN - - ResNet - - RoIAlign - Paper: - URL: 
https://arxiv.org/abs/1703.06870v3 - Title: "Mask R-CNN" - README: configs/mask_rcnn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/mask_rcnn.py#L6 - Version: v2.0.0 - -Models: - - Name: mask_rcnn_r50_caffe_fpn_1x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 4.3 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 34.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.38__segm_mAP-0.344_20200504_231812-0ebd1859.pth - - - Name: mask_rcnn_r50_fpn_1x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 4.4 - inference time (ms/im): - - value: 62.11 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 34.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth - - - Name: mask_rcnn_r50_fpn_fp16_1x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py - Metadata: - Training Memory (GB): 3.6 - Training Techniques: - - SGD with Momentum - - Weight Decay - - Mixed Precision Training - inference time (ms/im): - - value: 41.49 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP16 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 34.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth - - - Name: mask_rcnn_r50_fpn_2x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py - Metadata: - Training Memory (GB): 4.4 - inference time (ms/im): - - value: 62.11 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 35.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth - - - Name: mask_rcnn_r101_caffe_fpn_1x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758-805e06c1.pth - - - Name: mask_rcnn_r101_fpn_1x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.4 - inference time (ms/im): - - value: 74.07 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object 
Detection - Dataset: COCO - Metrics: - box AP: 40.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth - - - Name: mask_rcnn_r101_fpn_2x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py - Metadata: - Training Memory (GB): 6.4 - inference time (ms/im): - - value: 74.07 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_2x_coco/mask_rcnn_r101_fpn_2x_coco_bbox_mAP-0.408__segm_mAP-0.366_20200505_071027-14b391c7.pth - - - Name: mask_rcnn_x101_32x4d_fpn_1x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.6 - inference time (ms/im): - - value: 88.5 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.9 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth - - - Name: mask_rcnn_x101_32x4d_fpn_2x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py - Metadata: - Training Memory (GB): 7.6 - inference time (ms/im): - - value: 88.5 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco/mask_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.422__segm_mAP-0.378_20200506_004702-faef898c.pth - - - Name: mask_rcnn_x101_64x4d_fpn_1x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 10.7 - inference time (ms/im): - - value: 125 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201-9352eb0d.pth - - - Name: mask_rcnn_x101_64x4d_fpn_2x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py - Metadata: - Training Memory (GB): 10.7 - inference time (ms/im): - - value: 125 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208-39d6f70c.pth - - - Name: mask_rcnn_x101_32x8d_fpn_1x_coco - 
In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 10.6 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco/mask_rcnn_x101_32x8d_fpn_1x_coco_20220630_173841-0aaf329e.pth - - - Name: mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py - Metadata: - Training Memory (GB): 4.3 - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco_bbox_mAP-0.403__segm_mAP-0.365_20200504_231822-a75c98ce.pth - - - Name: mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py - Metadata: - Training Memory (GB): 4.3 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth - - - Name: mask_rcnn_r50_fpn_mstrain-poly_3x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py - Metadata: - Training Memory (GB): 4.1 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.9 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154-21b550bb.pth - - - Name: mask_rcnn_r101_fpn_mstrain-poly_3x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py - Metadata: - Training Memory (GB): 6.1 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_fpn_mstrain-poly_3x_coco_20210524_200244-5675c317.pth - - - Name: mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py - Metadata: - Training Memory (GB): 5.9 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.9 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco_20210526_132339-3c33ce02.pth - - - Name: mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py - Metadata: - Training Memory (GB): 7.3 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - 
box AP: 43.6 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco_20210524_201410-abcd7859.pth - - - Name: mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py - Metadata: - Training Memory (GB): 10.4 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco_20220630_170346-b4637974.pth - - - Name: mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py - Metadata: - Training Memory (GB): 10.3 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco_20210607_161042-8bd2c639.pth - - - Name: mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco - In Collection: Mask R-CNN - Config: configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py - Metadata: - Epochs: 36 - Training Memory (GB): 10.4 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco_20210526_120447-c376f129.pth diff --git a/cv/detection/co-detr/pytorch/configs/maskformer/README.md b/cv/detection/co-detr/pytorch/configs/maskformer/README.md deleted file mode 100644 index 5d8daa2ff01274cbc3cef054d1e17342c528f3e1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/maskformer/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# MaskFormer - -> [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) - - - -## Abstract - -Modern approaches typically formulate semantic segmentation as a per-pixel classification task, while instance-level segmentation is handled with an alternative mask classification. Our key insight: mask classification is sufficiently general to solve both semantic- and instance-level segmentation tasks in a unified manner using the exact same model, loss, and training procedure. Following this observation, we propose MaskFormer, a simple mask classification model which predicts a set of binary masks, each associated with a single global class label prediction. Overall, the proposed mask classification-based method simplifies the landscape of effective approaches to semantic and panoptic segmentation tasks and shows excellent empirical results. In particular, we observe that MaskFormer outperforms per-pixel classification baselines when the number of classes is large. Our mask classification-based method outperforms both current state-of-the-art semantic (55.6 mIoU on ADE20K) and panoptic segmentation (52.7 PQ on COCO) models. - -
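To make the mask-classification formulation described in the abstract concrete, the sketch below shows how a set of per-query class scores and binary masks can be reduced to a per-pixel semantic prediction. The names and shapes (`cls_logits`, `mask_logits`) are illustrative assumptions, not code from the removed configs:

```python
import torch
import torch.nn.functional as F

def semantic_from_mask_classification(cls_logits, mask_logits):
    """Reduce Q query predictions to a per-pixel semantic map.

    cls_logits:  (Q, C + 1) class scores, last index = "no object".
    mask_logits: (Q, H, W) one binary-mask logit map per query.
    """
    cls_prob = F.softmax(cls_logits, dim=-1)[:, :-1]  # (Q, C), drop "no object"
    mask_prob = mask_logits.sigmoid()                 # (Q, H, W)
    # per-pixel class score: sum over queries of p_q(c) * m_q(h, w)
    sem = torch.einsum('qc,qhw->chw', cls_prob, mask_prob)
    return sem.argmax(dim=0)                          # (H, W) class indices

# Example with random tensors (80 thing + 53 stuff classes, 100 queries):
pred = semantic_from_mask_classification(torch.randn(100, 134),
                                         torch.randn(100, 64, 64))
```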
- -## Introduction - -MaskFormer requires COCO and [COCO-panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) dataset for training and evaluation. You need to download and extract it in the COCO dataset path. -The directory should be like this. - -```none -mmdetection -├── mmdet -├── tools -├── configs -├── data -│ ├── coco -│ │ ├── annotations -│ │ │ ├── panoptic_train2017.json -│ │ │ ├── panoptic_train2017 -│ │ │ ├── panoptic_val2017.json -│ │ │ ├── panoptic_val2017 -│ │ ├── train2017 -│ │ ├── val2017 -│ │ ├── test2017 -``` - -## Results and Models - -| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | PQ | SQ | RQ | PQ_th | SQ_th | RQ_th | PQ_st | SQ_st | RQ_st | Config | Download | detail | -| :------: | :-----: | :-----: | :------: | :------------: | :----: | :----: | :----: | :----: | :----: | :----: | :----: | :----: | :----: | :-----------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | pytorch | 75e | 16.2 | - | 46.854 | 80.617 | 57.085 | 51.089 | 81.511 | 61.853 | 40.463 | 79.269 | 49.888 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_r50_mstrain_16x1_75e_coco/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_r50_mstrain_16x1_75e_coco/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956.log.json) | This version was mentioned in Table XI, in paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) | -| Swin-L | pytorch | 300e | 27.2 | - | 53.249 | 81.704 | 64.231 | 58.798 | 82.923 | 70.282 | 44.874 | 79.863 | 55.097 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco_20220326_221612-061b4eb8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco_20220326_221612.log.json) | - | - -## Citation - -```latex -@inproceedings{cheng2021maskformer, - title={Per-Pixel Classification is Not All You Need for Semantic Segmentation}, - author={Bowen Cheng and Alexander G. 
Schwing and Alexander Kirillov}, - journal={NeurIPS}, - year={2021} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py b/cv/detection/co-detr/pytorch/configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py deleted file mode 100644 index 46b3c135dd800dc77843b5c0050e57a99be82e8b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py +++ /dev/null @@ -1,238 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py' -] -num_things_classes = 80 -num_stuff_classes = 53 -num_classes = num_things_classes + num_stuff_classes -model = dict( - type='MaskFormer', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - panoptic_head=dict( - type='MaskFormerHead', - in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside - feat_channels=256, - out_channels=256, - num_things_classes=num_things_classes, - num_stuff_classes=num_stuff_classes, - num_queries=100, - pixel_decoder=dict( - type='TransformerEncoderPixelDecoder', - norm_cfg=dict(type='GN', num_groups=32), - act_cfg=dict(type='ReLU'), - encoder=dict( - type='DetrTransformerEncoder', - num_layers=6, - transformerlayers=dict( - type='BaseTransformerLayer', - attn_cfgs=dict( - type='MultiheadAttention', - embed_dims=256, - num_heads=8, - attn_drop=0.1, - proj_drop=0.1, - dropout_layer=None, - batch_first=False), - ffn_cfgs=dict( - embed_dims=256, - feedforward_channels=2048, - num_fcs=2, - act_cfg=dict(type='ReLU', inplace=True), - ffn_drop=0.1, - dropout_layer=None, - add_identity=True), - operation_order=('self_attn', 'norm', 'ffn', 'norm'), - norm_cfg=dict(type='LN'), - init_cfg=None, - batch_first=False), - init_cfg=None), - positional_encoding=dict( - type='SinePositionalEncoding', num_feats=128, normalize=True)), - enforce_decoder_input_project=False, - positional_encoding=dict( - type='SinePositionalEncoding', num_feats=128, normalize=True), - transformer_decoder=dict( - type='DetrTransformerDecoder', - return_intermediate=True, - num_layers=6, - transformerlayers=dict( - type='DetrTransformerDecoderLayer', - attn_cfgs=dict( - type='MultiheadAttention', - embed_dims=256, - num_heads=8, - attn_drop=0.1, - proj_drop=0.1, - dropout_layer=None, - batch_first=False), - ffn_cfgs=dict( - embed_dims=256, - feedforward_channels=2048, - num_fcs=2, - act_cfg=dict(type='ReLU', inplace=True), - ffn_drop=0.1, - dropout_layer=None, - add_identity=True), - # the following parameter was not used, - # just make current api happy - feedforward_channels=2048, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm')), - init_cfg=None), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0, - reduction='mean', - class_weight=[1.0] * num_classes + [0.1]), - loss_mask=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - reduction='mean', - loss_weight=20.0), - loss_dice=dict( - type='DiceLoss', - use_sigmoid=True, - activate=True, - reduction='mean', - naive_dice=True, - eps=1.0, - loss_weight=1.0)), - panoptic_fusion_head=dict( - type='MaskFormerFusionHead', - num_things_classes=num_things_classes, - num_stuff_classes=num_stuff_classes, - loss_panoptic=None, - init_cfg=None), - train_cfg=dict( - 
assigner=dict( - type='MaskHungarianAssigner', - cls_cost=dict(type='ClassificationCost', weight=1.0), - mask_cost=dict( - type='FocalLossCost', weight=20.0, binary_input=True), - dice_cost=dict( - type='DiceCost', weight=1.0, pred_act=True, eps=1.0)), - sampler=dict(type='MaskPseudoSampler')), - test_cfg=dict( - panoptic_on=True, - # For now, the dataset does not support - # evaluating semantic segmentation metric. - semantic_on=False, - instance_on=False, - # max_per_image is for instance segmentation. - max_per_image=100, - object_mask_thr=0.8, - iou_thr=0.8, - # In MaskFormer's panoptic postprocessing, - # it will not filter masks whose score is smaller than 0.5 . - filter_low_score=False), - init_cfg=None) - -# dataset settings -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadPanopticAnnotations', - with_bbox=True, - with_mask=True, - with_seg=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='AutoAugment', - policies=[[ - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict( - type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ]]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=1, - workers_per_gpu=1, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -optimizer = dict( - type='AdamW', - lr=0.0001, - weight_decay=0.0001, - eps=1e-8, - betas=(0.9, 0.999), - paramwise_cfg=dict( - custom_keys={ - 'backbone': dict(lr_mult=0.1, decay_mult=1.0), - 'query_embed': dict(lr_mult=1.0, decay_mult=0.0) - }, - norm_decay_mult=0.0)) -optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2)) - -# learning policy -lr_config = dict( - policy='step', - gamma=0.1, - by_epoch=True, - step=[50], - warmup='linear', - warmup_by_epoch=False, - warmup_ratio=1.0, # no warmup - warmup_iters=10) -runner = dict(type='EpochBasedRunner', max_epochs=75) diff --git a/cv/detection/co-detr/pytorch/configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py b/cv/detection/co-detr/pytorch/configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py deleted file mode 100644 index bc23c54d677e9f7f545995e2deff3d2c59c25d01..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py +++ /dev/null @@ -1,67 +0,0 @@ -_base_ = './maskformer_r50_mstrain_16x1_75e_coco.py' - -pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa -depths = [2, 2, 18, 2] -model = dict( - backbone=dict( - _delete_=True, - type='SwinTransformer', - pretrain_img_size=384, - embed_dims=192, - patch_size=4, - window_size=12, - mlp_ratio=4, - depths=depths, - num_heads=[6, 12, 24, 48], - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.3, - patch_norm=True, - out_indices=(0, 1, 2, 3), - with_cp=False, - convert_weights=True, - init_cfg=dict(type='Pretrained', checkpoint=pretrained)), - panoptic_head=dict( - in_channels=[192, 384, 768, 1536], # pass to pixel_decoder inside - pixel_decoder=dict( - _delete_=True, - type='PixelDecoder', - norm_cfg=dict(type='GN', num_groups=32), - act_cfg=dict(type='ReLU')), - enforce_decoder_input_project=True)) - -# weight_decay = 0.01 -# norm_weight_decay = 0.0 -# embed_weight_decay = 0.0 -embed_multi = dict(lr_mult=1.0, decay_mult=0.0) -norm_multi = dict(lr_mult=1.0, decay_mult=0.0) -custom_keys = { - 'norm': norm_multi, - 'absolute_pos_embed': embed_multi, - 'relative_position_bias_table': embed_multi, - 'query_embed': embed_multi -} - -# optimizer -optimizer = dict( - type='AdamW', - lr=6e-5, - weight_decay=0.01, - eps=1e-8, - betas=(0.9, 0.999), - paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0)) -optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2)) - -# learning policy -lr_config = dict( - policy='step', - gamma=0.1, - by_epoch=True, - step=[250], - warmup='linear', - warmup_by_epoch=False, - warmup_ratio=1e-6, - warmup_iters=1500) -runner = dict(type='EpochBasedRunner', max_epochs=300) diff --git a/cv/detection/co-detr/pytorch/configs/maskformer/metafile.yml b/cv/detection/co-detr/pytorch/configs/maskformer/metafile.yml deleted file mode 100644 index 6530fa144145b5be93b78733685e9591e4a05138..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/maskformer/metafile.yml +++ /dev/null @@ -1,43 +0,0 @@ -Collections: - - Name: MaskFormer - Metadata: - Training Data: COCO - Training Techniques: - - AdamW - - Weight Decay - Training Resources: 16x V100 GPUs - Architecture: - - MaskFormer - Paper: - URL: https://arxiv.org/pdf/2107.06278 - Title: 'Per-Pixel Classification is Not All You Need for Semantic Segmentation' - README: configs/maskformer/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/mmdet/models/detectors/maskformer.py#L7 - Version: v2.22.0 - -Models: - - Name: maskformer_r50_mstrain_16x1_75e_coco - In Collection: MaskFormer - Config: configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py - Metadata: - Training Memory (GB): 16.2 - Epochs: 75 - Results: - - Task: Panoptic Segmentation - Dataset: COCO - Metrics: - PQ: 46.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_r50_mstrain_16x1_75e_coco/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth - - Name: maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco - In Collection: MaskFormer - Config: configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py - Metadata: - Training Memory (GB): 27.2 - Epochs: 300 - Results: - - Task: Panoptic Segmentation - Dataset: COCO - Metrics: - PQ: 53.2 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco_20220326_221612-061b4eb8.pth diff --git a/cv/detection/co-detr/pytorch/configs/ms_rcnn/README.md b/cv/detection/co-detr/pytorch/configs/ms_rcnn/README.md deleted file mode 100644 index 97bca052d38a9deeed85f739dbd5ce715e3847b0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ms_rcnn/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# MS R-CNN - -> [Mask Scoring R-CNN](https://arxiv.org/abs/1903.00241) - - - -## Abstract - -Letting a deep network be aware of the quality of its own predictions is an interesting yet important problem. In the task of instance segmentation, the confidence of instance classification is used as mask quality score in most instance segmentation frameworks. However, the mask quality, quantified as the IoU between the instance mask and its ground truth, is usually not well correlated with classification score. In this paper, we study this problem and propose Mask Scoring R-CNN which contains a network block to learn the quality of the predicted instance masks. The proposed network block takes the instance feature and the corresponding predicted mask together to regress the mask IoU. The mask scoring strategy calibrates the misalignment between mask quality and mask score, and improves instance segmentation performance by prioritizing more accurate mask predictions during COCO AP evaluation. By extensive evaluations on the COCO dataset, Mask Scoring R-CNN brings consistent and noticeable gain with different models, and outperforms the state-of-the-art Mask R-CNN. We hope our simple and effective approach will provide a new direction for improving instance segmentation. - -
- -
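The rescoring idea described in the abstract can be illustrated with a minimal sketch, assuming the common formulation in which the regressed mask IoU simply reweights the classification confidence; the helper name and tensors below are illustrative, not the repository's API.

```python
import torch

def rescore_masks(cls_scores: torch.Tensor, mask_iou_pred: torch.Tensor) -> torch.Tensor:
    """Calibrate per-instance mask scores with the predicted mask IoU.

    cls_scores:    (N,) classification confidences from the box head
    mask_iou_pred: (N,) outputs of a MaskIoU-style head, one regressed IoU per instance
    """
    # A confidently classified instance with a poorly regressed mask is
    # down-weighted, which is the calibration effect the abstract describes.
    return cls_scores * mask_iou_pred.clamp(min=0.0, max=1.0)

# e.g. cls = 0.95 but predicted mask IoU = 0.4 -> calibrated mask score 0.38
print(rescore_masks(torch.tensor([0.95]), torch.tensor([0.4])))
```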
- -## Results and Models - -| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | caffe | 1x | 4.5 | | 38.2 | 36.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848.log.json) | -| R-50-FPN | caffe | 2x | - | - | 38.8 | 36.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco/ms_rcnn_r50_caffe_fpn_2x_coco_bbox_mAP-0.388__segm_mAP-0.363_20200506_004738-ee87b137.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco/ms_rcnn_r50_caffe_fpn_2x_coco_20200506_004738.log.json) | -| R-101-FPN | caffe | 1x | 6.5 | | 40.4 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco/ms_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.404__segm_mAP-0.376_20200506_004755-b9b12a37.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco/ms_rcnn_r101_caffe_fpn_1x_coco_20200506_004755.log.json) | -| R-101-FPN | caffe | 2x | - | - | 41.1 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco/ms_rcnn_r101_caffe_fpn_2x_coco_bbox_mAP-0.411__segm_mAP-0.381_20200506_011134-5f3cc74f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco/ms_rcnn_r101_caffe_fpn_2x_coco_20200506_011134.log.json) | -| R-X101-32x4d | pytorch | 2x | 7.9 | 11.0 | 41.8 | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206-81fd1740.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206_100113.log.json) | -| R-X101-64x4d | pytorch | 1x | 11.0 | 8.0 | 43.0 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206-86ba88d2.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206_091744.log.json) | -| R-X101-64x4d | pytorch | 2x | 11.0 | 8.0 | 42.6 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308-02a445e2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308_012247.log.json) | - -## Citation - -```latex -@inproceedings{huang2019msrcnn, - title={Mask Scoring R-CNN}, - author={Zhaojin Huang and Lichao Huang and Yongchao Gong and Chang Huang and Xinggang Wang}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - year={2019}, -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/ms_rcnn/metafile.yml b/cv/detection/co-detr/pytorch/configs/ms_rcnn/metafile.yml deleted file mode 100644 index a6c7dc595c7ded3bcf1933ea77fa34cb353bca30..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ms_rcnn/metafile.yml +++ /dev/null @@ -1,159 +0,0 @@ -Collections: - - Name: Mask Scoring R-CNN - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RPN - - FPN - - ResNet - - RoIAlign - Paper: - URL: https://arxiv.org/abs/1903.00241 - Title: 'Mask Scoring R-CNN' - README: configs/ms_rcnn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/mask_scoring_rcnn.py#L6 - Version: v2.0.0 - -Models: - - Name: ms_rcnn_r50_caffe_fpn_1x_coco - In Collection: Mask Scoring R-CNN - Config: configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 4.5 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth - - - Name: ms_rcnn_r50_caffe_fpn_2x_coco - In Collection: Mask Scoring R-CNN - Config: configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco/ms_rcnn_r50_caffe_fpn_2x_coco_bbox_mAP-0.388__segm_mAP-0.363_20200506_004738-ee87b137.pth - - - Name: ms_rcnn_r101_caffe_fpn_1x_coco - In Collection: Mask Scoring R-CNN - Config: configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.5 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco/ms_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.404__segm_mAP-0.376_20200506_004755-b9b12a37.pth - - - Name: ms_rcnn_r101_caffe_fpn_2x_coco - In Collection: Mask Scoring R-CNN - Config: configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.1 - - Task: Instance 
Segmentation - Dataset: COCO - Metrics: - mask AP: 38.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco/ms_rcnn_r101_caffe_fpn_2x_coco_bbox_mAP-0.411__segm_mAP-0.381_20200506_011134-5f3cc74f.pth - - - Name: ms_rcnn_x101_32x4d_fpn_1x_coco - In Collection: Mask Scoring R-CNN - Config: configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.9 - inference time (ms/im): - - value: 90.91 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206-81fd1740.pth - - - Name: ms_rcnn_x101_64x4d_fpn_1x_coco - In Collection: Mask Scoring R-CNN - Config: configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 11.0 - inference time (ms/im): - - value: 125 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206-86ba88d2.pth - - - Name: ms_rcnn_x101_64x4d_fpn_2x_coco - In Collection: Mask Scoring R-CNN - Config: configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py - Metadata: - Training Memory (GB): 11.0 - inference time (ms/im): - - value: 125 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.6 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308-02a445e2.pth diff --git a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py deleted file mode 100644 index 9b7dcbbf145bb9705ae9628440349f6a5fecc438..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py deleted file mode 100644 index 202bccedae84657737b0315394199208d0307ae4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './ms_rcnn_r101_caffe_fpn_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 
5845125a7b3ee70deeaa545c16d1211b4fcb1d06..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py' -model = dict( - type='MaskScoringRCNN', - roi_head=dict( - type='MaskScoringRoIHead', - mask_iou_head=dict( - type='MaskIoUHead', - num_convs=4, - num_fcs=2, - roi_feat_size=14, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - num_classes=80)), - # model training and testing settings - train_cfg=dict(rcnn=dict(mask_thr_binary=0.5))) diff --git a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py deleted file mode 100644 index 008a70ae67454c3fd470c29ffd000b18db391c8e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index 0a163ce445c35d51a9d8940e46697c5c6a39d354..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - type='MaskScoringRCNN', - roi_head=dict( - type='MaskScoringRoIHead', - mask_iou_head=dict( - type='MaskIoUHead', - num_convs=4, - num_fcs=2, - roi_feat_size=14, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - num_classes=80)), - # model training and testing settings - train_cfg=dict(rcnn=dict(mask_thr_binary=0.5))) diff --git a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index 20479bbd70ce039789d8df346d270fde898bbc26..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './ms_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index ee5b7341663049f6eb8b99c8fec1f54964c698aa..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './ms_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py 
b/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py deleted file mode 100644 index 54c605b94aa5fc8b1ddf2267ed349c2fcd08cc9e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './ms_rcnn_x101_64x4d_fpn_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/nas_fcos/README.md b/cv/detection/co-detr/pytorch/configs/nas_fcos/README.md deleted file mode 100644 index def883174e3e73cf0b71eec820f3e3767080d765..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/nas_fcos/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# NAS-FCOS - -> [NAS-FCOS: Fast Neural Architecture Search for Object Detection](https://arxiv.org/abs/1906.04423) - - - -## Abstract - -The success of deep neural networks relies on significant architecture engineering. Recently neural architecture search (NAS) has emerged as a promise to greatly reduce manual effort in network design by automatically searching for optimal architectures, although typically such algorithms need an excessive amount of computational resources, e.g., a few thousand GPU-days. To date, on challenging vision tasks such as object detection, NAS, especially fast versions of NAS, is less studied. Here we propose to search for the decoder structure of object detectors with search efficiency being taken into consideration. To be more specific, we aim to efficiently search for the feature pyramid network (FPN) as well as the prediction head of a simple anchor-free object detector, namely FCOS, using a tailored reinforcement learning paradigm. With carefully designed search space, search algorithms and strategies for evaluating network quality, we are able to efficiently search a top-performing detection architecture within 4 days using 8 V100 GPUs. The discovered architecture surpasses state-of-the-art object detection models (such as Faster R-CNN, RetinaNet and FCOS) by 1.5 to 3.5 points in AP on the COCO dataset, with comparable computation complexity and memory footprint, demonstrating the efficacy of the proposed NAS for object detection. - -
- -
- -## Results and Models - -| Head | Backbone | Style | GN-head | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :----------: | :------: | :---: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| NAS-FCOSHead | R-50 | caffe | Y | 1x | | | 39.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520.log.json) | -| FCOSHead | R-50 | caffe | Y | 1x | | | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521-7fdcbce0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521.log.json) | - -**Notes:** - -- To be consistent with the author's implementation, we use 4 GPUs with 4 images/GPU. 
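If that 4 GPU x 4 images/GPU setup is not available, a hypothetical override in the same `_base_` inheritance style used by these configs keeps the effective batch size at 16; the file name and worker count below are illustrative and not part of this diff.

```python
# nas_fcos_nashead_r50_caffe_fpn_gn-head_8x2_1x_coco.py (hypothetical)
_base_ = './nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py'

# 8 GPUs x 2 samples/GPU preserves the paper's effective batch size of 16,
# so the base lr=0.01 set in the parent config can stay unchanged.
data = dict(samples_per_gpu=2, workers_per_gpu=2)
```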
- -## Citation - -```latex -@article{wang2019fcos, - title={Nas-fcos: Fast neural architecture search for object detection}, - author={Wang, Ning and Gao, Yang and Chen, Hao and Wang, Peng and Tian, Zhi and Shen, Chunhua}, - journal={arXiv preprint arXiv:1906.04423}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/nas_fcos/metafile.yml b/cv/detection/co-detr/pytorch/configs/nas_fcos/metafile.yml deleted file mode 100644 index 1ea28cfc34034c75d96f8d2f52b54dffa13c75d5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/nas_fcos/metafile.yml +++ /dev/null @@ -1,44 +0,0 @@ -Collections: - - Name: NAS-FCOS - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 4x V100 GPUs - Architecture: - - FPN - - NAS-FCOS - - ResNet - Paper: - URL: https://arxiv.org/abs/1906.04423 - Title: 'NAS-FCOS: Fast Neural Architecture Search for Object Detection' - README: configs/nas_fcos/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/detectors/nasfcos.py#L6 - Version: v2.1.0 - -Models: - - Name: nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco - In Collection: NAS-FCOS - Config: configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth - - - Name: nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco - In Collection: NAS-FCOS - Config: configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521-7fdcbce0.pth diff --git a/cv/detection/co-detr/pytorch/configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py b/cv/detection/co-detr/pytorch/configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py deleted file mode 100644 index a455c9285cc892c8766df28d526fcd106272a09e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py +++ /dev/null @@ -1,100 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - type='NASFCOS', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False, eps=0), - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - neck=dict( - type='NASFCOS_FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs=True, - num_outs=5, - norm_cfg=dict(type='BN'), - conv_cfg=dict(type='DCNv2', deform_groups=2)), - bbox_head=dict( - type='FCOSHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - strides=[8, 16, 32, 64, 128], - norm_cfg=dict(type='GN', num_groups=32), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='IoULoss', loss_weight=1.0), 
- loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) - -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) - -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] - -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - samples_per_gpu=4, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -optimizer = dict( - lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) diff --git a/cv/detection/co-detr/pytorch/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py b/cv/detection/co-detr/pytorch/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py deleted file mode 100644 index b779492527850ca8ea52f7aa8c17d6c3543fa368..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py +++ /dev/null @@ -1,99 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - type='NASFCOS', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False, eps=0), - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - neck=dict( - type='NASFCOS_FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs=True, - num_outs=5, - norm_cfg=dict(type='BN'), - conv_cfg=dict(type='DCNv2', deform_groups=2)), - bbox_head=dict( - type='NASFCOSHead', - num_classes=80, - in_channels=256, - feat_channels=256, - strides=[8, 16, 32, 64, 128], - norm_cfg=dict(type='GN', num_groups=32), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='IoULoss', loss_weight=1.0), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) - -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) - -train_pipeline = [ - 
dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] - -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - samples_per_gpu=4, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -optimizer = dict( - lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) diff --git a/cv/detection/co-detr/pytorch/configs/nas_fpn/README.md b/cv/detection/co-detr/pytorch/configs/nas_fpn/README.md deleted file mode 100644 index c5acf4053e00ec3b194721b51fbcf40400880730..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/nas_fpn/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# NAS-FPN - -> [NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection](https://arxiv.org/abs/1904.07392) - - - -## Abstract - -Current state-of-the-art convolutional architectures for object detection are manually designed. Here we aim to learn a better architecture of feature pyramid network for object detection. We adopt Neural Architecture Search and discover a new feature pyramid architecture in a novel scalable search space covering all cross-scale connections. The discovered architecture, named NAS-FPN, consists of a combination of top-down and bottom-up connections to fuse features across scales. NAS-FPN, combined with various backbone models in the RetinaNet framework, achieves better accuracy and latency tradeoff compared to state-of-the-art object detection models. NAS-FPN improves mobile detection accuracy by 2 AP compared to state-of-the-art SSDLite with MobileNetV2 model in \[32\] and achieves 48.3 AP which surpasses Mask R-CNN \[10\] detection accuracy with less computation time. - -
- -
- -## Results and Models - -We benchmark the new training schedule (crop training, large batch, unfrozen BN, 50 epochs) introduced in NAS-FPN. RetinaNet is used in the paper. - -| Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :---------: | :-----: | :------: | :------------: | :----: | :----------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | 50e | 12.9 | 22.9 | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco-9b953d76.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco_20200529_095329.log.json) | -| R-50-NASFPN | 50e | 13.2 | 23.0 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco_20200528_230008.log.json) | - -**Note**: We find that it is unstable to train NAS-FPN and there is a small chance that results can be 3% mAP lower. 
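The "large batch" part of this schedule follows the usual linear learning-rate scaling rule: the configs below set `lr=0.08` for 8 GPUs x 8 samples/GPU and pin `auto_scale_lr = dict(base_batch_size=64)` so the rate can be rescaled when the batch changes. A small sketch of that rule, assuming automatic LR scaling is enabled at launch; the helper below is illustrative, not the library's API.

```python
def linearly_scaled_lr(base_lr: float, base_batch_size: int,
                       num_gpus: int, samples_per_gpu: int) -> float:
    """Linear scaling rule: the learning rate follows the total batch size."""
    return base_lr * (num_gpus * samples_per_gpu) / base_batch_size

print(linearly_scaled_lr(0.08, 64, 8, 8))  # 0.08 -- reference 64-image batch, unchanged
print(linearly_scaled_lr(0.08, 64, 4, 8))  # 0.04 -- half the GPUs, half the rate
```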
- -## Citation - -```latex -@inproceedings{ghiasi2019fpn, - title={Nas-fpn: Learning scalable feature pyramid architecture for object detection}, - author={Ghiasi, Golnaz and Lin, Tsung-Yi and Le, Quoc V}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={7036--7045}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/nas_fpn/metafile.yml b/cv/detection/co-detr/pytorch/configs/nas_fpn/metafile.yml deleted file mode 100644 index ab8d649795d7847bdb1596f8aee845dbe02fb291..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/nas_fpn/metafile.yml +++ /dev/null @@ -1,59 +0,0 @@ -Collections: - - Name: NAS-FPN - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - NAS-FPN - - ResNet - Paper: - URL: https://arxiv.org/abs/1904.07392 - Title: 'NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection' - README: configs/nas_fpn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/nas_fpn.py#L67 - Version: v2.0.0 - -Models: - - Name: retinanet_r50_fpn_crop640_50e_coco - In Collection: NAS-FPN - Config: configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py - Metadata: - Training Memory (GB): 12.9 - inference time (ms/im): - - value: 43.67 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 50 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco-9b953d76.pth - - - Name: retinanet_r50_nasfpn_crop640_50e_coco - In Collection: NAS-FPN - Config: configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py - Metadata: - Training Memory (GB): 13.2 - inference time (ms/im): - - value: 43.48 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 50 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth diff --git a/cv/detection/co-detr/pytorch/configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py b/cv/detection/co-detr/pytorch/configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py deleted file mode 100644 index e4408fe8f2110021bb99705de16f32613f22c1b3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py +++ /dev/null @@ -1,85 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' -] -cudnn_benchmark = True -norm_cfg = dict(type='BN', requires_grad=True) -model = dict( - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - relu_before_extra_convs=True, - no_norm_on_lateral=True, - norm_cfg=norm_cfg), - bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg), - # training and testing settings - train_cfg=dict(assigner=dict(neg_iou_thr=0.5))) -# dataset settings -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) 
-train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=(640, 640), - ratio_range=(0.8, 1.2), - keep_ratio=True), - dict(type='RandomCrop', crop_size=(640, 640)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=(640, 640)), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(640, 640), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=64), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict( - type='SGD', - lr=0.08, - momentum=0.9, - weight_decay=0.0001, - paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=0.1, - step=[30, 40]) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=50) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py b/cv/detection/co-detr/pytorch/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py deleted file mode 100644 index 1387a10fc033bb69e731f3ca971304f16e94cba2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py +++ /dev/null @@ -1,84 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' -] -cudnn_benchmark = True -# model settings -norm_cfg = dict(type='BN', requires_grad=True) -model = dict( - type='RetinaNet', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict(type='NASFPN', stack_times=7, norm_cfg=norm_cfg), - bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg), - # training and testing settings - train_cfg=dict(assigner=dict(neg_iou_thr=0.5))) -# dataset settings -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=(640, 640), - ratio_range=(0.8, 1.2), - keep_ratio=True), - dict(type='RandomCrop', crop_size=(640, 640)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=(640, 640)), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(640, 640), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), 
- dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=128), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict( - type='SGD', - lr=0.08, - momentum=0.9, - weight_decay=0.0001, - paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True)) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=0.1, - step=[30, 40]) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=50) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/openimages/README.md b/cv/detection/co-detr/pytorch/configs/openimages/README.md deleted file mode 100644 index e5c1c274a8f59c4b3d9aad636aa34b85910991b2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/openimages/README.md +++ /dev/null @@ -1,148 +0,0 @@ -# Open Images Dataset - -> [Open Images Dataset](https://arxiv.org/abs/1811.00982) - - - -## Abstract - - - -#### Open Images v6 - -[Open Images](https://storage.googleapis.com/openimages/web/index.html) is a dataset of ~9M images annotated with image-level labels, -object bounding boxes, object segmentation masks, visual relationships, -and localized narratives: - -- It contains a total of 16M bounding boxes for 600 object classes on - 1.9M images, making it the largest existing dataset with object location - annotations. The boxes have been largely manually drawn by professional - annotators to ensure accuracy and consistency. The images are very diverse - and often contain complex scenes with several objects (8.3 per image on - average). - -- Open Images also offers visual relationship annotations, indicating pairs - of objects in particular relations (e.g. "woman playing guitar", "beer on - table"), object properties (e.g. "table is wooden"), and human actions (e.g. - "woman is jumping"). In total it has 3.3M annotations from 1,466 distinct - relationship triplets. - -- In V5 we added segmentation masks for 2.8M object instances in 350 classes. - Segmentation masks mark the outline of objects, which characterizes their - spatial extent to a much higher level of detail. - -- In V6 we added 675k localized narratives: multimodal descriptions of images - consisting of synchronized voice, text, and mouse traces over the objects being - described. (Note we originally launched localized narratives only on train in V6, - but since July 2020 we also have validation and test covered.) - -- Finally, the dataset is annotated with 59.9M image-level labels spanning 19,957 - classes. - -We believe that having a single dataset with unified annotations for image -classification, object detection, visual relationship detection, instance -segmentation, and multimodal image descriptions will enable to study these -tasks jointly and stimulate progress towards genuine scene understanding. - - - -
- -
- -#### Open Images Challenge 2019 - -[Open Images Challenges 2019](https://storage.googleapis.com/openimages/web/challenge2019.html) is based on the V5 release of the Open -Images dataset. The images of the dataset are very varied and -often contain complex scenes with several objects (explore the dataset). - -## Citation - -``` -@article{OpenImages, - author = {Alina Kuznetsova and Hassan Rom and Neil Alldrin and Jasper Uijlings and Ivan Krasin and Jordi Pont-Tuset and Shahab Kamali and Stefan Popov and Matteo Malloci and Alexander Kolesnikov and Tom Duerig and Vittorio Ferrari}, - title = {The Open Images Dataset V4: Unified image classification, object detection, and visual relationship detection at scale}, - year = {2020}, - journal = {IJCV} -} -``` - -## Prepare Dataset - -1. You need to download and extract Open Images dataset. - -2. The Open Images dataset does not have image metas (width and height of the image), - which will be used during evaluation. We suggest to get test image metas before - training/testing by using `tools/misc/get_image_metas.py`. - - **Usage** - - ```shell - python tools/misc/get_image_metas.py ${CONFIG} \ - --out ${OUTPUT FILE NAME} - ``` - -3. The directory should be like this: - - ```none - mmdetection - ├── mmdet - ├── tools - ├── configs - ├── data - │ ├── OpenImages - │ │ ├── annotations - │ │ │ ├── bbox_labels_600_hierarchy.json - │ │ │ ├── class-descriptions-boxable.csv - │ │ │ ├── oidv6-train-annotations-bbox.scv - │ │ │ ├── validation-annotations-bbox.csv - │ │ │ ├── validation-annotations-human-imagelabels-boxable.csv - │ │ │ ├── validation-image-metas.pkl # get from script - │ │ ├── challenge2019 - │ │ │ ├── challenge-2019-train-detection-bbox.txt - │ │ │ ├── challenge-2019-validation-detection-bbox.txt - │ │ │ ├── class_label_tree.np - │ │ │ ├── class_sample_train.pkl - │ │ │ ├── challenge-2019-validation-detection-human-imagelabels.csv # download from official website - │ │ │ ├── challenge-2019-validation-metas.pkl # get from script - │ │ ├── OpenImages - │ │ │ ├── train # training images - │ │ │ ├── test # testing images - │ │ │ ├── validation # validation images - ``` - -**Note**: - -1. The training and validation images of Open Images Challenge dataset are based on - Open Images v6, but the test images are different. -2. The Open Images Challenges annotations are obtained from [TSD](https://github.com/Sense-X/TSD). - You can also download the annotations from [official website](https://storage.googleapis.com/openimages/web/challenge2019_downloads.html), - and set data.train.type=OpenImagesDataset, data.val.type=OpenImagesDataset, and data.test.type=OpenImagesDataset in the config -3. If users do not want to use `validation-annotations-human-imagelabels-boxable.csv` and `challenge-2019-validation-detection-human-imagelabels.csv` - users can set `data.val.load_image_level_labels=False` and `data.test.load_image_level_labels=False` in the config. - Please note that loading image-levels label is the default of Open Images evaluation metric. 
- More details please refer to the [official website](https://storage.googleapis.com/openimages/web/evaluation.html) - -## Results and Models - -| Architecture | Backbone | Style | Lr schd | Sampler | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :---------------------------: | :------: | :-----: | :-----: | :-----------------: | :------: | :------------: | :----: | :----------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Faster R-CNN | R-50 | pytorch | 1x | Group Sampler | 7.7 | - | 51.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_20211130_231159-e87ab7ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_20211130_231159.log.json) | -| Faster R-CNN | R-50 | pytorch | 1x | Class Aware Sampler | 7.7 | - | 60.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_20220306_202424-98c630e5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_20220306_202424.log.json) | -| Faster R-CNN (Challenge 2019) | R-50 | pytorch | 1x | Group Sampler | 7.7 | - | 54.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge_20220114_045100-0e79e5df.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge_20220114_045100.log.json) | -| Faster R-CNN (Challenge 2019) | R-50 | pytorch | 1x | Class Aware Sampler | 7.1 | - | 65.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge_20220221_192021-34c402d9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge_20220221_192021.log.json) | -| Retinanet | R-50 | pytorch | 1x | Group Sampler | 6.6 | - | 61.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/openimages/retinanet_r50_fpn_32x2_1x_openimages/retinanet_r50_fpn_32x2_1x_openimages_20211223_071954-d2ae5462.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/retinanet_r50_fpn_32x2_1x_openimages/retinanet_r50_fpn_32x2_1x_openimages_20211223_071954.log.json) | -| SSD | VGG16 | pytorch | 36e | Group Sampler | 10.8 | - | 35.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/ssd300_32x8_36e_openimages.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/ssd300_32x8_36e_openimages/ssd300_32x8_36e_openimages_20211224_000232-dce93846.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/ssd300_32x8_36e_openimages/ssd300_32x8_36e_openimages_20211224_000232.log.json) | - -**Notes:** - -- 'cas' is short for 'Class Aware Sampler' - -### Results of consider image level labels - -| Architecture | Sampler | Consider Image Level Labels | box AP | -| :-------------------------------: | :-----------------: | :-------------------------: | :----: | -| Faster R-CNN r50 (Challenge 2019) | Group Sampler | w/o | 62.19 | -| Faster R-CNN r50 (Challenge 2019) | Group Sampler | w/ | 54.87 | -| Faster R-CNN r50 (Challenge 2019) | Class Aware Sampler | w/o | 71.77 | -| Faster R-CNN r50 (Challenge 2019) | Class Aware Sampler | w/ | 64.98 | diff --git a/cv/detection/co-detr/pytorch/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py b/cv/detection/co-detr/pytorch/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py deleted file mode 100644 index 3dfc341bb5828e4373eccc456b61e3a10569b304..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py +++ /dev/null @@ -1,23 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/openimages_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict(roi_head=dict(bbox_head=dict(num_classes=601))) - -# Using 32 GPUS while training -optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=26000, - warmup_ratio=1.0 / 64, - step=[8, 11]) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (32 GPUs) x (2 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py b/cv/detection/co-detr/pytorch/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py deleted file mode 100644 index c8900adc3463a5dc087c53fe92556429f28898c1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py +++ /dev/null @@ -1,47 +0,0 @@ -_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py'] - -model = dict( - roi_head=dict(bbox_head=dict(num_classes=500)), - test_cfg=dict(rcnn=dict(score_thr=0.01))) - -# dataset settings -dataset_type = 'OpenImagesChallengeDataset' -data_root = 'data/OpenImages/' -data = dict( - train=dict( - type=dataset_type, - ann_file=data_root + - 'challenge2019/challenge-2019-train-detection-bbox.txt', - img_prefix=data_root + 'OpenImages/', - label_file=data_root + 'challenge2019/cls-label-description.csv', - hierarchy_file=data_root + 'challenge2019/class_label_tree.np'), - val=dict( - type=dataset_type, - ann_file=data_root + - 'challenge2019/challenge-2019-validation-detection-bbox.txt', - img_prefix=data_root + 'OpenImages/', - label_file=data_root + 'challenge2019/cls-label-description.csv', - hierarchy_file=data_root + 'challenge2019/class_label_tree.np', - meta_file=data_root + - 'challenge2019/challenge-2019-validation-metas.pkl', - image_level_ann_file=data_root + - 'challenge2019/challenge-2019-validation-detection-' - 'human-imagelabels.csv'), - test=dict( - type=dataset_type, - ann_file=data_root + - 'challenge2019/challenge-2019-validation-detection-bbox.txt', - img_prefix=data_root + 'OpenImages/', - label_file=data_root + 'challenge2019/cls-label-description.csv', - hierarchy_file=data_root + 'challenge2019/class_label_tree.np', - meta_file=data_root + - 'challenge2019/challenge-2019-validation-metas.pkl', - image_level_ann_file=data_root + - 'challenge2019/challenge-2019-validation-detection-' - 'human-imagelabels.csv')) -evaluation = dict(interval=1, metric='mAP') - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (32 GPUs) x (2 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py b/cv/detection/co-detr/pytorch/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py deleted file mode 100644 index 88d029d699db844f6a5fe0e08008314e5990e671..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py'] - -# Use ClassAwareSampler -data = dict( - train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1))) diff --git a/cv/detection/co-detr/pytorch/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py b/cv/detection/co-detr/pytorch/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py deleted file mode 100644 index 26bd64e6126d28f536200ca2b57fa5c6f87bb1e3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py'] - -# Use ClassAwareSampler -data = dict( - train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1))) diff --git a/cv/detection/co-detr/pytorch/configs/openimages/metafile.yml b/cv/detection/co-detr/pytorch/configs/openimages/metafile.yml deleted file mode 100644 index d9f924ed1e514e695fb0c73242c9d6c672068c9e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/openimages/metafile.yml +++ /dev/null @@ -1,102 +0,0 @@ -Models: - - Name: faster_rcnn_r50_fpn_32x2_1x_openimages - In Collection: Faster R-CNN - Config: configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py - Metadata: - Training Memory (GB): 7.7 - Epochs: 12 - Training Data: Open Images v6 - Training Techniques: - - SGD with Momentum - - Weight Decay - Results: - - Task: Object Detection - Dataset: Open Images v6 - Metrics: - box AP: 51.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_20211130_231159-e87ab7ce.pth - - - Name: retinanet_r50_fpn_32x2_1x_openimages - In Collection: RetinaNet - Config: configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py - Metadata: - Training Memory (GB): 6.6 - Epochs: 12 - Training Data: Open Images v6 - Training Techniques: - - SGD with Momentum - - Weight Decay - Results: - - Task: Object Detection - Dataset: Open Images v6 - Metrics: - box AP: 61.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/retinanet_r50_fpn_32x2_1x_openimages/retinanet_r50_fpn_32x2_1x_openimages_20211223_071954-d2ae5462.pth - - - Name: ssd300_32x8_36e_openimages - In Collection: SSD - Config: configs/openimages/ssd300_32x8_36e_openimages.py - Metadata: - Training Memory (GB): 10.8 - Epochs: 36 - Training Data: Open Images v6 - Training Techniques: - - SGD with Momentum - - Weight Decay - Results: - - Task: Object Detection - Dataset: Open Images v6 - Metrics: - box AP: 35.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/ssd300_32x8_36e_openimages/ssd300_32x8_36e_openimages_20211224_000232-dce93846.pth - - - Name: faster_rcnn_r50_fpn_32x2_1x_openimages_challenge - In Collection: Faster R-CNN - Config: configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py - Metadata: - Training Memory 
(GB): 7.7 - Epochs: 12 - Training Data: Open Images Challenge 2019 - Training Techniques: - - SGD with Momentum - - Weight Decay - Results: - - Task: Object Detection - Dataset: Open Images Challenge 2019 - Metrics: - box AP: 54.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge_20220114_045100-0e79e5df.pth - - - Name: faster_rcnn_r50_fpn_32x2_cas_1x_openimages - In Collection: Faster R-CNN - Config: configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py - Metadata: - Training Memory (GB): 7.7 - Epochs: 12 - Training Data: Open Images Challenge 2019 - Training Techniques: - - SGD with Momentum - - Weight Decay - Results: - - Task: Object Detection - Dataset: Open Images Challenge 2019 - Metrics: - box AP: 60.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_20220306_202424-98c630e5.pth - - - Name: faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge - In Collection: Faster R-CNN - Config: configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py - Metadata: - Training Memory (GB): 7.1 - Epochs: 12 - Training Data: Open Images Challenge 2019 - Training Techniques: - - SGD with Momentum - - Weight Decay - Results: - - Task: Object Detection - Dataset: Open Images Challenge 2019 - Metrics: - box AP: 65.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge_20220221_192021-34c402d9.pth diff --git a/cv/detection/co-detr/pytorch/configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py b/cv/detection/co-detr/pytorch/configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py deleted file mode 100644 index 0191aa16358fa65920f7a515d29320df15e3374e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py +++ /dev/null @@ -1,22 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/openimages_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict(bbox_head=dict(num_classes=601)) - -optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=26000, - warmup_ratio=1.0 / 64, - step=[8, 11]) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (32 GPUs) x (2 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/openimages/ssd300_32x8_36e_openimages.py b/cv/detection/co-detr/pytorch/configs/openimages/ssd300_32x8_36e_openimages.py deleted file mode 100644 index e2565b98e212d71d56c3fcb2b6e50a7809854e78..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/openimages/ssd300_32x8_36e_openimages.py +++ /dev/null @@ -1,83 +0,0 @@ -_base_ = [ - '../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py' -] -model = dict( - bbox_head=dict( - num_classes=601, - anchor_generator=dict(basesize_ratio_range=(0.2, 0.9)))) -# dataset settings -dataset_type = 'OpenImagesDataset' -data_root = 'data/OpenImages/' -img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True, normed_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 4)), - dict( - type='MinIoURandomCrop', - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(300, 300), keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(300, 300), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, # using 32 GPUS while training. - workers_per_gpu=0, # workers_per_gpu > 0 may occur out of memory - train=dict( - _delete_=True, - type='RepeatDataset', - times=3, - dataset=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/oidv6-train-annotations-bbox.csv', - img_prefix=data_root + 'OpenImages/train/', - label_file=data_root + - 'annotations/class-descriptions-boxable.csv', - hierarchy_file=data_root + - 'annotations/bbox_labels_600_hierarchy.json', - pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=5e-4) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=20000, - warmup_ratio=0.001, - step=[8, 11]) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (32 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=256) diff --git a/cv/detection/co-detr/pytorch/configs/paa/README.md b/cv/detection/co-detr/pytorch/configs/paa/README.md deleted file mode 100644 index c8861ece1d9616fa0230b7e2a2a2481ebe8c69db..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/paa/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# PAA - -> [Probabilistic Anchor Assignment with IoU Prediction for Object Detection](https://arxiv.org/abs/2007.08103) - - - -## Abstract - -In object detection, determining which anchors to assign as positive or negative samples, known as anchor assignment, has been revealed as a core procedure that can significantly affect a model's performance. In this paper we propose a novel anchor assignment strategy that adaptively separates anchors into positive and negative samples for a ground truth bounding box according to the model's learning status such that it is able to reason about the separation in a probabilistic manner. To do so we first calculate the scores of anchors conditioned on the model and fit a probability distribution to these scores. The model is then trained with anchors separated into positive and negative samples according to their probabilities. Moreover, we investigate the gap between the training and testing objectives and propose to predict the Intersection-over-Unions of detected boxes as a measure of localization quality to reduce the discrepancy. The combined score of classification and localization qualities serving as a box selection metric in non-maximum suppression well aligns with the proposed anchor assignment strategy and leads significant performance improvements. The proposed methods only add a single convolutional layer to RetinaNet baseline and does not require multiple anchors per location, so are efficient. Experimental results verify the effectiveness of the proposed methods. Especially, our models set new records for single-stage detectors on MS COCO test-dev dataset with various backbones. - -
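As a rough illustration of the assignment step described in the abstract (score candidate anchors under the current model, fit a probability distribution to the scores, and split positives from negatives by that distribution), the sketch below fits a two-component Gaussian mixture to per-GT anchor scores. It is a simplification, not the `PAAHead` code shipped with these configs: the score definition, the `topk` candidate pool, and the use of scikit-learn's `GaussianMixture` are all illustrative assumptions.

```python
# Illustrative PAA-style anchor assignment for a single ground-truth box.
# Assumptions: anchor "scores" are given (the real PAAHead uses a combined
# cls+loc loss), and a plain two-component Gaussian mixture separates the
# positive and negative modes.
import numpy as np
from sklearn.mixture import GaussianMixture

def paa_assign(anchor_scores, topk=9):
    """anchor_scores: (num_anchors,), higher = better fit to the GT box."""
    # Keep the top-k scoring candidates as the fitting set.
    cand_idx = np.argsort(anchor_scores)[::-1][:topk]
    cand_scores = anchor_scores[cand_idx].reshape(-1, 1)

    # Fit a 2-component GMM; the component with the larger mean is "positive".
    gmm = GaussianMixture(n_components=2, random_state=0).fit(cand_scores)
    pos_component = int(np.argmax(gmm.means_.ravel()))

    # Candidates most likely under the positive component become positives.
    labels = gmm.predict(cand_scores)
    return cand_idx[labels == pos_component], cand_idx[labels != pos_component]

# Toy usage: 20 candidate anchors, 5 of which clearly overlap the GT box.
rng = np.random.default_rng(0)
scores = np.concatenate([rng.uniform(0.0, 0.3, 15), rng.uniform(0.6, 0.9, 5)])
pos, neg = paa_assign(scores)
print("positives:", pos, "negatives:", neg)
```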
- -## Results and Models - -We provide config files to reproduce the object detection results in the -ECCV 2020 paper for Probabilistic Anchor Assignment with IoU -Prediction for Object Detection. - -| Backbone | Lr schd | Mem (GB) | Score voting | box AP | Config | Download | -| :-------: | :-----: | :------: | :----------: | :----: | :---------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | 12e | 3.7 | True | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.log.json) | -| R-50-FPN | 12e | 3.7 | False | 40.2 | - | | -| R-50-FPN | 18e | 3.7 | True | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_1.5x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.log.json) | -| R-50-FPN | 18e | 3.7 | False | 41.2 | - | | -| R-50-FPN | 24e | 3.7 | True | 41.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.log.json) | -| R-50-FPN | 36e | 3.7 | True | 43.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722-06a6880b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722.log.json) | -| R-101-FPN | 12e | 6.2 | True | 42.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.log.json) | -| R-101-FPN | 12e | 6.2 | False | 42.4 | - | | -| R-101-FPN | 24e | 6.2 | True | 43.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.log.json) | -| R-101-FPN | 36e | 6.2 | True | 45.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r101_fpn_mstrain_3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202-83250d22.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202.log.json) | - -**Note**: - -1. We find that the performance is unstable with 1x setting and may fluctuate by about 0.2 mAP. We report the best results. - -## Citation - -```latex -@inproceedings{paa-eccv2020, - title={Probabilistic Anchor Assignment with IoU Prediction for Object Detection}, - author={Kim, Kang and Lee, Hee Seok}, - booktitle = {ECCV}, - year={2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/paa/metafile.yml b/cv/detection/co-detr/pytorch/configs/paa/metafile.yml deleted file mode 100644 index e08b663a7c45ff70e51b8da46fba27cfcc8aca88..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/paa/metafile.yml +++ /dev/null @@ -1,104 +0,0 @@ -Collections: - - Name: PAA - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - Probabilistic Anchor Assignment - - ResNet - Paper: - URL: https://arxiv.org/abs/2007.08103 - Title: 'Probabilistic Anchor Assignment with IoU Prediction for Object Detection' - README: configs/paa/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.4.0/mmdet/models/detectors/paa.py#L6 - Version: v2.4.0 - -Models: - - Name: paa_r50_fpn_1x_coco - In Collection: PAA - Config: configs/paa/paa_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 3.7 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth - - - Name: paa_r50_fpn_1.5x_coco - In Collection: PAA - Config: configs/paa/paa_r50_fpn_1.5x_coco.py - Metadata: - Training Memory (GB): 3.7 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.pth - - - Name: paa_r50_fpn_2x_coco - In Collection: PAA - Config: configs/paa/paa_r50_fpn_2x_coco.py - Metadata: - Training Memory (GB): 3.7 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.pth - - - Name: paa_r50_fpn_mstrain_3x_coco - In Collection: PAA - Config: configs/paa/paa_r50_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 3.7 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722-06a6880b.pth - - - Name: paa_r101_fpn_1x_coco - In Collection: PAA - Config: configs/paa/paa_r101_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.2 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth - - - Name: paa_r101_fpn_2x_coco - In Collection: PAA - Config: configs/paa/paa_r101_fpn_2x_coco.py - Metadata: - Training Memory (GB): 6.2 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.5 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.pth - - - Name: paa_r101_fpn_mstrain_3x_coco - In Collection: PAA - Config: configs/paa/paa_r101_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 6.2 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202-83250d22.pth diff --git a/cv/detection/co-detr/pytorch/configs/paa/paa_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/paa/paa_r101_fpn_1x_coco.py deleted file mode 100644 index 94f1c278dc16c1befbca510ca0ac5ba407969f6d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/paa/paa_r101_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './paa_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/paa/paa_r101_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/paa/paa_r101_fpn_2x_coco.py deleted file mode 100644 index 641ef764d2713184845b624b20db1771cfcd6739..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/paa/paa_r101_fpn_2x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './paa_r101_fpn_1x_coco.py' -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/paa/paa_r101_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/paa/paa_r101_fpn_mstrain_3x_coco.py deleted file mode 100644 index 71858ed65c7fa998fdc960161689be083bdb4e62..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/paa/paa_r101_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './paa_r50_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/paa/paa_r50_fpn_1.5x_coco.py b/cv/detection/co-detr/pytorch/configs/paa/paa_r50_fpn_1.5x_coco.py deleted file mode 100644 index aabce4af987aa5504e1748e10b9955f760a013e1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/paa/paa_r50_fpn_1.5x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './paa_r50_fpn_1x_coco.py' -lr_config = dict(step=[12, 16]) -runner = dict(type='EpochBasedRunner', max_epochs=18) diff --git a/cv/detection/co-detr/pytorch/configs/paa/paa_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/paa/paa_r50_fpn_1x_coco.py deleted file mode 100644 index 4c9c4aa73e1190da0edf1f20ffc3e60654cf87b1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/paa/paa_r50_fpn_1x_coco.py +++ /dev/null @@ -1,70 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - type='PAA', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5), - bbox_head=dict( - type='PAAHead', - reg_decoded_bbox=True, - 
score_voting=True, - topk=9, - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=1.3), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.1, - neg_iou_thr=0.1, - min_pos_iou=0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/paa/paa_r50_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/paa/paa_r50_fpn_2x_coco.py deleted file mode 100644 index 663d2c0ded52086663360a8a3dce89702584fc1f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/paa/paa_r50_fpn_2x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './paa_r50_fpn_1x_coco.py' -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/paa/paa_r50_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/paa/paa_r50_fpn_mstrain_3x_coco.py deleted file mode 100644 index 91fa28cde470cb323f90f89a56d8acb6f9f0a22e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/paa/paa_r50_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,20 +0,0 @@ -_base_ = './paa_r50_fpn_1x_coco.py' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -data = dict(train=dict(pipeline=train_pipeline)) -lr_config = dict(step=[28, 34]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/pafpn/README.md b/cv/detection/co-detr/pytorch/configs/pafpn/README.md deleted file mode 100644 index ae1e3a38bad664d1b5a0142d8b2034cbac29f06f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pafpn/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# PAFPN - -> [Path Aggregation Network for Instance Segmentation](https://arxiv.org/abs/1803.01534) - - - -## Abstract - -The way that information propagates in neural networks is of great importance. In this paper, we propose Path Aggregation Network (PANet) aiming at boosting information flow in proposal-based instance segmentation framework. Specifically, we enhance the entire feature hierarchy with accurate localization signals in lower layers by bottom-up path augmentation, which shortens the information path between lower layers and topmost feature. 
We present adaptive feature pooling, which links feature grid and all feature levels to make useful information in each feature level propagate directly to following proposal subnetworks. A complementary branch capturing different views for each proposal is created to further improve mask prediction. These improvements are simple to implement, with subtle extra computational overhead. Our PANet reaches the 1st place in the COCO 2017 Challenge Instance Segmentation task and the 2nd place in Object Detection task without large-batch training. It is also state-of-the-art on MVD and Cityscapes. - -
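A minimal PyTorch sketch of the bottom-up path augmentation described above is given below. It is not mmdetection's `PAFPN` neck; it assumes all FPN levels already share one channel width and uses a single stride-2 conv plus element-wise addition per level.

```python
# Simplified bottom-up path augmentation applied to FPN outputs.
# Assumption: every level already has the same channel count (e.g. 256),
# unlike the full PAFPN neck, which also rebuilds the lateral convolutions.
import torch
import torch.nn as nn

class BottomUpPath(nn.Module):
    def __init__(self, channels=256, num_levels=5):
        super().__init__()
        # One stride-2 conv per transition from level i to the coarser level i+1.
        self.downsample_convs = nn.ModuleList([
            nn.Conv2d(channels, channels, 3, stride=2, padding=1)
            for _ in range(num_levels - 1)
        ])

    def forward(self, fpn_outs):
        # fpn_outs: finest (stride 4) to coarsest (stride 64).
        outs = [fpn_outs[0]]
        for i, conv in enumerate(self.downsample_convs):
            # Carry localization signal up from the level below and fuse it
            # with the corresponding FPN level by element-wise addition.
            outs.append(conv(outs[-1]) + fpn_outs[i + 1])
        return outs

# Toy usage: five FPN levels for a 256x256 image.
feats = [torch.randn(1, 256, 256 // s, 256 // s) for s in (4, 8, 16, 32, 64)]
print([tuple(o.shape) for o in BottomUpPath()(feats)])
```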
- -## Results and Models - -| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | pytorch | 1x | 4.0 | 17.2 | 37.5 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_20200503_105836.log.json) | - -## Citation - -```latex -@inproceedings{liu2018path, - author = {Shu Liu and - Lu Qi and - Haifang Qin and - Jianping Shi and - Jiaya Jia}, - title = {Path Aggregation Network for Instance Segmentation}, - booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2018} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py deleted file mode 100644 index b2fdef91c5cc8396baee9c2d8a09556162443078..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' - -model = dict( - neck=dict( - type='PAFPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/pafpn/metafile.yml b/cv/detection/co-detr/pytorch/configs/pafpn/metafile.yml deleted file mode 100644 index f9cf97c8c41378c9f1eb3d16b62c4ac1a23dbf89..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pafpn/metafile.yml +++ /dev/null @@ -1,38 +0,0 @@ -Collections: - - Name: PAFPN - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - PAFPN - Paper: - URL: https://arxiv.org/abs/1803.01534 - Title: 'Path Aggregation Network for Instance Segmentation' - README: configs/pafpn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/pafpn.py#L11 - Version: v2.0.0 - -Models: - - Name: faster_rcnn_r50_pafpn_1x_coco - In Collection: PAFPN - Config: configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py - Metadata: - Training Memory (GB): 4.0 - inference time (ms/im): - - value: 58.14 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth diff --git a/cv/detection/co-detr/pytorch/configs/panoptic_fpn/README.md b/cv/detection/co-detr/pytorch/configs/panoptic_fpn/README.md deleted file mode 100644 index 
12980ce56fca00ecfd35f3998c50c5c4fa6f7fb2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/panoptic_fpn/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# Panoptic FPN - -> [Panoptic feature pyramid networks](https://arxiv.org/abs/1901.02446) - - - -## Abstract - -The recently introduced panoptic segmentation task has renewed our community's interest in unifying the tasks of instance segmentation (for thing classes) and semantic segmentation (for stuff classes). However, current state-of-the-art methods for this joint task use separate and dissimilar networks for instance and semantic segmentation, without performing any shared computation. In this work, we aim to unify these methods at the architectural level, designing a single network for both tasks. Our approach is to endow Mask R-CNN, a popular instance segmentation method, with a semantic segmentation branch using a shared Feature Pyramid Network (FPN) backbone. Surprisingly, this simple baseline not only remains effective for instance segmentation, but also yields a lightweight, top-performing method for semantic segmentation. In this work, we perform a detailed study of this minimally extended version of Mask R-CNN with FPN, which we refer to as Panoptic FPN, and show it is a robust and accurate baseline for both tasks. Given its effectiveness and conceptual simplicity, we hope our method can serve as a strong baseline and aid future research in panoptic segmentation. - -
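The semantic branch sketched below illustrates the core idea: project each FPN level, upsample everything to the stride-4 map, sum, and predict per-pixel logits. It is a loose approximation, not the `PanopticFPNHead` used by these configs (which stacks extra 3x3 convs with group norm per upsampling step); `num_classes=54` is an assumption standing in for 53 stuff classes plus one catch-all class.

```python
# Rough sketch of a Panoptic FPN semantic head: every FPN level is projected,
# upsampled to the stride-4 map, summed, and turned into per-pixel logits.
# num_classes=54 is an assumption (53 stuff classes plus one catch-all).
import torch
import torch.nn as nn
import torch.nn.functional as F

class SimpleSemanticHead(nn.Module):
    def __init__(self, in_channels=256, inner_channels=128, num_classes=54):
        super().__init__()
        self.lateral = nn.ModuleList([
            nn.Conv2d(in_channels, inner_channels, 3, padding=1) for _ in range(4)
        ])
        self.predictor = nn.Conv2d(inner_channels, num_classes, 1)

    def forward(self, fpn_outs):
        # fpn_outs: P2..P5 at strides 4, 8, 16, 32; merge at the P2 resolution.
        target_size = fpn_outs[0].shape[-2:]
        merged = 0
        for feat, conv in zip(fpn_outs, self.lateral):
            x = F.relu(conv(feat))
            merged = merged + F.interpolate(
                x, size=target_size, mode='bilinear', align_corners=False)
        return self.predictor(merged)

# Toy usage: semantic logits at 1/4 of a 512x512 input.
feats = [torch.randn(1, 256, 512 // s, 512 // s) for s in (4, 8, 16, 32)]
print(SimpleSemanticHead()(feats).shape)  # torch.Size([1, 54, 128, 128])
```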
- -## Dataset - -PanopticFPN requires COCO and [COCO-panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) dataset for training and evaluation. You need to download and extract it in the COCO dataset path. -The directory should be like this. - -```none -mmdetection -├── mmdet -├── tools -├── configs -├── data -│ ├── coco -│ │ ├── annotations -│ │ │ ├── panoptic_train2017.json -│ │ │ ├── panoptic_train2017 -│ │ │ ├── panoptic_val2017.json -│ │ │ ├── panoptic_val2017 -│ │ ├── train2017 -│ │ ├── val2017 -│ │ ├── test2017 -``` - -## Results and Models - -| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | PQ | SQ | RQ | PQ_th | SQ_th | RQ_th | PQ_st | SQ_st | RQ_st | Config | Download | -| :-------: | :-----: | :-----: | :------: | :------------: | :--: | :--: | :--: | :---: | :---: | :---: | :---: | :---: | :---: | :---------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | pytorch | 1x | 4.7 | | 40.2 | 77.8 | 49.3 | 47.8 | 80.9 | 57.5 | 28.9 | 73.1 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153.log.json) | -| R-50-FPN | pytorch | 3x | - | - | 42.5 | 78.1 | 51.7 | 50.3 | 81.5 | 60.3 | 30.7 | 73.0 | 38.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco/panoptic_fpn_r50_fpn_mstrain_3x_coco_20210824_171155-5650f98b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco/panoptic_fpn_r50_fpn_mstrain_3x_coco_20210824_171155.log.json) | -| R-101-FPN | pytorch | 1x | 6.7 | | 42.2 | 78.3 | 51.4 | 50.1 | 81.4 | 59.9 | 30.3 | 73.6 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco/panoptic_fpn_r101_fpn_1x_coco_20210820_193950.log.json) | -| R-101-FPN | pytorch | 3x | - | - | 44.1 | 78.9 | 53.6 | 52.1 | 81.7 | 62.3 | 32.0 | 74.6 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco/panoptic_fpn_r101_fpn_mstrain_3x_coco_20210823_114712-9c99acc4.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco/panoptic_fpn_r101_fpn_mstrain_3x_coco_20210823_114712.log.json) | - -## Citation - -The base method for panoptic segmentation task. - -```latex -@inproceedings{kirillov2018panopticfpn, - author = { - Alexander Kirillov, - Ross Girshick, - Kaiming He, - Piotr Dollar, - }, - title = {Panoptic Feature Pyramid Networks}, - booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/panoptic_fpn/metafile.yml b/cv/detection/co-detr/pytorch/configs/panoptic_fpn/metafile.yml deleted file mode 100644 index 8c9d39dcee8c35c483f002c7ebeca73288370d63..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/panoptic_fpn/metafile.yml +++ /dev/null @@ -1,70 +0,0 @@ -Collections: - - Name: PanopticFPN - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - PanopticFPN - Paper: - URL: https://arxiv.org/pdf/1901.02446 - Title: 'Panoptic feature pyramid networks' - README: configs/panoptic_fpn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/detectors/panoptic_fpn.py#L7 - Version: v2.16.0 - -Models: - - Name: panoptic_fpn_r50_fpn_1x_coco - In Collection: PanopticFPN - Config: configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 4.6 - Epochs: 12 - Results: - - Task: Panoptic Segmentation - Dataset: COCO - Metrics: - PQ: 40.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth - - - Name: panoptic_fpn_r50_fpn_mstrain_3x_coco - In Collection: PanopticFPN - Config: configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 4.6 - Epochs: 36 - Results: - - Task: Panoptic Segmentation - Dataset: COCO - Metrics: - PQ: 42.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco/panoptic_fpn_r50_fpn_mstrain_3x_coco_20210824_171155-5650f98b.pth - - - Name: panoptic_fpn_r101_fpn_1x_coco - In Collection: PanopticFPN - Config: configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.5 - Epochs: 12 - Results: - - Task: Panoptic Segmentation - Dataset: COCO - Metrics: - PQ: 42.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth - - - Name: panoptic_fpn_r101_fpn_mstrain_3x_coco - In Collection: PanopticFPN - Config: configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 6.5 - Epochs: 36 - Results: - - Task: Panoptic Segmentation - Dataset: COCO - Metrics: - PQ: 44.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco/panoptic_fpn_r101_fpn_mstrain_3x_coco_20210823_114712-9c99acc4.pth diff --git a/cv/detection/co-detr/pytorch/configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py deleted file mode 100644 index 78b80798d3ab678b903775e3a4594d5c9dd92b92..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py +++ /dev/null @@ 
-1,6 +0,0 @@ -_base_ = './panoptic_fpn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py deleted file mode 100644 index 057e4811ebfca7cc1aea6ef2a6d10d2d2c34a1a7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './panoptic_fpn_r50_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py deleted file mode 100644 index 29955246032fd8e5ce624ceea586945fa3c91cce..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,33 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_panoptic.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - type='PanopticFPN', - semantic_head=dict( - type='PanopticFPNHead', - num_things_classes=80, - num_stuff_classes=53, - in_channels=256, - inner_channels=128, - start_level=0, - end_level=4, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), - conv_cfg=None, - loss_seg=dict( - type='CrossEntropyLoss', ignore_index=255, loss_weight=0.5)), - panoptic_fusion_head=dict( - type='HeuristicFusionHead', - num_things_classes=80, - num_stuff_classes=53), - test_cfg=dict( - panoptic=dict( - score_thr=0.6, - max_per_img=100, - mask_thr_binary=0.5, - mask_overlap=0.5, - nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True), - stuff_area_limit=4096))) - -custom_hooks = [] diff --git a/cv/detection/co-detr/pytorch/configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py deleted file mode 100644 index b510935358f55275434d5bcfe565545f861fbec9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,61 +0,0 @@ -_base_ = './panoptic_fpn_r50_fpn_1x_coco.py' - -# dataset settings -dataset_type = 'CocoPanopticDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)], -# multiscale_mode='range' -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadPanopticAnnotations', - with_bbox=True, - with_mask=True, - with_seg=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='SegRescale', scale_factor=1 / 4), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - 
dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -# Use RepeatDataset to speed up training -data = dict( - train=dict( - _delete_=True, - type='RepeatDataset', - times=3, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/panoptic_train2017.json', - img_prefix=data_root + 'train2017/', - seg_prefix=data_root + 'annotations/panoptic_train2017/', - pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/pascal_voc/README.md b/cv/detection/co-detr/pytorch/configs/pascal_voc/README.md deleted file mode 100644 index 3c098135e3a6bf39c0e22b34479478a748a49dd6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pascal_voc/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# Pascal VOC - -> [The Pascal Visual Object Classes (VOC) Challenge](https://link.springer.com/article/10.1007/s11263-009-0275-4) - - - -## Abstract - -The Pascal Visual Object Classes (VOC) challenge is a benchmark in visual object category recognition and detection, providing the vision and machine learning communities with a standard dataset of images and annotation, and standard evaluation procedures. Organised annually from 2005 to present, the challenge and its associated dataset has become accepted as the benchmark for object detection. - -This paper describes the dataset and evaluation procedure. We review the state-of-the-art in evaluated methods for both classification and detection, analyse whether the methods are statistically different, what they are learning from the images (e.g. the object or its context), and what the methods find easy or confuse. The paper concludes with lessons learnt in the three year history of the challenge, and proposes directions for future improvement and extension. - -
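The standard evaluation procedure the challenge introduced is average precision over a ranked list of detections; the sketch below computes the area-under-the-precision-recall-curve variant used by the later VOC protocol. It assumes detections have already been matched to ground truth and is not the `eval_map` implementation these configs call into.

```python
# Sketch of VOC-style average precision from ranked detections.
# Assumption: each detection is already matched to ground truth, so the
# input is simply a (score, is_true_positive) pair per detection.
import numpy as np

def voc_ap(scores, is_tp, num_gt):
    order = np.argsort(scores)[::-1]
    tp = np.cumsum(np.asarray(is_tp)[order])
    fp = np.cumsum(~np.asarray(is_tp)[order])
    recall = tp / num_gt
    precision = tp / (tp + fp)

    # Make precision monotonically non-increasing, then integrate the
    # precision-recall curve (the post-2010 area-under-curve protocol).
    precision = np.maximum.accumulate(precision[::-1])[::-1]
    recall = np.concatenate(([0.0], recall, [1.0]))
    precision = np.concatenate(([0.0], precision, [0.0]))
    return np.sum((recall[1:] - recall[:-1]) * precision[1:])

# Toy usage: five detections against three ground-truth boxes.
print(voc_ap(scores=[0.9, 0.8, 0.7, 0.6, 0.5],
             is_tp=[True, True, False, True, False], num_gt=3))
```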
- -## Results and Models - -| Architecture | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------------: | :------: | :-----: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Faster R-CNN C4 | R-50 | caffe | 18k | | - | 80.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712//home/dong/code_sensetime/2022Q1/mmdetection/work_dirs/prepare_voc/gather/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712_20220314_234327-847a14d2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712_20220314_234327.log.json) | -| Faster R-CNN | R-50 | pytorch | 1x | 2.6 | - | 80.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/faster_rcnn_r50_fpn_1x_voc0712_20220320_192712-54bef0f3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/faster_rcnn_r50_fpn_1x_voc0712_20220320_192712.log.json) | -| Retinanet | R-50 | pytorch | 1x | 2.1 | - | 77.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/retinanet_r50_fpn_1x_voc0712/retinanet_r50_fpn_1x_voc0712_20200617-47cbdd0e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/retinanet_r50_fpn_1x_voc0712/retinanet_r50_fpn_1x_voc0712_20200616_014642.log.json) | -| SSD300 | VGG16 | - | 120e | - | - | 76.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/ssd300_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd300_voc0712/ssd300_voc0712_20220320_194658-17edda1b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd300_voc0712/ssd300_voc0712_20220320_194658.log.json) | -| SSD512 | VGG16 | - | 120e | - | - | 79.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/ssd512_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd512_voc0712/ssd512_voc0712_20220320_194717-03cefefe.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd512_voc0712/ssd512_voc0712_20220320_194717.log.json) | - -## Citation - -```latex -@Article{Everingham10, - author = "Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. 
and Zisserman, A.", - title = "The Pascal Visual Object Classes (VOC) Challenge", - journal = "International Journal of Computer Vision", - volume = "88", - year = "2010", - number = "2", - month = jun, - pages = "303--338", -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712.py b/cv/detection/co-detr/pytorch/configs/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712.py deleted file mode 100644 index 7bb1d736bfb17dfeba395c9d506bb78cb118ff6d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712.py +++ /dev/null @@ -1,81 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_caffe_c4.py', - '../_base_/default_runtime.py' -] -model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) - -# dataset settings -dataset_type = 'VOCDataset' -data_root = 'data/VOCdevkit/' -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576), - (1333, 608), (1333, 640), (1333, 672), (1333, 704), - (1333, 736), (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=[ - data_root + 'VOC2007/ImageSets/Main/trainval.txt', - data_root + 'VOC2012/ImageSets/Main/trainval.txt' - ], - img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'], - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', - img_prefix=data_root + 'VOC2007/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', - img_prefix=data_root + 'VOC2007/', - pipeline=test_pipeline)) - -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) - -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=100, - warmup_ratio=0.001, - step=[12000, 16000]) - -# Runner type -runner = dict(type='IterBasedRunner', max_iters=18000) - -checkpoint_config = dict(interval=3000) -evaluation = dict(interval=3000, metric='mAP') diff --git a/cv/detection/co-detr/pytorch/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py b/cv/detection/co-detr/pytorch/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py deleted file mode 100644 index 7866acebea689e7a863a836c326b1407de733fe8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', - '../_base_/default_runtime.py' -] 
-model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -# actual epoch = 3 * 3 = 9 -lr_config = dict(policy='step', step=[3]) -# runtime settings -runner = dict( - type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12 diff --git a/cv/detection/co-detr/pytorch/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py b/cv/detection/co-detr/pytorch/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py deleted file mode 100644 index 12eee2c1ecdaa5f9e84a3bd2084b00493f2f76c0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py +++ /dev/null @@ -1,75 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py', - '../_base_/default_runtime.py' -] -model = dict(roi_head=dict(bbox_head=dict(num_classes=20))) - -CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', - 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', - 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') - -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/VOCdevkit/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1000, 600), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1000, 600), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='RepeatDataset', - times=3, - dataset=dict( - type=dataset_type, - ann_file='data/voc0712_trainval.json', - img_prefix='data/VOCdevkit', - pipeline=train_pipeline, - classes=CLASSES)), - val=dict( - type=dataset_type, - ann_file='data/voc07_test.json', - img_prefix='data/VOCdevkit', - pipeline=test_pipeline, - classes=CLASSES), - test=dict( - type=dataset_type, - ann_file='data/voc07_test.json', - img_prefix='data/VOCdevkit', - pipeline=test_pipeline, - classes=CLASSES)) -evaluation = dict(interval=1, metric='bbox') - -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -# actual epoch = 3 * 3 = 9 -lr_config = dict(policy='step', step=[3]) -# runtime settings -runner = dict( - type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12 diff --git a/cv/detection/co-detr/pytorch/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py b/cv/detection/co-detr/pytorch/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py deleted file mode 100644 index b4b050dda5d2d752c0db3c83c434879c8765a272..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', 
'../_base_/datasets/voc0712.py', - '../_base_/default_runtime.py' -] -model = dict(bbox_head=dict(num_classes=20)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -# actual epoch = 3 * 3 = 9 -lr_config = dict(policy='step', step=[3]) -# runtime settings -runner = dict( - type='EpochBasedRunner', max_epochs=4) # actual epoch = 4 * 3 = 12 diff --git a/cv/detection/co-detr/pytorch/configs/pascal_voc/ssd300_voc0712.py b/cv/detection/co-detr/pytorch/configs/pascal_voc/ssd300_voc0712.py deleted file mode 100644 index e7008aef3a94fe9470539ffe620017d008ea8784..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pascal_voc/ssd300_voc0712.py +++ /dev/null @@ -1,74 +0,0 @@ -_base_ = [ - '../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py', - '../_base_/default_runtime.py' -] -model = dict( - bbox_head=dict( - num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2, - 0.9)))) -# dataset settings -dataset_type = 'VOCDataset' -data_root = 'data/VOCdevkit/' -img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 4)), - dict( - type='MinIoURandomCrop', - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(300, 300), keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(300, 300), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=3, - train=dict( - type='RepeatDataset', times=10, dataset=dict(pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[16, 20]) -checkpoint_config = dict(interval=1) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=24) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/pascal_voc/ssd512_voc0712.py b/cv/detection/co-detr/pytorch/configs/pascal_voc/ssd512_voc0712.py deleted file mode 100644 index f4627c2dc236ede39e258785ee5d153d23f40cf0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pascal_voc/ssd512_voc0712.py +++ /dev/null @@ -1,57 +0,0 @@ -_base_ = 'ssd300_voc0712.py' -input_size = 512 -model = dict( - neck=dict( - out_channels=(512, 1024, 512, 256, 256, 256, 256), - level_strides=(2, 2, 2, 2, 1), - level_paddings=(1, 1, 1, 1, 1), - last_kernel_size=4), - bbox_head=dict( - in_channels=(512, 1024, 512, 256, 256, 256, 256), - anchor_generator=dict( - input_size=input_size, - strides=[8, 16, 32, 64, 128, 256, 512], - basesize_ratio_range=(0.15, 0.9), - ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2])))) -img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 4)), - dict( - type='MinIoURandomCrop', - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(512, 512), keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(512, 512), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(dataset=dict(pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/pisa/README.md b/cv/detection/co-detr/pytorch/configs/pisa/README.md deleted file mode 100644 index c847c85c0d13dea0a0af6ff57372c096176de314..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pisa/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# PISA - -> [Prime Sample Attention in Object Detection](https://arxiv.org/abs/1904.04821) - - - -## Abstract - -It is a common paradigm in object detection frameworks to treat all samples equally and target at maximizing the performance on average. In this work, we revisit this paradigm through a careful study on how different samples contribute to the overall performance measured in terms of mAP. Our study suggests that the samples in each mini-batch are neither independent nor equally important, and therefore a better classifier on average does not necessarily mean higher mAP. Motivated by this study, we propose the notion of Prime Samples, those that play a key role in driving the detection performance. We further develop a simple yet effective sampling and learning strategy called PrIme Sample Attention (PISA) that directs the focus of the training process towards such samples. Our experiments demonstrate that it is often more effective to focus on prime samples than hard samples when training a detector. 
Particularly, On the MSCOCO dataset, PISA outperforms the random sampling baseline and hard mining schemes, e.g., OHEM and Focal Loss, consistently by around 2% on both single-stage and two-stage detectors, even with a strong backbone ResNeXt-101. - -
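A toy version of the importance-based sample reweighting (ISR) step behind PISA is sketched below: positives are ranked by IoU and their classification-loss weights decay polynomially with rank, using the same `k`/`bias` parameters that appear in the configs that follow. This only illustrates the reweighting idea, not the `PISARoIHead` implementation; the flat (non-hierarchical, class-agnostic) ranking is a simplifying assumption.

```python
# Toy importance-based sample reweighting (the ISR-P idea): positives are
# ranked by IoU and their loss weights decay with rank, keeping the total
# weight equal to the unweighted sum. k and bias mirror isr=dict(k=2, bias=0).
import torch

def prime_sample_weights(ious, k=2.0, bias=0.0):
    """ious: (num_pos,) IoU of each positive sample with its matched GT box."""
    order = torch.argsort(ious, descending=True)
    ranks = torch.empty_like(order)
    ranks[order] = torch.arange(len(ious))        # rank 0 = most "prime" sample
    # Map rank into (0, 1], then decay polynomially with exponent k.
    hlr = 1.0 - ranks.float() / max(len(ious), 1)
    weights = bias + (1.0 - bias) * hlr.pow(k)
    # Renormalize so reweighting does not change the overall loss scale.
    return weights * len(ious) / weights.sum()

ious = torch.tensor([0.55, 0.92, 0.61, 0.78])
print(prime_sample_weights(ious))  # the 0.92 sample receives the largest weight
```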
- -## Results and Models - -| PISA | Network | Backbone | Lr schd | box AP | mask AP | Config | Download | -| :--: | :----------: | :------------: | :-----: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| × | Faster R-CNN | R-50-FPN | 1x | 36.4 | | - | | -| √ | Faster R-CNN | R-50-FPN | 1x | 38.4 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco_20200506_185619.log.json) | -| × | Faster R-CNN | X101-32x4d-FPN | 1x | 40.1 | | - | | -| √ | Faster R-CNN | X101-32x4d-FPN | 1x | 41.9 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco-e4accec4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco_20200505_181503.log.json) | -| × | Mask R-CNN | R-50-FPN | 1x | 37.3 | 34.2 | - | | -| √ | Mask R-CNN | R-50-FPN | 1x | 39.1 | 35.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco-dfcedba6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco_20200508_150500.log.json) | -| × | Mask R-CNN | X101-32x4d-FPN | 1x | 41.1 | 37.1 | - | | -| √ | Mask R-CNN | X101-32x4d-FPN | 1x | | | | | -| × | RetinaNet | R-50-FPN | 1x | 35.6 | | - | | -| √ | RetinaNet | R-50-FPN | 1x | 36.9 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco-76409952.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco_20200504_014311.log.json) | -| × | RetinaNet | X101-32x4d-FPN | 1x | 39.0 | | - | | -| √ | RetinaNet | X101-32x4d-FPN | 1x | 40.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco-a0c13c73.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco_20200505_001404.log.json) | -| × | SSD300 | VGG16 | 1x | 25.6 | | - | | -| √ | SSD300 | VGG16 | 1x | 27.6 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_ssd300_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco-710e3ac9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco_20200504_144325.log.json) | -| × | SSD512 | VGG16 | 1x | 29.3 | | - | | -| √ | SSD512 | VGG16 | 1x | 31.8 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_ssd512_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco-247addee.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco_20200508_131030.log.json) | - -**Notes:** - -- In the original paper, all models are trained and tested on mmdet v1.x, so the results may not be exactly the same as in this v2.0 release. - -- Note that PISA only modifies the training pipeline, so the inference time remains the same as the baseline. - -## Citation - -```latex -@inproceedings{cao2019prime, - title={Prime sample attention in object detection}, - author={Cao, Yuhang and Chen, Kai and Loy, Chen Change and Lin, Dahua}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - year={2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/pisa/metafile.yml b/cv/detection/co-detr/pytorch/configs/pisa/metafile.yml deleted file mode 100644 index cd43afb00b029e39036dd8a1e70bbc96548f2584..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pisa/metafile.yml +++ /dev/null @@ -1,110 +0,0 @@ -Collections: - - Name: PISA - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - PISA - - RPN - - ResNet - - RoIPool - Paper: - URL: https://arxiv.org/abs/1904.04821 - Title: 'Prime Sample Attention in Object Detection' - README: configs/pisa/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/roi_heads/pisa_roi_head.py#L8 - Version: v2.1.0 - -Models: - - Name: pisa_faster_rcnn_r50_fpn_1x_coco - In Collection: PISA - Config: configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth - - - Name: pisa_faster_rcnn_x101_32x4d_fpn_1x_coco - In Collection: PISA - Config: configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco-e4accec4.pth - - - Name: pisa_mask_rcnn_r50_fpn_1x_coco - In Collection: PISA - Config: configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 35.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco-dfcedba6.pth - - - Name: pisa_retinanet_r50_fpn_1x_coco - In Collection: PISA - Config: configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 36.9 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco-76409952.pth - - - Name: pisa_retinanet_x101_32x4d_fpn_1x_coco - In Collection: PISA - Config: configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco-a0c13c73.pth - - - Name: pisa_ssd300_coco - In Collection: PISA - Config: configs/pisa/pisa_ssd300_coco.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 27.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco-710e3ac9.pth - - - Name: pisa_ssd512_coco - In Collection: PISA - Config: configs/pisa/pisa_ssd512_coco.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 31.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco-247addee.pth diff --git a/cv/detection/co-detr/pytorch/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index 71e65b0b2bc72379f4db73e491f76fc767cb786b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,30 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' - -model = dict( - roi_head=dict( - type='PISARoIHead', - bbox_head=dict( - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), - train_cfg=dict( - rpn_proposal=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - sampler=dict( - type='ScoreHLRSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True, - k=0.5, - bias=0.), - isr=dict(k=2, bias=0), - carl=dict(k=1, bias=0.2))), - test_cfg=dict( - rpn=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0))) diff --git a/cv/detection/co-detr/pytorch/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index 16edd99de295161a3c246243e8c482ede4e5bdae..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,30 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py' - -model = dict( - roi_head=dict( - type='PISARoIHead', - bbox_head=dict( - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), - train_cfg=dict( - rpn_proposal=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - sampler=dict( - type='ScoreHLRSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True, - k=0.5, - bias=0.), - isr=dict(k=2, bias=0), - carl=dict(k=1, bias=0.2))), - test_cfg=dict( - rpn=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0))) diff --git a/cv/detection/co-detr/pytorch/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index 
047a293466a20ea90501e3054d7fcfe23fcdcb39..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,30 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' - -model = dict( - roi_head=dict( - type='PISARoIHead', - bbox_head=dict( - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), - train_cfg=dict( - rpn_proposal=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - sampler=dict( - type='ScoreHLRSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True, - k=0.5, - bias=0.), - isr=dict(k=2, bias=0), - carl=dict(k=1, bias=0.2))), - test_cfg=dict( - rpn=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0))) diff --git a/cv/detection/co-detr/pytorch/configs/pisa/pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pisa/pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index 2186a8f695ae6de9f27f5e96e398766f7a0e74bd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pisa/pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,30 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py' - -model = dict( - roi_head=dict( - type='PISARoIHead', - bbox_head=dict( - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), - train_cfg=dict( - rpn_proposal=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - sampler=dict( - type='ScoreHLRSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True, - k=0.5, - bias=0.), - isr=dict(k=2, bias=0), - carl=dict(k=1, bias=0.2))), - test_cfg=dict( - rpn=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0))) diff --git a/cv/detection/co-detr/pytorch/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py deleted file mode 100644 index 70f89e227ec64b5c7224375aac0cf7ae3a10a29e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' - -model = dict( - bbox_head=dict( - type='PISARetinaHead', - loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)), - train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) diff --git a/cv/detection/co-detr/pytorch/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index b97b6720f0522ee19e3f8353bf490b74a5835308..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = '../retinanet/retinanet_x101_32x4d_fpn_1x_coco.py' - -model = dict( - bbox_head=dict( - type='PISARetinaHead', - loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)), - train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) diff --git a/cv/detection/co-detr/pytorch/configs/pisa/pisa_ssd300_coco.py b/cv/detection/co-detr/pytorch/configs/pisa/pisa_ssd300_coco.py deleted file mode 100644 index b5cc006477eacaa9ab40d463312dc2156a59d634..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/configs/pisa/pisa_ssd300_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = '../ssd/ssd300_coco.py' - -model = dict( - bbox_head=dict(type='PISASSDHead'), - train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) - -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/pisa/pisa_ssd512_coco.py b/cv/detection/co-detr/pytorch/configs/pisa/pisa_ssd512_coco.py deleted file mode 100644 index 3219d6d667cb185e6fa4f1954d632ccad9512a48..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pisa/pisa_ssd512_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = '../ssd/ssd512_coco.py' - -model = dict( - bbox_head=dict(type='PISASSDHead'), - train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2))) - -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/point_rend/README.md b/cv/detection/co-detr/pytorch/configs/point_rend/README.md deleted file mode 100644 index 183e83dd89a347b757926ecba6e2bf75e6d5426b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/point_rend/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# PointRend - -> [PointRend: Image Segmentation as Rendering](https://arxiv.org/abs/1912.08193) - - - -## Abstract - -We present a new method for efficient high-quality image segmentation of objects and scenes. By analogizing classical computer graphics methods for efficient rendering with over- and undersampling challenges faced in pixel labeling tasks, we develop a unique perspective of image segmentation as a rendering problem. From this vantage, we present the PointRend (Point-based Rendering) neural network module: a module that performs point-based segmentation predictions at adaptively selected locations based on an iterative subdivision algorithm. PointRend can be flexibly applied to both instance and semantic segmentation tasks by building on top of existing state-of-the-art models. While many concrete implementations of the general idea are possible, we show that a simple design already achieves excellent results. Qualitatively, PointRend outputs crisp object boundaries in regions that are over-smoothed by previous methods. Quantitatively, PointRend yields significant gains on COCO and Cityscapes, for both instance and semantic segmentation. PointRend's efficiency enables output resolutions that are otherwise impractical in terms of memory or computation compared to existing approaches. - -
- -
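The "adaptively selected locations" in the abstract are chosen by an uncertainty rule: the coarse mask is upsampled step by step and only the points whose prediction is closest to the decision boundary are re-predicted by the point head (the `num_points` and `subdivision_num_points` settings in the config further below control how many). A minimal sketch of that selection rule in plain PyTorch, with illustrative names rather than the mmdet API:

```python
import torch

def uncertainty(logits):
    # Points whose strongest mask logit is closest to the decision boundary
    # are the most ambiguous; score them with the negative absolute logit.
    top = logits.max(dim=1, keepdim=True).values
    return -top.abs()

def sample_uncertain_points(coarse_logits, num_points):
    """Pick the `num_points` most uncertain pixel centers of a coarse mask.

    coarse_logits: (N, C, H, W) logits from the coarse mask head.
    Returns normalized (x, y) coordinates of shape (N, num_points, 2) in [0, 1].
    """
    n, _, h, w = coarse_logits.shape
    unc = uncertainty(coarse_logits).view(n, h * w)
    idx = unc.topk(num_points, dim=1).indices
    ys = torch.div(idx, w, rounding_mode='floor').float() + 0.5
    xs = (idx % w).float() + 0.5
    return torch.stack([xs / w, ys / h], dim=-1)

# Example: 28x28 coarse logits; pick the 14*14 most ambiguous points, echoing
# the num_points=14 * 14 value used for training-time sampling in the config below.
points = sample_uncertain_points(torch.randn(2, 1, 28, 28), 14 * 14)
```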
- -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :----------------------------: | :----------------------------: | -| R-50-FPN | caffe | 1x | 4.6 | | 38.4 | 36.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco_20200612_161407.log.json) | -| R-50-FPN | caffe | 3x | 4.6 | | 41.0 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco-e0ebb6b7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco_20200614_002632.log.json) | - -Note: All models are trained with multi-scale training; the shorter side of the input image is randomly scaled to one of (640, 672, 704, 736, 768, 800). 
- -## Citation - -```latex -@InProceedings{kirillov2019pointrend, - title={{PointRend}: Image Segmentation as Rendering}, - author={Alexander Kirillov and Yuxin Wu and Kaiming He and Ross Girshick}, - journal={ArXiv:1912.08193}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/point_rend/metafile.yml b/cv/detection/co-detr/pytorch/configs/point_rend/metafile.yml deleted file mode 100644 index 82aea05be69c17bb75592423d2883512691586fd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/point_rend/metafile.yml +++ /dev/null @@ -1,54 +0,0 @@ -Collections: - - Name: PointRend - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - PointRend - - FPN - - ResNet - Paper: - URL: https://arxiv.org/abs/1912.08193 - Title: 'PointRend: Image Segmentation as Rendering' - README: configs/point_rend/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/detectors/point_rend.py#L6 - Version: v2.2.0 - -Models: - - Name: point_rend_r50_caffe_fpn_mstrain_1x_coco - In Collection: PointRend - Config: configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py - Metadata: - Training Memory (GB): 4.6 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth - - - Name: point_rend_r50_caffe_fpn_mstrain_3x_coco - In Collection: PointRend - Config: configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 4.6 - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco-e0ebb6b7.pth diff --git a/cv/detection/co-detr/pytorch/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py deleted file mode 100644 index 0c0e563d6fe307d05fbd3862cd28b6dc2a3e52b2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py +++ /dev/null @@ -1,44 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py' -# model settings -model = dict( - type='PointRend', - roi_head=dict( - type='PointRendRoIHead', - mask_roi_extractor=dict( - type='GenericRoIExtractor', - aggregation='concat', - roi_layer=dict( - _delete_=True, type='SimpleRoIAlign', output_size=14), - out_channels=256, - featmap_strides=[4]), - mask_head=dict( - _delete_=True, - type='CoarseMaskHead', - num_fcs=2, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), - point_head=dict( - type='MaskPointHead', - num_fcs=3, - in_channels=256, - fc_channels=256, - num_classes=80, - coarse_pred_each_layer=True, - loss_point=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rcnn=dict( - mask_size=7, - num_points=14 * 14, - oversample_ratio=3, - 
importance_sample_ratio=0.75)), - test_cfg=dict( - rcnn=dict( - subdivision_steps=5, - subdivision_num_points=28 * 28, - scale_factor=2))) diff --git a/cv/detection/co-detr/pytorch/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py deleted file mode 100644 index 169278e5738b0abd4ae5e99594e4adbaaefa2d96..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py' -# learning policy -lr_config = dict(step=[28, 34]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/pvt/README.md b/cv/detection/co-detr/pytorch/configs/pvt/README.md deleted file mode 100644 index 1fd090bd7b6a07574ee3058cb13ea3d3d967577b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pvt/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# PVT - -> [Pyramid vision transformer: A versatile backbone for dense prediction without convolutions](https://arxiv.org/abs/2102.12122) - - - -## Abstract - -Although using convolutional neural networks (CNNs) as backbones achieves great successes in computer vision, this work investigates a simple backbone network useful for many dense prediction tasks without convolutions. Unlike the recently-proposed Transformer model (e.g., ViT) that is specially designed for image classification, we propose Pyramid Vision Transformer~(PVT), which overcomes the difficulties of porting Transformer to various dense prediction tasks. PVT has several merits compared to prior arts. (1) Different from ViT that typically has low-resolution outputs and high computational and memory cost, PVT can be not only trained on dense partitions of the image to achieve high output resolution, which is important for dense predictions but also using a progressive shrinking pyramid to reduce computations of large feature maps. (2) PVT inherits the advantages from both CNN and Transformer, making it a unified backbone in various vision tasks without convolutions by simply replacing CNN backbones. (3) We validate PVT by conducting extensive experiments, showing that it boosts the performance of many downstream tasks, e.g., object detection, semantic, and instance segmentation. For example, with a comparable number of parameters, RetinaNet+PVT achieves 40.4 AP on the COCO dataset, surpassing RetinNet+ResNet50 (36.3 AP) by 4.1 absolute AP. We hope PVT could serve as an alternative and useful backbone for pixel-level predictions and facilitate future researches. - -Transformer recently has shown encouraging progresses in computer vision. In this work, we present new baselines by improving the original Pyramid Vision Transformer (abbreviated as PVTv1) by adding three designs, including (1) overlapping patch embedding, (2) convolutional feed-forward networks, and (3) linear complexity attention layers. -With these modifications, our PVTv2 significantly improves PVTv1 on three tasks e.g., classification, detection, and segmentation. Moreover, PVTv2 achieves comparable or better performances than recent works such as Swin Transformer. We hope this work will facilitate state-of-the-art Transformer researches in computer vision. - -
- -
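The reduced attention cost mentioned above comes from spatial-reduction attention (SRA): keys and values are downsampled by a per-stage ratio before multi-head attention, so the cost shrinks roughly by the square of that ratio while queries keep full resolution (PVTv2 further replaces the strided reduction with pooling for linear complexity). A minimal PyTorch sketch of the PVTv1-style idea; class and argument names are illustrative, not the mmdet `PyramidVisionTransformer` implementation:

```python
import torch
import torch.nn as nn

class SpatialReductionAttention(nn.Module):
    """Multi-head self-attention with keys/values shrunk by `sr_ratio`."""

    def __init__(self, dim, num_heads=8, sr_ratio=4):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
        # Strided conv reduces the key/value token grid by sr_ratio per side.
        self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, h, w):
        # x: (B, H*W, C) token sequence for an H x W feature map.
        b, n, c = x.shape
        kv = x.transpose(1, 2).reshape(b, c, h, w)
        kv = self.sr(kv).flatten(2).transpose(1, 2)   # (B, H*W / sr_ratio**2, C)
        kv = self.norm(kv)
        out, _ = self.attn(x, kv, kv)                 # queries keep full resolution
        return out

# Example: a 32x32 map with 64 channels; keys/values shrink from 1024 to 64 tokens.
attn = SpatialReductionAttention(dim=64, num_heads=8, sr_ratio=4)
y = attn(torch.randn(2, 32 * 32, 64), 32, 32)
```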
- -## Results and Models - -### RetinaNet (PVTv1) - -| Backbone | Lr schd | Mem (GB) | box AP | Config | Download | -| :--------: | :-----: | :------: | :----: | :--------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| PVT-Tiny | 12e | 8.5 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_t_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110-17b566bd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110.log.json) | -| PVT-Small | 12e | 14.5 | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_s_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921.log.json) | -| PVT-Medium | 12e | 20.9 | 41.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_m_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243-55effa1b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243.log.json) | - -### RetinaNet (PVTv2) - -| Backbone | Lr schd | Mem (GB) | box AP | Config | Download | -| :------: | :-----: | :------: | :----: | :------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| PVTv2-B0 | 12e | 7.4 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b0_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157-13e9aabe.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157.log.json) | -| PVTv2-B1 | 12e | 9.5 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b1_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318-7e169a7d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318.log.json) | -| PVTv2-B2 | 12e | 16.2 | 44.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b2_fpn_1x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843-529f0b9a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843.log.json) | -| PVTv2-B3 | 12e | 23.0 | 46.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b3_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512-8357deff.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512.log.json) | -| PVTv2-B4 | 12e | 17.0 | 46.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b4_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151-83795c86.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151.log.json) | -| PVTv2-B5 | 12e | 18.7 | 46.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800-3420eb57.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800.log.json) | - -## Citation - -```latex -@article{wang2021pyramid, - title={Pyramid vision transformer: A versatile backbone for dense prediction without convolutions}, - author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, - journal={arXiv preprint arXiv:2102.12122}, - year={2021} -} -``` - -```latex -@article{wang2021pvtv2, - title={PVTv2: Improved Baselines with Pyramid Vision Transformer}, - author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling}, - journal={arXiv preprint arXiv:2106.13797}, - year={2021} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/pvt/metafile.yml b/cv/detection/co-detr/pytorch/configs/pvt/metafile.yml deleted file mode 100644 index 58843784955f3f4be7aeebf7caa9b50b7891f4c5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pvt/metafile.yml +++ /dev/null @@ -1,243 +0,0 @@ -Models: - - Name: retinanet_pvt-t_fpn_1x_coco - In Collection: RetinaNet - Config: configs/pvt/retinanet_pvt-t_fpn_1x_coco.py - Metadata: - Training Memory (GB): 8.5 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x NVIDIA V100 GPUs - Architecture: - - PyramidVisionTransformer - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 36.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110-17b566bd.pth - Paper: - URL: https://arxiv.org/abs/2102.12122 - Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions" - README: configs/pvt/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315 - Version: 
2.17.0 - - - Name: retinanet_pvt-s_fpn_1x_coco - In Collection: RetinaNet - Config: configs/pvt/retinanet_pvt-s_fpn_1x_coco.py - Metadata: - Training Memory (GB): 14.5 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x NVIDIA V100 GPUs - Architecture: - - PyramidVisionTransformer - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth - Paper: - URL: https://arxiv.org/abs/2102.12122 - Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions" - README: configs/pvt/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315 - Version: 2.17.0 - - - Name: retinanet_pvt-m_fpn_1x_coco - In Collection: RetinaNet - Config: configs/pvt/retinanet_pvt-m_fpn_1x_coco.py - Metadata: - Training Memory (GB): 20.9 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x NVIDIA V100 GPUs - Architecture: - - PyramidVisionTransformer - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243-55effa1b.pth - Paper: - URL: https://arxiv.org/abs/2102.12122 - Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions" - README: configs/pvt/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315 - Version: 2.17.0 - - - Name: retinanet_pvtv2-b0_fpn_1x_coco - In Collection: RetinaNet - Config: configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.4 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x NVIDIA V100 GPUs - Architecture: - - PyramidVisionTransformerV2 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157-13e9aabe.pth - Paper: - URL: https://arxiv.org/abs/2106.13797 - Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" - README: configs/pvt/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 - Version: 2.17.0 - - - Name: retinanet_pvtv2-b1_fpn_1x_coco - In Collection: RetinaNet - Config: configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py - Metadata: - Training Memory (GB): 9.5 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x NVIDIA V100 GPUs - Architecture: - - PyramidVisionTransformerV2 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318-7e169a7d.pth - Paper: - URL: https://arxiv.org/abs/2106.13797 - Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" - README: configs/pvt/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 - Version: 2.17.0 - - - Name: retinanet_pvtv2-b2_fpn_1x_coco - In Collection: RetinaNet - 
Config: configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py - Metadata: - Training Memory (GB): 16.2 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x NVIDIA V100 GPUs - Architecture: - - PyramidVisionTransformerV2 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843-529f0b9a.pth - Paper: - URL: https://arxiv.org/abs/2106.13797 - Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" - README: configs/pvt/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 - Version: 2.17.0 - - - Name: retinanet_pvtv2-b3_fpn_1x_coco - In Collection: RetinaNet - Config: configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py - Metadata: - Training Memory (GB): 23.0 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x NVIDIA V100 GPUs - Architecture: - - PyramidVisionTransformerV2 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512-8357deff.pth - Paper: - URL: https://arxiv.org/abs/2106.13797 - Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" - README: configs/pvt/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 - Version: 2.17.0 - - - Name: retinanet_pvtv2-b4_fpn_1x_coco - In Collection: RetinaNet - Config: configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py - Metadata: - Training Memory (GB): 17.0 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x NVIDIA V100 GPUs - Architecture: - - PyramidVisionTransformerV2 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151-83795c86.pth - Paper: - URL: https://arxiv.org/abs/2106.13797 - Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" - README: configs/pvt/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 - Version: 2.17.0 - - - Name: retinanet_pvtv2-b5_fpn_1x_coco - In Collection: RetinaNet - Config: configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py - Metadata: - Training Memory (GB): 18.7 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x NVIDIA V100 GPUs - Architecture: - - PyramidVisionTransformerV2 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800-3420eb57.pth - Paper: - URL: https://arxiv.org/abs/2106.13797 - Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer" - README: configs/pvt/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543 - Version: 2.17.0 diff --git a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvt-l_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvt-l_fpn_1x_coco.py deleted file mode 100644 
index e299f2a098e7cd2299e369cdf1aba9c56980cb0d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvt-l_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = 'retinanet_pvt-t_fpn_1x_coco.py' -model = dict( - backbone=dict( - num_layers=[3, 8, 27, 3], - init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' - 'releases/download/v2/pvt_large.pth'))) -fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvt-m_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvt-m_fpn_1x_coco.py deleted file mode 100644 index b888f788b6c7310491751774238451bb7107dccc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvt-m_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = 'retinanet_pvt-t_fpn_1x_coco.py' -model = dict( - backbone=dict( - num_layers=[3, 4, 18, 3], - init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' - 'releases/download/v2/pvt_medium.pth'))) diff --git a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvt-s_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvt-s_fpn_1x_coco.py deleted file mode 100644 index 46603488bb3ceb4fc1052139da53340a3d595256..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvt-s_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = 'retinanet_pvt-t_fpn_1x_coco.py' -model = dict( - backbone=dict( - num_layers=[3, 4, 6, 3], - init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' - 'releases/download/v2/pvt_small.pth'))) diff --git a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvt-t_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvt-t_fpn_1x_coco.py deleted file mode 100644 index a6cff7d033554d93b40e741278029cb914e36b68..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvt-t_fpn_1x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - type='RetinaNet', - backbone=dict( - _delete_=True, - type='PyramidVisionTransformer', - num_layers=[2, 2, 2, 2], - init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' - 'releases/download/v2/pvt_tiny.pth')), - neck=dict(in_channels=[64, 128, 320, 512])) -# optimizer -optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py deleted file mode 100644 index cbe2295d8f66192a442653882c1f2b4d54a05b53..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - type='RetinaNet', - backbone=dict( - _delete_=True, - type='PyramidVisionTransformerV2', - embed_dims=32, - num_layers=[2, 2, 2, 2], - init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' - 'releases/download/v2/pvt_v2_b0.pth')), - neck=dict(in_channels=[32, 64, 160, 256])) -# optimizer -optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001) diff --git 
a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py deleted file mode 100644 index 5374c50925f5c7ed8a761eda40dc4bf374df3aeb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' -model = dict( - backbone=dict( - embed_dims=64, - init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' - 'releases/download/v2/pvt_v2_b1.pth')), - neck=dict(in_channels=[64, 128, 320, 512])) diff --git a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py deleted file mode 100644 index cf9a18debbe5f8b9918e0d086ad6d54d203ef310..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' -model = dict( - backbone=dict( - embed_dims=64, - num_layers=[3, 4, 6, 3], - init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' - 'releases/download/v2/pvt_v2_b2.pth')), - neck=dict(in_channels=[64, 128, 320, 512])) diff --git a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py deleted file mode 100644 index 7a47f820324af7fecf773640d7d1829b0c115471..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' -model = dict( - backbone=dict( - embed_dims=64, - num_layers=[3, 4, 18, 3], - init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' - 'releases/download/v2/pvt_v2_b3.pth')), - neck=dict(in_channels=[64, 128, 320, 512])) diff --git a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py deleted file mode 100644 index 9891d7bd76a484a74b5a1722599d0660aaeb775a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' -model = dict( - backbone=dict( - embed_dims=64, - num_layers=[3, 8, 27, 3], - init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' - 'releases/download/v2/pvt_v2_b4.pth')), - neck=dict(in_channels=[64, 128, 320, 512])) -# optimizer -optimizer = dict( - _delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001) -# dataset settings -data = dict(samples_per_gpu=1, workers_per_gpu=1) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (8 GPUs) x (1 samples per GPU) -auto_scale_lr = dict(base_batch_size=8) diff --git a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py deleted file mode 100644 index a9fea2ebe4777f73dce32a19649b359ae6066036..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py +++ /dev/null @@ -1,19 +0,0 @@ -_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py' -model = dict( - backbone=dict( - embed_dims=64, - num_layers=[3, 6, 40, 3], - mlp_ratios=(4, 4, 4, 4), - init_cfg=dict(checkpoint='https://github.com/whai362/PVT/' - 'releases/download/v2/pvt_v2_b5.pth')), - neck=dict(in_channels=[64, 128, 320, 512])) -# optimizer -optimizer = dict( - _delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001) -# dataset settings -data = dict(samples_per_gpu=1, workers_per_gpu=1) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (1 samples per GPU) -auto_scale_lr = dict(base_batch_size=8) diff --git a/cv/detection/co-detr/pytorch/configs/queryinst/README.md b/cv/detection/co-detr/pytorch/configs/queryinst/README.md deleted file mode 100644 index ad6e0b317cbf2cce07baa24bf44eab5cccf68f25..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/queryinst/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# QueryInst - -> [Instances as Queries](https://openaccess.thecvf.com/content/ICCV2021/html/Fang_Instances_As_Queries_ICCV_2021_paper.html) - - - -## Abstract - -We present QueryInst, a new perspective for instance segmentation. QueryInst is a multi-stage end-to-end system that treats instances of interest as learnable queries, enabling query based object detectors, e.g., Sparse R-CNN, to have strong instance segmentation performance. The attributes of instances such as categories, bounding boxes, instance masks, and instance association embeddings are represented by queries in a unified manner. In QueryInst, a query is shared by both detection and segmentation via dynamic convolutions and driven by parallelly-supervised multi-stage learning. We conduct extensive experiments on three challenging benchmarks, i.e., COCO, CityScapes, and YouTube-VIS to evaluate the effectiveness of QueryInst in object detection, instance segmentation, and video instance segmentation tasks. For the first time, we demonstrate that a simple end-to-end query based framework can achieve the state-of-the-art performance in various instance-level recognition tasks. - -
- -
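The "dynamic convolutions" above are how one query drives both the box and the mask branch: each query vector generates per-instance parameters that are applied to that instance's RoI features, so the RoI representation becomes query-conditioned at every stage. A minimal sketch of that interaction with illustrative shapes; a simplification for clarity, not the mmdet `DynamicConv` module:

```python
import torch
import torch.nn as nn

class DynamicInteraction(nn.Module):
    """Generate per-instance 1x1 conv weights from a query and apply them to RoI features."""

    def __init__(self, query_dim=256, feat_dim=256, hidden_dim=64):
        super().__init__()
        self.hidden_dim = hidden_dim
        # Each query predicts two weight matrices: feat_dim->hidden and hidden->feat_dim.
        self.param_gen = nn.Linear(query_dim, feat_dim * hidden_dim * 2)
        self.act = nn.ReLU(inplace=True)

    def forward(self, query, roi_feat):
        # query: (N, query_dim); roi_feat: (N, S*S, feat_dim) flattened RoI features.
        n, _, feat_dim = roi_feat.shape
        params = self.param_gen(query)
        w1 = params[:, : feat_dim * self.hidden_dim].view(n, feat_dim, self.hidden_dim)
        w2 = params[:, feat_dim * self.hidden_dim:].view(n, self.hidden_dim, feat_dim)
        x = self.act(torch.bmm(roi_feat, w1))   # per-instance projection
        x = self.act(torch.bmm(x, w2))
        return x                                # (N, S*S, feat_dim), query-conditioned

# Example: 100 queries interacting with their 7x7 RoI features.
layer = DynamicInteraction()
out = layer(torch.randn(100, 256), torch.randn(100, 7 * 7, 256))
```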
- -## Results and Models - -| Model | Backbone | Style | Lr schd | Number of Proposals | Multi-Scale | RandomCrop | box AP | mask AP | Config | Download | -| :-------: | :-------: | :-----: | :-----: | :-----------------: | :---------: | :--------: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| QueryInst | R-50-FPN | pytorch | 1x | 100 | False | False | 42.0 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916.log.json) | -| QueryInst | R-50-FPN | pytorch | 3x | 100 | True | False | 44.8 | 39.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643-7837af86.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643.log.json) | -| QueryInst | R-50-FPN | pytorch | 3x | 300 | True | True | 47.5 | 41.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802-85cffbd8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802.log.json) | -| QueryInst | R-101-FPN | pytorch | 3x | 100 | True | False | 46.4 | 41.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048-91f9995b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048.log.json) | -| QueryInst | R-101-FPN | pytorch | 3x | 300 | True | True | 49.0 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621-76cce59f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621.log.json) | - -## Citation - -```latex -@InProceedings{Fang_2021_ICCV, - author = {Fang, Yuxin and Yang, Shusheng and Wang, Xinggang and Li, Yu and Fang, Chen and Shan, Ying and Feng, Bin and Liu, Wenyu}, - title = {Instances As Queries}, - booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, - month = {October}, - year = {2021}, - pages = {6910-6919} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/queryinst/metafile.yml b/cv/detection/co-detr/pytorch/configs/queryinst/metafile.yml deleted file mode 100644 index da7f0a72c4ec891ff5eb224a041ff2ea6b809bf5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/queryinst/metafile.yml +++ /dev/null @@ -1,100 +0,0 @@ -Collections: - - Name: QueryInst - Metadata: - Training Data: COCO - Training Techniques: - - AdamW - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - ResNet - - QueryInst - Paper: - URL: https://openaccess.thecvf.com/content/ICCV2021/papers/Fang_Instances_As_Queries_ICCV_2021_paper.pdf - Title: 'Instances as Queries' - README: configs/queryinst/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/queryinst.py - Version: v2.18.0 - -Models: - - Name: queryinst_r50_fpn_1x_coco - In Collection: QueryInst - Config: configs/queryinst/queryinst_r50_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth - - - Name: queryinst_r50_fpn_mstrain_480-800_3x_coco - In Collection: QueryInst - Config: configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py - Metadata: - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643-7837af86.pth - - - Name: queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco - In Collection: QueryInst - Config: configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py - Metadata: - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 47.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 41.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802-85cffbd8.pth - - - Name: queryinst_r101_fpn_mstrain_480-800_3x_coco - In Collection: QueryInst - Config: configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py - Metadata: - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.4 - - Task: Instance Segmentation - 
Dataset: COCO - Metrics: - mask AP: 41.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048-91f9995b.pth - - - Name: queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco - In Collection: QueryInst - Config: configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py - Metadata: - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 49.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 42.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621-76cce59f.pth diff --git a/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py b/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py deleted file mode 100644 index fd138f5ac5928089352b616463ac7f6fe386ce99..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py' - -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py b/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py deleted file mode 100644 index 07cae19cea544f0e0b201aaef80a6c8d7b492fb3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py' - -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r50_fpn_1x_coco.py deleted file mode 100644 index 48f5773b054fe185e5cbfac2350b86536db0d1d3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r50_fpn_1x_coco.py +++ /dev/null @@ -1,138 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -num_stages = 6 -num_proposals = 100 -model = dict( - type='QueryInst', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=0, - add_extra_convs='on_input', - num_outs=4), - rpn_head=dict( - type='EmbeddingRPNHead', - num_proposals=num_proposals, - proposal_feature_channel=256), - roi_head=dict( - type='SparseRoIHead', - num_stages=num_stages, - stage_loss_weights=[1] * num_stages, - proposal_feature_channel=256, - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), - out_channels=256, - 
featmap_strides=[4, 8, 16, 32]), - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=[ - dict( - type='DIIHead', - num_classes=80, - num_ffn_fcs=2, - num_heads=8, - num_cls_fcs=1, - num_reg_fcs=3, - feedforward_channels=2048, - in_channels=256, - dropout=0.0, - ffn_act_cfg=dict(type='ReLU', inplace=True), - dynamic_conv_cfg=dict( - type='DynamicConv', - in_channels=256, - feat_channels=64, - out_channels=256, - input_feat_shape=7, - act_cfg=dict(type='ReLU', inplace=True), - norm_cfg=dict(type='LN')), - loss_bbox=dict(type='L1Loss', loss_weight=5.0), - loss_iou=dict(type='GIoULoss', loss_weight=2.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - clip_border=False, - target_means=[0., 0., 0., 0.], - target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages) - ], - mask_head=[ - dict( - type='DynamicMaskHead', - dynamic_conv_cfg=dict( - type='DynamicConv', - in_channels=256, - feat_channels=64, - out_channels=256, - input_feat_shape=14, - with_proj=False, - act_cfg=dict(type='ReLU', inplace=True), - norm_cfg=dict(type='LN')), - num_convs=4, - num_classes=80, - roi_feat_size=14, - in_channels=256, - conv_kernel_size=3, - conv_out_channels=256, - class_agnostic=False, - norm_cfg=dict(type='BN'), - upsample_cfg=dict(type='deconv', scale_factor=2), - loss_mask=dict( - type='DiceLoss', - loss_weight=8.0, - use_sigmoid=True, - activate=False, - eps=1e-5)) for _ in range(num_stages) - ]), - # training and testing settings - train_cfg=dict( - rpn=None, - rcnn=[ - dict( - assigner=dict( - type='HungarianAssigner', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBoxL1Cost', weight=5.0), - iou_cost=dict(type='IoUCost', iou_mode='giou', - weight=2.0)), - sampler=dict(type='PseudoSampler'), - pos_weight=1, - mask_size=28, - ) for _ in range(num_stages) - ]), - test_cfg=dict( - rpn=None, rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5))) - -# optimizer -optimizer = dict( - _delete_=True, - type='AdamW', - lr=0.0001, - weight_decay=0.0001, - paramwise_cfg=dict( - custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)})) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2)) -# learning policy -lr_config = dict(policy='step', step=[8, 11], warmup_iters=1000) -runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py b/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py deleted file mode 100644 index 3089b3c6c8af5d052849414572b8995d9ecd0828..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py +++ /dev/null @@ -1,54 +0,0 @@ -_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py' -num_proposals = 300 -model = dict( - rpn_head=dict(num_proposals=num_proposals), - test_cfg=dict( - _delete_=True, - rpn=None, - rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# augmentation strategy originates from DETR. 
-train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='AutoAugment', - policies=[[ - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict( - type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ]]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) -] -data = dict(train=dict(pipeline=train_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py b/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py deleted file mode 100644 index 89e2cd10c5c7d1ac7fda4b43305f47221d2c7ac5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py +++ /dev/null @@ -1,23 +0,0 @@ -_base_ = './queryinst_r50_fpn_1x_coco.py' - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -min_values = (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, value) for value in min_values], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) -] - -data = dict(train=dict(pipeline=train_pipeline)) -lr_config = dict(policy='step', step=[27, 33]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/README.md b/cv/detection/co-detr/pytorch/configs/regnet/README.md deleted file mode 100644 index 61dba42daf9168b1c522b38fdf2bbf263f90ba92..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# RegNet - -> [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678) - - - -## Abstract - -In this work, we present a new network design paradigm. Our goal is to help advance the understanding of network design and discover design principles that generalize across settings. Instead of focusing on designing individual network instances, we design network design spaces that parametrize populations of networks. The overall process is analogous to classic manual design of networks, but elevated to the design space level. Using our methodology we explore the structure aspect of network design and arrive at a low-dimensional design space consisting of simple, regular networks that we call RegNet. 
The core insight of the RegNet parametrization is surprisingly simple: widths and depths of good networks can be explained by a quantized linear function. We analyze the RegNet design space and arrive at interesting findings that do not match the current practice of network design. The RegNet design space provides simple and fast networks that work well across a wide range of flop regimes. Under comparable training settings and flops, the RegNet models outperform the popular EfficientNet models while being up to 5x faster on GPUs.
- -## Introduction - -We implement RegNetX and RegNetY models in detection systems and provide their first results on Mask R-CNN, Faster R-CNN and RetinaNet. - -The pre-trained models are converted from the [model zoo of pycls](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md). - -## Usage - -Using a RegNet model takes two steps: - -1. Convert the model to the ResNet-style format supported by MMDetection -2. Modify the backbone and neck in the config accordingly - -### Convert model - -We already provide models spanning 400MF to 12GF FLOPs in our model zoo. - -For more general usage, we also provide the script `regnet2mmdet.py` in the tools directory to convert the keys of models pretrained by [pycls](https://github.com/facebookresearch/pycls/) to -ResNet-style checkpoints used in MMDetection. - -```bash -python -u tools/model_converters/regnet2mmdet.py ${PRETRAIN_PATH} ${STORE_PATH} -``` - -This script converts the model at `PRETRAIN_PATH` and stores the converted model at `STORE_PATH`. - -### Modify config - -Users can modify the backbone's `depth` and the corresponding keys in `arch` according to the configs in the [pycls model zoo](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md). -The FPN parameter `in_channels` can be read off Figures 15 & 16 of the paper (`wi` in the legend). -This directory already provides several configs with their performance, covering RegNetX from the 800MF to the 12GF level. -For other pre-trained or self-implemented RegNet models, users are responsible for checking these parameters themselves. - -**Note**: Although Figs. 15 & 16 also provide `w0`, `wa`, `wm`, `group_w`, and `bot_mul` for `arch`, they are quantized and therefore inaccurate; using them sometimes produces a different backbone whose keys do not match those of the pre-trained model.
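For a concrete picture of the two steps above, the following is a minimal sketch of such a config override. It is not one of the removed files verbatim, but it is composed from the same pieces that appear in the configs deleted below: only the backbone `arch`, the pretrained checkpoint name, and the FPN `in_channels` (the stage widths `wi`) change between RegNet variants. Note that the full configs in this diff additionally override `img_norm_cfg` with the pycls mean/std (`to_rgb=False`), since the converted pycls weights were trained with that normalization.

```python
# Minimal sketch (assumed filename and 1x schedule): plug a RegNetX-1.6GF
# backbone into Faster R-CNN, following the pattern of the configs below.
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        _delete_=True,  # discard the ResNet-50 settings inherited from the base model
        type='RegNet',
        arch='regnetx_1.6gf',  # must match the key of the converted pycls checkpoint
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')),
    neck=dict(
        type='FPN',
        # stage widths (`wi`) of regnetx_1.6gf; see Figures 15 & 16 of the paper
        in_channels=[72, 168, 408, 912],
        out_channels=256,
        num_outs=5))
# RegNets use a smaller weight decay than ResNet baselines (see the Notice below).
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
```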
- -## Results and Models - -### Mask R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :----------------------------------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| [R-50-FPN](../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py) | pytorch | 1x | 4.4 | 12.0 | 38.2 | 34.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) | -| [RegNetX-3.2GF-FPN](./mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py) | pytorch | 1x | 5.0 | | 40.3 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141.log.json) | -| [RegNetX-4.0GF-FPN](./mask_rcnn_regnetx-4GF_fpn_1x_coco.py) | pytorch | 1x | 5.5 | | 41.5 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217-32e9c92d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217.log.json) | -| [R-101-FPN](../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py) | pytorch | 1x | 6.4 | 10.3 | 40.0 | 36.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204_144809.log.json) | -| [RegNetX-6.4GF-FPN](./mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py) | pytorch | 1x | 6.1 | | 41.0 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439-3a7aae83.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439.log.json) | -| [X-101-32x4d-FPN](../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py) | pytorch 
| 1x | 7.6 | 9.4 | 41.9 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205_034906.log.json) | -| [RegNetX-8.0GF-FPN](./mask_rcnn_regnetx-8GF_fpn_1x_coco.py) | pytorch | 1x | 6.4 | | 41.7 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515-09daa87e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515.log.json) | -| [RegNetX-12GF-FPN](./mask_rcnn_regnetx-12GF_fpn_1x_coco.py) | pytorch | 1x | 7.4 | | 42.2 | 38 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552-b538bd8b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552.log.json) | -| [RegNetX-3.2GF-FPN-DCN-C3-C5](./mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py) | pytorch | 1x | 5.0 | | 40.3 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726-75f40794.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726.log.json) | - -### Faster R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| [R-50-FPN](../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | pytorch | 1x | 4.0 | 18.2 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) | -| [RegNetX-3.2GF-FPN](./faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py) | pytorch | 1x | 4.5 | | 39.9 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927-126fd9bf.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927.log.json) | -| [RegNetX-3.2GF-FPN](./faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py) | pytorch | 2x | 4.5 | | 41.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955-e2081918.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955.log.json) | - -### RetinaNet - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-----------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| [R-50-FPN](../retinanet/retinanet_r50_fpn_1x_coco.py) | pytorch | 1x | 3.8 | 16.6 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130_002941.log.json) | -| [RegNetX-800MF-FPN](./retinanet_regnetx-800MF_fpn_1x_coco.py) | pytorch | 1x | 2.5 | | 35.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403-f6f91d10.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403.log.json) | -| [RegNetX-1.6GF-FPN](./retinanet_regnetx-1.6GF_fpn_1x_coco.py) | pytorch | 1x | 3.3 | | 37.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403-37009a9d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403.log.json) | -| [RegNetX-3.2GF-FPN](./retinanet_regnetx-3.2GF_fpn_1x_coco.py) | pytorch | 1x | 4.2 | | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141-cb1509e8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141.log.json) | - -### Pre-trained models - -We also train some models with longer schedules and multi-scale training. The users could finetune them for downstream tasks. - -| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :---------------: | :---------------------------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Faster RCNN | [RegNetX-400MF-FPN](./faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 2.3 | | 37.1 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210526_095112-e1967c37.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210526_095112.log.json) | -| Faster RCNN | [RegNetX-800MF-FPN](./faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 2.8 | | 38.8 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210526_095118-a2c70b20.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210526_095118.log.json) | -| Faster RCNN | [RegNetX-1.6GF-FPN](./faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 3.4 | | 40.5 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-1_20210526_095325-94aa46cc.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-1_20210526_095325.log.json) | -| Faster RCNN | [RegNetX-3.2GF-FPN](./faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 4.4 | | 42.3 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3_20210526_095152-e16a5227.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3_20210526_095152.log.json) | -| Faster RCNN | [RegNetX-4GF-FPN](./faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 4.9 | | 42.8 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210526_095201-65eaf841.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210526_095201.log.json) | -| Mask RCNN | [RegNetX-400MF-FPN](./mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 2.5 | | 37.6 | 34.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco_20210601_235443-8aac57a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco_20210601_235443.log.json) | -| Mask RCNN | [RegNetX-800MF-FPN](./mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 2.9 | | 39.5 | 36.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco_20210602_210641-715d51f5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco_20210602_210641.log.json) | -| Mask RCNN | [RegNetX-1.6GF-FPN](./mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 3.6 | | 40.9 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-1_20210602_210641-6764cff5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-1_20210602_210641.log.json) | -| Mask RCNN | [RegNetX-3.2GF-FPN](./mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 5.0 | | 43.1 | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221.log.json) | -| Mask RCNN | [RegNetX-4GF-FPN](./mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 5.1 | | 43.4 | 39.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco_20210602_032621-00f0331c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco_20210602_032621.log.json) | -| Cascade Mask RCNN | [RegNetX-400MF-FPN](./cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 4.3 | | 41.6 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210715_211619-5142f449.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210715_211619.log.json) | -| Cascade Mask RCNN | [RegNetX-800MF-FPN](./cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 4.8 | | 42.8 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210715_211616-dcbd13f4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210715_211616.log.json) | -| Cascade Mask RCNN | [RegNetX-1.6GF-FPN](./cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 5.4 | | 44.5 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-1_20210715_211616-75f29a61.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-1_20210715_211616.log.json) | -| Cascade Mask RCNN | [RegNetX-3.2GF-FPN](./cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 6.4 | | 45.8 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-3_20210715_211616-b9c2c58b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-3_20210715_211616.log.json) | -| Cascade Mask RCNN | [RegNetX-4GF-FPN](./cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 6.9 | | 45.8 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210715_212034-cbb1be4c.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210715_212034.log.json) | - -### Notice - -1. The models are trained using a different weight decay, i.e., `weight_decay=5e-5` according to the setting in ImageNet training. This brings improvement of at least 0.7 AP absolute but does not improve the model using ResNet-50. -2. RetinaNets using RegNets are trained with learning rate 0.02 with gradient clip. We find that using learning rate 0.02 could improve the results by at least 0.7 AP absolute and gradient clip is necessary to stabilize the training. However, this does not improve the performance of ResNet-50-FPN RetinaNet. - -## Citation - -```latex -@article{radosavovic2020designing, - title={Designing Network Design Spaces}, - author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, - year={2020}, - eprint={2003.13678}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py deleted file mode 100644 index 358d85aa97a0d6cac41ac0daca2f54ce2f143d50..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - type='RegNet', - arch='regnetx_1.6gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), - neck=dict( - type='FPN', - in_channels=[72, 168, 408, 912], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py deleted file mode 100644 index 84645718b37b4a1b7d9bb252c7b1207d20ae6e5e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,63 +0,0 @@ -_base_ = [ - '../common/mstrain_3x_coco_instance.py', - '../_base_/models/cascade_mask_rcnn_r50_fpn.py' -] -model = dict( - backbone=dict( - _delete_=True, - type='RegNet', - arch='regnetx_3.2gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), - neck=dict( - type='FPN', - in_channels=[96, 192, 432, 1008], - out_channels=256, - num_outs=5)) -img_norm_cfg = dict( - # The mean and std are used in PyCls when training RegNets - mean=[103.53, 116.28, 123.675], - std=[57.375, 57.12, 58.395], - to_rgb=False) -train_pipeline = [ - # Images are converted to float32 directly after loading in PyCls - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - 
dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - train=dict(dataset=dict(pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -optimizer = dict(weight_decay=0.00005) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py deleted file mode 100644 index 2a8990a60d425859a3481a4fbc6fcca72fb5c8ce..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - type='RegNet', - arch='regnetx_400mf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')), - neck=dict( - type='FPN', - in_channels=[32, 64, 160, 384], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py deleted file mode 100644 index 3157863459213eb094f8a1c510ebd11be2d0e9c4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - type='RegNet', - arch='regnetx_4.0gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), - neck=dict( - type='FPN', - in_channels=[80, 240, 560, 1360], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py deleted file mode 100644 index 41376ad88132dfbe956d721ceda88e48f75be435..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - type='RegNet', - arch='regnetx_800mf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), - neck=dict( - type='FPN', - in_channels=[64, 128, 288, 672], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py 
b/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py deleted file mode 100644 index 385b5ca73b5f7432ad60b0a1528ee8c992b31d44..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - type='RegNet', - arch='regnetx_1.6gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), - neck=dict( - type='FPN', - in_channels=[72, 168, 408, 912], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py deleted file mode 100644 index 88d270e3ce76f631acbef116cd3f7d3e6853ab59..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py +++ /dev/null @@ -1,57 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - backbone=dict( - _delete_=True, - type='RegNet', - arch='regnetx_3.2gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), - neck=dict( - type='FPN', - in_channels=[96, 192, 432, 1008], - out_channels=256, - num_outs=5)) -img_norm_cfg = dict( - # The mean and std are used in PyCls when training RegNets - mean=[103.53, 116.28, 123.675], - std=[57.375, 57.12, 58.395], - to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py deleted file mode 100644 index 612490b4342a1b6fc164ec80bbe0a6c6df147d76..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py' -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git 
a/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py deleted file mode 100644 index b7e6e1a3125d67f4fd7d99c0ef856bf02402ddb6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,61 +0,0 @@ -_base_ = [ - '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py' -] -model = dict( - backbone=dict( - _delete_=True, - type='RegNet', - arch='regnetx_3.2gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), - neck=dict( - type='FPN', - in_channels=[96, 192, 432, 1008], - out_channels=256, - num_outs=5)) -img_norm_cfg = dict( - # The mean and std are used in PyCls when training RegNets - mean=[103.53, 116.28, 123.675], - std=[57.375, 57.12, 58.395], - to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - train=dict(dataset=dict(pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -optimizer = dict(weight_decay=0.00005) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py deleted file mode 100644 index 0a05f6e4e3c6aa2e85f5473872b5633cdb8bfc50..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - type='RegNet', - arch='regnetx_400mf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')), - neck=dict( - type='FPN', - in_channels=[32, 64, 160, 384], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py deleted file mode 100644 index 98b3fc2b5b6cd122a42cab4754336fd355d40cfb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - type='RegNet', - arch='regnetx_4.0gf', - 
out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), - neck=dict( - type='FPN', - in_channels=[80, 240, 560, 1360], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py deleted file mode 100644 index 67f448bdb797459da8898d1846b7e97786163cf4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py' -model = dict( - backbone=dict( - type='RegNet', - arch='regnetx_800mf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), - neck=dict( - type='FPN', - in_channels=[64, 128, 288, 672], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py deleted file mode 100644 index 7970c3c80531f5975013026390d6262a59363e7e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py +++ /dev/null @@ -1,26 +0,0 @@ -_base_ = [ - '../common/mstrain-poly_3x_coco_instance.py', - '../_base_/models/mask_rcnn_r50_fpn.py' -] - -model = dict( - backbone=dict( - _delete_=True, - type='RegNet', - arch='regnetx_1.6gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), - neck=dict( - type='FPN', - in_channels=[72, 168, 408, 912], - out_channels=256, - num_outs=5)) - -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py deleted file mode 100644 index ce3661cffbfee0aa4206c889c2f8517d6d1e0e58..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='RegNet', - arch='regnetx_12gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_12gf')), - neck=dict( - type='FPN', - in_channels=[224, 448, 896, 2240], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py deleted file mode 100644 index 44bf0d1176bf3fd585b65dc10fbac455ce01c59c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py 
+++ /dev/null @@ -1,58 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - backbone=dict( - _delete_=True, - type='RegNet', - arch='regnetx_3.2gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), - neck=dict( - type='FPN', - in_channels=[96, 192, 432, 1008], - out_channels=256, - num_outs=5)) -img_norm_cfg = dict( - # The mean and std are used in PyCls when training RegNets - mean=[103.53, 116.28, 123.675], - std=[57.375, 57.12, 58.395], - to_rgb=False) -train_pipeline = [ - # Images are converted to float32 directly after loading in PyCls - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py deleted file mode 100644 index 5b53428125e5a8732bfd489195b0f6e179420b47..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = 'mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf'))) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py deleted file mode 100644 index aca64d335c7b299d985621adb254d9e4f471cca7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,66 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - backbone=dict( - _delete_=True, - type='RegNet', - arch='regnetx_3.2gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), - neck=dict( - type='FPN', - in_channels=[96, 192, 432, 1008], - 
out_channels=256, - num_outs=5)) -img_norm_cfg = dict( - # The mean and std are used in PyCls when training RegNets - mean=[103.53, 116.28, 123.675], - std=[57.375, 57.12, 58.395], - to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) -lr_config = dict(step=[28, 34]) -runner = dict(type='EpochBasedRunner', max_epochs=36) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py deleted file mode 100644 index c38dfa6ab6e1b62b3e558a54c80d9c47fb26daf1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py +++ /dev/null @@ -1,26 +0,0 @@ -_base_ = [ - '../common/mstrain-poly_3x_coco_instance.py', - '../_base_/models/mask_rcnn_r50_fpn.py' -] - -model = dict( - backbone=dict( - _delete_=True, - type='RegNet', - arch='regnetx_400mf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')), - neck=dict( - type='FPN', - in_channels=[32, 64, 160, 384], - out_channels=256, - num_outs=5)) - -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py deleted file mode 100644 index 874d485bec139ec2bfd8253ac82e8f5861d3f9c2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='RegNet', - arch='regnetx_4.0gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), - neck=dict( - type='FPN', - in_channels=[80, 240, 560, 1360], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py 
b/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py deleted file mode 100644 index f0b65eabfec8bb293fadef5427204a262a238ad0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py +++ /dev/null @@ -1,26 +0,0 @@ -_base_ = [ - '../common/mstrain-poly_3x_coco_instance.py', - '../_base_/models/mask_rcnn_r50_fpn.py' -] - -model = dict( - backbone=dict( - _delete_=True, - type='RegNet', - arch='regnetx_4.0gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')), - neck=dict( - type='FPN', - in_channels=[80, 240, 560, 1360], - out_channels=256, - num_outs=5)) - -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py deleted file mode 100644 index 99387d8655eaa8bca5276dff7f2b7505afe185ed..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='RegNet', - arch='regnetx_6.4gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_6.4gf')), - neck=dict( - type='FPN', - in_channels=[168, 392, 784, 1624], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py deleted file mode 100644 index 335ebabf7919d84faf4cd59f2199b3337e46857b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py +++ /dev/null @@ -1,26 +0,0 @@ -_base_ = [ - '../common/mstrain-poly_3x_coco_instance.py', - '../_base_/models/mask_rcnn_r50_fpn.py' -] - -model = dict( - backbone=dict( - _delete_=True, - type='RegNet', - arch='regnetx_800mf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), - neck=dict( - type='FPN', - in_channels=[64, 128, 288, 672], - out_channels=256, - num_outs=5)) - -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py deleted file mode 100644 index 1e7832ff2605346e9743e54023dfd5872dc55567..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='RegNet', - arch='regnetx_8.0gf', - out_indices=(0, 1, 2, 3), - 
frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_8.0gf')), - neck=dict( - type='FPN', - in_channels=[80, 240, 720, 1920], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/metafile.yml b/cv/detection/co-detr/pytorch/configs/regnet/metafile.yml deleted file mode 100644 index 28bd82fbfed0fc3ffdc7c52521b5ef50f54c3a17..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/metafile.yml +++ /dev/null @@ -1,797 +0,0 @@ -Models: - - Name: mask_rcnn_regnetx-3.2GF_fpn_1x_coco - In Collection: Mask R-CNN - Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py - Metadata: - Training Memory (GB): 5.0 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: mask_rcnn_regnetx-4GF_fpn_1x_coco - In Collection: Mask R-CNN - Config: configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py - Metadata: - Training Memory (GB): 5.5 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217-32e9c92d.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: mask_rcnn_regnetx-6.4GF_fpn_1x_coco - In Collection: Mask R-CNN - Config: configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.1 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439-3a7aae83.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: mask_rcnn_regnetx-8GF_fpn_1x_coco - In Collection: Mask R-CNN - Config: configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py - Metadata: - Training Memory (GB): 6.4 - Epochs: 12 - Training 
Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515-09daa87e.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: mask_rcnn_regnetx-12GF_fpn_1x_coco - In Collection: Mask R-CNN - Config: configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.4 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552-b538bd8b.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco - In Collection: Mask R-CNN - Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py - Metadata: - Training Memory (GB): 5.0 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726-75f40794.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: faster_rcnn_regnetx-3.2GF_fpn_1x_coco - In Collection: Faster R-CNN - Config: configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py - Metadata: - Training Memory (GB): 4.5 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927-126fd9bf.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: faster_rcnn_regnetx-3.2GF_fpn_2x_coco - In Collection: Faster R-CNN - Config: 
configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py - Metadata: - Training Memory (GB): 4.5 - Epochs: 24 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955-e2081918.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: retinanet_regnetx-800MF_fpn_1x_coco - In Collection: RetinaNet - Config: configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py - Metadata: - Training Memory (GB): 2.5 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 35.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403-f6f91d10.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: retinanet_regnetx-1.6GF_fpn_1x_coco - In Collection: RetinaNet - Config: configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py - Metadata: - Training Memory (GB): 3.3 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403-37009a9d.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: retinanet_regnetx-3.2GF_fpn_1x_coco - In Collection: RetinaNet - Config: configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py - Metadata: - Training Memory (GB): 4.2 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141-cb1509e8.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco - In Collection: Faster R-CNN - Config: configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 2.3 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - 
Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210526_095112-e1967c37.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco - In Collection: Faster R-CNN - Config: configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 2.8 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210526_095118-a2c70b20.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco - In Collection: Faster R-CNN - Config: configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 3.4 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-1_20210526_095325-94aa46cc.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco - In Collection: Faster R-CNN - Config: configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 4.4 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3_20210526_095152-e16a5227.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco - In Collection: Faster R-CNN - Config: configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 4.9 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - 
Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210526_095201-65eaf841.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco - In Collection: Mask R-CNN - Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 5.0 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco - In Collection: Mask R-CNN - Config: configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py - Metadata: - Training Memory (GB): 2.5 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.6 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 34.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco_20210601_235443-8aac57a4.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco - In Collection: Mask R-CNN - Config: configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py - Metadata: - Training Memory (GB): 2.9 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco_20210602_210641-715d51f5.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco - In Collection: Mask R-CNN - Config: configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py - Metadata: - 
Training Memory (GB): 3.6 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.9 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-1_20210602_210641-6764cff5.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco - In Collection: Mask R-CNN - Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 5.0 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco - In Collection: Mask R-CNN - Config: configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py - Metadata: - Training Memory (GB): 5.1 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco_20210602_032621-00f0331c.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco - In Collection: Cascade R-CNN - Config: configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 4.3 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.6 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210715_211619-5142f449.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: 
https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco - In Collection: Cascade R-CNN - Config: configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 4.8 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210715_211616-dcbd13f4.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco - In Collection: Cascade R-CNN - Config: configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 5.4 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-1_20210715_211616-75f29a61.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco - In Collection: Cascade R-CNN - Config: configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 6.4 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 40.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-3_20210715_211616-b9c2c58b.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 - - - Name: cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco - In Collection: Cascade R-CNN - Config: configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py - Metadata: - Training Memory (GB): 6.9 - Epochs: 36 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - RegNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 40.0 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210715_212034-cbb1be4c.pth - Paper: - URL: https://arxiv.org/abs/2003.13678 - Title: 'Designing Network Design Spaces' - README: configs/regnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11 - Version: v2.1.0 diff --git a/cv/detection/co-detr/pytorch/configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py deleted file mode 100644 index 7395c1bfbfa16670294c721f9f3135da9b9e69ae..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='RegNet', - arch='regnetx_1.6gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')), - neck=dict( - type='FPN', - in_channels=[72, 168, 408, 912], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py deleted file mode 100644 index f05307c4364c565d410de35cc720db70d22be947..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py +++ /dev/null @@ -1,59 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - backbone=dict( - _delete_=True, - type='RegNet', - arch='regnetx_3.2gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')), - neck=dict( - type='FPN', - in_channels=[96, 192, 432, 1008], - out_channels=256, - num_outs=5)) -img_norm_cfg = dict( - # The mean and std are used in PyCls when training RegNets - mean=[103.53, 116.28, 123.675], - std=[57.375, 57.12, 58.395], - to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git 
a/cv/detection/co-detr/pytorch/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py deleted file mode 100644 index f6f8989320d6ffbcd55148471f62a962c52f9131..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='RegNet', - arch='regnetx_800mf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')), - neck=dict( - type='FPN', - in_channels=[64, 128, 288, 672], - out_channels=256, - num_outs=5)) diff --git a/cv/detection/co-detr/pytorch/configs/reppoints/README.md b/cv/detection/co-detr/pytorch/configs/reppoints/README.md deleted file mode 100644 index 5e71ae5236c9f7a269b1a34837a4d568a8c7e9cd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/reppoints/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# RepPoints - -> [RepPoints: Point Set Representation for Object Detection](https://arxiv.org/abs/1904.11490) - - - -## Abstract - -Modern object detectors rely heavily on rectangular bounding boxes, such as anchors, proposals and the final predictions, to represent objects at various recognition stages. The bounding box is convenient to use but provides only a coarse localization of objects and leads to a correspondingly coarse extraction of object features. In this paper, we present RepPoints(representative points), a new finer representation of objects as a set of sample points useful for both localization and recognition. Given ground truth localization and recognition targets for training, RepPoints learn to automatically arrange themselves in a manner that bounds the spatial extent of an object and indicates semantically significant local areas. They furthermore do not require the use of anchors to sample a space of bounding boxes. We show that an anchor-free object detector based on RepPoints can be as effective as the state-of-the-art anchor-based detection methods, with 46.5 AP and 67.4 AP50 on the COCO test-dev detection benchmark, using ResNet-101 model. - -
- -
- -## Introduction - -By [Ze Yang](https://yangze.tech/), [Shaohui Liu](http://b1ueber2y.me/), and [Han Hu](https://ancientmooner.github.io/). - -We provide code support and configuration files to reproduce the results in the paper for -["RepPoints: Point Set Representation for Object Detection"](https://arxiv.org/abs/1904.11490) on COCO object detection. - -**RepPoints**, initially described in [arXiv](https://arxiv.org/abs/1904.11490), is a new representation method for visual objects, on which visual understanding tasks are typically centered. Visual object representation, aiming at both geometric description and appearance feature extraction, is conventionally achieved by `bounding box + RoIPool (RoIAlign)`. The bounding box representation is convenient to use; however, it provides only a rectangular localization of objects that lacks geometric precision and may consequently degrade feature quality. Our new representation, RepPoints, models objects by a `point set` instead of a `bounding box`, whose points learn to adaptively position themselves over an object in a manner that circumscribes the object’s `spatial extent` and enables `semantically aligned feature extraction`. This richer and more flexible representation maintains the convenience of bounding boxes while facilitating various visual understanding applications. This repo demonstrates the effectiveness of RepPoints for COCO object detection. - -Another feature of this repo is the demonstration of an `anchor-free detector`, which can be as effective as state-of-the-art anchor-based detection methods. The anchor-free detector can utilize either `bounding box` or `RepPoints` as the basic object representation. - -## Results and Models - -The results on COCO 2017val are shown in the table below. 
- -| Method | Backbone | GN | Anchor | convert func | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------: | :-----------: | :-: | :----: | :----------: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| BBox | R-50-FPN | Y | single | - | 1x | 3.9 | 15.9 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916-0eedf8d1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916.log.json) | -| BBox | R-50-FPN | Y | none | - | 1x | 3.9 | 15.4 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916-0eedf8d1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916.log.json) | -| RepPoints | R-50-FPN | N | none | moment | 1x | 3.3 | 18.5 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330_233609.log.json) | -| RepPoints | R-50-FPN | Y | none | moment | 1x | 3.9 | 17.5 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329_145952-3e51b550.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329_145952.log.json) | -| RepPoints | R-50-FPN | Y | none | moment | 2x | 3.9 | - | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329-91babaa2.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329_150020.log.json) | -| RepPoints | R-101-FPN | Y | none | moment | 2x | 5.8 | 13.7 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329-4fbc7310.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329_132205.log.json) | -| RepPoints | R-101-FPN-DCN | Y | none | moment | 2x | 5.9 | 12.1 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-3309fbf2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329_132134.log.json) | -| RepPoints | X-101-FPN-DCN | Y | none | moment | 2x | 7.1 | 9.3 | 44.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-f87da1ea.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329_132201.log.json) | - -**Notes:** - -- `R-xx`, `X-xx` denote the ResNet and ResNeXt architectures, respectively. -- `DCN` denotes replacing 3x3 conv with the 3x3 deformable convolution in `c3-c5` stages of backbone. -- `none` in the `anchor` column means 2-d `center point` (x,y) is used to represent the initial object hypothesis. `single` denotes one 4-d anchor box (x,y,w,h) with IoU based label assign criterion is adopted. -- `moment`, `partial MinMax`, `MinMax` in the `convert func` column are three functions to convert a point set to a pseudo box. -- Note the results here are slightly different from those reported in the paper, due to framework change. While the original paper uses an [MXNet](https://mxnet.apache.org/) implementation, we re-implement the method in [PyTorch](https://pytorch.org/) based on mmdetection. 
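
To make the `convert func` column above concrete, the snippet below is a minimal sketch (not the deleted repo's implementation, which lives inside mmdet's `RepPointsHead`) of the two simplest point-set-to-pseudo-box conversions. The `moment_scale` argument is only a stand-in for the learnable moment transfer used by the real head.

```python
# Hedged sketch: convert one object's learned point set into a pseudo box.
# `points` is assumed to be a (N, 2) tensor of (x, y) coordinates.
import torch

def points_to_bbox_minmax(points: torch.Tensor) -> torch.Tensor:
    """'minmax': the pseudo box is the tight axis-aligned box over all points."""
    x_min, y_min = points.min(dim=0).values
    x_max, y_max = points.max(dim=0).values
    return torch.stack([x_min, y_min, x_max, y_max])

def points_to_bbox_moment(points: torch.Tensor, moment_scale: float = 0.01) -> torch.Tensor:
    """'moment': center from the mean, half extent from the (scaled) std of the points."""
    center = points.mean(dim=0)
    half_wh = points.std(dim=0) * torch.exp(torch.tensor(moment_scale))
    return torch.cat([center - half_wh, center + half_wh])

pts = torch.rand(9, 2) * 100            # nine representative points, matching num_points=9
print(points_to_bbox_minmax(pts))       # tight bounding box of the point set
print(points_to_bbox_moment(pts))       # moment-based pseudo box
```

The `minmax` variant simply takes the tight box over the points, while the `moment` variant derives the box from the mean and standard deviation and is therefore less sensitive to a single outlying point.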
- -## Citation - -```latex -@inproceedings{yang2019reppoints, - title={RepPoints: Point Set Representation for Object Detection}, - author={Yang, Ze and Liu, Shaohui and Hu, Han and Wang, Liwei and Lin, Stephen}, - booktitle={The IEEE International Conference on Computer Vision (ICCV)}, - month={Oct}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py b/cv/detection/co-detr/pytorch/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py deleted file mode 100644 index b24c8db768423de12d1e8582bb26dd71218f52ee..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' -model = dict(bbox_head=dict(transform_method='minmax', use_grid_points=True)) diff --git a/cv/detection/co-detr/pytorch/configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py b/cv/detection/co-detr/pytorch/configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py deleted file mode 100644 index 8d5013d30a059f067c71e877dbc0bcef94790154..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' -model = dict( - bbox_head=dict(transform_method='minmax', use_grid_points=True), - # training and testing settings - train_cfg=dict( - init=dict( - assigner=dict( - _delete_=True, - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0, - ignore_iof_thr=-1)))) diff --git a/cv/detection/co-detr/pytorch/configs/reppoints/metafile.yml b/cv/detection/co-detr/pytorch/configs/reppoints/metafile.yml deleted file mode 100644 index d94137eeeb78c2e47c61bc7156e06f52e17da386..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/reppoints/metafile.yml +++ /dev/null @@ -1,181 +0,0 @@ -Collections: - - Name: RepPoints - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Group Normalization - - FPN - - RepPoints - - ResNet - Paper: - URL: https://arxiv.org/abs/1904.11490 - Title: 'RepPoints: Point Set Representation for Object Detection' - README: configs/reppoints/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/reppoints_detector.py#L9 - Version: v2.0.0 - -Models: - - Name: bbox_r50_grid_fpn_gn-neck+head_1x_coco - In Collection: RepPoints - Config: configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py - Metadata: - Training Memory (GB): 3.9 - inference time (ms/im): - - value: 62.89 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 36.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916-0eedf8d1.pth - - - Name: bbox_r50_grid_center_fpn_gn-neck+head_1x_coco - In Collection: RepPoints - Config: configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py - Metadata: - Training Memory (GB): 3.9 - inference time (ms/im): - - value: 64.94 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: 
Object Detection - Dataset: COCO - Metrics: - box AP: 37.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916-0eedf8d1.pth - - - Name: reppoints_moment_r50_fpn_1x_coco - In Collection: RepPoints - Config: configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 3.3 - inference time (ms/im): - - value: 54.05 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth - - - Name: reppoints_moment_r50_fpn_gn-neck+head_1x_coco - In Collection: RepPoints - Config: configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py - Metadata: - Training Memory (GB): 3.9 - inference time (ms/im): - - value: 57.14 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329_145952-3e51b550.pth - - - Name: reppoints_moment_r50_fpn_gn-neck+head_2x_coco - In Collection: RepPoints - Config: configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py - Metadata: - Training Memory (GB): 3.9 - inference time (ms/im): - - value: 57.14 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329-91babaa2.pth - - - Name: reppoints_moment_r101_fpn_gn-neck+head_2x_coco - In Collection: RepPoints - Config: configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py - Metadata: - Training Memory (GB): 5.8 - inference time (ms/im): - - value: 72.99 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329-4fbc7310.pth - - - Name: reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco - In Collection: RepPoints - Config: configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py - Metadata: - Training Memory (GB): 5.9 - inference time (ms/im): - - value: 82.64 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-3309fbf2.pth - - - Name: reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco - In Collection: RepPoints - Config: configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py - Metadata: - Training Memory (GB): 7.1 
- inference time (ms/im): - - value: 107.53 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-f87da1ea.pth diff --git a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py b/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py deleted file mode 100644 index 0f56a46b3c002cdec630bb06df66a4fc9e7804a8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' -model = dict(bbox_head=dict(transform_method='minmax')) diff --git a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py b/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py deleted file mode 100644 index e223d80fab5eabf99da7ee28668d81d0f059d9cc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py' -model = dict( - backbone=dict( - depth=101, - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True), - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py b/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py deleted file mode 100644 index 118547096e67abb82c563ad128dd1a18309dd775..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py deleted file mode 100644 index 158a90670b86a78d872e7db4cf80db72401481b8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py +++ /dev/null @@ -1,67 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - type='RepPointsDetector', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_input', - num_outs=5), - bbox_head=dict( - type='RepPointsHead', - num_classes=80, - in_channels=256, - feat_channels=256, - 
point_feat_channels=256, - stacked_convs=3, - num_points=9, - gradient_mul=0.1, - point_strides=[8, 16, 32, 64, 128], - point_base_scale=4, - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5), - loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0), - transform_method='moment'), - # training and testing settings - train_cfg=dict( - init=dict( - assigner=dict(type='PointAssigner', scale=4, pos_num=1), - allowed_border=-1, - pos_weight=-1, - debug=False), - refine=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False)), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)) -optimizer = dict(lr=0.01) diff --git a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py b/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py deleted file mode 100644 index 337f167c820979f345eef120a936195d8f5975c2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './reppoints_moment_r50_fpn_1x_coco.py' -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg)) -optimizer = dict(lr=0.01) diff --git a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py b/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py deleted file mode 100644 index feca44aa67126b3326e45b1c9fbbf9e9c3bec11a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py b/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py deleted file mode 100644 index c0a12d00615aaa347ad6790c110be1304458501d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py b/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py deleted file mode 100644 index 9a63bd0862be6d5f363c5d481bade3e8e2e8433a..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/configs/reppoints/reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' -model = dict(bbox_head=dict(transform_method='partial_minmax')) diff --git a/cv/detection/co-detr/pytorch/configs/res2net/README.md b/cv/detection/co-detr/pytorch/configs/res2net/README.md deleted file mode 100644 index 1285870e98bf30229ae7add6fd189876ee74d707..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/res2net/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Res2Net - -> [Res2Net: A New Multi-scale Backbone Architecture](https://arxiv.org/abs/1904.01169) - - - -## Abstract - -Representing features at multiple scales is of great importance for numerous vision tasks. Recent advances in backbone convolutional neural networks (CNNs) continually demonstrate stronger multi-scale representation ability, leading to consistent performance gains on a wide range of applications. However, most existing methods represent the multi-scale features in a layer-wise manner. In this paper, we propose a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections within one single residual block. The Res2Net represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. The proposed Res2Net block can be plugged into the state-of-the-art backbone CNN models, e.g., ResNet, ResNeXt, and DLA. We evaluate the Res2Net block on all these models and demonstrate consistent performance gains over baseline models on widely-used datasets, e.g., CIFAR-100 and ImageNet. Further ablation studies and experimental results on representative computer vision tasks, i.e., object detection, class activation mapping, and salient object detection, further verify the superiority of the Res2Net over the state-of-the-art baseline methods. - -
- -
- -## Introduction - -We propose a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections within one single residual block. The Res2Net represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. - -| Backbone | Params. | GFLOPs | top-1 err. | top-5 err. | -| :---------------: | :-----: | :----: | :--------: | :--------: | -| ResNet-101 | 44.6 M | 7.8 | 22.63 | 6.44 | -| ResNeXt-101-64x4d | 83.5M | 15.5 | 20.40 | - | -| HRNetV2p-W48 | 77.5M | 16.1 | 20.70 | 5.50 | -| Res2Net-101 | 45.2M | 8.3 | 18.77 | 4.64 | - -Compared with other backbone networks, Res2Net requires fewer parameters and FLOPs. - -**Note:** - -- GFLOPs for classification are calculated with image size (224x224). - -## Results and Models - -### Faster R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :---------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R2-101-FPN | pytorch | 2x | 7.4 | - | 43.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco_20200514_231734.log.json) | - -### Mask R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R2-101-FPN | pytorch | 2x | 7.9 | - | 43.6 | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/mask_rcnn_r2_101_fpn_2x_coco/mask_rcnn_r2_101_fpn_2x_coco-17f061e8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/mask_rcnn_r2_101_fpn_2x_coco/mask_rcnn_r2_101_fpn_2x_coco_20200515_002413.log.json) | - -### Cascade R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :-----------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R2-101-FPN | 
pytorch | 20e | 7.8 | - | 45.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_rcnn_r2_101_fpn_20e_coco/cascade_rcnn_r2_101_fpn_20e_coco-f4b7b7db.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_rcnn_r2_101_fpn_20e_coco/cascade_rcnn_r2_101_fpn_20e_coco_20200515_091644.log.json) | - -### Cascade Mask R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R2-101-FPN | pytorch | 20e | 9.5 | - | 46.4 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco/cascade_mask_rcnn_r2_101_fpn_20e_coco-8a7b41e1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco/cascade_mask_rcnn_r2_101_fpn_20e_coco_20200515_091645.log.json) | - -### Hybrid Task Cascade (HTC) - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R2-101-FPN | pytorch | 20e | - | - | 47.5 | 41.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/htc_r2_101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/htc_r2_101_fpn_20e_coco/htc_r2_101_fpn_20e_coco-3a8d2112.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/htc_r2_101_fpn_20e_coco/htc_r2_101_fpn_20e_coco_20200515_150029.log.json) | - -- Res2Net ImageNet pretrained models are in [Res2Net-PretrainedModels](https://github.com/Res2Net/Res2Net-PretrainedModels). -- More applications of Res2Net are in [Res2Net-Github](https://github.com/Res2Net/). 
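
As an illustration of the "hierarchical residual-like connections within one single residual block" described above, here is a simplified, self-contained sketch of the split-and-chain structure at the heart of a Res2Net bottleneck. It deliberately omits the surrounding 1x1 convs, batch norm, stride handling and downsample path; the configs below rely on mmdet's full `Res2Net` backbone with `scales=4` and `base_width=26`.

```python
# Hedged sketch of the Res2Net split-and-chain structure (not mmdet's implementation).
import torch
import torch.nn as nn

class Res2NetSplitConv(nn.Module):
    """Split the bottleneck width into `scales` groups and chain 3x3 convs
    across them so each group sees an increasingly large receptive field."""

    def __init__(self, width: int, scales: int = 4):
        super().__init__()
        assert width % scales == 0
        self.scales = scales
        self.group_width = width // scales
        # one 3x3 conv per group except the first, which is passed through unchanged
        self.convs = nn.ModuleList(
            nn.Conv2d(self.group_width, self.group_width, 3, padding=1)
            for _ in range(scales - 1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        splits = torch.split(x, self.group_width, dim=1)
        out = [splits[0]]                  # first group: identity
        prev = None
        for i, conv in enumerate(self.convs):
            inp = splits[i + 1] if prev is None else splits[i + 1] + prev
            prev = conv(inp)               # previous group's output feeds the next group
            out.append(prev)
        return torch.cat(out, dim=1)

feat = torch.randn(1, 104, 56, 56)         # 104 channels = 4 groups x 26, mirroring 26w_4s
print(Res2NetSplitConv(width=104, scales=4)(feat).shape)  # -> torch.Size([1, 104, 56, 56])
```

Each successive group receives the previous group's output in addition to its own split, so the receptive field grows group by group inside a single block, which is what gives Res2Net its multi-scale representation at a granular level.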
- -## Citation - -```latex -@article{gao2019res2net, - title={Res2Net: A New Multi-scale Backbone Architecture}, - author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip}, - journal={IEEE TPAMI}, - year={2020}, - doi={10.1109/TPAMI.2019.2938758}, -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py deleted file mode 100644 index 6b6c0010a44be43131defb002767eeb5b5d15600..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py' -model = dict( - backbone=dict( - type='Res2Net', - depth=101, - scales=4, - base_width=26, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/cv/detection/co-detr/pytorch/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py deleted file mode 100644 index 10dddbb467993a023f8e498b57f86775b142ce4f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py' -model = dict( - backbone=dict( - type='Res2Net', - depth=101, - scales=4, - base_width=26, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/cv/detection/co-detr/pytorch/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py deleted file mode 100644 index fc2221cbabf293b55098d543ef9f14d9f75f1909..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py' -model = dict( - backbone=dict( - type='Res2Net', - depth=101, - scales=4, - base_width=26, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/cv/detection/co-detr/pytorch/configs/res2net/htc_r2_101_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/res2net/htc_r2_101_fpn_20e_coco.py deleted file mode 100644 index 22d0c5da57aa00daa62ebccab73d29fbe5620938..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/res2net/htc_r2_101_fpn_20e_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = '../htc/htc_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='Res2Net', - depth=101, - scales=4, - base_width=26, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) -# learning policy -lr_config = dict(step=[16, 19]) -runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/cv/detection/co-detr/pytorch/configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py deleted file mode 100644 index 33aef1a54d4e6c7d30eb2a2abc67937005a24aae..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py' -model = dict( - backbone=dict( - type='Res2Net', - depth=101, - scales=4, - base_width=26, - 
init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/cv/detection/co-detr/pytorch/configs/res2net/metafile.yml b/cv/detection/co-detr/pytorch/configs/res2net/metafile.yml deleted file mode 100644 index 27bac8c1bfb026977a3d53da385ba73625b37052..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/res2net/metafile.yml +++ /dev/null @@ -1,146 +0,0 @@ -Models: - - Name: faster_rcnn_r2_101_fpn_2x_coco - In Collection: Faster R-CNN - Config: configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py - Metadata: - Training Memory (GB): 7.4 - Epochs: 24 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Res2Net - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth - Paper: - URL: https://arxiv.org/abs/1904.01169 - Title: 'Res2Net for object detection and instance segmentation' - README: configs/res2net/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 - Version: v2.1.0 - - - Name: mask_rcnn_r2_101_fpn_2x_coco - In Collection: Mask R-CNN - Config: configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py - Metadata: - Training Memory (GB): 7.9 - Epochs: 24 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Res2Net - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.6 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/mask_rcnn_r2_101_fpn_2x_coco/mask_rcnn_r2_101_fpn_2x_coco-17f061e8.pth - Paper: - URL: https://arxiv.org/abs/1904.01169 - Title: 'Res2Net for object detection and instance segmentation' - README: configs/res2net/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 - Version: v2.1.0 - - - Name: cascade_rcnn_r2_101_fpn_20e_coco - In Collection: Cascade R-CNN - Config: configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py - Metadata: - Training Memory (GB): 7.8 - Epochs: 20 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Res2Net - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_rcnn_r2_101_fpn_20e_coco/cascade_rcnn_r2_101_fpn_20e_coco-f4b7b7db.pth - Paper: - URL: https://arxiv.org/abs/1904.01169 - Title: 'Res2Net for object detection and instance segmentation' - README: configs/res2net/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 - Version: v2.1.0 - - - Name: cascade_mask_rcnn_r2_101_fpn_20e_coco - In Collection: Cascade R-CNN - Config: configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py - Metadata: - Training Memory (GB): 9.5 - Epochs: 20 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Res2Net - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 40.0 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco/cascade_mask_rcnn_r2_101_fpn_20e_coco-8a7b41e1.pth - Paper: - URL: https://arxiv.org/abs/1904.01169 - Title: 'Res2Net for object detection and instance segmentation' - README: configs/res2net/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 - Version: v2.1.0 - - - Name: htc_r2_101_fpn_20e_coco - In Collection: HTC - Config: configs/res2net/htc_r2_101_fpn_20e_coco.py - Metadata: - Epochs: 20 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Res2Net - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 47.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 41.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/res2net/htc_r2_101_fpn_20e_coco/htc_r2_101_fpn_20e_coco-3a8d2112.pth - Paper: - URL: https://arxiv.org/abs/1904.01169 - Title: 'Res2Net for object detection and instance segmentation' - README: configs/res2net/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/res2net.py#L239 - Version: v2.1.0 diff --git a/cv/detection/co-detr/pytorch/configs/resnest/README.md b/cv/detection/co-detr/pytorch/configs/resnest/README.md deleted file mode 100644 index 3676e56827a6efa9d0fc14d1fd9648d33623e31a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/resnest/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# ResNeSt - -> [ResNeSt: Split-Attention Networks](https://arxiv.org/abs/2004.08955) - - - -## Abstract - -It is well known that featuremap attention and multi-path representation are important for visual recognition. In this paper, we present a modularized architecture, which applies the channel-wise attention on different network branches to leverage their success in capturing cross-feature interactions and learning diverse representations. Our design results in a simple and unified computation block, which can be parameterized using only a few variables. Our model, named ResNeSt, outperforms EfficientNet in accuracy and latency trade-off on image classification. In addition, ResNeSt has achieved superior transfer learning results on several public benchmarks serving as the backbone, and has been adopted by the winning entries of COCO-LVIS challenge. - -
- -
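As a rough illustration of the split-attention idea summarized in the abstract, the toy module below fuses `radix` parallel branches of a feature map with channel-wise softmax weights derived from their pooled sum, using `radix=2` and `reduction_factor=4` as in the ResNeSt backbone configs removed later in this diff. This is a hypothetical sketch only: it ignores cardinality groups, normalization layers and the exact structure of mmdetection's `ResNeSt` backbone.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class SplitAttention(nn.Module):
    """Toy split-attention: fuse `radix` branches with channel-wise softmax weights."""

    def __init__(self, channels, radix=2, reduction_factor=4):
        super().__init__()
        inter_channels = max(channels * radix // reduction_factor, 32)
        self.radix = radix
        self.fc1 = nn.Conv2d(channels, inter_channels, kernel_size=1)
        self.fc2 = nn.Conv2d(inter_channels, channels * radix, kernel_size=1)

    def forward(self, branches):
        # branches: list of `radix` tensors, each of shape (N, C, H, W)
        n, c = branches[0].shape[:2]
        gap = sum(branches).mean(dim=(2, 3), keepdim=True)   # global pooled context
        attn = self.fc2(F.relu(self.fc1(gap)))                # (N, C * radix, 1, 1)
        attn = attn.view(n, self.radix, c).softmax(dim=1)     # softmax across branches
        attn = attn.view(n, self.radix, c, 1, 1)
        return sum(attn[:, i] * branches[i] for i in range(self.radix))


branches = [torch.randn(2, 64, 32, 32) for _ in range(2)]
print(SplitAttention(64, radix=2)(branches).shape)  # torch.Size([2, 64, 32, 32])
```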
- -## Results and Models - -### Faster R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------: | :-----: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| S-50-FPN | pytorch | 1x | 4.8 | - | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20200926_125502-20289c16.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco-20200926_125502.log.json) | -| S-101-FPN | pytorch | 1x | 7.1 | - | 44.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201006_021058-421517f1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco-20201006_021058.log.json) | - -### Mask R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| S-50-FPN | pytorch | 1x | 5.5 | - | 42.6 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20200926_125503-8a2c3d47.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco-20200926_125503.log.json) | -| S-101-FPN | pytorch | 1x | 7.8 | - | 45.2 | 40.2 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201005_215831-af60cdf9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco-20201005_215831.log.json) | - -### Cascade R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| S-50-FPN | pytorch | 1x | - | - | 44.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201122_213640-763cc7b5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco-20201005_113242.log.json) | -| S-101-FPN | pytorch | 1x | 8.4 | - | 46.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201005_113242-b9459f8f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco-20201122_213640.log.json) | - -### Cascade Mask R-CNN - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| S-50-FPN | pytorch | 1x | - | - | 45.4 | 39.5 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201122_104428-99eca4c7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco-20201122_104428.log.json) | -| S-101-FPN | pytorch | 1x | 10.5 | - | 47.7 | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201005_113243-42607475.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco-20201005_113243.log.json) | - -## Citation - -```latex -@article{zhang2020resnest, -title={ResNeSt: Split-Attention Networks}, -author={Zhang, Hang and Wu, Chongruo and Zhang, Zhongyue and Zhu, Yi and Zhang, Zhi and Lin, Haibin and Sun, Yue and He, Tong and Muller, Jonas and Manmatha, R. and Li, Mu and Smola, Alexander}, -journal={arXiv preprint arXiv:2004.08955}, -year={2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py deleted file mode 100644 index 406f39db91bb5c5abacb76db969b9181df453466..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py' -model = dict( - backbone=dict( - stem_channels=128, - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='open-mmlab://resnest101'))) diff --git a/cv/detection/co-detr/pytorch/configs/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py deleted file mode 100644 index 83d75372fc561935e43542743c8814ca2734414d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py +++ /dev/null @@ -1,118 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - backbone=dict( - type='ResNeSt', - stem_channels=64, - depth=50, - radix=2, - reduction_factor=4, - avg_down_stride=True, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')), - roi_head=dict( - bbox_head=[ - dict( - type='Shared4Conv1FCBBoxHead', - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - norm_cfg=norm_cfg, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 
0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared4Conv1FCBBoxHead', - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - norm_cfg=norm_cfg, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared4Conv1FCBBoxHead', - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - norm_cfg=norm_cfg, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ], - mask_head=dict(norm_cfg=norm_cfg))) -# # use ResNeSt img_norm -img_norm_cfg = dict( - mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/resnest/cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py b/cv/detection/co-detr/pytorch/configs/resnest/cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py deleted file mode 100644 index 0a7476a3748b6ce80d25188284facfec13d9f86e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/resnest/cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py' -model = dict( - backbone=dict( - stem_channels=128, - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='open-mmlab://resnest101'))) diff --git a/cv/detection/co-detr/pytorch/configs/resnest/cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py b/cv/detection/co-detr/pytorch/configs/resnest/cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py deleted file mode 100644 index 6ed7730104ca42e23a004827bb7aa0a114fa5e70..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/configs/resnest/cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py +++ /dev/null @@ -1,116 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - backbone=dict( - type='ResNeSt', - stem_channels=64, - depth=50, - radix=2, - reduction_factor=4, - avg_down_stride=True, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')), - roi_head=dict( - bbox_head=[ - dict( - type='Shared4Conv1FCBBoxHead', - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - norm_cfg=norm_cfg, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared4Conv1FCBBoxHead', - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - norm_cfg=norm_cfg, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared4Conv1FCBBoxHead', - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - norm_cfg=norm_cfg, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ], )) -# # use ResNeSt img_norm -img_norm_cfg = dict( - mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', - with_bbox=True, - with_mask=False, - poly2mask=False), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py b/cv/detection/co-detr/pytorch/configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py deleted file mode 100644 index 40a2f1f2c9d62f173e88893e4ef809e70e2cbf5b..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py' -model = dict( - backbone=dict( - stem_channels=128, - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='open-mmlab://resnest101'))) diff --git a/cv/detection/co-detr/pytorch/configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py b/cv/detection/co-detr/pytorch/configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py deleted file mode 100644 index eb1ecd224cb86d6c296363ab53fb733848f6224c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py +++ /dev/null @@ -1,62 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - backbone=dict( - type='ResNeSt', - stem_channels=64, - depth=50, - radix=2, - reduction_factor=4, - avg_down_stride=True, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')), - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - norm_cfg=norm_cfg))) -# # use ResNeSt img_norm -img_norm_cfg = dict( - mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', - with_bbox=True, - with_mask=False, - poly2mask=False), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py deleted file mode 100644 index c882ba1421afdcc7100995da7ab10eb16bd3db25..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py' -model = dict( - backbone=dict( - stem_channels=128, - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='open-mmlab://resnest101'))) diff --git a/cv/detection/co-detr/pytorch/configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py deleted file mode 100644 index 
4e50deacbdecdccace68f77636edac7a29d4ef57..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py +++ /dev/null @@ -1,64 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - backbone=dict( - type='ResNeSt', - stem_channels=64, - depth=50, - radix=2, - reduction_factor=4, - avg_down_stride=True, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')), - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - norm_cfg=norm_cfg), - mask_head=dict(norm_cfg=norm_cfg))) -# # use ResNeSt img_norm -img_norm_cfg = dict( - mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', - with_bbox=True, - with_mask=True, - poly2mask=False), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/resnest/metafile.yml b/cv/detection/co-detr/pytorch/configs/resnest/metafile.yml deleted file mode 100644 index cfeec7193138ae7710ce8c99b42134992209ae57..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/resnest/metafile.yml +++ /dev/null @@ -1,230 +0,0 @@ -Models: - - Name: faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco - In Collection: Faster R-CNN - Config: configs/resnest/faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py - Metadata: - Training Memory (GB): 4.8 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ResNeSt - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20200926_125502-20289c16.pth - Paper: - URL: https://arxiv.org/abs/2004.08955 - Title: 'ResNeSt: Split-Attention Networks' - README: configs/resnest/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 - Version: v2.7.0 - - - Name: faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco - In Collection: Faster R-CNN - Config: configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py - Metadata: - Training Memory (GB): 7.1 - Epochs: 12 - 
Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ResNeSt - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/faster_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201006_021058-421517f1.pth - Paper: - URL: https://arxiv.org/abs/2004.08955 - Title: 'ResNeSt: Split-Attention Networks' - README: configs/resnest/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 - Version: v2.7.0 - - - Name: mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco - In Collection: Mask R-CNN - Config: configs/resnest/mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py - Metadata: - Training Memory (GB): 5.5 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ResNeSt - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.6 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20200926_125503-8a2c3d47.pth - Paper: - URL: https://arxiv.org/abs/2004.08955 - Title: 'ResNeSt: Split-Attention Networks' - README: configs/resnest/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 - Version: v2.7.0 - - - Name: mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco - In Collection: Mask R-CNN - Config: configs/resnest/mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py - Metadata: - Training Memory (GB): 7.8 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ResNeSt - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 40.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201005_215831-af60cdf9.pth - Paper: - URL: https://arxiv.org/abs/2004.08955 - Title: 'ResNeSt: Split-Attention Networks' - README: configs/resnest/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 - Version: v2.7.0 - - - Name: cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco - In Collection: Cascade R-CNN - Config: configs/resnest/cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py - Metadata: - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ResNeSt - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201122_213640-763cc7b5.pth - Paper: - URL: https://arxiv.org/abs/2004.08955 - Title: 'ResNeSt: Split-Attention Networks' - README: configs/resnest/README.md - Code: - URL: 
https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 - Version: v2.7.0 - - - Name: cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco - In Collection: Cascade R-CNN - Config: configs/resnest/cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py - Metadata: - Training Memory (GB): 8.4 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ResNeSt - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco/cascade_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain-range_1x_coco_20201005_113242-b9459f8f.pth - Paper: - URL: https://arxiv.org/abs/2004.08955 - Title: 'ResNeSt: Split-Attention Networks' - README: configs/resnest/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 - Version: v2.7.0 - - - Name: cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco - In Collection: Cascade R-CNN - Config: configs/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py - Metadata: - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ResNeSt - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.4 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s50_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201122_104428-99eca4c7.pth - Paper: - URL: https://arxiv.org/abs/2004.08955 - Title: 'ResNeSt: Split-Attention Networks' - README: configs/resnest/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 - Version: v2.7.0 - - - Name: cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco - In Collection: Cascade R-CNN - Config: configs/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py - Metadata: - Training Memory (GB): 10.5 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ResNeSt - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 47.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 41.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/resnest/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco/cascade_mask_rcnn_s101_fpn_syncbn-backbone%2Bhead_mstrain_1x_coco_20201005_113243-42607475.pth - Paper: - URL: https://arxiv.org/abs/2004.08955 - Title: 'ResNeSt: Split-Attention Networks' - README: configs/resnest/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/backbones/resnest.py#L273 - Version: v2.7.0 diff --git a/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/README.md b/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/README.md deleted file mode 100644 index dd00b207a9d90546c2f68dec5660dc016d81f38f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# ResNet strikes back - -> [ResNet strikes back: An improved 
training procedure in timm](https://arxiv.org/abs/2110.00476) - - - -## Abstract - -The influential Residual Networks designed by He et al. remain the gold-standard architecture in numerous scientific publications. They typically serve as the default architecture in studies, or as baselines when new architectures are proposed. Yet there has been significant progress on best practices for training neural networks since the inception of the ResNet architecture in 2015. Novel optimization & data augmentation have increased the effectiveness of the training recipes. - -In this paper, we re-evaluate the performance of the vanilla ResNet-50 when trained with a procedure that integrates such advances. We share competitive training settings and pre-trained models in the timm open-source library, with the hope that they will serve as better baselines for future work. For instance, with our more demanding training setting, a vanilla ResNet-50 reaches 80.4% top-1 accuracy at resolution 224×224 on ImageNet-val without extra data or distillation. We also report the performance achieved with popular models with our training procedure. - 
- -
- -## Results and Models - -| Method | Backbone | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :----------------: | :------: | :-----: | :------: | :------------: | :---------: | :---------: | :-----------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Faster R-CNN | R-50 rsb | 1x | 3.9 | - | 40.8 (+3.4) | - | [Config](./faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_162229-32ae82a9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_162229.log.json) | -| Mask R-CNN | R-50 rsb | 1x | 4.5 | - | 41.2 (+3.0) | 38.2 (+3.0) | [Config](./mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054.log.json) | -| Cascade Mask R-CNN | R-50 rsb | 1x | 6.2 | - | 44.8 (+3.6) | 39.9 (+3.6) | [Config](./cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_193636-8b9ad50f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_193636.log.json) | -| RetinaNet | R-50 rsb | 1x | 3.8 | - | 39.0 (+2.5) | - | [Config](./retinanet_r50_fpn_rsb-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco/retinanet_r50_fpn_rsb-pretrain_1x_coco_20220113_175432-bd24aae9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco/retinanet_r50_fpn_rsb-pretrain_1x_coco_20220113_175432.log.json) | - -**Notes:** - -- 'rsb' is short for 'ResNet strikes back' -- We have done some grid searches on learning rate and weight decay and obtained these optimal hyper-parameters. 
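The config files removed later in this diff wire the rsb-pretrained classification weights into standard detectors through the backbone's `init_cfg` and switch the optimizer to AdamW with the hyper-parameters found above. As a hedged usage sketch, assuming an mmdetection 2.x / mmcv-full environment and that the config path below still exists in your checkout, one of them can be built like this:

```python
# Minimal sketch: build the rsb-pretrained Faster R-CNN from its config.
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile(
    'configs/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py')
model = build_detector(cfg.model,
                       train_cfg=cfg.get('train_cfg'),
                       test_cfg=cfg.get('test_cfg'))
# init_weights() fetches the rsb ImageNet checkpoint referenced by init_cfg
# and loads only the parameters under the 'backbone.' prefix.
model.init_weights()
print(sum(p.numel() for p in model.parameters()) / 1e6, 'M parameters')
```

Training then goes through the usual mmdetection `tools/train.py` entry point; nothing else in the detector changes relative to the torchvision-pretrained baselines.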
- -## Citation - -```latex -@article{wightman2021resnet, -title={Resnet strikes back: An improved training procedure in timm}, -author={Ross Wightman, Hugo Touvron, Hervé Jégou}, -journal={arXiv preprint arXiv:2110.00476}, -year={2021} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py deleted file mode 100644 index 8b601f05718427b54ce7b472fad8a75636159de2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa -model = dict( - backbone=dict( - init_cfg=dict( - type='Pretrained', prefix='backbone.', checkpoint=checkpoint))) - -optimizer = dict( - _delete_=True, - type='AdamW', - lr=0.0002, - weight_decay=0.05, - paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py deleted file mode 100644 index fe866843483722eaeceb4ab20c05fd8b80841cb0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa -model = dict( - backbone=dict( - init_cfg=dict( - type='Pretrained', prefix='backbone.', checkpoint=checkpoint))) - -optimizer = dict( - _delete_=True, - type='AdamW', - lr=0.0002, - weight_decay=0.05, - paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py deleted file mode 100644 index 321d98ebe124cfb5ff155870e3a4954fa2fcead2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa -model = dict( - backbone=dict( - init_cfg=dict( - type='Pretrained', prefix='backbone.', checkpoint=checkpoint))) - -optimizer = dict( - _delete_=True, - type='AdamW', - lr=0.0002, - weight_decay=0.05, - paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/metafile.yml 
b/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/metafile.yml deleted file mode 100644 index 4c85a16d4fd2d49de48a47fb8ff7b4a2b09c799d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/metafile.yml +++ /dev/null @@ -1,116 +0,0 @@ -Models: - - Name: faster_rcnn_r50_fpn_rsb-pretrain_1x_coco - In Collection: Faster R-CNN - Config: configs/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py - Metadata: - Training Memory (GB): 3.9 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ResNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco/faster_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_162229-32ae82a9.pth - Paper: - URL: https://arxiv.org/abs/2110.00476 - Title: 'ResNet strikes back: An improved training procedure in timm' - README: configs/resnet_strikes_back/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md - Version: v2.22.0 - - - Name: cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco - In Collection: Cascade R-CNN - Config: configs/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py - Metadata: - Training Memory (GB): 6.2 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ResNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_193636-8b9ad50f.pth - Paper: - URL: https://arxiv.org/abs/2110.00476 - Title: 'ResNet strikes back: An improved training procedure in timm' - README: configs/resnet_strikes_back/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md - Version: v2.22.0 - - - Name: retinanet_r50_fpn_rsb-pretrain_1x_coco - In Collection: RetinaNet - Config: configs/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco.py - Metadata: - Training Memory (GB): 3.8 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ResNet - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco/retinanet_r50_fpn_rsb-pretrain_1x_coco_20220113_175432-bd24aae9.pth - Paper: - URL: https://arxiv.org/abs/2110.00476 - Title: 'ResNet strikes back: An improved training procedure in timm' - README: configs/resnet_strikes_back/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md - Version: v2.22.0 - - - Name: mask_rcnn_r50_fpn_rsb-pretrain_1x_coco - In Collection: Mask R-CNN - Config: configs/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py - Metadata: - Training Memory (GB): 4.5 - Epochs: 12 - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ResNet - Results: - - 
Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 38.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/resnet_strikes_back/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco/mask_rcnn_r50_fpn_rsb-pretrain_1x_coco_20220113_174054-06ce8ba0.pth - Paper: - URL: https://arxiv.org/abs/2110.00476 - Title: 'ResNet strikes back: An improved training procedure in timm' - README: configs/resnet_strikes_back/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/configs/resnet_strikes_back/README.md - Version: v2.22.0 diff --git a/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco.py deleted file mode 100644 index 480697a06105f8af8f6f80cf2b1799036f39d103..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/resnet_strikes_back/retinanet_r50_fpn_rsb-pretrain_1x_coco.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa -model = dict( - backbone=dict( - init_cfg=dict( - type='Pretrained', prefix='backbone.', checkpoint=checkpoint))) - -optimizer = dict( - _delete_=True, - type='AdamW', - lr=0.0001, - weight_decay=0.05, - paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True)) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/README.md b/cv/detection/co-detr/pytorch/configs/retinanet/README.md deleted file mode 100644 index b9e0a2af38b85e23aecacb89f6255fc44504ea39..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# RetinaNet - -> [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002) - - - -## Abstract - -The highest accuracy object detectors to date are based on a two-stage approach popularized by R-CNN, where a classifier is applied to a sparse set of candidate object locations. In contrast, one-stage detectors that are applied over a regular, dense sampling of possible object locations have the potential to be faster and simpler, but have trailed the accuracy of two-stage detectors thus far. In this paper, we investigate why this is the case. We discover that the extreme foreground-background class imbalance encountered during training of dense detectors is the central cause. We propose to address this class imbalance by reshaping the standard cross entropy loss such that it down-weights the loss assigned to well-classified examples. Our novel Focal Loss focuses training on a sparse set of hard examples and prevents the vast number of easy negatives from overwhelming the detector during training. To evaluate the effectiveness of our loss, we design and train a simple dense detector we call RetinaNet. Our results show that when trained with the focal loss, RetinaNet is able to match the speed of previous one-stage detectors while surpassing the accuracy of all existing state-of-the-art two-stage detectors. - -
- -
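The reshaped cross-entropy described above is the focal loss, FL(p_t) = -alpha_t * (1 - p_t)^gamma * log(p_t). The snippet below is a small, self-contained sketch of the binary form with the paper's defaults (alpha = 0.25, gamma = 2); it is meant to show the down-weighting behaviour, not to reproduce mmdetection's `FocalLoss` op.

```python
import torch
import torch.nn.functional as F


def focal_loss(logits, targets, alpha=0.25, gamma=2.0):
    """Binary focal loss: cross-entropy scaled by (1 - p_t) ** gamma so that
    well-classified (easy) examples contribute very little to the total loss."""
    p = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    p_t = p * targets + (1 - p) * (1 - targets)              # prob. of the true class
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1 - p_t) ** gamma * ce).mean()


# An easy negative (logit -4, i.e. p ~ 0.02) is suppressed far more strongly
# than a hard negative (logit +1, i.e. p ~ 0.73 while the label is 0).
easy = focal_loss(torch.tensor([-4.0]), torch.tensor([0.0]))
hard = focal_loss(torch.tensor([1.0]), torch.tensor([0.0]))
print(f'easy: {easy.item():.2e}  hard: {hard.item():.2e}')
```

Setting gamma = 0 recovers ordinary (alpha-weighted) cross-entropy, which is exactly the baseline the abstract says gets overwhelmed by easy negatives.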
- -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------------: | :-----: | :----------: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-18-FPN | pytorch | 1x | 1.7 | | 31.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r18_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055.log.json) | -| R-18-FPN | pytorch | 1x(1 x 8 BS) | 5.0 | | 31.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r18_fpn_1x8_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x8_1x_coco/retinanet_r18_fpn_1x8_1x_coco_20220407_171255-4ea310d7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x8_1x_coco/retinanet_r18_fpn_1x8_1x_coco_20220407_171255.log.json) | -| R-50-FPN | caffe | 1x | 3.5 | 18.6 | 36.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531-f11027c5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531_012518.log.json) | -| R-50-FPN | pytorch | 1x | 3.8 | 19.0 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130_002941.log.json) | -| R-50-FPN (FP16) | pytorch | 1x | 2.8 | 31.6 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702_020127.log.json) | -| R-50-FPN | pytorch | 2x | - | - | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131-fdb43119.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131_114738.log.json) | -| R-101-FPN | caffe | 1x | 5.5 | 14.7 | 38.5 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531-b428fa0f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531_012536.log.json) | -| R-101-FPN | pytorch | 1x | 5.7 | 15.0 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130-7a93545f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130_003055.log.json) | -| R-101-FPN | pytorch | 2x | - | - | 38.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131_114859.log.json) | -| X-101-32x4d-FPN | pytorch | 1x | 7.0 | 12.1 | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130-5c8b7ec4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130_003004.log.json) | -| X-101-32x4d-FPN | pytorch | 2x | - | - | 40.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_x101_32x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131-237fc5e1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131_114812.log.json) | -| X-101-64x4d-FPN | pytorch | 1x | 10.0 | 8.7 | 41.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130-366f5af1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130_003008.log.json) | -| X-101-64x4d-FPN | pytorch | 2x | - | - | 40.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131-bca068ab.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131_114833.log.json) | - -## Pre-trained Models - -We also train some models with longer schedules and multi-scale training. The users could finetune them for downstream tasks. 
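The table that follows lists the released checkpoints. As a rough sketch of how fine-tuning from one of them usually looks in an mmdetection 2.x config (illustrative only; the dataset base and hyper-parameters below are placeholders, not files from this repo):

```python
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',  # swap in your own dataset config here
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

# initialize the whole detector from the 3x multi-scale checkpoint listed below
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_mstrain_3x_coco/retinanet_r50_fpn_mstrain_3x_coco_20210718_220633-88476508.pth'  # noqa

# a smaller learning rate than the from-scratch 0.01 is typical when fine-tuning
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001)
lr_config = dict(step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
```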
- -| Backbone | Style | Lr schd | Mem (GB) | box AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :----: | :-----------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | pytorch | 3x | 3.5 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_fpn_mstrain_640-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_mstrain_3x_coco/retinanet_r50_fpn_mstrain_3x_coco_20210718_220633-88476508.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_mstrain_3x_coco/retinanet_r50_fpn_mstrain_3x_coco_20210718_220633-88476508.log.json) | -| R-101-FPN | caffe | 3x | 5.4 | 40.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco/retinanet_r101_caffe_fpn_mstrain_3x_coco_20210721_063439-88a8a944.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco/retinanet_r101_caffe_fpn_mstrain_3x_coco_20210721_063439-88a8a944.log.json) | -| R-101-FPN | pytorch | 3x | 5.4 | 41 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r101_fpn_mstrain_640-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_mstrain_3x_coco/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_mstrain_3x_coco/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.log.json) | -| X-101-64x4d-FPN | pytorch | 3x | 9.8 | 41.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_mstrain_3x_coco/retinanet_x101_64x4d_fpn_mstrain_3x_coco_20210719_051838-022c2187.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_mstrain_3x_coco/retinanet_x101_64x4d_fpn_mstrain_3x_coco_20210719_051838-022c2187.log.json) | - -## Citation - -```latex -@inproceedings{lin2017focal, - title={Focal loss for dense object detection}, - author={Lin, Tsung-Yi and Goyal, Priya and Girshick, Ross and He, Kaiming and Doll{\'a}r, Piotr}, - booktitle={Proceedings of the IEEE international conference on computer vision}, - year={2017} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/metafile.yml b/cv/detection/co-detr/pytorch/configs/retinanet/metafile.yml deleted file mode 100644 index 8751cbbc1d3dd604b41a593d0137082d97c12f76..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/metafile.yml +++ /dev/null @@ -1,312 +0,0 @@ -Collections: - - Name: RetinaNet - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 
GPUs - Architecture: - - Focal Loss - - FPN - - ResNet - Paper: - URL: https://arxiv.org/abs/1708.02002 - Title: "Focal Loss for Dense Object Detection" - README: configs/retinanet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/retinanet.py#L6 - Version: v2.0.0 - -Models: - - Name: retinanet_r18_fpn_1x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_r18_fpn_1x_coco.py - Metadata: - Training Memory (GB): 1.7 - Training Resources: 8x V100 GPUs - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 31.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x_coco/retinanet_r18_fpn_1x_coco_20220407_171055-614fd399.pth - - - Name: retinanet_r18_fpn_1x8_1x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_r18_fpn_1x8_1x_coco.py - Metadata: - Training Memory (GB): 5.0 - Training Resources: 1x V100 GPUs - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 31.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r18_fpn_1x8_1x_coco/retinanet_r18_fpn_1x8_1x_coco_20220407_171255-4ea310d7.pth - - - Name: retinanet_r50_caffe_fpn_1x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 3.5 - inference time (ms/im): - - value: 53.76 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 36.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_caffe_fpn_1x_coco/retinanet_r50_caffe_fpn_1x_coco_20200531-f11027c5.pth - - - Name: retinanet_r50_fpn_1x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 3.8 - inference time (ms/im): - - value: 52.63 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 36.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth - - - Name: retinanet_r50_fpn_fp16_1x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py - Metadata: - Training Memory (GB): 2.8 - Training Techniques: - - SGD with Momentum - - Weight Decay - - Mixed Precision Training - inference time (ms/im): - - value: 31.65 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP16 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 36.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/retinanet_r50_fpn_fp16_1x_coco/retinanet_r50_fpn_fp16_1x_coco_20200702-0dbfb212.pth - - - Name: retinanet_r50_fpn_2x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_r50_fpn_2x_coco.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_2x_coco/retinanet_r50_fpn_2x_coco_20200131-fdb43119.pth - - - Name: retinanet_r50_fpn_mstrain_640-800_3x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_r50_fpn_mstrain_640-800_3x_coco.py - Metadata: - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - 
Metrics: - box AP: 39.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_mstrain_3x_coco/retinanet_r50_fpn_mstrain_3x_coco_20210718_220633-88476508.pth - - - Name: retinanet_r101_caffe_fpn_1x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py - Metadata: - Training Memory (GB): 5.5 - inference time (ms/im): - - value: 68.03 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_1x_coco/retinanet_r101_caffe_fpn_1x_coco_20200531-b428fa0f.pth - - - Name: retinanet_r101_caffe_fpn_mstrain_3x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco.py - Metadata: - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco/retinanet_r101_caffe_fpn_mstrain_3x_coco_20210721_063439-88a8a944.pth - - - Name: retinanet_r101_fpn_1x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_r101_fpn_1x_coco.py - Metadata: - Training Memory (GB): 5.7 - inference time (ms/im): - - value: 66.67 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_1x_coco/retinanet_r101_fpn_1x_coco_20200130-7a93545f.pth - - - Name: retinanet_r101_fpn_2x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_r101_fpn_2x_coco.py - Metadata: - Training Memory (GB): 5.7 - inference time (ms/im): - - value: 66.67 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_2x_coco/retinanet_r101_fpn_2x_coco_20200131-5560aee8.pth - - - Name: retinanet_r101_fpn_mstrain_640-800_3x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_r101_fpn_mstrain_640-800_3x_coco.py - Metadata: - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r101_fpn_mstrain_3x_coco/retinanet_r101_fpn_mstrain_3x_coco_20210720_214650-7ee888e0.pth - - - Name: retinanet_x101_32x4d_fpn_1x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.0 - inference time (ms/im): - - value: 82.64 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_1x_coco/retinanet_x101_32x4d_fpn_1x_coco_20200130-5c8b7ec4.pth - - - Name: retinanet_x101_32x4d_fpn_2x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_x101_32x4d_fpn_2x_coco.py - Metadata: - Training Memory (GB): 7.0 - inference time (ms/im): - - value: 82.64 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 
1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_32x4d_fpn_2x_coco/retinanet_x101_32x4d_fpn_2x_coco_20200131-237fc5e1.pth - - - Name: retinanet_x101_64x4d_fpn_1x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py - Metadata: - Training Memory (GB): 10.0 - inference time (ms/im): - - value: 114.94 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_1x_coco/retinanet_x101_64x4d_fpn_1x_coco_20200130-366f5af1.pth - - - Name: retinanet_x101_64x4d_fpn_2x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py - Metadata: - Training Memory (GB): 10.0 - inference time (ms/im): - - value: 114.94 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_2x_coco/retinanet_x101_64x4d_fpn_2x_coco_20200131-bca068ab.pth - - - Name: retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco - In Collection: RetinaNet - Config: configs/retinanet/retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py - Metadata: - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_x101_64x4d_fpn_mstrain_3x_coco/retinanet_x101_64x4d_fpn_mstrain_3x_coco_20210719_051838-022c2187.pth diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py deleted file mode 100644 index 56eaae200fb839eddabc95f18a7a6889cb830100..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './retinanet_r50_caffe_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco.py deleted file mode 100644 index b87295e69d113105cb85d388e7cf5abc9f9af217..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_caffe_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py' -# learning policy -model = dict( - pretrained='open-mmlab://detectron2/resnet101_caffe', - backbone=dict(depth=101)) -lr_config = dict(step=[28, 34]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_fpn_1x_coco.py deleted file mode 100644 index a7f06002413dcdf2716975655a582a3eefaf007a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = 
'./retinanet_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_fpn_2x_coco.py deleted file mode 100644 index 721112a221953bb86dc3259e3991d7f0f740b26c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_fpn_2x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './retinanet_r50_fpn_2x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_fpn_mstrain_640-800_3x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_fpn_mstrain_640-800_3x_coco.py deleted file mode 100644 index 6bbcac4fa4f50f6e40372c672fdc6bd1075ec5c4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r101_fpn_mstrain_640-800_3x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py' -] -# optimizer -model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r18_fpn_1x8_1x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r18_fpn_1x8_1x_coco.py deleted file mode 100644 index 01a35f23dcdddc0e48a5469561b2d40ed5ed1a3f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r18_fpn_1x8_1x_coco.py +++ /dev/null @@ -1,23 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -# data -data = dict(samples_per_gpu=8) - -# optimizer -model = dict( - backbone=dict( - depth=18, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), - neck=dict(in_channels=[64, 128, 256, 512])) - -# Note: If the learning rate is set to 0.0025, the mAP will be 32.4. -optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (1 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=8) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r18_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r18_fpn_1x_coco.py deleted file mode 100644 index 6197b32dd8db44987f0a4e891bfdcfee6bd7f906..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r18_fpn_1x_coco.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -# optimizer -model = dict( - backbone=dict( - depth=18, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')), - neck=dict(in_channels=[64, 128, 256, 512])) -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (8 GPUs) x (2 samples per GPU) -auto_scale_lr = dict(base_batch_size=16) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 04c9af5898971b4a13c46d71362c111e8cabbbaf..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,41 +0,0 @@ -_base_ = './retinanet_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe'))) -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_1x_coco.py deleted file mode 100644 index 4d7b8f2bd04598d64f1cf24cfaf9c155f9b21e87..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_1x_coco.py +++ /dev/null @@ -1,46 +0,0 @@ -_base_ = './retinanet_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe'))) -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) 
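The `auto_scale_lr` notes in the R-18 configs above follow the usual linear scaling rule (learning rate proportional to the total batch size). A quick sanity check of the numbers, assuming that rule:

```python
def scaled_lr(base_lr, base_batch_size, num_gpus, samples_per_gpu):
    """Linear LR scaling: the effective LR grows with the total batch size."""
    return base_lr * (num_gpus * samples_per_gpu) / base_batch_size

# retinanet_r18_fpn_1x_coco.py:     lr=0.01 defined for 8 GPUs x 2 imgs (batch 16)
# retinanet_r18_fpn_1x8_1x_coco.py: 1 GPU x 8 imgs (batch 8) -> lr scales to 0.005
assert scaled_lr(0.01, base_batch_size=16, num_gpus=1, samples_per_gpu=8) == 0.005
```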
diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_2x_coco.py deleted file mode 100644 index eea9690eb159fe03865825bb9f9ca5fd6ff99d70..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 23]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_3x_coco.py deleted file mode 100644 index 8057650736eaab0b7b01a7957339124f73d6d6b0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py' -# learning policy -lr_config = dict(step=[28, 34]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_1x_coco.py deleted file mode 100644 index 04bd696b9589e37ad34c9fdd035b97e271d3b214..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_2x_coco.py deleted file mode 100644 index 927915fa8c63d380cc4bd62a580ffaad8b1ce386..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './retinanet_r50_fpn_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_90k_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_90k_coco.py deleted file mode 100644 index ceda32798840bf653bf83ab506ddd80f59e3a355..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_90k_coco.py +++ /dev/null @@ -1,15 +0,0 @@ -_base_ = 'retinanet_r50_fpn_1x_coco.py' - -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[60000, 80000]) - -# Runner type -runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000) - -checkpoint_config = dict(interval=10000) -evaluation = dict(interval=10000, metric='bbox') diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py deleted file mode 100644 index 6b6cebe48a166155c24918d4504acebcd104d672..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py +++ /dev/null @@ -1,3 
+0,0 @@ -_base_ = './retinanet_r50_fpn_1x_coco.py' -# fp16 settings -fp16 = dict(loss_scale=512.) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_mstrain_640-800_3x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_mstrain_640-800_3x_coco.py deleted file mode 100644 index 02a2c291631838781d63f06b286a4c5dd6a009fe..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_r50_fpn_mstrain_640-800_3x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py' -] -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index 765a4c2cc0f69bf13891bf371c94c17b6cd5f30c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './retinanet_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_32x4d_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_32x4d_fpn_2x_coco.py deleted file mode 100644 index 14de96faf70180d7828a670630a8f48a3cd1081d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_32x4d_fpn_2x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './retinanet_r50_fpn_2x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index 948cd18e4d995d18d947b345ba7229b5cad60eb1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './retinanet_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py deleted file mode 100644 index ad04b6eea793add40c81d1d7096481597357d5bd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './retinanet_r50_fpn_2x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - 
base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py b/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py deleted file mode 100644 index f6ab512f182a003d246fc11b3caba0a0161d25d0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/retinanet/retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py' -] -# optimizer -model = dict( - pretrained='open-mmlab://resnext101_64x4d', - backbone=dict(type='ResNeXt', depth=101, groups=64, base_width=4)) -optimizer = dict(type='SGD', lr=0.01) diff --git a/cv/detection/co-detr/pytorch/configs/rpn/README.md b/cv/detection/co-detr/pytorch/configs/rpn/README.md deleted file mode 100644 index 99addc0e799c4692e7d20842728cd8c9e741dc98..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/rpn/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# RPN - -> [Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks](https://arxiv.org/abs/1506.01497) - - - -## Abstract - -State-of-the-art object detection networks depend on region proposal algorithms to hypothesize object locations. Advances like SPPnet and Fast R-CNN have reduced the running time of these detection networks, exposing region proposal computation as a bottleneck. In this work, we introduce a Region Proposal Network (RPN) that shares full-image convolutional features with the detection network, thus enabling nearly cost-free region proposals. An RPN is a fully convolutional network that simultaneously predicts object bounds and objectness scores at each position. The RPN is trained end-to-end to generate high-quality region proposals, which are used by Fast R-CNN for detection. We further merge RPN and Fast R-CNN into a single network by sharing their convolutional features---using the recently popular terminology of neural networks with 'attention' mechanisms, the RPN component tells the unified network where to look. For the very deep VGG-16 model, our detection system has a frame rate of 5fps (including all steps) on a GPU, while achieving state-of-the-art object detection accuracy on PASCAL VOC 2007, 2012, and MS COCO datasets with only 300 proposals per image. In ILSVRC and COCO 2015 competitions, Faster R-CNN and RPN are the foundations of the 1st-place winning entries in several tracks. - -
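The "dense sampling of possible object locations" the abstract refers to is a regular anchor grid over each feature map; the RPN head then predicts an objectness score and box deltas at every position. A minimal sketch of the per-level anchor centers (illustrative, not the mmdet anchor generator):

```python
import numpy as np

def anchor_centers(feat_h, feat_w, stride):
    """Centers of the dense per-position anchors for one FPN level."""
    ys, xs = np.meshgrid(np.arange(feat_h), np.arange(feat_w), indexing='ij')
    return np.stack([(xs + 0.5) * stride, (ys + 0.5) * stride], axis=-1)

# e.g. the stride-8 level of an 800x1216 padded input -> a 100x152 grid of centers
centers = anchor_centers(100, 152, stride=8)
```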
- -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | AR1000 | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | caffe | 1x | 3.5 | 22.6 | 58.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_caffe_fpn_1x_coco/rpn_r50_caffe_fpn_1x_coco_20200531-5b903a37.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_caffe_fpn_1x_coco/rpn_r50_caffe_fpn_1x_coco_20200531_012334.log.json) | -| R-50-FPN | pytorch | 1x | 3.8 | 22.3 | 58.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_1x_coco/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_1x_coco/rpn_r50_fpn_1x_coco_20200218_151240.log.json) | -| R-50-FPN | pytorch | 2x | - | - | 58.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_2x_coco/rpn_r50_fpn_2x_coco_20200131-0728c9b3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r50_fpn_2x_coco/rpn_r50_fpn_2x_coco_20200131_190631.log.json) | -| R-101-FPN | caffe | 1x | 5.4 | 17.3 | 60.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_caffe_fpn_1x_coco/rpn_r101_caffe_fpn_1x_coco_20200531-0629a2e2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_caffe_fpn_1x_coco/rpn_r101_caffe_fpn_1x_coco_20200531_012345.log.json) | -| R-101-FPN | pytorch | 1x | 5.8 | 16.5 | 59.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_fpn_1x_coco/rpn_r101_fpn_1x_coco_20200131-2ace2249.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_fpn_1x_coco/rpn_r101_fpn_1x_coco_20200131_191000.log.json) | -| R-101-FPN | pytorch | 2x | - | - | 60.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_fpn_2x_coco/rpn_r101_fpn_2x_coco_20200131-24e3db1a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_r101_fpn_2x_coco/rpn_r101_fpn_2x_coco_20200131_191106.log.json) | -| X-101-32x4d-FPN | pytorch | 1x | 7.0 | 13.0 | 60.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_32x4d_fpn_1x_coco/rpn_x101_32x4d_fpn_1x_coco_20200219-b02646c6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_32x4d_fpn_1x_coco/rpn_x101_32x4d_fpn_1x_coco_20200219_012037.log.json) | -| X-101-32x4d-FPN | pytorch | 2x | - | - | 61.1 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_x101_32x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_32x4d_fpn_2x_coco/rpn_x101_32x4d_fpn_2x_coco_20200208-d22bd0bb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_32x4d_fpn_2x_coco/rpn_x101_32x4d_fpn_2x_coco_20200208_200752.log.json) | -| X-101-64x4d-FPN | pytorch | 1x | 10.1 | 9.1 | 61.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_64x4d_fpn_1x_coco/rpn_x101_64x4d_fpn_1x_coco_20200208-cde6f7dd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_64x4d_fpn_1x_coco/rpn_x101_64x4d_fpn_1x_coco_20200208_200752.log.json) | -| X-101-64x4d-FPN | pytorch | 2x | - | - | 61.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_64x4d_fpn_2x_coco/rpn_x101_64x4d_fpn_2x_coco_20200208-c65f524f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rpn/rpn_x101_64x4d_fpn_2x_coco/rpn_x101_64x4d_fpn_2x_coco_20200208_200752.log.json) | - -## Citation - -```latex -@inproceedings{ren2015faster, - title={Faster r-cnn: Towards real-time object detection with region proposal networks}, - author={Ren, Shaoqing and He, Kaiming and Girshick, Ross and Sun, Jian}, - booktitle={Advances in neural information processing systems}, - year={2015} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/rpn/rpn_r101_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/rpn/rpn_r101_caffe_fpn_1x_coco.py deleted file mode 100644 index 27be94638a989f238972e85f9c14e1bcba0d09ac..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/rpn/rpn_r101_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './rpn_r50_caffe_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet101_caffe'))) diff --git a/cv/detection/co-detr/pytorch/configs/rpn/rpn_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/rpn/rpn_r101_fpn_1x_coco.py deleted file mode 100644 index 962728ff08abb4652c617a085649575b6cfdcbf8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/rpn/rpn_r101_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './rpn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/rpn/rpn_r101_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/rpn/rpn_r101_fpn_2x_coco.py deleted file mode 100644 index ac7671c1c2421c0caa7b42d012cc3a2edc068934..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/rpn/rpn_r101_fpn_2x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './rpn_r50_fpn_2x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/rpn/rpn_r50_caffe_c4_1x_coco.py b/cv/detection/co-detr/pytorch/configs/rpn/rpn_r50_caffe_c4_1x_coco.py deleted file mode 100644 index 6da0ee94906fd8febaf69786976e478ef8f35c9e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/rpn/rpn_r50_caffe_c4_1x_coco.py +++ /dev/null @@ -1,38 +0,0 @@ 
-_base_ = [ - '../_base_/models/rpn_r50_caffe_c4.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# dataset settings -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_label=False), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='proposal_fast') diff --git a/cv/detection/co-detr/pytorch/configs/rpn/rpn_r50_caffe_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/rpn/rpn_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 68c36fa8caa0d0715128b02da03d14e7f5b27862..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/rpn/rpn_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,41 +0,0 @@ -_base_ = './rpn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe'))) -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_label=False), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/rpn/rpn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/rpn/rpn_r50_fpn_1x_coco.py deleted file mode 100644 index 26f95a3402f9fd2d54c5919484e2f4958beb8a34..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/rpn/rpn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = [ - '../_base_/models/rpn_r50_fpn.py', '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - 
dict(type='LoadAnnotations', with_bbox=True, with_label=False), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes']), -] -data = dict(train=dict(pipeline=train_pipeline)) -evaluation = dict(interval=1, metric='proposal_fast') diff --git a/cv/detection/co-detr/pytorch/configs/rpn/rpn_r50_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/rpn/rpn_r50_fpn_2x_coco.py deleted file mode 100644 index 2f264bfe4234c870839ee77e3a671464aacc7813..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/rpn/rpn_r50_fpn_2x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = './rpn_r50_fpn_1x_coco.py' - -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index d0c73948ac56afa34b9d6c8d22d6158271306b8c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './rpn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/rpn/rpn_x101_32x4d_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/rpn/rpn_x101_32x4d_fpn_2x_coco.py deleted file mode 100644 index c6880b762abc8f5d3bf12f278054d76958756fb2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/rpn/rpn_x101_32x4d_fpn_2x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './rpn_r50_fpn_2x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/rpn/rpn_x101_64x4d_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/rpn/rpn_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index 96e691a912c424f09add038c75631a2e1fefeffc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/rpn/rpn_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './rpn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py deleted file mode 100644 index 4182a39667c47d774a1df9d34a1bc2fe60b45538..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/rpn/rpn_x101_64x4d_fpn_2x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './rpn_r50_fpn_2x_coco.py' -model = dict( - 
backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/sabl/README.md b/cv/detection/co-detr/pytorch/configs/sabl/README.md deleted file mode 100644 index 03992be4ce1eb5fa49dbfe6c909171b5aa5597eb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sabl/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# SABL - -> [Side-Aware Boundary Localization for More Precise Object Detection](https://arxiv.org/abs/1912.04260) - - - -## Abstract - -Current object detection frameworks mainly rely on bounding box regression to localize objects. Despite the remarkable progress in recent years, the precision of bounding box regression remains unsatisfactory, hence limiting performance in object detection. We observe that precise localization requires careful placement of each side of the bounding box. However, the mainstream approach, which focuses on predicting centers and sizes, is not the most effective way to accomplish this task, especially when there exists displacements with large variance between the anchors and the targets. In this paper, we propose an alternative approach, named as Side-Aware Boundary Localization (SABL), where each side of the bounding box is respectively localized with a dedicated network branch. To tackle the difficulty of precise localization in the presence of displacements with large variance, we further propose a two-step localization scheme, which first predicts a range of movement through bucket prediction and then pinpoints the precise position within the predicted bucket. We test the proposed method on both two-stage and single-stage detection frameworks. Replacing the standard bounding box regression branch with the proposed design leads to significant improvements on Faster R-CNN, RetinaNet, and Cascade R-CNN, by 3.0%, 1.7%, and 0.9%, respectively. - -
- -## Results and Models - -The results on COCO 2017 val is shown in the below table. (results on test-dev are usually slightly higher than val). -Single-scale testing (1333x800) is adopted in all results. - -| Method | Backbone | Lr schd | ms-train | box AP | Config | Download | -| :----------------: | :-------: | :-----: | :------: | :----: | :----------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| SABL Faster R-CNN | R-50-FPN | 1x | N | 39.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/20200830_130324.log.json) | -| SABL Faster R-CNN | R-101-FPN | 1x | N | 41.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r101_fpn_1x_coco/sabl_faster_rcnn_r101_fpn_1x_coco-f804c6c1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r101_fpn_1x_coco/20200830_183949.log.json) | -| SABL Cascade R-CNN | R-50-FPN | 1x | N | 41.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco/sabl_cascade_rcnn_r50_fpn_1x_coco-e1748e5e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco/20200831_033726.log.json) | -| SABL Cascade R-CNN | R-101-FPN | 1x | N | 43.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco/sabl_cascade_rcnn_r101_fpn_1x_coco-2b83e87c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco/20200831_141745.log.json) | - -| Method | Backbone | GN | Lr schd | ms-train | box AP | Config | Download | -| :------------: | :-------: | :-: | :-----: | :---------: | :----: | :---------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| SABL RetinaNet | R-50-FPN | N | 1x | N | 37.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/20200830_053451.log.json) | -| SABL RetinaNet | R-50-FPN | Y | 1x | N | 38.8 | 
[config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r50_fpn_gn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_gn_1x_coco/sabl_retinanet_r50_fpn_gn_1x_coco-e16dfcf1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_gn_1x_coco/20200831_141955.log.json) | -| SABL RetinaNet | R-101-FPN | N | 1x | N | 39.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_1x_coco/sabl_retinanet_r101_fpn_1x_coco-42026904.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_1x_coco/20200831_034256.log.json) | -| SABL RetinaNet | R-101-FPN | Y | 1x | N | 40.5 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r101_fpn_gn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_1x_coco/sabl_retinanet_r101_fpn_gn_1x_coco-40a893e8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_1x_coco/20200830_201422.log.json) | -| SABL RetinaNet | R-101-FPN | Y | 2x | Y (640~800) | 42.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco-1e63382c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco/20200830_144807.log.json) | -| SABL RetinaNet | R-101-FPN | Y | 2x | Y (480~960) | 43.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco-5342f857.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco/20200830_164537.log.json) | - -## Citation - -We provide config files to reproduce the object detection results in the ECCV 2020 Spotlight paper for [Side-Aware Boundary Localization for More Precise Object Detection](https://arxiv.org/abs/1912.04260). 
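The `BucketingBBoxCoder(num_buckets=14, ...)` settings in the SABL configs later in this diff implement the two-step scheme described above: first classify which bucket of a side's search range the boundary falls into, then regress a fine offset inside that bucket. A purely conceptual sketch (not the mmdet `SABLHead` code):

```python
import numpy as np

def localize_side(lo, hi, bucket_scores, bucket_offsets):
    """Two-step boundary localization for one box side: pick the best-scoring
    bucket of the search range [lo, hi], then refine with that bucket's
    predicted offset (expressed in bucket widths)."""
    num_buckets = len(bucket_scores)
    bucket_w = (hi - lo) / num_buckets
    centers = lo + (np.arange(num_buckets) + 0.5) * bucket_w
    best = int(np.argmax(bucket_scores))                     # step 1: coarse bucket
    return centers[best] + bucket_offsets[best] * bucket_w   # step 2: fine offset

# toy usage with the configs' 14 buckets
x_left = localize_side(0.0, 140.0, np.random.rand(14), np.zeros(14))
```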
- -```latex -@inproceedings{Wang_2020_ECCV, - title = {Side-Aware Boundary Localization for More Precise Object Detection}, - author = {Jiaqi Wang and Wenwei Zhang and Yuhang Cao and Kai Chen and Jiangmiao Pang and Tao Gong and Jianping Shi and Chen Change Loy and Dahua Lin}, - booktitle = {ECCV}, - year = {2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/sabl/metafile.yml b/cv/detection/co-detr/pytorch/configs/sabl/metafile.yml deleted file mode 100644 index 23c51cffb574519f4983edc0b510b3dd7f5dd6fa..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sabl/metafile.yml +++ /dev/null @@ -1,140 +0,0 @@ -Collections: - - Name: SABL - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - ResNet - - SABL - Paper: - URL: https://arxiv.org/abs/1912.04260 - Title: 'Side-Aware Boundary Localization for More Precise Object Detection' - README: configs/sabl/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.4.0/mmdet/models/roi_heads/bbox_heads/sabl_head.py#L14 - Version: v2.4.0 - -Models: - - Name: sabl_faster_rcnn_r50_fpn_1x_coco - In Collection: SABL - Config: configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth - - - Name: sabl_faster_rcnn_r101_fpn_1x_coco - In Collection: SABL - Config: configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r101_fpn_1x_coco/sabl_faster_rcnn_r101_fpn_1x_coco-f804c6c1.pth - - - Name: sabl_cascade_rcnn_r50_fpn_1x_coco - In Collection: SABL - Config: configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco/sabl_cascade_rcnn_r50_fpn_1x_coco-e1748e5e.pth - - - Name: sabl_cascade_rcnn_r101_fpn_1x_coco - In Collection: SABL - Config: configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco/sabl_cascade_rcnn_r101_fpn_1x_coco-2b83e87c.pth - - - Name: sabl_retinanet_r50_fpn_1x_coco - In Collection: SABL - Config: configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth - - - Name: sabl_retinanet_r50_fpn_gn_1x_coco - In Collection: SABL - Config: configs/sabl/sabl_retinanet_r50_fpn_gn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 38.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_gn_1x_coco/sabl_retinanet_r50_fpn_gn_1x_coco-e16dfcf1.pth - - - Name: sabl_retinanet_r101_fpn_1x_coco - In Collection: SABL - Config: configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py - 
Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 39.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_1x_coco/sabl_retinanet_r101_fpn_1x_coco-42026904.pth - - - Name: sabl_retinanet_r101_fpn_gn_1x_coco - In Collection: SABL - Config: configs/sabl/sabl_retinanet_r101_fpn_gn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_1x_coco/sabl_retinanet_r101_fpn_gn_1x_coco-40a893e8.pth - - - Name: sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco - In Collection: SABL - Config: configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco-1e63382c.pth - - - Name: sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco - In Collection: SABL - Config: configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco-5342f857.pth diff --git a/cv/detection/co-detr/pytorch/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py deleted file mode 100644 index 64fe2304c0f34c366ff443d4531ae07c48d915d8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py +++ /dev/null @@ -1,90 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# model settings -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101')), - roi_head=dict(bbox_head=[ - dict( - type='SABLHead', - num_classes=80, - cls_in_channels=256, - reg_in_channels=256, - roi_feat_size=7, - reg_feat_up_ratio=2, - reg_pre_kernel=3, - reg_post_kernel=3, - reg_pre_num=2, - reg_post_num=1, - cls_out_channels=1024, - reg_offset_out_channels=256, - reg_cls_out_channels=256, - num_cls_fcs=1, - num_reg_fcs=0, - reg_class_agnostic=True, - norm_cfg=None, - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, - loss_weight=1.0)), - dict( - type='SABLHead', - num_classes=80, - cls_in_channels=256, - reg_in_channels=256, - roi_feat_size=7, - reg_feat_up_ratio=2, - reg_pre_kernel=3, - reg_post_kernel=3, - reg_pre_num=2, - reg_post_num=1, - cls_out_channels=1024, - reg_offset_out_channels=256, - reg_cls_out_channels=256, - num_cls_fcs=1, - num_reg_fcs=0, - reg_class_agnostic=True, - norm_cfg=None, - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.5), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - 
loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, - loss_weight=1.0)), - dict( - type='SABLHead', - num_classes=80, - cls_in_channels=256, - reg_in_channels=256, - roi_feat_size=7, - reg_feat_up_ratio=2, - reg_pre_kernel=3, - reg_post_kernel=3, - reg_pre_num=2, - reg_post_num=1, - cls_out_channels=1024, - reg_offset_out_channels=256, - reg_cls_out_channels=256, - num_cls_fcs=1, - num_reg_fcs=0, - reg_class_agnostic=True, - norm_cfg=None, - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.3), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)) - ])) diff --git a/cv/detection/co-detr/pytorch/configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index 4b28a59280e6701d31afeeaae7ae12cdbd4fb95e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,86 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# model settings -model = dict( - roi_head=dict(bbox_head=[ - dict( - type='SABLHead', - num_classes=80, - cls_in_channels=256, - reg_in_channels=256, - roi_feat_size=7, - reg_feat_up_ratio=2, - reg_pre_kernel=3, - reg_post_kernel=3, - reg_pre_num=2, - reg_post_num=1, - cls_out_channels=1024, - reg_offset_out_channels=256, - reg_cls_out_channels=256, - num_cls_fcs=1, - num_reg_fcs=0, - reg_class_agnostic=True, - norm_cfg=None, - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, - loss_weight=1.0)), - dict( - type='SABLHead', - num_classes=80, - cls_in_channels=256, - reg_in_channels=256, - roi_feat_size=7, - reg_feat_up_ratio=2, - reg_pre_kernel=3, - reg_post_kernel=3, - reg_pre_num=2, - reg_post_num=1, - cls_out_channels=1024, - reg_offset_out_channels=256, - reg_cls_out_channels=256, - num_cls_fcs=1, - num_reg_fcs=0, - reg_class_agnostic=True, - norm_cfg=None, - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.5), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, - loss_weight=1.0)), - dict( - type='SABLHead', - num_classes=80, - cls_in_channels=256, - reg_in_channels=256, - roi_feat_size=7, - reg_feat_up_ratio=2, - reg_pre_kernel=3, - reg_post_kernel=3, - reg_pre_num=2, - reg_post_num=1, - cls_out_channels=1024, - reg_offset_out_channels=256, - reg_cls_out_channels=256, - num_cls_fcs=1, - num_reg_fcs=0, - reg_class_agnostic=True, - norm_cfg=None, - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.3), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0)) - ])) diff --git 
a/cv/detection/co-detr/pytorch/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py deleted file mode 100644 index e48d4259b78aa4494a9de1deabdf40c0d37d9816..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py +++ /dev/null @@ -1,38 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101')), - roi_head=dict( - bbox_head=dict( - _delete_=True, - type='SABLHead', - num_classes=80, - cls_in_channels=256, - reg_in_channels=256, - roi_feat_size=7, - reg_feat_up_ratio=2, - reg_pre_kernel=3, - reg_post_kernel=3, - reg_pre_num=2, - reg_post_num=1, - cls_out_channels=1024, - reg_offset_out_channels=256, - reg_cls_out_channels=256, - num_cls_fcs=1, - num_reg_fcs=0, - reg_class_agnostic=True, - norm_cfg=None, - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, - loss_weight=1.0)))) diff --git a/cv/detection/co-detr/pytorch/configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index 732c7ba3f607e2ac68f16acceddd16b1269aa2cf..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,34 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - roi_head=dict( - bbox_head=dict( - _delete_=True, - type='SABLHead', - num_classes=80, - cls_in_channels=256, - reg_in_channels=256, - roi_feat_size=7, - reg_feat_up_ratio=2, - reg_pre_kernel=3, - reg_post_kernel=3, - reg_pre_num=2, - reg_post_num=1, - cls_out_channels=1024, - reg_offset_out_channels=256, - reg_cls_out_channels=256, - num_cls_fcs=1, - num_reg_fcs=0, - reg_class_agnostic=True, - norm_cfg=None, - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, - loss_weight=1.0)))) diff --git a/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py deleted file mode 100644 index b08e916c9f9d158dd89a3a13418cc51bd25ef953..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py +++ /dev/null @@ -1,54 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# model settings -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101')), - bbox_head=dict( - _delete_=True, - type='SABLRetinaHead', - num_classes=80, - 
in_channels=256, - stacked_convs=4, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[4], - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), - loss_bbox_reg=dict( - type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0.0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r101_fpn_gn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r101_fpn_gn_1x_coco.py deleted file mode 100644 index fc30d63dc58b44deda01790e6f432db0fe957a1e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r101_fpn_gn_1x_coco.py +++ /dev/null @@ -1,56 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# model settings -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101')), - bbox_head=dict( - _delete_=True, - type='SABLRetinaHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[4], - strides=[8, 16, 32, 64, 128]), - norm_cfg=norm_cfg, - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), - loss_bbox_reg=dict( - type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0.0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py b/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py deleted file mode 100644 index e8fe16646278fba3aba64742bb9912984720489b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py +++ /dev/null @@ -1,73 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' -] -# model settings -norm_cfg = 
dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101')), - bbox_head=dict( - _delete_=True, - type='SABLRetinaHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[4], - strides=[8, 16, 32, 64, 128]), - norm_cfg=norm_cfg, - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), - loss_bbox_reg=dict( - type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0.0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False)) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 480), (1333, 960)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -data = dict(train=dict(pipeline=train_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py b/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py deleted file mode 100644 index 30c43399f7bf2ec1f67aee3265565a8067fe2b6a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py +++ /dev/null @@ -1,73 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' -] -# model settings -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101')), - bbox_head=dict( - _delete_=True, - type='SABLRetinaHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[4], - strides=[8, 16, 32, 64, 128]), - norm_cfg=norm_cfg, - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), - loss_bbox_reg=dict( - type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), - # training and testing settings - train_cfg=dict( - assigner=dict( - 
type='ApproxMaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0.0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False)) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -data = dict(train=dict(pipeline=train_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py deleted file mode 100644 index 6fe6bd660230eedf70f87072e5abec66036d865f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py +++ /dev/null @@ -1,50 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# model settings -model = dict( - bbox_head=dict( - _delete_=True, - type='SABLRetinaHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[4], - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), - loss_bbox_reg=dict( - type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0.0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r50_fpn_gn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r50_fpn_gn_1x_coco.py deleted file mode 100644 index 6acf080afe1b04e50467b16b60700feb5c12e886..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sabl/sabl_retinanet_r50_fpn_gn_1x_coco.py +++ /dev/null @@ -1,52 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# model settings -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - bbox_head=dict( - _delete_=True, - type='SABLRetinaHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - square_anchor_generator=dict( - type='AnchorGenerator', 
- ratios=[1.0], - scales=[4], - strides=[8, 16, 32, 64, 128]), - norm_cfg=norm_cfg, - bbox_coder=dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), - loss_bbox_reg=dict( - type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0.0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/scnet/README.md b/cv/detection/co-detr/pytorch/configs/scnet/README.md deleted file mode 100644 index 773874a2c933abb1961205855aefd82d64d2f908..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/scnet/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# SCNet - -> [SCNet: Training Inference Sample Consistency for Instance Segmentation](https://arxiv.org/abs/2012.10150) - - - -## Abstract - - - -Cascaded architectures have brought significant performance improvement in object detection and instance segmentation. However, there are lingering issues regarding the disparity in the Intersection-over-Union (IoU) distribution of the samples between training and inference. This disparity can potentially exacerbate detection accuracy. This paper proposes an architecture referred to as Sample Consistency Network (SCNet) to ensure that the IoU distribution of the samples at training time is close to that at inference time. Furthermore, SCNet incorporates feature relay and utilizes global contextual information to further reinforce the reciprocal relationships among classifying, detecting, and segmenting sub-tasks. Extensive experiments on the standard COCO dataset reveal the effectiveness of the proposed method over multiple evaluation metrics, including box AP, mask AP, and inference speed. In particular, while running 38% faster, the proposed SCNet improves the AP of the box and mask predictions by respectively 1.3 and 2.3 points compared to the strong Cascade Mask R-CNN baseline. - -
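The "feature relay" and "global contextual information" mentioned in the abstract correspond to two extra heads in the ROI head. As a reading aid (this is not one of the deleted files), the sketch below condenses `scnet_r50_fpn_1x_coco.py`, which appears in full further down in this diff, to the SCNet-specific pieces; the bbox, mask, and semantic heads are omitted here but are present in the full file.

```python
# Condensed sketch of the SCNet-specific config pieces (abridged from
# scnet_r50_fpn_1x_coco.py below); not a complete, trainable config on its own.
_base_ = '../htc/htc_r50_fpn_1x_coco.py'

model = dict(
    type='SCNet',
    roi_head=dict(
        type='SCNetRoIHead',
        num_stages=3,
        stage_loss_weights=[1, 0.5, 0.25],
        # "global contextual information": an image-level context branch
        glbctx_head=dict(
            type='GlobalContextHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=80,
            loss_weight=3.0,
            conv_to_res=True),
        # "feature relay": relays box-branch FC features to the mask branch
        feat_relay_head=dict(
            type='FeatureRelayHead',
            in_channels=1024,
            out_conv_channels=256,
            roi_feat_size=7,
            scale_factor=2)))
```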
- -## Dataset - -SCNet requires COCO and [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) dataset for training. You need to download and extract it in the COCO dataset path. -The directory should be like this. - -```none -mmdetection -├── mmdet -├── tools -├── configs -├── data -│ ├── coco -│ │ ├── annotations -│ │ ├── train2017 -│ │ ├── val2017 -│ │ ├── test2017 -| | ├── stuffthingmaps -``` - -## Results and Models - -The results on COCO 2017val are shown in the below table. (results on test-dev are usually slightly higher than val) - -| Backbone | Style | Lr schd | Mem (GB) | Inf speed (fps) | box AP | mask AP | TTA box AP | TTA mask AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :-------------: | :----: | :-----: | :--------: | :---------: | :------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-FPN | pytorch | 1x | 7.0 | 6.2 | 43.5 | 39.2 | 44.8 | 40.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco-c3f09857.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco_20210117_192725.log.json) | -| R-50-FPN | pytorch | 20e | 7.0 | 6.2 | 44.5 | 40.0 | 45.8 | 41.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r50_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_20e_coco/scnet_r50_fpn_20e_coco-a569f645.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_20e_coco/scnet_r50_fpn_20e_coco_20210116_060148.log.json) | -| R-101-FPN | pytorch | 20e | 8.9 | 5.8 | 45.8 | 40.9 | 47.3 | 42.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r101_fpn_20e_coco/scnet_r101_fpn_20e_coco-294e312c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r101_fpn_20e_coco/scnet_r101_fpn_20e_coco_20210118_175824.log.json) | -| X-101-64x4d-FPN | pytorch | 20e | 13.2 | 4.9 | 47.5 | 42.3 | 48.9 | 44.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_x101_64x4d_fpn_20e_coco/scnet_x101_64x4d_fpn_20e_coco-fb09dec9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_x101_64x4d_fpn_20e_coco/scnet_x101_64x4d_fpn_20e_coco_20210120_045959.log.json) | - -### Notes - -- Training hyper-parameters are identical to those of [HTC](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc). -- TTA means Test Time Augmentation, which applies horizontal flip and multi-scale testing. Refer to [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scnet/scnet_r50_fpn_1x_coco.py). - -## Citation - -We provide the code for reproducing experiment results of [SCNet](https://arxiv.org/abs/2012.10150). 
- -```latex -@inproceedings{vu2019cascade, - title={SCNet: Training Inference Sample Consistency for Instance Segmentation}, - author={Vu, Thang and Haeyong, Kang and Yoo, Chang D}, - booktitle={AAAI}, - year={2021} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/scnet/metafile.yml b/cv/detection/co-detr/pytorch/configs/scnet/metafile.yml deleted file mode 100644 index 15eaebfa80f5594357d825316e969e8afdfa9c1e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/scnet/metafile.yml +++ /dev/null @@ -1,116 +0,0 @@ -Collections: - - Name: SCNet - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - ResNet - - SCNet - Paper: - URL: https://arxiv.org/abs/2012.10150 - Title: 'SCNet: Training Inference Sample Consistency for Instance Segmentation' - README: configs/scnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.9.0/mmdet/models/detectors/scnet.py#L6 - Version: v2.9.0 - -Models: - - Name: scnet_r50_fpn_1x_coco - In Collection: SCNet - Config: configs/scnet/scnet_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.0 - inference time (ms/im): - - value: 161.29 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_1x_coco/scnet_r50_fpn_1x_coco-c3f09857.pth - - - Name: scnet_r50_fpn_20e_coco - In Collection: SCNet - Config: configs/scnet/scnet_r50_fpn_20e_coco.py - Metadata: - Training Memory (GB): 7.0 - inference time (ms/im): - - value: 161.29 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 40.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r50_fpn_20e_coco/scnet_r50_fpn_20e_coco-a569f645.pth - - - Name: scnet_r101_fpn_20e_coco - In Collection: SCNet - Config: configs/scnet/scnet_r101_fpn_20e_coco.py - Metadata: - Training Memory (GB): 8.9 - inference time (ms/im): - - value: 172.41 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 40.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_r101_fpn_20e_coco/scnet_r101_fpn_20e_coco-294e312c.pth - - - Name: scnet_x101_64x4d_fpn_20e_coco - In Collection: SCNet - Config: configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py - Metadata: - Training Memory (GB): 13.2 - inference time (ms/im): - - value: 204.08 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (800, 1333) - Epochs: 20 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 47.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 42.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/scnet/scnet_x101_64x4d_fpn_20e_coco/scnet_x101_64x4d_fpn_20e_coco-fb09dec9.pth diff --git a/cv/detection/co-detr/pytorch/configs/scnet/scnet_r101_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/scnet/scnet_r101_fpn_20e_coco.py deleted 
file mode 100644 index ebba52978b23c07a68e3563033c860a95dd515b6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/scnet/scnet_r101_fpn_20e_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './scnet_r50_fpn_20e_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/scnet/scnet_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/scnet/scnet_r50_fpn_1x_coco.py deleted file mode 100644 index fe03b0d4d7c4556a486a13a1d543a668b5d3fcab..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/scnet/scnet_r50_fpn_1x_coco.py +++ /dev/null @@ -1,136 +0,0 @@ -_base_ = '../htc/htc_r50_fpn_1x_coco.py' -# model settings -model = dict( - type='SCNet', - roi_head=dict( - _delete_=True, - type='SCNetRoIHead', - num_stages=3, - stage_loss_weights=[1, 0.5, 0.25], - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=[ - dict( - type='SCNetBBoxHead', - num_shared_fcs=2, - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='SCNetBBoxHead', - num_shared_fcs=2, - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='SCNetBBoxHead', - num_shared_fcs=2, - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ], - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_head=dict( - type='SCNetMaskHead', - num_convs=12, - in_channels=256, - conv_out_channels=256, - num_classes=80, - conv_to_res=True, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), - semantic_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[8]), - semantic_head=dict( - type='SCNetSemanticHead', - num_ins=5, - fusion_level=1, - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=183, - loss_seg=dict( - type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2), - conv_to_res=True), - glbctx_head=dict( - type='GlobalContextHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_weight=3.0, - conv_to_res=True), - feat_relay_head=dict( - type='FeatureRelayHead', - in_channels=1024, - out_conv_channels=256, - 
roi_feat_size=7, - scale_factor=2))) - -# uncomment below code to enable test time augmentations -# img_norm_cfg = dict( -# mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -# test_pipeline = [ -# dict(type='LoadImageFromFile'), -# dict( -# type='MultiScaleFlipAug', -# img_scale=[(600, 900), (800, 1200), (1000, 1500), (1200, 1800), -# (1400, 2100)], -# flip=True, -# transforms=[ -# dict(type='Resize', keep_ratio=True), -# dict(type='RandomFlip', flip_ratio=0.5), -# dict(type='Normalize', **img_norm_cfg), -# dict(type='Pad', size_divisor=32), -# dict(type='ImageToTensor', keys=['img']), -# dict(type='Collect', keys=['img']), -# ]) -# ] -# data = dict( -# val=dict(pipeline=test_pipeline), -# test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/scnet/scnet_r50_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/scnet/scnet_r50_fpn_20e_coco.py deleted file mode 100644 index 3b121a6a2836ac7626f7b383ada9508f8b9d972d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/scnet/scnet_r50_fpn_20e_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './scnet_r50_fpn_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 19]) -runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/cv/detection/co-detr/pytorch/configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py b/cv/detection/co-detr/pytorch/configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py deleted file mode 100644 index 1e54b030fa68f76f22edf66e3594d66a13c2c672..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py +++ /dev/null @@ -1,15 +0,0 @@ -_base_ = './scnet_r50_fpn_20e_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/scnet/scnet_x101_64x4d_fpn_8x1_20e_coco.py b/cv/detection/co-detr/pytorch/configs/scnet/scnet_x101_64x4d_fpn_8x1_20e_coco.py deleted file mode 100644 index be8ddc51fefbbad4a57fa4beb80b54748c89bdf0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/scnet/scnet_x101_64x4d_fpn_8x1_20e_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = './scnet_x101_64x4d_fpn_20e_coco.py' -data = dict(samples_per_gpu=1, workers_per_gpu=1) -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (1 samples per GPU) -auto_scale_lr = dict(base_batch_size=8) diff --git a/cv/detection/co-detr/pytorch/configs/scratch/README.md b/cv/detection/co-detr/pytorch/configs/scratch/README.md deleted file mode 100644 index 189f181dea459d5b1b9c32aafde405a74a39ac3d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/scratch/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# Scratch - -> [Rethinking ImageNet Pre-training](https://arxiv.org/abs/1811.08883) - - - -## Abstract - -We report competitive results on object detection and instance segmentation on the COCO dataset using standard models trained from random initialization. 
The results are no worse than their ImageNet pre-training counterparts even when using the hyper-parameters of the baseline system (Mask R-CNN) that were optimized for fine-tuning pre-trained models, with the sole exception of increasing the number of training iterations so the randomly initialized models may converge. Training from random initialization is surprisingly robust; our results hold even when: (i) using only 10% of the training data, (ii) for deeper and wider models, and (iii) for multiple tasks and metrics. Experiments show that ImageNet pre-training speeds up convergence early in training, but does not necessarily provide regularization or improve final target task accuracy. To push the envelope we demonstrate 50.9 AP on COCO object detection without using any external data---a result on par with the top COCO 2017 competition results that used ImageNet pre-training. These observations challenge the conventional wisdom of ImageNet pre-training for dependent tasks and we expect these discoveries will encourage people to rethink the current de facto paradigm of \`pre-training and fine-tuning' in computer vision. - -
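In config terms, "training from random initialization" boils down to a handful of settings. A condensed sketch follows (this is not one of the deleted files; the full `faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py` and `mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py` appear later in this diff).

```python
# Key "from scratch" settings, abridged from the two deleted configs below:
# no ImageNet checkpoint, no frozen stages, GroupNorm throughout, and a roughly
# 6x schedule (73 epochs in the config vs. 12 for a 1x schedule) so the
# randomly initialized model has time to converge.
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)

model = dict(
    backbone=dict(
        frozen_stages=-1,          # train every stage; nothing is kept fixed
        zero_init_residual=False,
        norm_cfg=norm_cfg,
        init_cfg=None),            # no 'Pretrained' checkpoint -> random init
    neck=dict(norm_cfg=norm_cfg))

optimizer = dict(paramwise_cfg=dict(norm_decay_mult=0))  # no weight decay on norm layers
lr_config = dict(warmup_ratio=0.1, step=[65, 71])
runner = dict(type='EpochBasedRunner', max_epochs=73)
```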
- -## Results and Models - -| Model | Backbone | Style | Lr schd | box AP | mask AP | Config | Download | -| :----------: | :------: | :-----: | :-----: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Faster R-CNN | R-50-FPN | pytorch | 6x | 40.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_bbox_mAP-0.407_20200201_193013-90813d01.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_20200201_193013.log.json) | -| Mask R-CNN | R-50-FPN | pytorch | 6x | 41.2 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_20200201_193051.log.json) | - -Note: - -- The above models are trained with 16 GPUs. 
- -## Citation - -```latex -@article{he2018rethinking, - title={Rethinking imagenet pre-training}, - author={He, Kaiming and Girshick, Ross and Doll{\'a}r, Piotr}, - journal={arXiv preprint arXiv:1811.08883}, - year={2018} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py b/cv/detection/co-detr/pytorch/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py deleted file mode 100644 index 55aa3a6e73b9e56fb1d285272b3011fad8e9e11f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py +++ /dev/null @@ -1,24 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - backbone=dict( - frozen_stages=-1, - zero_init_residual=False, - norm_cfg=norm_cfg, - init_cfg=None), - neck=dict(norm_cfg=norm_cfg), - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - norm_cfg=norm_cfg))) -# optimizer -optimizer = dict(paramwise_cfg=dict(norm_decay_mult=0)) -optimizer_config = dict(_delete_=True, grad_clip=None) -# learning policy -lr_config = dict(warmup_ratio=0.1, step=[65, 71]) -runner = dict(type='EpochBasedRunner', max_epochs=73) diff --git a/cv/detection/co-detr/pytorch/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py b/cv/detection/co-detr/pytorch/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py deleted file mode 100644 index cc52cb8f7618f57f280f4e5d640f99839bf66278..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py +++ /dev/null @@ -1,25 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -norm_cfg = dict(type='GN', num_groups=32, requires_grad=True) -model = dict( - backbone=dict( - frozen_stages=-1, - zero_init_residual=False, - norm_cfg=norm_cfg, - init_cfg=None), - neck=dict(norm_cfg=norm_cfg), - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - norm_cfg=norm_cfg), - mask_head=dict(norm_cfg=norm_cfg))) -# optimizer -optimizer = dict(paramwise_cfg=dict(norm_decay_mult=0)) -optimizer_config = dict(_delete_=True, grad_clip=None) -# learning policy -lr_config = dict(warmup_ratio=0.1, step=[65, 71]) -runner = dict(type='EpochBasedRunner', max_epochs=73) diff --git a/cv/detection/co-detr/pytorch/configs/scratch/metafile.yml b/cv/detection/co-detr/pytorch/configs/scratch/metafile.yml deleted file mode 100644 index 65025fac3927a138597e5947c1af0c213c3503fb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/scratch/metafile.yml +++ /dev/null @@ -1,48 +0,0 @@ -Collections: - - Name: Rethinking ImageNet Pre-training - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - RPN - - ResNet - Paper: - URL: https://arxiv.org/abs/1811.08883 - Title: 'Rethinking ImageNet Pre-training' - README: configs/scratch/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py - Version: v2.0.0 - -Models: - - Name: 
faster_rcnn_r50_fpn_gn-all_scratch_6x_coco - In Collection: Rethinking ImageNet Pre-training - Config: configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py - Metadata: - Epochs: 72 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_bbox_mAP-0.407_20200201_193013-90813d01.pth - - - Name: mask_rcnn_r50_fpn_gn-all_scratch_6x_coco - In Collection: Rethinking ImageNet Pre-training - Config: configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py - Metadata: - Epochs: 72 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth diff --git a/cv/detection/co-detr/pytorch/configs/seesaw_loss/README.md b/cv/detection/co-detr/pytorch/configs/seesaw_loss/README.md deleted file mode 100644 index 696b0080de2d1ed7827e646a7a5354ce45132010..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/seesaw_loss/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# Seesaw Loss - -> [Seesaw Loss for Long-Tailed Instance Segmentation](https://arxiv.org/abs/2008.10032) - - - -## Abstract - -Instance segmentation has witnessed a remarkable progress on class-balanced benchmarks. However, they fail to perform as accurately in real-world scenarios, where the category distribution of objects naturally comes with a long tail. Instances of head classes dominate a long-tailed dataset and they serve as negative samples of tail categories. The overwhelming gradients of negative samples on tail classes lead to a biased learning process for classifiers. Consequently, objects of tail categories are more likely to be misclassified as backgrounds or head categories. To tackle this problem, we propose Seesaw Loss to dynamically re-balance gradients of positive and negative samples for each category, with two complementary factors, i.e., mitigation factor and compensation factor. The mitigation factor reduces punishments to tail categories w.r.t. the ratio of cumulative training instances between different categories. Meanwhile, the compensation factor increases the penalty of misclassified instances to avoid false positives of tail categories. We conduct extensive experiments on Seesaw Loss with mainstream frameworks and different data sampling strategies. With a simple end-to-end training pipeline, Seesaw Loss obtains significant gains over Cross-Entropy Loss, and achieves state-of-the-art performance on LVIS dataset without bells and whistles. - -
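For readers of the configs below (which configure `SeesawLoss` with `p=0.8, q=2.0`), here is a brief sketch of the re-weighting described above, with notation following the paper: for a positive sample of class i, the contribution of every other class j to the softmax denominator is rescaled by a factor S_ij, where N_i is the cumulative number of training instances of class i and sigma_j is the predicted probability of class j. This block is a reading aid, not part of the deleted files.

```latex
% Sketch of the Seesaw Loss re-weighting (notation from the paper).
L_{\mathrm{seesaw}}(\mathbf{z}) = -\sum_{i=1}^{C} y_i \log \widehat{\sigma}_i ,
\qquad
\widehat{\sigma}_i = \frac{e^{z_i}}{\sum_{j \neq i} \mathcal{S}_{ij}\, e^{z_j} + e^{z_i}} ,
\qquad
\mathcal{S}_{ij} = \mathcal{M}_{ij}\,\mathcal{C}_{ij}

% Mitigation factor: shrink the gradient on classes rarer than the ground-truth class.
\mathcal{M}_{ij} =
\begin{cases}
  1, & N_i \le N_j \\
  (N_j / N_i)^{p}, & N_i > N_j
\end{cases}
\qquad
% Compensation factor: re-raise the penalty when class j is scored above the true class.
\mathcal{C}_{ij} =
\begin{cases}
  1, & \sigma_j \le \sigma_i \\
  (\sigma_j / \sigma_i)^{q}, & \sigma_j > \sigma_i
\end{cases}
```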
- -- Please setup [LVIS dataset](../lvis/README.md) for MMDetection. - -- RFS indicates to use oversample strategy [here](../../docs/tutorials/customize_dataset.md#class-balanced-dataset) with oversample threshold `1e-3`. - -## Results and models of Seasaw Loss on LVIS v1 dataset - -| Method | Backbone | Style | Lr schd | Data Sampler | Norm Mask | box AP | mask AP | Config | Download | -| :----------------: | :-------: | :-----: | :-----: | :----------: | :-------: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Mask R-CNN | R-50-FPN | pytorch | 2x | random | N | 25.6 | 25.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-a698dd3d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.log.json) | -| Mask R-CNN | R-50-FPN | pytorch | 2x | random | Y | 25.6 | 25.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a1c11314.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | -| Mask R-CNN | R-101-FPN | pytorch | 2x | random | N | 27.4 | 26.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-8e6e6dd5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.log.json) | -| Mask R-CNN | R-101-FPN | pytorch | 2x | random | Y | 27.2 | 27.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a0b59c42.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | -| Mask R-CNN | R-50-FPN | pytorch | 2x | RFS | N | 27.6 | 26.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-392a804b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.log.json) | -| Mask R-CNN | R-50-FPN | pytorch | 2x | RFS 
| Y | 27.6 | 26.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-cd0f6a12.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | -| Mask R-CNN | R-101-FPN | pytorch | 2x | RFS | N | 28.9 | 27.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-e68eb464.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.log.json) | -| Mask R-CNN | R-101-FPN | pytorch | 2x | RFS | Y | 28.9 | 28.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-1d817139.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | -| Cascade Mask R-CNN | R-101-FPN | pytorch | 2x | random | N | 33.1 | 29.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-71e2215e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.log.json) | -| Cascade Mask R-CNN | R-101-FPN | pytorch | 2x | random | Y | 33.0 | 30.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-8b5a6745.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | -| Cascade Mask R-CNN | R-101-FPN | pytorch | 2x | RFS | N | 30.0 | 29.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-5d8ca2a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.log.json) | -| Cascade Mask R-CNN | R-101-FPN | pytorch | 2x | RFS | Y | 32.8 | 30.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-c8551505.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.log.json) | - -## Citation - -We provide config files to reproduce the instance segmentation performance in the CVPR 2021 paper for [Seesaw Loss for Long-Tailed Instance Segmentation](https://arxiv.org/abs/2008.10032). - -```latex -@inproceedings{wang2021seesaw, - title={Seesaw Loss for Long-Tailed Instance Segmentation}, - author={Jiaqi Wang and Wenwei Zhang and Yuhang Zang and Yuhang Cao and Jiangmiao Pang and Tao Gong and Kai Chen and Ziwei Liu and Chen Change Loy and Dahua Lin}, - booktitle={Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition}, - year={2021} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py deleted file mode 100644 index beeb0d1e5cd221c822641a1f64a4f27ad0cf25e5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py +++ /dev/null @@ -1,132 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101')), - roi_head=dict( - bbox_head=[ - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=1203, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), - loss_cls=dict( - type='SeesawLoss', - p=0.8, - q=2.0, - num_classes=1203, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=1203, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), - loss_cls=dict( - type='SeesawLoss', - p=0.8, - q=2.0, - num_classes=1203, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=1203, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), - loss_cls=dict( - type='SeesawLoss', - p=0.8, - q=2.0, - num_classes=1203, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ], - mask_head=dict(num_classes=1203)), - test_cfg=dict( - rcnn=dict( - score_thr=0.0001, - # LVIS allows up to 300 - max_per_img=300))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - 
dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -dataset_type = 'LVISV1Dataset' -data_root = 'data/lvis_v1/' -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_train.json', - img_prefix=data_root, - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_val.json', - img_prefix=data_root, - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_val.json', - img_prefix=data_root, - pipeline=test_pipeline)) -evaluation = dict(interval=24, metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py deleted file mode 100644 index 0f299484940db4ee1a1edd55006e2e145d99af2b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = './cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py' # noqa: E501 -model = dict( - roi_head=dict( - mask_head=dict( - predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) diff --git a/cv/detection/co-detr/pytorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py deleted file mode 100644 index bb88750fe5f4d8cefb81222d65de6ce8e4c7dcc9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py +++ /dev/null @@ -1,98 +0,0 @@ -_base_ = [ - '../_base_/models/cascade_mask_rcnn_r50_fpn.py', - '../_base_/datasets/lvis_v1_instance.py', - '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101')), - roi_head=dict( - bbox_head=[ - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=1203, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), - loss_cls=dict( - type='SeesawLoss', - p=0.8, - q=2.0, - num_classes=1203, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), 
- dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=1203, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), - loss_cls=dict( - type='SeesawLoss', - p=0.8, - q=2.0, - num_classes=1203, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=1203, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), - loss_cls=dict( - type='SeesawLoss', - p=0.8, - q=2.0, - num_classes=1203, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ], - mask_head=dict(num_classes=1203)), - test_cfg=dict( - rcnn=dict( - score_thr=0.0001, - # LVIS allows up to 300 - max_per_img=300))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(dataset=dict(pipeline=train_pipeline))) -evaluation = dict(interval=24, metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py deleted file mode 100644 index 262e76bdd5e26091670f33534b43172e0664d3ba..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = './cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py' # noqa: E501 -model = dict( - roi_head=dict( - mask_head=dict( - predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) diff --git a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py deleted file mode 100644 index 57deab10863a0d375e4393e051abad96545c73d7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py 
b/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py deleted file mode 100644 index a539929252c0b760a13a208883b867d085ba8821..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501 -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py deleted file mode 100644 index 1f5065e799a90e1458da2db737bd496d9dc11349..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py deleted file mode 100644 index 13d0b5f2304fdc8af9d65cdb591c5dc6ee035097..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501 -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py deleted file mode 100644 index 743f5f2617d01639cbcf855abb59e9cd94ed3c8a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py +++ /dev/null @@ -1,75 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' -] -model = dict( - roi_head=dict( - bbox_head=dict( - num_classes=1203, - cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), - loss_cls=dict( - type='SeesawLoss', - p=0.8, - q=2.0, - num_classes=1203, - loss_weight=1.0)), - mask_head=dict(num_classes=1203)), - test_cfg=dict( - rcnn=dict( - score_thr=0.0001, - # LVIS allows up to 300 - max_per_img=300))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', 
**img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -dataset_type = 'LVISV1Dataset' -data_root = 'data/lvis_v1/' -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_train.json', - img_prefix=data_root, - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_val.json', - img_prefix=data_root, - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_val.json', - img_prefix=data_root, - pipeline=test_pipeline)) -evaluation = dict(interval=24, metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py deleted file mode 100644 index 0af89210777d31e1ebf8c2852669fd397ab8c8bc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py' -model = dict( - roi_head=dict( - mask_head=dict( - predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) diff --git a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py deleted file mode 100644 index 4fc15049c6c6184506095483c1c16aabc5e55328..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py +++ /dev/null @@ -1,41 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/lvis_v1_instance.py', - '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' -] -model = dict( - roi_head=dict( - bbox_head=dict( - num_classes=1203, - cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), - loss_cls=dict( - type='SeesawLoss', - p=0.8, - q=2.0, - num_classes=1203, - loss_weight=1.0)), - mask_head=dict(num_classes=1203)), - test_cfg=dict( - rcnn=dict( - score_thr=0.0001, - # LVIS allows up to 300 - max_per_img=300))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = 
dict(train=dict(dataset=dict(pipeline=train_pipeline))) -evaluation = dict(interval=12, metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py b/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py deleted file mode 100644 index 0ef6bd2ce4301287cba1b48d89efbbcccecfe3bc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py' -model = dict( - roi_head=dict( - mask_head=dict( - predictor_cfg=dict(type='NormedConv2d', tempearture=20)))) diff --git a/cv/detection/co-detr/pytorch/configs/seesaw_loss/metafile.yml b/cv/detection/co-detr/pytorch/configs/seesaw_loss/metafile.yml deleted file mode 100644 index 70dd2fe681ee2cbefeb2b6a52c6eb789c811af43..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/seesaw_loss/metafile.yml +++ /dev/null @@ -1,203 +0,0 @@ -Collections: - - Name: Seesaw Loss - Metadata: - Training Data: LVIS - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Softmax - - RPN - - Convolution - - Dense Connections - - FPN - - ResNet - - RoIAlign - - Seesaw Loss - Paper: - URL: https://arxiv.org/abs/2008.10032 - Title: 'Seesaw Loss for Long-Tailed Instance Segmentation' - README: configs/seesaw_loss/README.md - -Models: - - Name: mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1 - In Collection: Seesaw Loss - Config: seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: LVIS v1 - Metrics: - box AP: 25.6 - - Task: Instance Segmentation - Dataset: LVIS v1 - Metrics: - mask AP: 25.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-a698dd3d.pth - - Name: mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 - In Collection: Seesaw Loss - Config: seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: LVIS v1 - Metrics: - box AP: 25.6 - - Task: Instance Segmentation - Dataset: LVIS v1 - Metrics: - mask AP: 25.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a1c11314.pth - - Name: mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1 - In Collection: Seesaw Loss - Config: seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: LVIS v1 - Metrics: - box AP: 27.4 - - Task: Instance Segmentation - Dataset: LVIS v1 - Metrics: - mask AP: 26.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-8e6e6dd5.pth - - Name: mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 - In Collection: Seesaw Loss - Config: seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: LVIS v1 - Metrics: - box AP: 27.2 - - Task: Instance Segmentation - Dataset: LVIS v1 - 
Metrics: - mask AP: 27.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a0b59c42.pth - - Name: mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1 - In Collection: Seesaw Loss - Config: configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: LVIS v1 - Metrics: - box AP: 27.6 - - Task: Instance Segmentation - Dataset: LVIS v1 - Metrics: - mask AP: 26.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-392a804b.pth - - Name: mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 - In Collection: Seesaw Loss - Config: configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: LVIS v1 - Metrics: - box AP: 27.6 - - Task: Instance Segmentation - Dataset: LVIS v1 - Metrics: - mask AP: 26.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-cd0f6a12.pth - - Name: mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1 - In Collection: Seesaw Loss - Config: configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: LVIS v1 - Metrics: - box AP: 28.9 - - Task: Instance Segmentation - Dataset: LVIS v1 - Metrics: - mask AP: 27.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-e68eb464.pth - - Name: mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 - In Collection: Seesaw Loss - Config: configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: LVIS v1 - Metrics: - box AP: 28.9 - - Task: Instance Segmentation - Dataset: LVIS v1 - Metrics: - mask AP: 28.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-1d817139.pth - - Name: cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1 - In Collection: Seesaw Loss - Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: LVIS v1 - Metrics: - box AP: 33.1 - - Task: Instance Segmentation - Dataset: LVIS v1 - Metrics: - mask AP: 29.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-71e2215e.pth - - Name: cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 - In Collection: Seesaw Loss - Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: LVIS v1 - Metrics: - box AP: 33.0 - - Task: Instance Segmentation - Dataset: LVIS v1 - Metrics: - mask AP: 30.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-8b5a6745.pth - - Name: cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1 - In Collection: 
Seesaw Loss - Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: LVIS v1 - Metrics: - box AP: 30.0 - - Task: Instance Segmentation - Dataset: LVIS v1 - Metrics: - mask AP: 29.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-5d8ca2a4.pth - - Name: cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1 - In Collection: Seesaw Loss - Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: LVIS v1 - Metrics: - box AP: 32.8 - - Task: Instance Segmentation - Dataset: LVIS v1 - Metrics: - mask AP: 30.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-c8551505.pth diff --git a/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/README.md b/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/README.md deleted file mode 100644 index 9bd92cb69e3ed32f7de75fbe38447f203c8eca41..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/README.md +++ /dev/null @@ -1,109 +0,0 @@ -# Backbones Trained by Self-Supervise Algorithms - - - -## Abstract - -Unsupervised image representations have significantly reduced the gap with supervised pretraining, notably with the recent achievements of contrastive learning methods. These contrastive methods typically work online and rely on a large number of explicit pairwise feature comparisons, which is computationally challenging. In this paper, we propose an online algorithm, SwAV, that takes advantage of contrastive methods without requiring to compute pairwise comparisons. Specifically, our method simultaneously clusters the data while enforcing consistency between cluster assignments produced for different augmentations (or views) of the same image, instead of comparing features directly as in contrastive learning. Simply put, we use a swapped prediction mechanism where we predict the cluster assignment of a view from the representation of another view. Our method can be trained with large and small batches and can scale to unlimited amounts of data. Compared to previous contrastive methods, our method is more memory efficient since it does not require a large memory bank or a special momentum network. In addition, we also propose a new data augmentation strategy, multi-crop, that uses a mix of views with different resolutions in place of two full-resolution views, without increasing the memory or compute requirements much. We validate our findings by achieving 75.3% top-1 accuracy on ImageNet with ResNet-50, as well as surpassing supervised pretraining on all the considered transfer tasks. - -
- -We present Momentum Contrast (MoCo) for unsupervised visual representation learning. From a perspective on contrastive learning as dictionary look-up, we build a dynamic dictionary with a queue and a moving-averaged encoder. This enables building a large and consistent dictionary on-the-fly that facilitates contrastive unsupervised learning. MoCo provides competitive results under the common linear protocol on ImageNet classification. More importantly, the representations learned by MoCo transfer well to downstream tasks. MoCo can outperform its supervised pre-training counterpart in 7 detection/segmentation tasks on PASCAL VOC, COCO, and other datasets, sometimes surpassing it by large margins. This suggests that the gap between unsupervised and supervised representation learning has been largely closed in many vision tasks. - -
- -## Usage - -To use a backbone pretrained with a self-supervised method, there are two steps: - -1. Download and convert the model to a PyTorch-style checkpoint supported by MMDetection -2. Modify the config and change the training settings accordingly - -### Convert model - -For more general usage, we also provide the script `selfsup2mmdet.py` in the tools directory to convert the keys of models pretrained by different self-supervised methods to PyTorch-style checkpoints used in MMDetection. - -```bash -python -u tools/model_converters/selfsup2mmdet.py ${PRETRAIN_PATH} ${STORE_PATH} --selfsup ${method} -``` - -This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`. - -For example, to use a ResNet-50 backbone released by MoCo, you can download it from [here](https://dl.fbaipublicfiles.com/moco/moco_checkpoints/moco_v2_800ep/moco_v2_800ep_pretrain.pth.tar) and use the following command: - -```bash -python -u tools/model_converters/selfsup2mmdet.py ./moco_v2_800ep_pretrain.pth.tar mocov2_r50_800ep_pretrain.pth --selfsup moco -``` - -To use the ResNet-50 backbone released by SwAV, you can download it from [here](https://dl.fbaipublicfiles.com/deepcluster/swav_800ep_pretrain.pth.tar). - -### Modify config - -The backbone requires SyncBN, and `frozen_stages` needs to be changed. A config that uses the MoCo backbone is shown below: - -```python -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - pretrained='./mocov2_r50_800ep_pretrain.pth', - backbone=dict( - frozen_stages=0, - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False)) - -``` - -## Results and Models - -| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :-------: | :-----------------------------------------------------------------: | :-----: | :------------: | :------: | :------------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Mask RCNN | [R50 by MoCo v2](./mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py) | pytorch | 1x | | | 38.0 | 34.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco_20210604_114614-a8b63483.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco_20210604_114614.log.json) | -| Mask RCNN | [R50 by MoCo v2](./mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py) | pytorch | multi-scale 2x | | | 40.8 | 36.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco_20210605_163717-d95df20a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco_20210605_163717.log.json) | -| Mask RCNN | [R50 by SwAV](./mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py) | pytorch | 1x | | | 39.1 | 35.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco/mask_rcnn_r50_fpn_swav-pretrain_1x_coco_20210604_114640-7b9baf28.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco/mask_rcnn_r50_fpn_swav-pretrain_1x_coco_20210604_114640.log.json) | -| Mask RCNN | [R50 by SwAV](./mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py) | pytorch | multi-scale 2x | | | 41.3 | 37.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco_20210605_163717-08e26fca.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco_20210605_163717.log.json) | - -### Notice - -1. We only provide single-scale 1x and multi-scale 2x configs as examples to show how to use backbones trained by self-supervised algorithms. We will try to reproduce the results in their corresponding paper using the released backbone in the future. Please stay tuned. - -## Citation - -We support to apply the backbone models pre-trained by different self-supervised methods in detection systems and provide their results on Mask R-CNN. - -The pre-trained models are converted from [MoCo](https://github.com/facebookresearch/moco) and downloaded from [SwAV](https://github.com/facebookresearch/swav). 
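For convenience, the two preparation paths described in the Usage section can be combined into one shell sketch. This is only an illustration, not part of the deleted files: it assumes you run it from the repository root and that `wget` is available, and it reuses the checkpoint URLs and the `selfsup2mmdet.py` command quoted above.

```bash
# MoCo v2: download the released checkpoint, then convert its keys into an
# MMDetection-style ResNet-50 checkpoint (same command as in "Convert model").
wget https://dl.fbaipublicfiles.com/moco/moco_checkpoints/moco_v2_800ep/moco_v2_800ep_pretrain.pth.tar
python -u tools/model_converters/selfsup2mmdet.py ./moco_v2_800ep_pretrain.pth.tar mocov2_r50_800ep_pretrain.pth --selfsup moco

# SwAV: the SwAV configs in this diff point at the released checkpoint directly
# (checkpoint='./swav_800ep_pretrain.pth.tar'), so it only needs to be downloaded.
wget https://dl.fbaipublicfiles.com/deepcluster/swav_800ep_pretrain.pth.tar
```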
- -For SwAV, please cite - -```latex -@article{caron2020unsupervised, - title={Unsupervised Learning of Visual Features by Contrasting Cluster Assignments}, - author={Caron, Mathilde and Misra, Ishan and Mairal, Julien and Goyal, Priya and Bojanowski, Piotr and Joulin, Armand}, - booktitle={Proceedings of Advances in Neural Information Processing Systems (NeurIPS)}, - year={2020} -} -``` - -For MoCo, please cite - -```latex -@Article{he2019moco, - author = {Kaiming He and Haoqi Fan and Yuxin Wu and Saining Xie and Ross Girshick}, - title = {Momentum Contrast for Unsupervised Visual Representation Learning}, - journal = {arXiv preprint arXiv:1911.05722}, - year = {2019}, -} -@Article{chen2020mocov2, - author = {Xinlei Chen and Haoqi Fan and Ross Girshick and Kaiming He}, - title = {Improved Baselines with Momentum Contrastive Learning}, - journal = {arXiv preprint arXiv:2003.04297}, - year = {2020}, -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py deleted file mode 100644 index f1e061524e656409e37d3ae80b290c368a47d6a6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - frozen_stages=0, - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - init_cfg=dict( - type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth'))) diff --git a/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py b/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py deleted file mode 100644 index 09aa15608decb610a2f0b1181e50cbe1b8c6387a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py +++ /dev/null @@ -1,32 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - frozen_stages=0, - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - init_cfg=dict( - type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth'))) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) -] - -data = dict(train=dict(pipeline=train_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py deleted file mode 100644 index 
f92a3453dd1d5e8460a4279764845ce3e9c3ed81..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - frozen_stages=0, - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - init_cfg=dict( - type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar'))) diff --git a/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py b/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py deleted file mode 100644 index fe473613492b5388ceb50b1669317539360b8e2f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py +++ /dev/null @@ -1,32 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' -] - -model = dict( - backbone=dict( - frozen_stages=0, - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - init_cfg=dict( - type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar'))) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) -] - -data = dict(train=dict(pipeline=train_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/simple_copy_paste/README.md b/cv/detection/co-detr/pytorch/configs/simple_copy_paste/README.md deleted file mode 100644 index 46162aa51244d6080f8f50d5d76218de682eb85f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/simple_copy_paste/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# SimpleCopyPaste - -> [Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation](https://arxiv.org/abs/2012.07177) - - - -## Abstract - -Building instance segmentation models that are data-efficient and can handle rare object categories is an important challenge in computer vision. Leveraging data augmentations is a promising direction towards addressing this challenge. Here, we perform a systematic study of the Copy-Paste augmentation (\[13, 12\]) for instance segmentation where we randomly paste objects onto an image. Prior studies on Copy-Paste relied on modeling the surrounding visual context for pasting the objects. However, we find that the simple mechanism of pasting objects randomly is good enough and can provide solid gains on top of strong baselines. Furthermore, we show Copy-Paste is additive with semi-supervised methods that leverage extra data through pseudo labeling (e.g. self-training). On COCO instance segmentation, we achieve 49.1 mask AP and 57.3 box AP, an improvement of +0.6 mask AP and +1.5 box AP over the previous state-of-the-art. 
We further demonstrate that Copy-Paste can lead to significant improvements on the LVIS benchmark. Our baseline model outperforms the LVIS 2020 Challenge winning entry by +3.6 mask AP on rare categories. - -
- -## Results and Models - -### Mask R-CNN with Standard Scale Jittering (SSJ) and Simple Copy-Paste(SCP) - -Standard Scale Jittering(SSJ) resizes and crops an image with a resize range of 0.8 to 1.25 of the original image size, and Simple Copy-Paste(SCP) selects a random subset of objects from one of the images and pastes them onto the other image. - -| Backbone | Training schedule | Augmentation | batch size | box AP | mask AP | Config | Download | -| :------: | :---------------: | :----------: | :--------: | :----: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | 90k | SSJ | 64 | 43.3 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco_20220316_181409-f79c84c5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco_20220316_181409.log.json) | -| R-50 | 90k | SSJ+SCP | 64 | 43.8 | 39.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco_20220316_181307-6bc5726f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco_20220316_181307.log.json) | -| R-50 | 270k | SSJ | 64 | 43.5 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco_20220324_182940-33a100c5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco_20220324_182940.log.json) | -| R-50 | 270k | SSJ+SCP | 64 | 45.1 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco_20220324_201229-80ee90b7.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco_20220324_201229.log.json) | - -## Citation - -```latex -@inproceedings{ghiasi2021simple, - title={Simple copy-paste is a strong data augmentation method for instance segmentation}, - author={Ghiasi, Golnaz and Cui, Yin and Srinivas, Aravind and Qian, Rui and Lin, Tsung-Yi and Cubuk, Ekin D and Le, Quoc V and Zoph, Barret}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={2918--2928}, - year={2021} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py b/cv/detection/co-detr/pytorch/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py deleted file mode 100644 index d0ce9176a7381c392e7e5f9deb15bbed52dc2c66..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py +++ /dev/null @@ -1,20 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - # 270k iterations with batch_size 64 is roughly equivalent to 144 epochs - '../common/ssj_270k_coco_instance.py', -] - -norm_cfg = dict(type='SyncBN', requires_grad=True) -# Use MMSyncBN that handles empty tensor in head. It can be changed to -# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed. -head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) -model = dict( - backbone=dict(frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg), - neck=dict(norm_cfg=norm_cfg), - rpn_head=dict(num_convs=2), # leads to 0.1+ mAP - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - norm_cfg=head_norm_cfg), - mask_head=dict(norm_cfg=head_norm_cfg))) diff --git a/cv/detection/co-detr/pytorch/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py b/cv/detection/co-detr/pytorch/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py deleted file mode 100644 index 1eee95fe00b1c1c2c1f260255abab651aad95716..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py' - -# lr steps at [0.9, 0.95, 0.975] of the maximum iterations -lr_config = dict( - warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750]) -# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs -runner = dict(type='IterBasedRunner', max_iters=90000) diff --git a/cv/detection/co-detr/pytorch/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py b/cv/detection/co-detr/pytorch/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py deleted file mode 100644 index bd28dddaf5c57a47e790f907ea683739640851ed..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py +++ /dev/null @@ -1,20 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - # 270k iterations with batch_size 64 is roughly equivalent to 144 epochs - '../common/ssj_scp_270k_coco_instance.py' -] - -norm_cfg = dict(type='SyncBN', 
requires_grad=True) -# Use MMSyncBN that handles empty tensor in head. It can be changed to -# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed. -head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) -model = dict( - backbone=dict(frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg), - neck=dict(norm_cfg=norm_cfg), - rpn_head=dict(num_convs=2), # leads to 0.1+ mAP - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - norm_cfg=head_norm_cfg), - mask_head=dict(norm_cfg=head_norm_cfg))) diff --git a/cv/detection/co-detr/pytorch/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py b/cv/detection/co-detr/pytorch/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py deleted file mode 100644 index b632c13a62a0276fd81ce4076eb8f9754654e41a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py' - -# lr steps at [0.9, 0.95, 0.975] of the maximum iterations -lr_config = dict( - warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750]) -# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs -runner = dict(type='IterBasedRunner', max_iters=90000) diff --git a/cv/detection/co-detr/pytorch/configs/simple_copy_paste/metafile.yml b/cv/detection/co-detr/pytorch/configs/simple_copy_paste/metafile.yml deleted file mode 100644 index bb6106cfa1f8e393df2494ba44d6726e65ca487e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/simple_copy_paste/metafile.yml +++ /dev/null @@ -1,92 +0,0 @@ -Collections: - - Name: SimpleCopyPaste - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 32x A100 GPUs - Architecture: - - Softmax - - RPN - - Convolution - - Dense Connections - - FPN - - ResNet - - RoIAlign - Paper: - URL: https://arxiv.org/abs/2012.07177 - Title: "Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation" - README: configs/simple_copy_paste/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.25.0/mmdet/datasets/pipelines/transforms.py#L2762 - Version: v2.25.0 - -Models: - - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco - In Collection: SimpleCopyPaste - Config: configs/simplecopypaste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py - Metadata: - Training Memory (GB): 7.2 - Iterations: 270000 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.5 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco_20220324_182940-33a100c5.pth - - - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco - In Collection: SimpleCopyPaste - Config: configs/simplecopypaste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py - Metadata: - Training Memory (GB): 7.2 - Iterations: 90000 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.3 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.0 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco_20220316_181409-f79c84c5.pth - - - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco - In Collection: SimpleCopyPaste - Config: configs/simplecopypaste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py - Metadata: - Training Memory (GB): 7.2 - Iterations: 270000 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.1 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 40.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco_20220324_201229-80ee90b7.pth - - - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco - In Collection: SimpleCopyPaste - Config: configs/simplecopypaste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py - Metadata: - Training Memory (GB): 7.2 - Iterations: 90000 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.8 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco_20220316_181307-6bc5726f.pth diff --git a/cv/detection/co-detr/pytorch/configs/solo/README.md b/cv/detection/co-detr/pytorch/configs/solo/README.md deleted file mode 100644 index 4a36676b1b5e0fafd3bfb1cbe4a6cef5fd549c57..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solo/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# SOLO - -> [SOLO: Segmenting Objects by Locations](https://arxiv.org/abs/1912.04488) - - - -## Abstract - -We present a new, embarrassingly simple approach to instance segmentation in images. Compared to many other dense prediction tasks, e.g., semantic segmentation, it is the arbitrary number of instances that have made instance segmentation much more challenging. In order to predict a mask for each instance, mainstream approaches either follow the 'detect-thensegment' strategy as used by Mask R-CNN, or predict category masks first then use clustering techniques to group pixels into individual instances. We view the task of instance segmentation from a completely new perspective by introducing the notion of "instance categories", which assigns categories to each pixel within an instance according to the instance's location and size, thus nicely converting instance mask segmentation into a classification-solvable problem. Now instance segmentation is decomposed into two classification tasks. We demonstrate a much simpler and flexible instance segmentation framework with strong performance, achieving on par accuracy with Mask R-CNN and outperforming recent singleshot instance segmenters in accuracy. We hope that this very simple and strong framework can serve as a baseline for many instance-level recognition tasks besides instance segmentation. - -
- -## Results and Models - -### SOLO - -| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download | -| :------: | :-----: | :------: | :-----: | :------: | :------------: | :-----: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | pytorch | N | 1x | 8.0 | 14.0 | 33.1 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055-2290a6b8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055.log.json) | -| R-50 | pytorch | Y | 3x | 7.4 | 14.0 | 35.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353-11d224d7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353.log.json) | - -### Decoupled SOLO - -| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download | -| :------: | :-----: | :------: | :-----: | :------: | :------------: | :-----: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | pytorch | N | 1x | 7.8 | 12.5 | 33.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348.log.json) | -| R-50 | pytorch | Y | 3x | 7.9 | 12.5 | 36.7 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504-7b3301ec.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504.log.json) | - -- Decoupled SOLO has a decoupled head which is different from SOLO head. - Decoupled SOLO serves as an efficient and equivalent variant in accuracy - of SOLO. Please refer to the corresponding config files for details. 
- -### Decoupled Light SOLO - -| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download | -| :------: | :-----: | :------: | :-----: | :------: | :------------: | :-----: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | pytorch | Y | 3x | 2.2 | 31.2 | 32.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703-e70e226f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703.log.json) | - -- Decoupled Light SOLO using decoupled structure similar to Decoupled - SOLO head, with light-weight head and smaller input size, Please refer - to the corresponding config files for details. - -## Citation - -```latex -@inproceedings{wang2020solo, - title = {{SOLO}: Segmenting Objects by Locations}, - author = {Wang, Xinlong and Kong, Tao and Shen, Chunhua and Jiang, Yuning and Li, Lei}, - booktitle = {Proc. Eur. Conf. Computer Vision (ECCV)}, - year = {2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py b/cv/detection/co-detr/pytorch/configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py deleted file mode 100644 index 101f8f1d376f8a574cc2b35d17498097891fa2c7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py +++ /dev/null @@ -1,63 +0,0 @@ -_base_ = './decoupled_solo_r50_fpn_3x_coco.py' - -# model settings -model = dict( - mask_head=dict( - type='DecoupledSOLOLightHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - strides=[8, 8, 16, 32, 32], - scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)), - pos_scale=0.2, - num_grids=[40, 36, 24, 16, 12], - cls_down_index=0, - loss_mask=dict( - type='DiceLoss', use_sigmoid=True, activate=False, - loss_weight=3.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(852, 512), (852, 480), (852, 448), (852, 416), (852, 384), - (852, 352)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(852, 512), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - 
test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/solo/decoupled_solo_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/solo/decoupled_solo_r50_fpn_1x_coco.py deleted file mode 100644 index b611cdf4d05fde5f76901c85b7ba55405a5190d0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solo/decoupled_solo_r50_fpn_1x_coco.py +++ /dev/null @@ -1,28 +0,0 @@ -_base_ = [ - './solo_r50_fpn_1x_coco.py', -] -# model settings -model = dict( - mask_head=dict( - type='DecoupledSOLOHead', - num_classes=80, - in_channels=256, - stacked_convs=7, - feat_channels=256, - strides=[8, 8, 16, 32, 32], - scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), - pos_scale=0.2, - num_grids=[40, 36, 24, 16, 12], - cls_down_index=0, - loss_mask=dict( - type='DiceLoss', use_sigmoid=True, activate=False, - loss_weight=3.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) - -optimizer = dict(type='SGD', lr=0.01) diff --git a/cv/detection/co-detr/pytorch/configs/solo/decoupled_solo_r50_fpn_3x_coco.py b/cv/detection/co-detr/pytorch/configs/solo/decoupled_solo_r50_fpn_3x_coco.py deleted file mode 100644 index 4a8c19decb72a3d904a277faac06670999f6b322..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solo/decoupled_solo_r50_fpn_3x_coco.py +++ /dev/null @@ -1,25 +0,0 @@ -_base_ = './solo_r50_fpn_3x_coco.py' - -# model settings -model = dict( - mask_head=dict( - type='DecoupledSOLOHead', - num_classes=80, - in_channels=256, - stacked_convs=7, - feat_channels=256, - strides=[8, 8, 16, 32, 32], - scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), - pos_scale=0.2, - num_grids=[40, 36, 24, 16, 12], - cls_down_index=0, - loss_mask=dict( - type='DiceLoss', use_sigmoid=True, activate=False, - loss_weight=3.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))) diff --git a/cv/detection/co-detr/pytorch/configs/solo/metafile.yml b/cv/detection/co-detr/pytorch/configs/solo/metafile.yml deleted file mode 100644 index b6244e80f1a53503d36b20d3d589476ad3930a42..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solo/metafile.yml +++ /dev/null @@ -1,115 +0,0 @@ -Collections: - - Name: SOLO - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - Convolution - - ResNet - Paper: https://arxiv.org/abs/1912.04488 - README: configs/solo/README.md - -Models: - - Name: decoupled_solo_r50_fpn_1x_coco - In Collection: SOLO - Config: configs/solo/decoupled_solo_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.8 - Epochs: 12 - inference time (ms/im): - - value: 116.4 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (1333, 800) - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 33.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth - - - Name: decoupled_solo_r50_fpn_3x_coco - In Collection: SOLO - Config: configs/solo/decoupled_solo_r50_fpn_3x_coco.py - Metadata: - Training Memory (GB): 7.9 - Epochs: 36 - inference time (ms/im): - - value: 117.2 - hardware: V100 - backend: 
PyTorch - batch size: 1 - mode: FP32 - resolution: (1333, 800) - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 36.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504-7b3301ec.pth - - - Name: decoupled_solo_light_r50_fpn_3x_coco - In Collection: SOLO - Config: configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py - Metadata: - Training Memory (GB): 2.2 - Epochs: 36 - inference time (ms/im): - - value: 35.0 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (852, 512) - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 32.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703-e70e226f.pth - - - Name: solo_r50_fpn_3x_coco - In Collection: SOLO - Config: configs/solo/solo_r50_fpn_3x_coco.py - Metadata: - Training Memory (GB): 7.4 - Epochs: 36 - inference time (ms/im): - - value: 94.2 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (1333, 800) - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 35.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353-11d224d7.pth - - - Name: solo_r50_fpn_1x_coco - In Collection: SOLO - Config: configs/solo/solo_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 8.0 - Epochs: 12 - inference time (ms/im): - - value: 95.1 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (1333, 800) - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 33.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055-2290a6b8.pth diff --git a/cv/detection/co-detr/pytorch/configs/solo/solo_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/solo/solo_r50_fpn_1x_coco.py deleted file mode 100644 index 9093a50480096a22def41e36a9793bf765df56cc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solo/solo_r50_fpn_1x_coco.py +++ /dev/null @@ -1,53 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -# model settings -model = dict( - type='SOLO', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - style='pytorch'), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=0, - num_outs=5), - mask_head=dict( - type='SOLOHead', - num_classes=80, - in_channels=256, - stacked_convs=7, - feat_channels=256, - strides=[8, 8, 16, 32, 32], - scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), - pos_scale=0.2, - num_grids=[40, 36, 24, 16, 12], - cls_down_index=0, - loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)), - # model training and testing settings - test_cfg=dict( - nms_pre=500, - score_thr=0.1, - mask_thr=0.5, - filter_thr=0.05, - kernel='gaussian', # gaussian/linear - sigma=2.0, - max_per_img=100)) - -# optimizer -optimizer = dict(type='SGD', lr=0.01) diff --git 
a/cv/detection/co-detr/pytorch/configs/solo/solo_r50_fpn_3x_coco.py b/cv/detection/co-detr/pytorch/configs/solo/solo_r50_fpn_3x_coco.py deleted file mode 100644 index 52302cdf9dea18ef511eb854fcd3e88f22b0ed44..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solo/solo_r50_fpn_3x_coco.py +++ /dev/null @@ -1,28 +0,0 @@ -_base_ = './solo_r50_fpn_1x_coco.py' - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 800), (1333, 768), (1333, 736), (1333, 704), - (1333, 672), (1333, 640)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) - -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=1.0 / 3, - step=[27, 33]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/solov2/README.md b/cv/detection/co-detr/pytorch/configs/solov2/README.md deleted file mode 100644 index 2ffe70f34c0820e9b3ac4222cfbbb2ea8ca4f835..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solov2/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# SOLOv2 - -> [SOLOv2: Dynamic and Fast Instance Segmentation](https://arxiv.org/abs/2003.10152) - - - -## Abstract - -In this work, we aim at building a simple, direct, and fast instance segmentation -framework with strong performance. We follow the principle of the SOLO method of -Wang et al. "SOLO: segmenting objects by locations". Importantly, we take one -step further by dynamically learning the mask head of the object segmenter such -that the mask head is conditioned on the location. Specifically, the mask branch -is decoupled into a mask kernel branch and mask feature branch, which are -responsible for learning the convolution kernel and the convolved features -respectively. Moreover, we propose Matrix NMS (non maximum suppression) to -significantly reduce the inference time overhead due to NMS of masks. Our -Matrix NMS performs NMS with parallel matrix operations in one shot, and -yields better results. We demonstrate a simple direct instance segmentation -system, outperforming a few state-of-the-art methods in both speed and accuracy. -A light-weight version of SOLOv2 executes at 31.3 FPS and yields 37.1% AP. -Moreover, our state-of-the-art results in object detection (from our mask byproduct) -and panoptic segmentation show the potential to serve as a new strong baseline -for many instance-level recognition tasks besides instance segmentation. - -
- -
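The Matrix NMS step described in the abstract can be sketched in a few lines of PyTorch. This is only an illustrative re-implementation under assumptions (binary instance masks already sorted by descending score; the `kernel` and `sigma` names mirror the `test_cfg` fields in the configs in this diff), not the exact mmdetection code:

```python
import torch

def matrix_nms(masks: torch.Tensor, scores: torch.Tensor,
               kernel: str = 'gaussian', sigma: float = 2.0) -> torch.Tensor:
    """Decay each score by its overlap with higher-scoring masks, computed for
    all pairs at once instead of sequentially suppressing predictions.

    masks:  (N, H, W) binary masks, sorted by descending score.
    scores: (N,) confidences in the same order.  Returns the decayed scores.
    """
    n = masks.size(0)
    flat = masks.reshape(n, -1).float()
    areas = flat.sum(dim=1)
    inter = flat @ flat.t()
    union = areas[:, None] + areas[None, :] - inter
    iou = (inter / union.clamp(min=1e-6)).triu(diagonal=1)  # iou[i, j], i scored higher than j
    # for each prediction, the largest IoU it has with any higher-scoring one
    comp = iou.max(dim=0).values.expand(n, n).t()
    if kernel == 'gaussian':
        decay = torch.exp(-sigma * iou ** 2) / torch.exp(-sigma * comp ** 2)
    else:  # 'linear'
        decay = (1 - iou) / (1 - comp)
    return scores * decay.min(dim=0).values
```

Because every pairwise IoU comes out of a single matrix product, no sequential suppression loop is needed; duplicates are kept with decayed scores and can be dropped afterwards with a score threshold (`filter_thr` in the configs).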
- -## Results and Models - -### SOLOv2 - -| Backbone | Style | MS train | Lr schd | Mem (GB) | mask AP | Config | Download | -| :--------: | :-----: | :------: | :-----: | :------: | :-----: | :-----------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | pytorch | N | 1x | 5.1 | 34.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858.log.json) | -| R-50 | pytorch | Y | 3x | 5.1 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_r50_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_3x_coco/solov2_r50_fpn_3x_coco_20220512_125856-fed092d4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_3x_coco/solov2_r50_fpn_3x_coco_20220512_125856.log.json) | -| R-101 | pytorch | Y | 3x | 6.9 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_r101_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_fpn_3x_coco/solov2_r101_fpn_3x_coco_20220511_095119-c559a076.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_fpn_3x_coco/solov2_r101_fpn_3x_coco_20220511_095119.log.json) | -| R-101(DCN) | pytorch | Y | 3x | 7.1 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_r101_dcn_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_dcn_fpn_3x_coco/solov2_r101_dcn_fpn_3x_coco_20220513_214734-16c966cb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_dcn_fpn_3x_coco/solov2_r101_dcn_fpn_3x_coco_20220513_214734.log.json) | -| X-101(DCN) | pytorch | Y | 3x | 11.3 | 42.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_x101_dcn_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_x101_dcn_fpn_3x_coco/solov2_x101_dcn_fpn_3x_coco_20220513_214337-aef41095.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_x101_dcn_fpn_3x_coco/solov2_x101_dcn_fpn_3x_coco_20220513_214337.log.json) | - -### Light SOLOv2 - -| Backbone | Style | MS train | Lr schd | Mem (GB) | mask AP | Config | Download | -| :------: | :-----: | :------: | :-----: | :------: | :-----: | :------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-18 | pytorch | Y | 3x | 9.1 | 29.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_light_r18_fpn_3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r18_fpn_3x_coco/solov2_light_r18_fpn_3x_coco_20220511_083717-75fa355b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r18_fpn_3x_coco/solov2_light_r18_fpn_3x_coco_20220511_083717.log.json) | -| R-34 | pytorch | Y | 3x | 9.3 | 31.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_light_r34_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r34_fpn_3x_coco/solov2_light_r34_fpn_3x_coco_20220511_091839-e51659d3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r34_fpn_3x_coco/solov2_light_r34_fpn_3x_coco_20220511_091839.log.json) | -| R-50 | pytorch | Y | 3x | 9.9 | 33.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_light_r50_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r50_fpn_3x_coco/solov2_light_r50_fpn_3x_coco_20220512_165256-c93a6074.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r50_fpn_3x_coco/solov2_light_r50_fpn_3x_coco_20220512_165256.log.json) | - -## Citation - -```latex -@article{wang2020solov2, - title={SOLOv2: Dynamic and Fast Instance Segmentation}, - author={Wang, Xinlong and Zhang, Rufeng and Kong, Tao and Li, Lei and Shen, Chunhua}, - journal={Proc. Advances in Neural Information Processing Systems (NeurIPS)}, - year={2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/solov2/metafile.yml b/cv/detection/co-detr/pytorch/configs/solov2/metafile.yml deleted file mode 100644 index 656f66f5c726a7f59777ad3e24127de2d488fe5f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solov2/metafile.yml +++ /dev/null @@ -1,119 +0,0 @@ -Collections: - - Name: SOLOv2 - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x A100 GPUs - Architecture: - - FPN - - Convolution - - ResNet - Paper: https://arxiv.org/abs/2003.10152 - README: configs/solov2/README.md - -Models: - - Name: solov2_r50_fpn_1x_coco - In Collection: SOLOv2 - Config: configs/solov2/solov2_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 5.1 - Epochs: 12 - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 34.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth - - - Name: solov2_r50_fpn_3x_coco - In Collection: SOLOv2 - Config: configs/solov2/solov2_r50_fpn_3x_coco.py - Metadata: - Training Memory (GB): 5.1 - Epochs: 36 - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 37.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_3x_coco/solov2_r50_fpn_3x_coco_20220512_125856-fed092d4.pth - - - Name: solov2_r101_fpn_3x_coco - In Collection: SOLOv2 - Config: configs/solov2/solov2_r101_fpn_3x_coco.py - Metadata: - Training Memory (GB): 6.9 - Epochs: 36 - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_fpn_3x_coco/solov2_r101_fpn_3x_coco_20220511_095119-c559a076.pth - - - Name: solov2_r101_dcn_fpn_3x_coco - In Collection: SOLOv2 - Config: configs/solov2/solov2_r101_dcn_fpn_3x_coco.py - Metadata: - Training Memory (GB): 7.1 - Epochs: 36 - Results: - - Task: Instance Segmentation - Dataset: 
COCO - Metrics: - mask AP: 41.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_dcn_fpn_3x_coco/solov2_r101_dcn_fpn_3x_coco_20220513_214734-16c966cb.pth - - - Name: solov2_x101_dcn_fpn_3x_coco - In Collection: SOLOv2 - Config: configs/solov2/solov2_x101_dcn_fpn_3x_coco.py - Metadata: - Training Memory (GB): 11.3 - Epochs: 36 - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 42.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_x101_dcn_fpn_3x_coco/solov2_x101_dcn_fpn_3x_coco_20220513_214337-aef41095.pth - - - Name: solov2_light_r18_fpn_3x_coco - In Collection: SOLOv2 - Config: configs/solov2/solov2_light_r18_fpn_3x_coco.py - Metadata: - Training Memory (GB): 9.1 - Epochs: 36 - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 29.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r18_fpn_3x_coco/solov2_light_r18_fpn_3x_coco_20220511_083717-75fa355b.pth - - - Name: solov2_light_r34_fpn_3x_coco - In Collection: SOLOv2 - Config: configs/solov2/solov2_light_r34_fpn_3x_coco.py - Metadata: - Training Memory (GB): 9.3 - Epochs: 36 - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 31.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r34_fpn_3x_coco/solov2_light_r34_fpn_3x_coco_20220511_091839-e51659d3.pth - - - Name: solov2_light_r50_fpn_3x_coco - In Collection: SOLOv2 - Config: configs/solov2/solov2_light_r50_fpn_3x_coco.py - Metadata: - Training Memory (GB): 9.9 - Epochs: 36 - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 33.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r50_fpn_3x_coco/solov2_light_r50_fpn_3x_coco_20220512_165256-c93a6074.pth diff --git a/cv/detection/co-detr/pytorch/configs/solov2/solov2_light_r18_fpn_3x_coco.py b/cv/detection/co-detr/pytorch/configs/solov2/solov2_light_r18_fpn_3x_coco.py deleted file mode 100644 index 6fb33b00b5990617ae27184df89f2a16ffd7adb9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solov2/solov2_light_r18_fpn_3x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = 'solov2_light_r50_fpn_3x_coco.py' - -# model settings -model = dict( - backbone=dict( - depth=18, init_cfg=dict(checkpoint='torchvision://resnet18')), - neck=dict(in_channels=[64, 128, 256, 512])) diff --git a/cv/detection/co-detr/pytorch/configs/solov2/solov2_light_r34_fpn_3x_coco.py b/cv/detection/co-detr/pytorch/configs/solov2/solov2_light_r34_fpn_3x_coco.py deleted file mode 100644 index ea082a105d9d61f8ba8043adfd9644316382243e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solov2/solov2_light_r34_fpn_3x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = 'solov2_light_r50_fpn_3x_coco.py' - -# model settings -model = dict( - backbone=dict( - depth=34, init_cfg=dict(checkpoint='torchvision://resnet34')), - neck=dict(in_channels=[64, 128, 256, 512])) diff --git a/cv/detection/co-detr/pytorch/configs/solov2/solov2_light_r50_dcn_fpn_3x_coco.py b/cv/detection/co-detr/pytorch/configs/solov2/solov2_light_r50_dcn_fpn_3x_coco.py deleted file mode 100644 index 4d758e238a56ba7c19f33ce74b239bf00c93b50f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solov2/solov2_light_r50_dcn_fpn_3x_coco.py +++ /dev/null @@ -1,62 +0,0 @@ -_base_ = 'solov2_r50_fpn_3x_coco.py' - -# model settings -model = dict( - backbone=dict( - 
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True)), - mask_head=dict( - feat_channels=256, - stacked_convs=3, - scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)), - mask_feature_head=dict(out_channels=128), - dcn_cfg=dict(type='DCNv2'), - dcn_apply_to_all_conv=False)) # light solov2 head - -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=1.0 / 3, - step=[27, 33]) -runner = dict(type='EpochBasedRunner', max_epochs=36) - -# data -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(768, 512), (768, 480), (768, 448), (768, 416), (768, 384), - (768, 352)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(448, 768), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/solov2/solov2_light_r50_fpn_3x_coco.py b/cv/detection/co-detr/pytorch/configs/solov2/solov2_light_r50_fpn_3x_coco.py deleted file mode 100644 index e08f1db3a24e5ce1b16ea4fcb03d73a4763d6310..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solov2/solov2_light_r50_fpn_3x_coco.py +++ /dev/null @@ -1,57 +0,0 @@ -_base_ = 'solov2_r50_fpn_1x_coco.py' - -# model settings -model = dict( - mask_head=dict( - stacked_convs=2, - feat_channels=256, - scale_ranges=((1, 56), (28, 112), (56, 224), (112, 448), (224, 896)), - mask_feature_head=dict(out_channels=128))) - -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=1.0 / 3, - step=[27, 33]) -runner = dict(type='EpochBasedRunner', max_epochs=36) - -# data -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(768, 512), (768, 480), (768, 448), (768, 416), (768, 384), - (768, 352)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(448, 768), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - 
train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/solov2/solov2_r101_dcn_fpn_3x_coco.py b/cv/detection/co-detr/pytorch/configs/solov2/solov2_r101_dcn_fpn_3x_coco.py deleted file mode 100644 index 159411806ea97497d4a3d08b781a8f5ddf8271ed..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solov2/solov2_r101_dcn_fpn_3x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = 'solov2_r50_fpn_3x_coco.py' - -# model settings -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(checkpoint='torchvision://resnet101'), - dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True)), - mask_head=dict( - mask_feature_head=dict(conv_cfg=dict(type='DCNv2')), - dcn_cfg=dict(type='DCNv2'), - dcn_apply_to_all_conv=True)) diff --git a/cv/detection/co-detr/pytorch/configs/solov2/solov2_r101_fpn_3x_coco.py b/cv/detection/co-detr/pytorch/configs/solov2/solov2_r101_fpn_3x_coco.py deleted file mode 100644 index 6c248e5203a1d0fb7cb3ed9ac4eea26ed7ed621c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solov2/solov2_r101_fpn_3x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = 'solov2_r50_fpn_3x_coco.py' - -# model settings -model = dict( - backbone=dict( - depth=101, init_cfg=dict(checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/solov2/solov2_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/solov2/solov2_r50_fpn_1x_coco.py deleted file mode 100644 index 9aee571bf31ad14c426c9b05e93da0000f39cb8c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solov2/solov2_r50_fpn_1x_coco.py +++ /dev/null @@ -1,61 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -# model settings -model = dict( - type='SOLOv2', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - style='pytorch'), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=0, - num_outs=5), - mask_head=dict( - type='SOLOV2Head', - num_classes=80, - in_channels=256, - feat_channels=512, - stacked_convs=4, - strides=[8, 8, 16, 32, 32], - scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), - pos_scale=0.2, - num_grids=[40, 36, 24, 16, 12], - cls_down_index=0, - mask_feature_head=dict( - feat_channels=128, - start_level=0, - end_level=3, - out_channels=256, - mask_stride=4, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)), - loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0)), - # model training and testing settings - test_cfg=dict( - nms_pre=500, - score_thr=0.1, - mask_thr=0.5, - filter_thr=0.05, - kernel='gaussian', # gaussian/linear - sigma=2.0, - max_per_img=100)) - -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/cv/detection/co-detr/pytorch/configs/solov2/solov2_r50_fpn_3x_coco.py b/cv/detection/co-detr/pytorch/configs/solov2/solov2_r50_fpn_3x_coco.py deleted file mode 100644 index 
640c730edb06ac0f67bee6c0df12e7ec64ae0c40..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solov2/solov2_r50_fpn_3x_coco.py +++ /dev/null @@ -1,28 +0,0 @@ -_base_ = 'solov2_r50_fpn_1x_coco.py' - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=[(1333, 800), (1333, 768), (1333, 736), (1333, 704), - (1333, 672), (1333, 640)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) - -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=1.0 / 3, - step=[27, 33]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/solov2/solov2_x101_dcn_fpn_3x_coco.py b/cv/detection/co-detr/pytorch/configs/solov2/solov2_x101_dcn_fpn_3x_coco.py deleted file mode 100644 index 6115fed6d429c4e66665c4930730f827f95801b3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/solov2/solov2_x101_dcn_fpn_3x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = 'solov2_r50_fpn_3x_coco.py' - -# model settings -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')), - mask_head=dict( - mask_feature_head=dict(conv_cfg=dict(type='DCNv2')), - dcn_cfg=dict(type='DCNv2'), - dcn_apply_to_all_conv=True)) diff --git a/cv/detection/co-detr/pytorch/configs/sparse_rcnn/README.md b/cv/detection/co-detr/pytorch/configs/sparse_rcnn/README.md deleted file mode 100644 index d7912e0e14a81b9b2d8984ed62afe838a4a9b15b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sparse_rcnn/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# Sparse R-CNN - -> [Sparse R-CNN: End-to-End Object Detection with Learnable Proposals](https://arxiv.org/abs/2011.12450) - - - -## Abstract - -We present Sparse R-CNN, a purely sparse method for object detection in images. Existing works on object detection heavily rely on dense object candidates, such as k anchor boxes pre-defined on all grids of image feature map of size H×W. In our method, however, a fixed sparse set of learned object proposals, total length of N, are provided to object recognition head to perform classification and location. By eliminating HWk (up to hundreds of thousands) hand-designed object candidates to N (e.g. 100) learnable proposals, Sparse R-CNN completely avoids all efforts related to object candidates design and many-to-one label assignment. More importantly, final predictions are directly output without non-maximum suppression post-procedure. Sparse R-CNN demonstrates accuracy, run-time and training convergence performance on par with the well-established detector baselines on the challenging COCO dataset, e.g., achieving 45.0 AP in standard 3× training schedule and running at 22 fps using ResNet-50 FPN model. We hope our work could inspire re-thinking the convention of dense prior in object detectors. - -
- -
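The one-to-one label assignment mentioned above (the `HungarianAssigner` with classification, L1 and IoU costs in the config below) can be illustrated with a small, hedged sketch using SciPy's Hungarian solver. The cost here is simplified to a classification term plus an L1 box term (the real config also adds a GIoU cost), and the function name and tensor layouts are our own assumptions:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def one_to_one_assign(cls_prob, pred_boxes, gt_labels, gt_boxes,
                      cls_weight=2.0, l1_weight=5.0):
    """Match each ground-truth box to exactly one of the N learnable proposals
    by minimising a combined classification + L1 box cost, instead of the
    many-to-one anchor/IoU assignment used by dense detectors.

    cls_prob:   (N, num_classes) predicted class probabilities
    pred_boxes: (N, 4) predicted boxes, normalised coordinates
    gt_labels:  (M,)   ground-truth class indices (integer array)
    gt_boxes:   (M, 4) ground-truth boxes, normalised coordinates
    """
    cls_cost = -cls_prob[:, gt_labels]                              # (N, M)
    l1_cost = np.abs(pred_boxes[:, None] - gt_boxes[None]).sum(-1)  # (N, M)
    cost = cls_weight * cls_cost + l1_weight * l1_cost
    proposal_idx, gt_idx = linear_sum_assignment(cost)              # one proposal per GT
    return proposal_idx, gt_idx
```

With this one-to-one matching, every ground truth supervises a single proposal, which is why the final predictions need no NMS post-processing.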
- -## Results and Models - -| Model | Backbone | Style | Lr schd | Number of Proposals | Multi-Scale | RandomCrop | box AP | Config | Download | -| :----------: | :-------: | :-----: | :-----: | :-----------------: | :---------: | :--------: | :----: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Sparse R-CNN | R-50-FPN | pytorch | 1x | 100 | False | False | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.log.json) | -| Sparse R-CNN | R-50-FPN | pytorch | 3x | 100 | True | False | 42.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.log.json) | -| Sparse R-CNN | R-50-FPN | pytorch | 3x | 300 | True | True | 45.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.log.json) | -| Sparse R-CNN | R-101-FPN | pytorch | 3x | 100 | True | False | 44.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.log.json) | -| Sparse R-CNN | R-101-FPN | pytorch | 3x | 300 | True | True | 46.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | 
[model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.log.json) | - -### Notes - -We observe about 0.3 AP noise especially when using ResNet-101 as the backbone. - -## Citation - -```latex -@article{peize2020sparse, - title = {{SparseR-CNN}: End-to-End Object Detection with Learnable Proposals}, - author = {Peize Sun and Rufeng Zhang and Yi Jiang and Tao Kong and Chenfeng Xu and Wei Zhan and Masayoshi Tomizuka and Lei Li and Zehuan Yuan and Changhu Wang and Ping Luo}, - journal = {arXiv preprint arXiv:2011.12450}, - year = {2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/sparse_rcnn/metafile.yml b/cv/detection/co-detr/pytorch/configs/sparse_rcnn/metafile.yml deleted file mode 100644 index bb1273eccd7541d1fcaa958c9aae276322c4d193..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sparse_rcnn/metafile.yml +++ /dev/null @@ -1,80 +0,0 @@ -Collections: - - Name: Sparse R-CNN - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - ResNet - - Sparse R-CNN - Paper: - URL: https://arxiv.org/abs/2011.12450 - Title: 'Sparse R-CNN: End-to-End Object Detection with Learnable Proposals' - README: configs/sparse_rcnn/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.9.0/mmdet/models/detectors/sparse_rcnn.py#L6 - Version: v2.9.0 - -Models: - - Name: sparse_rcnn_r50_fpn_1x_coco - In Collection: Sparse R-CNN - Config: configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth - - - Name: sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco - In Collection: Sparse R-CNN - Config: configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py - Metadata: - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.pth - - - Name: sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco - In Collection: Sparse R-CNN - Config: configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py - Metadata: - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 45.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.pth - - - Name: sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco - In Collection: Sparse R-CNN - Config: configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py - Metadata: - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.2 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.pth - - - Name: sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco - In Collection: Sparse R-CNN - Config: configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py - Metadata: - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.pth diff --git a/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py b/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py deleted file mode 100644 index de323bdfaad7a092373da57d8f5ce99441bd48cf..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py' - -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py b/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py deleted file mode 100644 index ab4c5f68178a55d89a74bfa2911d48befb8869f8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py' - -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index b383ee48598c9ae73c6f44dbb539cdfa6c052d80..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,95 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -num_stages = 6 -num_proposals = 100 -model = dict( - type='SparseRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=0, - add_extra_convs='on_input', - num_outs=4), - rpn_head=dict( - type='EmbeddingRPNHead', - num_proposals=num_proposals, - proposal_feature_channel=256), - roi_head=dict( - type='SparseRoIHead', - num_stages=num_stages, - stage_loss_weights=[1] * num_stages, - proposal_feature_channel=256, - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=[ - dict( - 
type='DIIHead', - num_classes=80, - num_ffn_fcs=2, - num_heads=8, - num_cls_fcs=1, - num_reg_fcs=3, - feedforward_channels=2048, - in_channels=256, - dropout=0.0, - ffn_act_cfg=dict(type='ReLU', inplace=True), - dynamic_conv_cfg=dict( - type='DynamicConv', - in_channels=256, - feat_channels=64, - out_channels=256, - input_feat_shape=7, - act_cfg=dict(type='ReLU', inplace=True), - norm_cfg=dict(type='LN')), - loss_bbox=dict(type='L1Loss', loss_weight=5.0), - loss_iou=dict(type='GIoULoss', loss_weight=2.0), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - clip_border=False, - target_means=[0., 0., 0., 0.], - target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages) - ]), - # training and testing settings - train_cfg=dict( - rpn=None, - rcnn=[ - dict( - assigner=dict( - type='HungarianAssigner', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBoxL1Cost', weight=5.0), - iou_cost=dict(type='IoUCost', iou_mode='giou', - weight=2.0)), - sampler=dict(type='PseudoSampler'), - pos_weight=1) for _ in range(num_stages) - ]), - test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals))) - -# optimizer -optimizer = dict(_delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001) -optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=1, norm_type=2)) -# learning policy -lr_config = dict(policy='step', step=[8, 11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py b/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py deleted file mode 100644 index 36f1d62eba62bb9c3266864cd4250caedea95a21..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py +++ /dev/null @@ -1,52 +0,0 @@ -_base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py' -num_proposals = 300 -model = dict( - rpn_head=dict(num_proposals=num_proposals), - test_cfg=dict( - _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals))) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# augmentation strategy originates from DETR. 
-train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='AutoAugment', - policies=[[ - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict( - type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ]]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -data = dict(train=dict(pipeline=train_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py b/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py deleted file mode 100644 index 2fa2a807190427c857ddbea8ed7efd9434e5ef0f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py +++ /dev/null @@ -1,23 +0,0 @@ -_base_ = './sparse_rcnn_r50_fpn_1x_coco.py' - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -min_values = (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, value) for value in min_values], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] - -data = dict(train=dict(pipeline=train_pipeline)) -lr_config = dict(policy='step', step=[27, 33]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/ssd/README.md b/cv/detection/co-detr/pytorch/configs/ssd/README.md deleted file mode 100644 index 463926b3b66dabce65f0fe071cd162a6409aa971..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ssd/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# SSD - -> [SSD: Single Shot MultiBox Detector](https://arxiv.org/abs/1512.02325) - - - -## Abstract - -We present a method for detecting objects in images using a single deep neural network. Our approach, named SSD, discretizes the output space of bounding boxes into a set of default boxes over different aspect ratios and scales per feature map location. At prediction time, the network generates scores for the presence of each object category in each default box and produces adjustments to the box to better match the object shape. Additionally, the network combines predictions from multiple feature maps with different resolutions to naturally handle objects of various sizes. 
Our SSD model is simple relative to methods that require object proposals because it completely eliminates proposal generation and subsequent pixel or feature resampling stage and encapsulates all computation in a single network. This makes SSD easy to train and straightforward to integrate into systems that require a detection component. Experimental results on the PASCAL VOC, MS COCO, and ILSVRC datasets confirm that SSD has comparable accuracy to methods that utilize an additional object proposal step and is much faster, while providing a unified framework for both training and inference. Compared to other single stage methods, SSD has much better accuracy, even with a smaller input image size. For 300×300 input, SSD achieves 72.1% mAP on VOC2007 test at 58 FPS on a Nvidia Titan X and for 500×500 input, SSD achieves 75.1% mAP, outperforming a comparable state of the art Faster R-CNN model. - -
- -
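As a concrete illustration of the default boxes over different aspect ratios and scales mentioned in the abstract, the sketch below enumerates the (width, height) pairs for one feature-map level following the paper's scheme. It is a toy example: the helper name is an assumption, not an mmdetection API, and the numbers in the usage line are taken from the SSDLite `SSDAnchorGenerator` settings further down in this diff.

```python
import math

def default_boxes_for_level(min_size, max_size, ratios):
    """Return (w, h) pairs for one feature-map level, SSD-style:
    a 1:1 box at min_size, an extra 1:1 box at sqrt(min_size * max_size),
    and a pair of boxes (r:1 and 1:r) for every extra aspect ratio r."""
    boxes = [(min_size, min_size),
             (math.sqrt(min_size * max_size), math.sqrt(min_size * max_size))]
    for r in ratios:
        s = math.sqrt(r)
        boxes.append((min_size * s, min_size / s))
        boxes.append((min_size / s, min_size * s))
    return boxes

# e.g. the first SSDLite level below: min_sizes[0]=48, max_sizes[0]=100, ratios=[2, 3]
print(default_boxes_for_level(48, 100, [2, 3]))   # 6 default boxes per location at this level
```

Each feature-map location then predicts class scores and box offsets for every default box at its level, which is how a single network covers objects of many sizes and shapes.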
- -## Results and models of SSD - -| Backbone | Size | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :------: | :--: | :---: | :-----: | :------: | :------------: | :----: | :----------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| VGG16 | 300 | caffe | 120e | 9.9 | 43.7 | 25.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd/ssd300_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428-d231a06e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428.log.json) | -| VGG16 | 512 | caffe | 120e | 19.4 | 30.7 | 29.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd/ssd512_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20210803_022849-0a47a1ca.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20210803_022849.log.json) | - -## Results and models of SSD-Lite - -| Backbone | Size | Training from scratch | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :---------: | :--: | :-------------------: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| MobileNetV2 | 320 | yes | 600e | 4.0 | 69.9 | 21.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627.log.json) | - -## Notice - -### Compatibility - -In v2.14.0, [PR5291](https://github.com/open-mmlab/mmdetection/pull/5291) refactored SSD neck and head for more -flexible usage. If users want to use the SSD checkpoint trained in the older versions, we provide a scripts -`tools/model_converters/upgrade_ssd_version.py` to convert the model weights. - -```bash -python tools/model_converters/upgrade_ssd_version.py ${OLD_MODEL_PATH} ${NEW_MODEL_PATH} - -``` - -- OLD_MODEL_PATH: the path to load the old version SSD model. -- NEW_MODEL_PATH: the path to save the converted model weights. - -### SSD-Lite training settings - -There are some differences between our implementation of MobileNetV2 SSD-Lite and the one in [TensorFlow 1.x detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1_detection_zoo.md) . - -1. Use 320x320 as input size instead of 300x300. -2. The anchor sizes are different. -3. The C4 feature map is taken from the last layer of stage 4 instead of the middle of the block. -4. 
The model in TensorFlow1.x is trained on coco 2014 and validated on coco minival2014, but we trained and validated the model on coco 2017. The mAP on val2017 is usually a little lower than minival2014 (refer to the results in TensorFlow Object Detection API, e.g., MobileNetV2 SSD gets 22 mAP on minival2014 but 20.2 mAP on val2017). - -## Citation - -```latex -@article{Liu_2016, - title={SSD: Single Shot MultiBox Detector}, - journal={ECCV}, - author={Liu, Wei and Anguelov, Dragomir and Erhan, Dumitru and Szegedy, Christian and Reed, Scott and Fu, Cheng-Yang and Berg, Alexander C.}, - year={2016}, -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/ssd/metafile.yml b/cv/detection/co-detr/pytorch/configs/ssd/metafile.yml deleted file mode 100644 index b9ee79cd7b175eb4ee2c2306afd6eabea0f098f2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ssd/metafile.yml +++ /dev/null @@ -1,78 +0,0 @@ -Collections: - - Name: SSD - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - VGG - Paper: - URL: https://arxiv.org/abs/1512.02325 - Title: 'SSD: Single Shot MultiBox Detector' - README: configs/ssd/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.14.0/mmdet/models/dense_heads/ssd_head.py#L16 - Version: v2.14.0 - -Models: - - Name: ssd300_coco - In Collection: SSD - Config: configs/ssd/ssd300_coco.py - Metadata: - Training Memory (GB): 9.9 - inference time (ms/im): - - value: 22.88 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (300, 300) - Epochs: 120 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 25.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428-d231a06e.pth - - - Name: ssd512_coco - In Collection: SSD - Config: configs/ssd/ssd512_coco.py - Metadata: - Training Memory (GB): 19.4 - inference time (ms/im): - - value: 32.57 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (512, 512) - Epochs: 120 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 29.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20210803_022849-0a47a1ca.pth - - - Name: ssdlite_mobilenetv2_scratch_600e_coco - In Collection: SSD - Config: configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py - Metadata: - Training Memory (GB): 4.0 - inference time (ms/im): - - value: 14.3 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (320, 320) - Epochs: 600 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 21.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth diff --git a/cv/detection/co-detr/pytorch/configs/ssd/ssd300_coco.py b/cv/detection/co-detr/pytorch/configs/ssd/ssd300_coco.py deleted file mode 100644 index 1891bade9bc1b21f801d7320081f6fb5678edb05..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ssd/ssd300_coco.py +++ /dev/null @@ -1,71 +0,0 @@ -_base_ = [ - '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' -] -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) -train_pipeline = 
[ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 4)), - dict( - type='MinIoURandomCrop', - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(300, 300), keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(300, 300), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=3, - train=dict( - _delete_=True, - type='RepeatDataset', - times=5, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) -optimizer_config = dict(_delete_=True) -custom_hooks = [ - dict(type='NumClassCheckHook'), - dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') -] - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/ssd/ssd512_coco.py b/cv/detection/co-detr/pytorch/configs/ssd/ssd512_coco.py deleted file mode 100644 index 117777ff6a605dec23e5c4e1f5609735fea182cb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ssd/ssd512_coco.py +++ /dev/null @@ -1,84 +0,0 @@ -_base_ = 'ssd300_coco.py' -input_size = 512 -model = dict( - neck=dict( - out_channels=(512, 1024, 512, 256, 256, 256, 256), - level_strides=(2, 2, 2, 2, 1), - level_paddings=(1, 1, 1, 1, 1), - last_kernel_size=4), - bbox_head=dict( - in_channels=(512, 1024, 512, 256, 256, 256, 256), - anchor_generator=dict( - type='SSDAnchorGenerator', - scale_major=False, - input_size=input_size, - basesize_ratio_range=(0.1, 0.9), - strides=[8, 16, 32, 64, 128, 256, 512], - ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]]))) -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 4)), - dict( - type='MinIoURandomCrop', - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(512, 512), keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - 
dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(512, 512), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=3, - train=dict( - _delete_=True, - type='RepeatDataset', - times=5, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) -optimizer_config = dict(_delete_=True) -custom_hooks = [ - dict(type='NumClassCheckHook'), - dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') -] - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py b/cv/detection/co-detr/pytorch/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py deleted file mode 100644 index 929eb6c6189bce9ed7cc830ea8dabdca3ccefb26..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py +++ /dev/null @@ -1,150 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py' -] - -model = dict( - type='SingleStageDetector', - backbone=dict( - type='MobileNetV2', - out_indices=(4, 7), - norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), - init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), - neck=dict( - type='SSDNeck', - in_channels=(96, 1280), - out_channels=(96, 1280, 512, 256, 256, 128), - level_strides=(2, 2, 2, 2), - level_paddings=(1, 1, 1, 1), - l2_norm_scale=None, - use_depthwise=True, - norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), - act_cfg=dict(type='ReLU6'), - init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)), - bbox_head=dict( - type='SSDHead', - in_channels=(96, 1280, 512, 256, 256, 128), - num_classes=80, - use_depthwise=True, - norm_cfg=dict(type='BN', eps=0.001, momentum=0.03), - act_cfg=dict(type='ReLU6'), - init_cfg=dict(type='Normal', layer='Conv2d', std=0.001), - - # set anchor size manually instead of using the predefined - # SSD300 setting. 
- anchor_generator=dict( - type='SSDAnchorGenerator', - scale_major=False, - strides=[16, 32, 64, 107, 160, 320], - ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]], - min_sizes=[48, 100, 150, 202, 253, 304], - max_sizes=[100, 150, 202, 253, 304, 320]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2])), - # model training and testing settings - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0., - ignore_iof_thr=-1, - gt_max_assign_all=False), - smoothl1_beta=1., - allowed_border=-1, - pos_weight=-1, - neg_pos_ratio=3, - debug=False), - test_cfg=dict( - nms_pre=1000, - nms=dict(type='nms', iou_threshold=0.45), - min_bbox_size=0, - score_thr=0.02, - max_per_img=200)) -cudnn_benchmark = True - -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 4)), - dict( - type='MinIoURandomCrop', - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(320, 320), keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=320), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(320, 320), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=320), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=24, - workers_per_gpu=4, - train=dict( - _delete_=True, - type='RepeatDataset', # use RepeatDataset to speed up training - times=5, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -optimizer = dict(type='SGD', lr=0.015, momentum=0.9, weight_decay=4.0e-5) -optimizer_config = dict(grad_clip=None) - -# learning policy -lr_config = dict( - policy='CosineAnnealing', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - min_lr=0) -runner = dict(type='EpochBasedRunner', max_epochs=120) - -# Avoid evaluation and saving weights too frequently -evaluation = dict(interval=5, metric='bbox') -checkpoint_config = dict(interval=5) -custom_hooks = [ - dict(type='NumClassCheckHook'), - dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') -] - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (8 GPUs) x (24 samples per GPU) -auto_scale_lr = dict(base_batch_size=192) diff --git a/cv/detection/co-detr/pytorch/configs/strong_baselines/README.md b/cv/detection/co-detr/pytorch/configs/strong_baselines/README.md deleted file mode 100644 index aa2550d9343a88446f6045e10c205d874a41f1d0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/strong_baselines/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# Strong Baselines - - - -We train Mask R-CNN with large-scale jitter and longer schedule as strong baselines. -The modifications follow those in [Detectron2](https://github.com/facebookresearch/detectron2/tree/master/configs/new_baselines). - -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :-----------------------------------------------------------------------: | :----------------------: | -| R-50-FPN | pytorch | 50e | | | | | [config](./mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py) | [model](<>) \| [log](<>) | -| R-50-FPN | pytorch | 100e | | | | | [config](./mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py) | [model](<>) \| [log](<>) | -| R-50-FPN | caffe | 100e | | | 44.7 | 40.4 | [config](./mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py) | [model](<>) \| [log](<>) | -| R-50-FPN | caffe | 400e | | | | | [config](./mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py) | [model](<>) \| [log](<>) | - -## Notice - -When using large-scale jittering, there are sometimes empty proposals in the box and mask heads during training. -This requires MMSyncBN that allows empty tensors. Therefore, please use mmcv-full>=1.3.14 to train models supported in this directory. diff --git a/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py b/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py deleted file mode 100644 index a40d6a036508c2ca9188caeda94b4ee0aca6c8b9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py +++ /dev/null @@ -1,80 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../common/lsj_100e_coco_instance.py' -] - -norm_cfg = dict(type='SyncBN', requires_grad=True) -# Use MMSyncBN that handles empty tensor in head. It can be changed to -# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed -# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205. 
-head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) -model = dict( - backbone=dict( - frozen_stages=-1, - norm_eval=False, - norm_cfg=norm_cfg, - init_cfg=None, - style='caffe'), - neck=dict(norm_cfg=norm_cfg), - rpn_head=dict(num_convs=2), - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - norm_cfg=head_norm_cfg), - mask_head=dict(norm_cfg=head_norm_cfg))) - -file_client_args = dict(backend='disk') -# file_client_args = dict( -# backend='petrel', -# path_mapping=dict({ -# './data/': 's3://openmmlab/datasets/detection/', -# 'data/': 's3://openmmlab/datasets/detection/' -# })) - -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -image_size = (1024, 1024) -train_pipeline = [ - dict(type='LoadImageFromFile', file_client_args=file_client_args), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=image_size, - ratio_range=(0.1, 2.0), - multiscale_mode='range', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=image_size, - recompute_bbox=True, - allow_negative_crop=True), - dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=image_size), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile', file_client_args=file_client_args), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -# Use RepeatDataset to speed up training -data = dict( - train=dict(dataset=dict(pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py b/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py deleted file mode 100644 index 31824eb50067c6b2cab49f3e6eebfa5f02fe592d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = 'mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' -fp16 = dict(loss_scale=512.) 
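The train pipeline in the 100e config above is what "large-scale jitter" amounts to: resize by a random factor of 0.1x-2.0x relative to the 1024x1024 canvas, randomly crop back to the canvas, and pad whatever is left. A standalone sketch of that augmentation (NumPy/OpenCV, a hypothetical helper, not the mmdet transforms themselves; boxes and masks would need the same scale/offset applied):

```python
import numpy as np
import cv2

def large_scale_jitter(img, canvas=1024, ratio_range=(0.1, 2.0)):
    """Rough equivalent of Resize(ratio_range) + RandomCrop + Pad in the config above."""
    ratio = np.random.uniform(*ratio_range)
    h, w = img.shape[:2]
    scale = ratio * canvas / max(h, w)                  # keep aspect ratio
    nh, nw = max(1, int(h * scale)), max(1, int(w * scale))
    resized = cv2.resize(img, (nw, nh))

    # crop anything that overflows the canvas
    y0 = np.random.randint(0, max(nh - canvas, 0) + 1)
    x0 = np.random.randint(0, max(nw - canvas, 0) + 1)
    crop = resized[y0:y0 + canvas, x0:x0 + canvas]

    # pad anything that underfills it (top-left aligned, like Pad(size=image_size))
    out = np.zeros((canvas, canvas, 3), dtype=img.dtype)
    out[:crop.shape[0], :crop.shape[1]] = crop
    return out
```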
diff --git a/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py b/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py deleted file mode 100644 index 1211925dea4e27db833f06d52367791ac7125033..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' - -# Use RepeatDataset to speed up training -# change repeat time from 4 (for 100 epochs) to 16 (for 400 epochs) -data = dict(train=dict(times=4 * 4)) -lr_config = dict(warmup_iters=500 * 4) diff --git a/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py b/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py deleted file mode 100644 index 4a15d698b672da57e1bd866189e6b75785fbad8a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py +++ /dev/null @@ -1,22 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../common/lsj_100e_coco_instance.py' -] - -norm_cfg = dict(type='SyncBN', requires_grad=True) -# Use MMSyncBN that handles empty tensor in head. It can be changed to -# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed -# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205. -head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) -model = dict( - # the model is trained from scratch, so init_cfg is None - backbone=dict( - frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None), - neck=dict(norm_cfg=norm_cfg), - rpn_head=dict(num_convs=2), # leads to 0.1+ mAP - roi_head=dict( - bbox_head=dict( - type='Shared4Conv1FCBBoxHead', - conv_out_channels=256, - norm_cfg=head_norm_cfg), - mask_head=dict(norm_cfg=head_norm_cfg))) diff --git a/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py b/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py deleted file mode 100644 index 7b97960a878e4f0649c34dc7c00c99516baa731a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' -# use FP16 -fp16 = dict(loss_scale=512.) 
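The `*_fp16` variants above only add `fp16 = dict(loss_scale=512.)`, i.e. mixed-precision training with a static loss scale. A hand-rolled sketch of what that scale does in one step (mmcv's Fp16OptimizerHook additionally keeps fp32 master copies of the weights, omitted here for brevity):

```python
import torch

def fp16_step(model, loss, optimizer, loss_scale=512.0):
    optimizer.zero_grad()
    (loss * loss_scale).backward()       # scale up so small fp16 gradients don't underflow
    for p in model.parameters():
        if p.grad is not None:
            p.grad.div_(loss_scale)      # unscale before the optimizer update
    optimizer.step()
```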
diff --git a/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py b/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py deleted file mode 100644 index 922579a184a5a16c8e8263d50b39be4d99de8a90..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' - -# Use RepeatDataset to speed up training -# change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs) -data = dict(train=dict(times=2)) diff --git a/cv/detection/co-detr/pytorch/configs/swin/README.md b/cv/detection/co-detr/pytorch/configs/swin/README.md deleted file mode 100644 index 2136134c117ea9d2a4f83ab46c10b7c838abc663..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/swin/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# Swin - -> [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) - - - -## Abstract - -This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with Shifted windows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures. - -
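The window-based self-attention the abstract describes is easiest to see at the shape level: tokens are grouped into non-overlapping 7x7 windows and attention runs inside each window, while every other block cyclically shifts the feature map by half a window before partitioning so information mixes across windows. A shape-only sketch (not the reference Swin code):

```python
import torch

def window_partition(x, window_size=7):
    # x: (B, H, W, C), with H and W divisible by window_size
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    x = x.permute(0, 1, 3, 2, 4, 5)                      # group the two window axes
    return x.reshape(-1, window_size * window_size, C)   # one attention "batch" per window

tokens = window_partition(torch.randn(2, 56, 56, 96))    # -> (128, 49, 96)
# the shifted variant first applies torch.roll(x, (-3, -3), dims=(1, 2)),
# i.e. a cyclic shift by window_size // 2, then partitions again
```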
- -
- -## Results and Models - -### Mask R-CNN - -| Backbone | Pretrain | Lr schd | Multi-scale crop | FP16 | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -| :------: | :---------: | :-----: | :--------------: | :--: | :------: | :------------: | :----: | :-----: | :------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| Swin-T | ImageNet-1K | 1x | no | no | 7.6 | | 42.7 | 39.3 | [config](./mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937.log.json) | -| Swin-T | ImageNet-1K | 3x | yes | no | 10.2 | | 46.0 | 41.6 | [config](./mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725-bacf6f7b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725.log.json) | -| Swin-T | ImageNet-1K | 3x | yes | yes | 7.8 | | 46.0 | 41.7 | [config](./mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006-90a4008c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006.log.json) | -| Swin-S | ImageNet-1K | 3x | yes | yes | 11.9 | | 48.2 | 43.2 | [config](./mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808-b92c91f1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808.log.json) | - -### Notice - -Please follow the example -of `retinanet_swin-t-p4-w7_fpn_1x_coco.py` when you want to combine Swin Transformer with -the one-stage detector. Because there is a layer norm at the outs of Swin Transformer, you must set `start_level` as 0 in FPN, so we have to set the `out_indices` of backbone as `[1,2,3]`. 
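Condensed, the notice above amounts to the following two keys; the full `retinanet_swin-t-p4-w7_fpn_1x_coco.py` further down in this diff uses exactly this pattern:

```python
model = dict(
    backbone=dict(
        out_indices=(1, 2, 3)),  # only the stages the FPN consumes; stage 0 is skipped
    neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5))
```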
- -## Citation - -```latex -@article{liu2021Swin, - title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows}, - author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining}, - journal={arXiv preprint arXiv:2103.14030}, - year={2021} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py b/cv/detection/co-detr/pytorch/configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py deleted file mode 100644 index 15d50a0228b9c4442596a440814109844ab6cfed..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py' -pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa -model = dict( - backbone=dict( - depths=[2, 2, 18, 2], - init_cfg=dict(type='Pretrained', checkpoint=pretrained))) diff --git a/cv/detection/co-detr/pytorch/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py deleted file mode 100644 index 337e85818c5d3bd30147d636d4a90dc8d64184fc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py +++ /dev/null @@ -1,42 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa -model = dict( - type='MaskRCNN', - backbone=dict( - _delete_=True, - type='SwinTransformer', - embed_dims=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.2, - patch_norm=True, - out_indices=(0, 1, 2, 3), - with_cp=False, - convert_weights=True, - init_cfg=dict(type='Pretrained', checkpoint=pretrained)), - neck=dict(in_channels=[96, 192, 384, 768])) - -optimizer = dict( - _delete_=True, - type='AdamW', - lr=0.0001, - betas=(0.9, 0.999), - weight_decay=0.05, - paramwise_cfg=dict( - custom_keys={ - 'absolute_pos_embed': dict(decay_mult=0.), - 'relative_position_bias_table': dict(decay_mult=0.), - 'norm': dict(decay_mult=0.) 
- })) -lr_config = dict(warmup_iters=1000, step=[8, 11]) -runner = dict(max_epochs=12) diff --git a/cv/detection/co-detr/pytorch/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py b/cv/detection/co-detr/pytorch/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py deleted file mode 100644 index 2be31143df5dcfbe8a9582d556f398ccda293464..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py' -# you need to set mode='dynamic' if you are using pytorch<=1.5.0 -fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/cv/detection/co-detr/pytorch/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py b/cv/detection/co-detr/pytorch/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py deleted file mode 100644 index 2612f6e331e4fafe87945a990801122b7e620f69..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py +++ /dev/null @@ -1,91 +0,0 @@ -_base_ = [ - '../_base_/models/mask_rcnn_r50_fpn.py', - '../_base_/datasets/coco_instance.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa - -model = dict( - type='MaskRCNN', - backbone=dict( - _delete_=True, - type='SwinTransformer', - embed_dims=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.2, - patch_norm=True, - out_indices=(0, 1, 2, 3), - with_cp=False, - convert_weights=True, - init_cfg=dict(type='Pretrained', checkpoint=pretrained)), - neck=dict(in_channels=[96, 192, 384, 768])) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# augmentation strategy originates from DETR / Sparse RCNN -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='AutoAugment', - policies=[[ - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), - (608, 1333), (640, 1333), (672, 1333), (704, 1333), - (736, 1333), (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict( - type='Resize', - img_scale=[(400, 1333), (500, 1333), (600, 1333)], - multiscale_mode='value', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ]]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -data = dict(train=dict(pipeline=train_pipeline)) - -optimizer = dict( - _delete_=True, - type='AdamW', - lr=0.0001, - betas=(0.9, 0.999), - weight_decay=0.05, - paramwise_cfg=dict( - custom_keys={ - 'absolute_pos_embed': dict(decay_mult=0.), - 'relative_position_bias_table': dict(decay_mult=0.), - 'norm': dict(decay_mult=0.) 
- })) -lr_config = dict(warmup_iters=1000, step=[27, 33]) -runner = dict(max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/swin/metafile.yml b/cv/detection/co-detr/pytorch/configs/swin/metafile.yml deleted file mode 100644 index 6c07f17512e7e6c2d6075e9fab38ad91b09f2cb2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/swin/metafile.yml +++ /dev/null @@ -1,120 +0,0 @@ -Models: - - Name: mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco - In Collection: Mask R-CNN - Config: configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py - Metadata: - Training Memory (GB): 11.9 - Epochs: 36 - Training Data: COCO - Training Techniques: - - AdamW - Training Resources: 8x V100 GPUs - Architecture: - - Swin Transformer - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 48.2 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 43.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808-b92c91f1.pth - Paper: - URL: https://arxiv.org/abs/2107.08430 - Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' - README: configs/swin/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 - Version: v2.16.0 - - - Name: mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco - In Collection: Mask R-CNN - Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py - Metadata: - Training Memory (GB): 10.2 - Epochs: 36 - Training Data: COCO - Training Techniques: - - AdamW - Training Resources: 8x V100 GPUs - Architecture: - - Swin Transformer - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 41.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725-bacf6f7b.pth - Paper: - URL: https://arxiv.org/abs/2107.08430 - Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' - README: configs/swin/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 - Version: v2.16.0 - - - Name: mask_rcnn_swin-t-p4-w7_fpn_1x_coco - In Collection: Mask R-CNN - Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py - Metadata: - Training Memory (GB): 7.6 - Epochs: 12 - Training Data: COCO - Training Techniques: - - AdamW - Training Resources: 8x V100 GPUs - Architecture: - - Swin Transformer - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.7 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 39.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth - Paper: - URL: https://arxiv.org/abs/2107.08430 - Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' - README: configs/swin/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 - Version: v2.16.0 - - - Name: mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco - In Collection: Mask R-CNN - Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py - Metadata: - Training Memory (GB): 7.8 - Epochs: 36 - Training Data: COCO - Training Techniques: - - AdamW 
- Training Resources: 8x V100 GPUs - Architecture: - - Swin Transformer - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.0 - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 41.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006-90a4008c.pth - Paper: - URL: https://arxiv.org/abs/2107.08430 - Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows' - README: configs/swin/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465 - Version: v2.16.0 diff --git a/cv/detection/co-detr/pytorch/configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py deleted file mode 100644 index 331509323d4712bb8fa56f4d194ec451c9b1956c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py +++ /dev/null @@ -1,30 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa -model = dict( - backbone=dict( - _delete_=True, - type='SwinTransformer', - embed_dims=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.2, - patch_norm=True, - out_indices=(1, 2, 3), - # Please only add indices that would be used - # in FPN, otherwise some parameter will not be used - with_cp=False, - convert_weights=True, - init_cfg=dict(type='Pretrained', checkpoint=pretrained)), - neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5)) - -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/timm_example/README.md b/cv/detection/co-detr/pytorch/configs/timm_example/README.md deleted file mode 100644 index 43748553270c6ec6df447d9e934a960335bae229..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/timm_example/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# Timm Example - -> [PyTorch Image Models](https://github.com/rwightman/pytorch-image-models) - - - -## Abstract - -Py**T**orch **Im**age **M**odels (`timm`) is a collection of image models, layers, utilities, optimizers, schedulers, data-loaders / augmentations, and reference training / validation scripts that aim to pull together a wide variety of SOTA models with ability to reproduce ImageNet training results. - - - -## Results and Models - -### RetinaNet - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------: | :------: | -| R-50 | pytorch | 1x | | | | [config](./retinanet_timm_tv_resnet50_fpn_1x_coco.py) | | -| EfficientNet-B1 | - | 1x | | | | [config](./retinanet_timm_efficientnet_b1_fpn_1x_coco.py) | | - -## Usage - -### Install additional requirements - -MMDetection supports timm backbones via `TIMMBackbone`, a wrapper class in MMClassification. -Thus, you need to install `mmcls` in addition to timm. 
-If you have already installed requirements for mmdet, run - -```shell -pip install 'dataclasses; python_version<"3.7"' -pip install timm -pip install 'mmcls>=0.20.0' -``` - -See [this document](https://mmclassification.readthedocs.io/en/latest/install.html) for the details of MMClassification installation. - -### Edit config - -- See example configs for basic usage. -- See the documents of [timm feature extraction](https://rwightman.github.io/pytorch-image-models/feature_extraction/#multi-scale-feature-maps-feature-pyramid) and [TIMMBackbone](https://mmclassification.readthedocs.io/en/latest/api.html#mmcls.models.backbones.TIMMBackbone) for details. -- Which feature map is output depends on the backbone. - Please check `backbone out_channels` and `backbone out_strides` in your log, and modify `model.neck.in_channels` and `model.backbone.out_indices` if necessary. -- If you use Vision Transformer models that do not support `features_only=True`, add `custom_hooks = []` to your config to disable `NumClassCheckHook`. - -## Citation - -```latex -@misc{rw2019timm, - author = {Ross Wightman}, - title = {PyTorch Image Models}, - year = {2019}, - publisher = {GitHub}, - journal = {GitHub repository}, - doi = {10.5281/zenodo.4414861}, - howpublished = {\url{https://github.com/rwightman/pytorch-image-models}} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/timm_example/retinanet_timm_efficientnet_b1_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/timm_example/retinanet_timm_efficientnet_b1_fpn_1x_coco.py deleted file mode 100644 index 65001167cbf81517c23cf47d1839e8b65134544c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/timm_example/retinanet_timm_efficientnet_b1_fpn_1x_coco.py +++ /dev/null @@ -1,20 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -# please install mmcls>=0.20.0 -# import mmcls.models to trigger register_module in mmcls -custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) -model = dict( - backbone=dict( - _delete_=True, - type='mmcls.TIMMBackbone', - model_name='efficientnet_b1', - features_only=True, - pretrained=True, - out_indices=(1, 2, 3, 4)), - neck=dict(in_channels=[24, 40, 112, 320])) - -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/cv/detection/co-detr/pytorch/configs/timm_example/retinanet_timm_tv_resnet50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/timm_example/retinanet_timm_tv_resnet50_fpn_1x_coco.py deleted file mode 100644 index 0c5b7a89f65bdb394b92b6a33d342e4e71f94a35..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/timm_example/retinanet_timm_tv_resnet50_fpn_1x_coco.py +++ /dev/null @@ -1,19 +0,0 @@ -_base_ = [ - '../_base_/models/retinanet_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -# please install mmcls>=0.20.0 -# import mmcls.models to trigger register_module in mmcls -custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) -model = dict( - backbone=dict( - _delete_=True, - type='mmcls.TIMMBackbone', - model_name='tv_resnet50', # ResNet-50 with torchvision weights - features_only=True, - pretrained=True, - out_indices=(1, 2, 3, 4))) - -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git 
a/cv/detection/co-detr/pytorch/configs/tood/README.md b/cv/detection/co-detr/pytorch/configs/tood/README.md deleted file mode 100644 index 925f0ed0f3d8ebe9fccce74934d63032974e873c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/tood/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# TOOD - -> [TOOD: Task-aligned One-stage Object Detection](https://arxiv.org/abs/2108.07755) - - - -## Abstract - -One-stage object detection is commonly implemented by optimizing two sub-tasks: object classification and localization, using heads with two parallel branches, which might lead to a certain level of spatial misalignment in predictions between the two tasks. In this work, we propose a Task-aligned One-stage Object Detection (TOOD) that explicitly aligns the two tasks in a learning-based manner. First, we design a novel Task-aligned Head (T-Head) which offers a better balance between learning task-interactive and task-specific features, as well as a greater flexibility to learn the alignment via a task-aligned predictor. Second, we propose Task Alignment Learning (TAL) to explicitly pull closer (or even unify) the optimal anchors for the two tasks during training via a designed sample assignment scheme and a task-aligned loss. Extensive experiments are conducted on MS-COCO, where TOOD achieves a 51.1 AP at single-model single-scale testing. This surpasses the recent one-stage detectors by a large margin, such as ATSS (47.7 AP), GFL (48.2 AP), and PAA (49.0 AP), with fewer parameters and FLOPs. Qualitative results also demonstrate the effectiveness of TOOD for better aligning the tasks of object classification and localization. - -
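The task-aligned sample assignment mentioned in the abstract is driven by a simple per-anchor metric. A sketch, assuming the formulation from the TOOD paper: t = s^alpha * u^beta, where s is the classification score for the ground-truth class and u is the IoU between the predicted box and the ground truth; the `tood_r50_fpn_1x_coco.py` config later in this diff uses alpha=1, beta=6 and keeps the top 13 candidates per ground-truth box.

```python
import torch

def task_alignment_metric(cls_score, iou, alpha=1.0, beta=6.0):
    # cls_score, iou: (num_anchors,) values in [0, 1] for one ground-truth box
    return cls_score.pow(alpha) * iou.pow(beta)

t = task_alignment_metric(torch.tensor([0.80, 0.30]), torch.tensor([0.60, 0.90]))
positives = t.topk(1).indices   # TaskAlignedAssigner keeps the top-k candidates as positives
```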
- -
- -## Results and Models - -| Backbone | Style | Anchor Type | Lr schd | Multi-scale Training | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :---------------: | :-----: | :----------: | :-----: | :------------------: | :------: | :------------: | :----: | :------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | pytorch | Anchor-free | 1x | N | 4.1 | | 42.4 | [config](./tood_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425.log) | -| R-50 | pytorch | Anchor-based | 1x | N | 4.1 | | 42.4 | [config](./tood_r50_fpn_anchor_based_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105-b776c134.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105.log) | -| R-50 | pytorch | Anchor-free | 2x | Y | 4.1 | | 44.5 | [config](./tood_r50_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231-3b23174c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231.log) | -| R-101 | pytorch | Anchor-free | 2x | Y | 6.0 | | 46.1 | [config](./tood_r101_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232-a18f53c8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232.log) | -| R-101-dcnv2 | pytorch | Anchor-free | 2x | Y | 6.2 | | 49.3 | [config](./tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728-4a824142.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728.log) | -| X-101-64x4d | pytorch | Anchor-free | 2x | Y | 10.2 | | 47.6 | [config](./tood_x101_64x4d_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519-a4f36113.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519.log) | -| X-101-64x4d-dcnv2 | pytorch | Anchor-free | 2x | Y | | | | [config](./tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py) | [model](<>) \| [log](<>) | - -\[1\] *1x and 2x mean the model is trained for 90K and 180K iterations, respectively.* \ -\[2\] *All results are obtained with a single model and without any test time data augmentation such as multi-scale, flipping and etc..* \ 
-\[3\] *`dcnv2` denotes deformable convolutional networks v2.* \\ - -## Citation - -```latex -@inproceedings{feng2021tood, - title={TOOD: Task-aligned One-stage Object Detection}, - author={Feng, Chengjian and Zhong, Yujie and Gao, Yu and Scott, Matthew R and Huang, Weilin}, - booktitle={ICCV}, - year={2021} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/tood/metafile.yml b/cv/detection/co-detr/pytorch/configs/tood/metafile.yml deleted file mode 100644 index 27a0f8dbfc59614ffd39f22d49ac3eabce8e3b62..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/tood/metafile.yml +++ /dev/null @@ -1,95 +0,0 @@ -Collections: - - Name: TOOD - Metadata: - Training Data: COCO - Training Techniques: - - SGD - Training Resources: 8x V100 GPUs - Architecture: - - TOOD - Paper: - URL: https://arxiv.org/abs/2108.07755 - Title: 'TOOD: Task-aligned One-stage Object Detection' - README: configs/tood/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.20.0/mmdet/models/detectors/tood.py#L7 - Version: v2.20.0 - -Models: - - Name: tood_r101_fpn_mstrain_2x_coco - In Collection: TOOD - Config: configs/tood/tood_r101_fpn_mstrain_2x_coco.py - Metadata: - Training Memory (GB): 6.0 - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.1 - Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232-a18f53c8.pth - - - Name: tood_x101_64x4d_fpn_mstrain_2x_coco - In Collection: TOOD - Config: configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py - Metadata: - Training Memory (GB): 10.2 - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 47.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519-a4f36113.pth - - - Name: tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco - In Collection: TOOD - Config: configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py - Metadata: - Training Memory (GB): 6.2 - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 49.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728-4a824142.pth - - - Name: tood_r50_fpn_anchor_based_1x_coco - In Collection: TOOD - Config: configs/tood/tood_r50_fpn_anchor_based_1x_coco.py - Metadata: - Training Memory (GB): 4.1 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105-b776c134.pth - - - Name: tood_r50_fpn_1x_coco - In Collection: TOOD - Config: configs/tood/tood_r50_fpn_1x_coco.py - Metadata: - Training Memory (GB): 4.1 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 42.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth - - - Name: tood_r50_fpn_mstrain_2x_coco - In Collection: TOOD - Config: configs/tood/tood_r50_fpn_mstrain_2x_coco.py - Metadata: - Training Memory (GB): 4.1 - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.5 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231-3b23174c.pth diff --git a/cv/detection/co-detr/pytorch/configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py deleted file mode 100644 index c7f1bbcbaf17381f6917f5fe7dda8d5b40dd9170..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './tood_r101_fpn_mstrain_2x_coco.py' - -model = dict( - backbone=dict( - dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True)), - bbox_head=dict(num_dcn=2)) diff --git a/cv/detection/co-detr/pytorch/configs/tood/tood_r101_fpn_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/tood/tood_r101_fpn_mstrain_2x_coco.py deleted file mode 100644 index d9d2c32d8ceba33a8efa0ddd7074426480301512..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/tood/tood_r101_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './tood_r50_fpn_mstrain_2x_coco.py' - -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/tood/tood_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/tood/tood_r50_fpn_1x_coco.py deleted file mode 100644 index 35a77a400e155c7e08253bb526b4592c2fca405c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/tood/tood_r50_fpn_1x_coco.py +++ /dev/null @@ -1,74 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - type='TOOD', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5), - bbox_head=dict( - type='TOODHead', - num_classes=80, - in_channels=256, - stacked_convs=6, - feat_channels=256, - anchor_type='anchor_free', - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - initial_loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - activated=True, # use probability instead of logit as input - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_cls=dict( - type='QualityFocalLoss', - use_sigmoid=True, - activated=True, # use probability instead of logit as input - beta=2.0, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), - train_cfg=dict( - initial_epoch=4, - initial_assigner=dict(type='ATSSAssigner', topk=9), - assigner=dict(type='TaskAlignedAssigner', topk=13), - alpha=1, - beta=6, - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) - -# custom hooks 
-custom_hooks = [dict(type='SetEpochInfoHook')] diff --git a/cv/detection/co-detr/pytorch/configs/tood/tood_r50_fpn_anchor_based_1x_coco.py b/cv/detection/co-detr/pytorch/configs/tood/tood_r50_fpn_anchor_based_1x_coco.py deleted file mode 100644 index c7fbf6aff197b821de07f8d4a73f9c72e5f76288..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/tood/tood_r50_fpn_anchor_based_1x_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './tood_r50_fpn_1x_coco.py' -model = dict(bbox_head=dict(anchor_type='anchor_based')) diff --git a/cv/detection/co-detr/pytorch/configs/tood/tood_r50_fpn_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/tood/tood_r50_fpn_mstrain_2x_coco.py deleted file mode 100644 index 157d13a4a17b0aaae3faf23b70a5c7d64b682d32..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/tood/tood_r50_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,22 +0,0 @@ -_base_ = './tood_r50_fpn_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) -# multi-scale training -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 480), (1333, 800)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -data = dict(train=dict(pipeline=train_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/tood/tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/tood/tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py deleted file mode 100644 index 47c92695a92dae83217eaacb9788f88e6c801272..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/tood/tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './tood_x101_64x4d_fpn_mstrain_2x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, False, True, True), - ), - bbox_head=dict(num_dcn=2)) diff --git a/cv/detection/co-detr/pytorch/configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py deleted file mode 100644 index 842f320e83966e9c8dbbf337cfcef2bcb8d782db..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = './tood_r50_fpn_mstrain_2x_coco.py' - -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/tridentnet/README.md b/cv/detection/co-detr/pytorch/configs/tridentnet/README.md deleted file mode 100644 index b972b3a3c9b2de5409af9f76622e8947fd6eace1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/tridentnet/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# TridentNet - -> [Scale-Aware Trident Networks for Object 
Detection](https://arxiv.org/abs/1901.01892) - - - -## Abstract - -Scale variation is one of the key challenges in object detection. In this work, we first present a controlled experiment to investigate the effect of receptive fields for scale variation in object detection. Based on the findings from the exploration experiments, we propose a novel Trident Network (TridentNet) aiming to generate scale-specific feature maps with a uniform representational power. We construct a parallel multi-branch architecture in which each branch shares the same transformation parameters but with different receptive fields. Then, we adopt a scale-aware training scheme to specialize each branch by sampling object instances of proper scales for training. As a bonus, a fast approximation version of TridentNet could achieve significant improvements without any additional parameters and computational cost compared with the vanilla detector. On the COCO dataset, our TridentNet with ResNet-101 backbone achieves state-of-the-art single-model results of 48.4 mAP. - -
- -
- -## Results and Models - -We reports the test results using only one branch for inference. - -| Backbone | Style | mstrain | Lr schd | Mem (GB) | Inf time (fps) | box AP | Download | -| :------: | :---: | :-----: | :-----: | :------: | :------------: | :----: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | caffe | N | 1x | | | 37.7 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838.log.json) | -| R-50 | caffe | Y | 1x | | | 37.6 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839-6ce55ccb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839.log.json) | -| R-50 | caffe | Y | 3x | | | 40.3 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539-46d227ba.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539.log.json) | - -**Note** - -Similar to [Detectron2](https://github.com/facebookresearch/detectron2/tree/master/projects/TridentNet), we haven't implemented the Scale-aware Training Scheme in section 4.2 of the paper. 
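A minimal PyTorch sketch of the trident-block idea described above (branches that share one convolution weight but use different dilations, hence different receptive fields); this is an illustration, not the mmdet `TridentResNet` implementation, and at inference the configs below keep a single branch via `test_branch_idx`:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TridentConv(nn.Module):
    """One shared 3x3 weight; each branch applies it with its own dilation."""

    def __init__(self, in_ch, out_ch, dilations=(1, 2, 3)):
        super().__init__()
        self.dilations = dilations
        self.weight = nn.Parameter(torch.randn(out_ch, in_ch, 3, 3) * 0.01)

    def forward(self, x):
        # padding == dilation keeps the spatial size for a 3x3 kernel
        return [F.conv2d(x, self.weight, padding=d, dilation=d)
                for d in self.dilations]

branches = TridentConv(3, 8)(torch.randn(1, 3, 64, 64))  # 3 maps, each (1, 8, 64, 64)
```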
- -## Citation - -```latex -@InProceedings{li2019scale, - title={Scale-Aware Trident Networks for Object Detection}, - author={Li, Yanghao and Chen, Yuntao and Wang, Naiyan and Zhang, Zhaoxiang}, - journal={The International Conference on Computer Vision (ICCV)}, - year={2019} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/tridentnet/metafile.yml b/cv/detection/co-detr/pytorch/configs/tridentnet/metafile.yml deleted file mode 100644 index 2536f976fcf9d75744332b0040792f2e3b65b4cb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/tridentnet/metafile.yml +++ /dev/null @@ -1,55 +0,0 @@ -Collections: - - Name: TridentNet - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - ResNet - - TridentNet Block - Paper: - URL: https://arxiv.org/abs/1901.01892 - Title: 'Scale-Aware Trident Networks for Object Detection' - README: configs/tridentnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.8.0/mmdet/models/detectors/trident_faster_rcnn.py#L6 - Version: v2.8.0 - -Models: - - Name: tridentnet_r50_caffe_1x_coco - In Collection: TridentNet - Config: configs/tridentnet/tridentnet_r50_caffe_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth - - - Name: tridentnet_r50_caffe_mstrain_1x_coco - In Collection: TridentNet - Config: configs/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839-6ce55ccb.pth - - - Name: tridentnet_r50_caffe_mstrain_3x_coco - In Collection: TridentNet - Config: configs/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco.py - Metadata: - Epochs: 36 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.3 - Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539-46d227ba.pth diff --git a/cv/detection/co-detr/pytorch/configs/tridentnet/tridentnet_r50_caffe_1x_coco.py b/cv/detection/co-detr/pytorch/configs/tridentnet/tridentnet_r50_caffe_1x_coco.py deleted file mode 100644 index d779f75f8395c9d25345b936029ffc1628b5d4cb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/tridentnet/tridentnet_r50_caffe_1x_coco.py +++ /dev/null @@ -1,55 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_caffe_c4.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - type='TridentFasterRCNN', - backbone=dict( - type='TridentResNet', - trident_dilations=(1, 2, 3), - num_branch=3, - test_branch_idx=1, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - roi_head=dict(type='TridentRoIHead', num_branch=3, test_branch_idx=1), - train_cfg=dict( - rpn_proposal=dict(max_per_img=500), - rcnn=dict( - sampler=dict(num=128, pos_fraction=0.5, - add_gt_as_proposals=False)))) - -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) 
-train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco.py b/cv/detection/co-detr/pytorch/configs/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco.py deleted file mode 100644 index c73d9eaa96c7f88dd33eb55f21848db2421bea1e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco.py +++ /dev/null @@ -1,22 +0,0 @@ -_base_ = 'tridentnet_r50_caffe_1x_coco.py' - -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] - -data = dict(train=dict(pipeline=train_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco.py b/cv/detection/co-detr/pytorch/configs/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco.py deleted file mode 100644 index 0f402826d3a22714078d8c50ed6bd8959018e4e7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = 'tridentnet_r50_caffe_mstrain_1x_coco.py' - -lr_config = dict(step=[28, 34]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/configs/vfnet/README.md b/cv/detection/co-detr/pytorch/configs/vfnet/README.md deleted file mode 100644 index a492bece506c8bb2f95cf07158721a04208d7729..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# VarifocalNet - -> [VarifocalNet: An IoU-aware Dense Object Detector](https://arxiv.org/abs/2008.13367) - - - -## Abstract - -Accurately ranking the vast number of candidate detections is crucial for dense object detectors to achieve high performance. Prior work uses the classification score or a combination of classification and predicted localization scores to rank candidates. However, neither option results in a reliable ranking, thus degrading detection performance. 
In this paper, we propose to learn an IoU-aware Classification Score (IACS) as a joint representation of object presence confidence and localization accuracy. We show that dense object detectors can achieve a more accurate ranking of candidate detections based on the IACS. We design a new loss function, named Varifocal Loss, to train a dense object detector to predict the IACS, and propose a new star-shaped bounding box feature representation for IACS prediction and bounding box refinement. Combining these two new components and a bounding box refinement branch, we build an IoU-aware dense object detector based on the FCOS+ATSS architecture, which we call VarifocalNet or VFNet for short. Extensive experiments on MS COCO show that our VFNet consistently surpasses the strong baseline by ∼2.0 AP with different backbones. Our best model VFNet-X-1200 with Res2Net-101-DCN achieves a single-model single-scale AP of 55.1 on COCO test-dev, which is state-of-the-art among various object detectors. - -
- -
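To make the loss concrete: the sketch below restates the Varifocal Loss from the abstract in a few lines of PyTorch. It is an illustrative re-statement, not the `VarifocalLoss` module used by the configs; `pred` holds raw classification logits, `target` holds the IoU-aware targets (the IoU with the matched ground-truth box for positives, 0 for negatives), and `alpha=0.75`, `gamma=2.0` follow the config below.

```python
import torch
import torch.nn.functional as F


def varifocal_loss_sketch(pred, target, alpha=0.75, gamma=2.0):
    """Illustrative Varifocal Loss: positives are weighted by their IoU target,
    negatives are down-weighted by alpha * p^gamma (asymmetric focal weighting)."""
    p = pred.sigmoid()
    pos = (target > 0).float()
    weight = target * pos + alpha * p.pow(gamma) * (1.0 - pos)
    bce = F.binary_cross_entropy_with_logits(pred, target, reduction='none')
    return (weight * bce).sum()
```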
- -## Introduction - -**VarifocalNet (VFNet)** learns to predict the IoU-aware classification score which mixes the object presence confidence and localization accuracy together as the detection score for a bounding box. The learning is supervised by the proposed Varifocal Loss (VFL), based on a new star-shaped bounding box feature representation (the features at nine yellow sampling points). Given the new representation, the object localization accuracy is further improved by refining the initially regressed bounding box. The full paper is available at: [https://arxiv.org/abs/2008.13367](https://arxiv.org/abs/2008.13367). - -## Results and Models - -| Backbone | Style | DCN | MS train | Lr schd | Inf time (fps) | box AP (val) | box AP (test-dev) | Config | Download | -| :---------: | :-----: | :-: | :------: | :-----: | :------------: | :----------: | :---------------: | :--------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50 | pytorch | N | N | 1x | - | 41.6 | 41.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco.json) | -| R-50 | pytorch | N | Y | 2x | - | 44.5 | 44.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco.json) | -| R-50 | pytorch | Y | Y | 2x | - | 47.8 | 48.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.json) | -| R-101 | pytorch | N | N | 1x | - | 43.0 | 43.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco.json) | -| R-101 | pytorch | N | Y | 2x | - | 46.2 | 46.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco.json) | -| R-101 | pytorch | Y | Y | 2x | - | 49.0 | 49.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.json) | -| X-101-32x4d | pytorch | Y | Y | 2x | - | 49.7 | 50.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json) | -| X-101-64x4d | pytorch | Y | Y | 2x | - | 50.4 | 50.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json) | - -**Notes:** - -- The MS-train scale range is 1333x\[480:960\] (`range` mode) and the inference scale keeps 1333x800. -- DCN means using `DCNv2` in both backbone and head. -- Inference time will be updated soon. 
-- More results and pre-trained models can be found in [VarifocalNet-Github](https://github.com/hyz-xmaster/VarifocalNet) - -## Citation - -```latex -@article{zhang2020varifocalnet, - title={VarifocalNet: An IoU-aware Dense Object Detector}, - author={Zhang, Haoyang and Wang, Ying and Dayoub, Feras and S{\"u}nderhauf, Niko}, - journal={arXiv preprint arXiv:2008.13367}, - year={2020} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/vfnet/metafile.yml b/cv/detection/co-detr/pytorch/configs/vfnet/metafile.yml deleted file mode 100644 index bcbe576fa6f229d04ebcf15e391782bedbbc8310..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/metafile.yml +++ /dev/null @@ -1,116 +0,0 @@ -Collections: - - Name: VFNet - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - ResNet - - Varifocal Loss - Paper: - URL: https://arxiv.org/abs/2008.13367 - Title: 'VarifocalNet: An IoU-aware Dense Object Detector' - README: configs/vfnet/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.6.0/mmdet/models/detectors/vfnet.py#L6 - Version: v2.6.0 - -Models: - - Name: vfnet_r50_fpn_1x_coco - In Collection: VFNet - Config: configs/vfnet/vfnet_r50_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 41.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth - - - Name: vfnet_r50_fpn_mstrain_2x_coco - In Collection: VFNet - Config: configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 44.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth - - - Name: vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco - In Collection: VFNet - Config: configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 48.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth - - - Name: vfnet_r101_fpn_1x_coco - In Collection: VFNet - Config: configs/vfnet/vfnet_r101_fpn_1x_coco.py - Metadata: - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 43.6 - Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth - - - Name: vfnet_r101_fpn_mstrain_2x_coco - In Collection: VFNet - Config: configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 46.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth - - - Name: vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco - In Collection: VFNet - Config: configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 49.2 - Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth - - - Name: 
vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco - In Collection: VFNet - Config: configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 50.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth - - - Name: vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco - In Collection: VFNet - Config: configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py - Metadata: - Epochs: 24 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 50.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth diff --git a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r101_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r101_fpn_1x_coco.py deleted file mode 100644 index b296a07959e43517d792f36f356404a232fb0dc3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r101_fpn_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './vfnet_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r101_fpn_2x_coco.py b/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r101_fpn_2x_coco.py deleted file mode 100644 index 27962f3a88d850edb38360b6988584f7438691b7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r101_fpn_2x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = './vfnet_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py deleted file mode 100644 index e438c247cf4c9c5b2b5aabffda535bec61d4a21e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py +++ /dev/null @@ -1,15 +0,0 @@ -_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' -model = dict( - backbone=dict( - type='ResNet', - depth=101, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True), - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py deleted file mode 100644 index eae69a01e801ae0422cdb8f8e58fd02a1720fee9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git 
a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py deleted file mode 100644 index 815a36e079111ee605c46d27bda9962dabdd6cdd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' -model = dict( - backbone=dict( - type='Res2Net', - depth=101, - scales=4, - base_width=26, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True), - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r2_101_fpn_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r2_101_fpn_mstrain_2x_coco.py deleted file mode 100644 index 58022e0eeac5fba20b2360e0578aa9b9c781f287..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r2_101_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' -model = dict( - backbone=dict( - type='Res2Net', - depth=101, - scales=4, - base_width=26, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://res2net101_v1d_26w_4s'))) diff --git a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r50_fpn_1x_coco.py b/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r50_fpn_1x_coco.py deleted file mode 100644 index 7de64296cd78ce12a1d3df281bdffb8c393543be..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r50_fpn_1x_coco.py +++ /dev/null @@ -1,107 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -# model settings -model = dict( - type='VFNet', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', # use P5 - num_outs=5, - relu_before_extra_convs=True), - bbox_head=dict( - type='VFNetHead', - num_classes=80, - in_channels=256, - stacked_convs=3, - feat_channels=256, - strides=[8, 16, 32, 64, 128], - center_sampling=False, - dcn_on_last_conv=False, - use_atss=True, - use_vfl=True, - loss_cls=dict( - type='VarifocalLoss', - use_sigmoid=True, - alpha=0.75, - gamma=2.0, - iou_weighted=True, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=1.5), - loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)), - # training and testing settings - train_cfg=dict( - assigner=dict(type='ATSSAssigner', topk=9), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) - -# data setting -dataset_type = 'CocoDataset' -data_root = 'data/coco/' 
-img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -optimizer = dict( - lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.1, - step=[8, 11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py deleted file mode 100644 index 24d2093b8b537a365c3e07261921b120b422918c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True)), - bbox_head=dict(dcn_on_last_conv=True)) diff --git a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py deleted file mode 100644 index 6078bb98cacc04da23dcb7a661047902e0adefb3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = './vfnet_r50_fpn_1x_coco.py' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', - img_scale=[(1333, 480), (1333, 960)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# learning policy -lr_config = dict(step=[16, 22]) -runner = 
dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py deleted file mode 100644 index 7efa0517eb72395a2ff24992318fcb4667fc033d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_x101_32x4d_fpn_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_x101_32x4d_fpn_mstrain_2x_coco.py deleted file mode 100644 index 49a4312107d9ff045bc626802fa23cf01f54d10e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_x101_32x4d_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,15 +0,0 @@ -_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py deleted file mode 100644 index 7e1ee429f3dbaa895018a1b280ff312d01965e03..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_x101_64x4d_fpn_mstrain_2x_coco.py b/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_x101_64x4d_fpn_mstrain_2x_coco.py deleted file mode 100644 index e51064e7ec003604edb99c2759b3f5fe4b95423e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/vfnet/vfnet_x101_64x4d_fpn_mstrain_2x_coco.py +++ /dev/null @@ -1,15 +0,0 @@ -_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py' -model = dict( - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'))) diff --git 
a/cv/detection/co-detr/pytorch/configs/wider_face/README.md b/cv/detection/co-detr/pytorch/configs/wider_face/README.md deleted file mode 100644 index 1904506c64a893f2bfd3881c7e95bd7100fcc6f4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/wider_face/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# WIDER FACE - -> [WIDER FACE: A Face Detection Benchmark](https://arxiv.org/abs/1511.06523) - - - -## Abstract - -Face detection is one of the most studied topics in the computer vision community. Much of this progress has been made possible by the availability of face detection benchmark datasets. We show that there is a gap between current face detection performance and real-world requirements. To facilitate future face detection research, we introduce the WIDER FACE dataset, which is 10 times larger than existing datasets. The dataset contains rich annotations, including occlusions, poses, event categories, and face bounding boxes. Faces in the proposed dataset are extremely challenging due to large variations in scale, pose and occlusion, as shown in Fig. 1. Furthermore, we show that the WIDER FACE dataset is an effective training source for face detection. We benchmark several representative detection systems, providing an overview of state-of-the-art performance, and propose a solution to deal with large scale variation. Finally, we discuss common failure cases that are worth further investigation. - -
- -
- -## Introduction - -To use the WIDER Face dataset you need to download it -and extract to the `data/WIDERFace` folder. Annotation in the VOC format -can be found in this [repo](https://github.com/sovrasov/wider-face-pascal-voc-annotations.git). -You should move the annotation files from `WIDER_train_annotations` and `WIDER_val_annotations` folders -to the `Annotation` folders inside the corresponding directories `WIDER_train` and `WIDER_val`. -Also annotation lists `val.txt` and `train.txt` should be copied to `data/WIDERFace` from `WIDER_train_annotations` and `WIDER_val_annotations`. -The directory should be like this: - -``` -mmdetection -├── mmdet -├── tools -├── configs -├── data -│ ├── WIDERFace -│ │ ├── WIDER_train -│ | │ ├──0--Parade -│ | │ ├── ... -│ | │ ├── Annotations -│ │ ├── WIDER_val -│ | │ ├──0--Parade -│ | │ ├── ... -│ | │ ├── Annotations -│ │ ├── val.txt -│ │ ├── train.txt - -``` - -After that you can train the SSD300 on WIDER by launching training with the `ssd300_wider_face.py` config or -create your own config based on the presented one. - -## Citation - -```latex -@inproceedings{yang2016wider, - Author = {Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou}, - Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - Title = {WIDER FACE: A Face Detection Benchmark}, - Year = {2016} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/wider_face/ssd300_wider_face.py b/cv/detection/co-detr/pytorch/configs/wider_face/ssd300_wider_face.py deleted file mode 100644 index 5a3eb38df3dc75af176cc6972af88e76124ba4dc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/wider_face/ssd300_wider_face.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = [ - '../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py', - '../_base_/default_runtime.py' -] -model = dict(bbox_head=dict(num_classes=1)) -# optimizer -optimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=0.001, - step=[16, 20]) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=24) -log_config = dict(interval=1) diff --git a/cv/detection/co-detr/pytorch/configs/yolact/README.md b/cv/detection/co-detr/pytorch/configs/yolact/README.md deleted file mode 100644 index 9eb51b4ab990f946bfc41c198dd41c1d572e00c1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolact/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# YOLACT - -> [YOLACT: Real-time Instance Segmentation](https://arxiv.org/abs/1904.02689) - - - -## Abstract - -We present a simple, fully-convolutional model for real-time instance segmentation that achieves 29.8 mAP on MS COCO at 33.5 fps evaluated on a single Titan Xp, which is significantly faster than any previous competitive approach. Moreover, we obtain this result after training on only one GPU. We accomplish this by breaking instance segmentation into two parallel subtasks: (1) generating a set of prototype masks and (2) predicting per-instance mask coefficients. Then we produce instance masks by linearly combining the prototypes with the mask coefficients. We find that because this process doesn't depend on repooling, this approach produces very high-quality masks and exhibits temporal stability for free. 
Furthermore, we analyze the emergent behavior of our prototypes and show they learn to localize instances on their own in a translation variant manner, despite being fully-convolutional. Finally, we also propose Fast NMS, a drop-in 12 ms faster replacement for standard NMS that only has a marginal performance penalty. - -
- -
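As a rough illustration of the prototype/coefficient idea from the abstract (a toy sketch, not the repo's `YOLACTProtonet` code): each detected instance's mask is the sigmoid of a linear combination of the shared prototype masks, weighted by that instance's predicted coefficients. `num_protos=32` in the config below corresponds to `k=32` prototypes here.

```python
import torch


def assemble_masks(prototypes, coefficients):
    """prototypes: (H, W, k) shared prototype masks; coefficients: (n, k) per-instance weights."""
    masks = torch.sigmoid(prototypes @ coefficients.t())  # (H, W, n)
    return masks.permute(2, 0, 1)                         # (n, H, W)


protos = torch.randn(138, 138, 32)  # k=32 prototypes; spatial size is illustrative
coeffs = torch.randn(10, 32)        # coefficients for 10 detections
print(assemble_masks(protos, coeffs).shape)  # torch.Size([10, 138, 138])
```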
- -## Introduction - -A simple, fully convolutional model for real-time instance segmentation. This is the code for our paper: - -- [YOLACT: Real-time Instance Segmentation](https://arxiv.org/abs/1904.02689) - - - -For a real-time demo, check out our ICCV video: -[![IMAGE ALT TEXT HERE](https://img.youtube.com/vi/0pMfmo8qfpQ/0.jpg)](https://www.youtube.com/watch?v=0pMfmo8qfpQ) - -## Evaluation - -Here are our YOLACT models along with their FPS on a Titan Xp and mAP on COCO's `val`: - -| Image Size | GPU x BS | Backbone | \*FPS | mAP | Weights | Configs | Download | -| :--------: | :------: | :-----------: | :---: | :--: | :-----: | :----------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------: | -| 550 | 1x8 | Resnet50-FPN | 42.5 | 29.0 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r50_1x8_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth) | -| 550 | 8x8 | Resnet50-FPN | 42.5 | 28.4 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r50_8x8_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco/yolact_r50_8x8_coco_20200908-ca34f5db.pth) | -| 550 | 1x8 | Resnet101-FPN | 33.5 | 30.4 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r101_1x8_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco/yolact_r101_1x8_coco_20200908-4cbe9101.pth) | - -\*Note: The FPS is evaluated by the [original implementation](https://github.com/dbolya/yolact). When calculating FPS, only the model inference time is taken into account. Data loading and post-processing operations such as converting masks to RLE code, generating COCO JSON results, image rendering are not included. - -## Training - -All the aforementioned models are trained with a single GPU. It typically takes ~12GB VRAM when using resnet-101 as the backbone. If you want to try multiple GPUs training, you may have to modify the configuration files accordingly, such as adjusting the training schedule and freezing batch norm. - -```Shell -# Trains using the resnet-101 backbone with a batch size of 8 on a single GPU. -./tools/dist_train.sh configs/yolact/yolact_r101.py 1 -``` - -## Testing - -Please refer to [mmdetection/docs/getting_started.md](https://mmdetection.readthedocs.io/en/latest/1_exist_data_model.html#test-existing-models). 
- -## Citation - -If you use YOLACT or this code base in your work, please cite - -```latex -@inproceedings{yolact-iccv2019, - author = {Daniel Bolya and Chong Zhou and Fanyi Xiao and Yong Jae Lee}, - title = {YOLACT: {Real-time} Instance Segmentation}, - booktitle = {ICCV}, - year = {2019}, -} -``` - - diff --git a/cv/detection/co-detr/pytorch/configs/yolact/metafile.yml b/cv/detection/co-detr/pytorch/configs/yolact/metafile.yml deleted file mode 100644 index e7019ae62ce981eaf7c4e4704ea223f48b464ead..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolact/metafile.yml +++ /dev/null @@ -1,78 +0,0 @@ -Collections: - - Name: YOLACT - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - FPN - - ResNet - Paper: - URL: https://arxiv.org/abs/1904.02689 - Title: 'YOLACT: Real-time Instance Segmentation' - README: configs/yolact/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.5.0/mmdet/models/detectors/yolact.py#L9 - Version: v2.5.0 - -Models: - - Name: yolact_r50_1x8_coco - In Collection: YOLACT - Config: configs/yolact/yolact_r50_1x8_coco.py - Metadata: - Training Resources: 1x V100 GPU - Batch Size: 8 - inference time (ms/im): - - value: 23.53 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (550, 550) - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 29.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth - - - Name: yolact_r50_8x8_coco - In Collection: YOLACT - Config: configs/yolact/yolact_r50_8x8_coco.py - Metadata: - Batch Size: 64 - inference time (ms/im): - - value: 23.53 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (550, 550) - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 28.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco/yolact_r50_8x8_coco_20200908-ca34f5db.pth - - - Name: yolact_r101_1x8_coco - In Collection: YOLACT - Config: configs/yolact/yolact_r101_1x8_coco.py - Metadata: - Training Resources: 1x V100 GPU - Batch Size: 8 - inference time (ms/im): - - value: 29.85 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (550, 550) - Results: - - Task: Instance Segmentation - Dataset: COCO - Metrics: - mask AP: 30.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco/yolact_r101_1x8_coco_20200908-4cbe9101.pth diff --git a/cv/detection/co-detr/pytorch/configs/yolact/yolact_r101_1x8_coco.py b/cv/detection/co-detr/pytorch/configs/yolact/yolact_r101_1x8_coco.py deleted file mode 100644 index 532631dd5f8483dfb61488e4f445f1f50a71fbde..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolact/yolact_r101_1x8_coco.py +++ /dev/null @@ -1,7 +0,0 @@ -_base_ = './yolact_r50_1x8_coco.py' - -model = dict( - backbone=dict( - depth=101, - init_cfg=dict(type='Pretrained', - checkpoint='torchvision://resnet101'))) diff --git a/cv/detection/co-detr/pytorch/configs/yolact/yolact_r50_1x8_coco.py b/cv/detection/co-detr/pytorch/configs/yolact/yolact_r50_1x8_coco.py deleted file mode 100644 index dbced5a1a69a4b2030a5539682b3821002957e7a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolact/yolact_r50_1x8_coco.py +++ /dev/null @@ -1,165 +0,0 @@ -_base_ = '../_base_/default_runtime.py' 
- -# model settings -img_size = 550 -model = dict( - type='YOLACT', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, # do not freeze stem - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=False, # update the statistics of bn - zero_init_residual=False, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_input', - num_outs=5, - upsample_cfg=dict(mode='bilinear')), - bbox_head=dict( - type='YOLACTHead', - num_classes=80, - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=3, - scales_per_octave=1, - base_sizes=[8, 16, 32, 64, 128], - ratios=[0.5, 1.0, 2.0], - strides=[550.0 / x for x in [69, 35, 18, 9, 5]], - centers=[(550 * 0.5 / x, 550 * 0.5 / x) - for x in [69, 35, 18, 9, 5]]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - reduction='none', - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5), - num_head_convs=1, - num_protos=32, - use_ohem=True), - mask_head=dict( - type='YOLACTProtonet', - in_channels=256, - num_protos=32, - num_classes=80, - max_masks_to_train=100, - loss_mask_weight=6.125), - segm_head=dict( - type='YOLACTSegmHead', - num_classes=80, - in_channels=256, - loss_segm=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0., - ignore_iof_thr=-1, - gt_max_assign_all=False), - # smoothl1_beta=1., - allowed_border=-1, - pos_weight=-1, - neg_pos_ratio=3, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - iou_thr=0.5, - top_k=200, - max_per_img=100)) -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.68, 116.78, 103.94], std=[58.40, 57.12, 57.38], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='FilterAnnotations', min_gt_bbox_wh=(4.0, 4.0)), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 4)), - dict( - type='MinIoURandomCrop', - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(img_size, img_size), keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(img_size, img_size), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - 
pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.1, - step=[20, 42, 49, 52]) -runner = dict(type='EpochBasedRunner', max_epochs=55) -cudnn_benchmark = True -evaluation = dict(metric=['bbox', 'segm']) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (1 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=8) diff --git a/cv/detection/co-detr/pytorch/configs/yolact/yolact_r50_8x8_coco.py b/cv/detection/co-detr/pytorch/configs/yolact/yolact_r50_8x8_coco.py deleted file mode 100644 index 41003ab42bfb6872a2511b636461fbb325326dfb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolact/yolact_r50_8x8_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = 'yolact_r50_1x8_coco.py' - -optimizer = dict(type='SGD', lr=8e-3, momentum=0.9, weight_decay=5e-4) -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=0.1, - step=[20, 42, 49, 52]) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/yolo/README.md b/cv/detection/co-detr/pytorch/configs/yolo/README.md deleted file mode 100644 index c9eb8a61143668cb63fe8d269490e57cf90ea836..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolo/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# YOLOv3 - -> [YOLOv3: An Incremental Improvement](https://arxiv.org/abs/1804.02767) - - - -## Abstract - -We present some updates to YOLO! We made a bunch of little design changes to make it better. We also trained this new network that's pretty swell. It's a little bigger than last time but more accurate. It's still fast though, don't worry. At 320x320 YOLOv3 runs in 22 ms at 28.2 mAP, as accurate as SSD but three times faster. When we look at the old .5 IOU mAP detection metric YOLOv3 is quite good. It achieves 57.9 mAP@50 in 51 ms on a Titan X, compared to 57.5 mAP@50 in 198 ms by RetinaNet, similar performance but 3.8x faster. - -
- -
- -## Results and Models - -| Backbone | Scale | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :--------: | :---: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| DarkNet-53 | 320 | 273e | 2.7 | 63.9 | 27.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_d53_320_273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-421362b6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-20200819_172101.log.json) | -| DarkNet-53 | 416 | 273e | 3.8 | 61.2 | 30.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-416_273e_coco/yolov3_d53_mstrain-416_273e_coco-2b60fcd9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-416_273e_coco/yolov3_d53_mstrain-416_273e_coco-20200819_173424.log.json) | -| DarkNet-53 | 608 | 273e | 7.4 | 48.1 | 33.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020-a2c3acb8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020.log.json) | - -## Mixed Precision Training - -We also train YOLOv3 with mixed precision training. 
- -| Backbone | Scale | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :--------: | :---: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| DarkNet-53 | 608 | 273e | 4.7 | 48.1 | 33.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_fp16_mstrain-608_273e_coco/yolov3_d53_fp16_mstrain-608_273e_coco_20210517_213542-4bc34944.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_fp16_mstrain-608_273e_coco/yolov3_d53_fp16_mstrain-608_273e_coco_20210517_213542.log.json) | - -## Lightweight models - -| Backbone | Scale | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -| :---------: | :---: | :-----: | :------: | :------------: | :----: | :----------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| MobileNetV2 | 416 | 300e | 5.3 | | 23.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823.log.json) | -| MobileNetV2 | 320 | 300e | 3.2 | | 22.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_320_300e_coco/yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_320_300e_coco/yolov3_mobilenetv2_320_300e_coco_20210719_215349.log.json) | - -Notice: We reduce the number of channels to 96 in both head and neck. It can reduce the flops and parameters, which makes these models more suitable for edge devices. - -## Credit - -This implementation originates from the project of Haoyu Wu(@wuhy08) at Western Digital. 
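For reference, the mixed-precision entry above comes purely from config inheritance; the sketch below mirrors what `yolov3_d53_fp16_mstrain-608_273e_coco.py` (included later in this diff) adds on top of the 608 baseline.

```python
# Minimal FP16 config: inherit the 608 multi-scale schedule and enable mixed
# precision with dynamic loss scaling.
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
fp16 = dict(loss_scale='dynamic')
```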
- -## Citation - -```latex -@misc{redmon2018yolov3, - title={YOLOv3: An Incremental Improvement}, - author={Joseph Redmon and Ali Farhadi}, - year={2018}, - eprint={1804.02767}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/yolo/metafile.yml b/cv/detection/co-detr/pytorch/configs/yolo/metafile.yml deleted file mode 100644 index 22c35da550e2bfbdd645f3fee40428137a1d8534..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolo/metafile.yml +++ /dev/null @@ -1,124 +0,0 @@ -Collections: - - Name: YOLOv3 - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - DarkNet - Paper: - URL: https://arxiv.org/abs/1804.02767 - Title: 'YOLOv3: An Incremental Improvement' - README: configs/yolo/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.4.0/mmdet/models/detectors/yolo.py#L8 - Version: v2.4.0 - -Models: - - Name: yolov3_d53_320_273e_coco - In Collection: YOLOv3 - Config: configs/yolo/yolov3_d53_320_273e_coco.py - Metadata: - Training Memory (GB): 2.7 - inference time (ms/im): - - value: 15.65 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (320, 320) - Epochs: 273 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 27.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-421362b6.pth - - - Name: yolov3_d53_mstrain-416_273e_coco - In Collection: YOLOv3 - Config: configs/yolo/yolov3_d53_mstrain-416_273e_coco.py - Metadata: - Training Memory (GB): 3.8 - inference time (ms/im): - - value: 16.34 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (416, 416) - Epochs: 273 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 30.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-416_273e_coco/yolov3_d53_mstrain-416_273e_coco-2b60fcd9.pth - - - Name: yolov3_d53_mstrain-608_273e_coco - In Collection: YOLOv3 - Config: configs/yolo/yolov3_d53_mstrain-608_273e_coco.py - Metadata: - Training Memory (GB): 7.4 - inference time (ms/im): - - value: 20.79 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP32 - resolution: (608, 608) - Epochs: 273 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 33.7 - Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020-a2c3acb8.pth - - - Name: yolov3_d53_fp16_mstrain-608_273e_coco - In Collection: YOLOv3 - Config: configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py - Metadata: - Training Memory (GB): 4.7 - inference time (ms/im): - - value: 20.79 - hardware: V100 - backend: PyTorch - batch size: 1 - mode: FP16 - resolution: (608, 608) - Epochs: 273 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 33.8 - Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_fp16_mstrain-608_273e_coco/yolov3_d53_fp16_mstrain-608_273e_coco_20210517_213542-4bc34944.pth - - - Name: yolov3_mobilenetv2_320_300e_coco - In Collection: YOLOv3 - Config: configs/yolo/yolov3_mobilenetv2_320_300e_coco.py - Metadata: - Training Memory (GB): 3.2 - Epochs: 300 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 22.2 - Weights: 
https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_320_300e_coco/yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth - - - Name: yolov3_mobilenetv2_mstrain-416_300e_coco - In Collection: YOLOv3 - Config: configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py - Metadata: - Training Memory (GB): 5.3 - Epochs: 300 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 23.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth diff --git a/cv/detection/co-detr/pytorch/configs/yolo/yolov3_d53_320_273e_coco.py b/cv/detection/co-detr/pytorch/configs/yolo/yolov3_d53_320_273e_coco.py deleted file mode 100644 index d4785e3133c91a8d11b7a6ac6f7106a9310af65e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolo/yolov3_d53_320_273e_coco.py +++ /dev/null @@ -1,42 +0,0 @@ -_base_ = './yolov3_d53_mstrain-608_273e_coco.py' -# dataset settings -img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 2)), - dict( - type='MinIoURandomCrop', - min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(320, 320), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(320, 320), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py b/cv/detection/co-detr/pytorch/configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py deleted file mode 100644 index 4ef2422dada278c1e28b48d333437c7994832eba..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './yolov3_d53_mstrain-608_273e_coco.py' -# fp16 settings -fp16 = dict(loss_scale='dynamic') diff --git a/cv/detection/co-detr/pytorch/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py b/cv/detection/co-detr/pytorch/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py deleted file mode 100644 index 94325c5a18a0b78788c1bdcccb68c179297bc084..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py +++ /dev/null @@ -1,42 +0,0 @@ -_base_ = './yolov3_d53_mstrain-608_273e_coco.py' -# dataset settings -img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 2)), - dict( - 
type='MinIoURandomCrop', - min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=[(320, 320), (416, 416)], keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(416, 416), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py b/cv/detection/co-detr/pytorch/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py deleted file mode 100644 index 43aa2f03f2a8cbb2090694e60e7af4daf39d3950..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py +++ /dev/null @@ -1,132 +0,0 @@ -_base_ = '../_base_/default_runtime.py' -# model settings -model = dict( - type='YOLOV3', - backbone=dict( - type='Darknet', - depth=53, - out_indices=(3, 4, 5), - init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://darknet53')), - neck=dict( - type='YOLOV3Neck', - num_scales=3, - in_channels=[1024, 512, 256], - out_channels=[512, 256, 128]), - bbox_head=dict( - type='YOLOV3Head', - num_classes=80, - in_channels=[512, 256, 128], - out_channels=[1024, 512, 256], - anchor_generator=dict( - type='YOLOAnchorGenerator', - base_sizes=[[(116, 90), (156, 198), (373, 326)], - [(30, 61), (62, 45), (59, 119)], - [(10, 13), (16, 30), (33, 23)]], - strides=[32, 16, 8]), - bbox_coder=dict(type='YOLOBBoxCoder'), - featmap_strides=[32, 16, 8], - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0, - reduction='sum'), - loss_conf=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0, - reduction='sum'), - loss_xy=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=2.0, - reduction='sum'), - loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='GridAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0)), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - conf_thr=0.005, - nms=dict(type='nms', iou_threshold=0.45), - max_per_img=100)) -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 2)), - dict( - type='MinIoURandomCrop', - min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=[(320, 320), (608, 608)], keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 
'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(608, 608), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=2000, # same as burn-in in darknet - warmup_ratio=0.1, - step=[218, 246]) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=273) -evaluation = dict(interval=1, metric=['bbox']) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py b/cv/detection/co-detr/pytorch/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py deleted file mode 100644 index 477d2530ac255e9fff4deabc650e26b326f14af4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py +++ /dev/null @@ -1,53 +0,0 @@ -_base_ = ['./yolov3_mobilenetv2_mstrain-416_300e_coco.py'] - -# yapf:disable -model = dict( - bbox_head=dict( - anchor_generator=dict( - base_sizes=[[(220, 125), (128, 222), (264, 266)], - [(35, 87), (102, 96), (60, 170)], - [(10, 15), (24, 36), (72, 42)]]))) -# yapf:enable - -# dataset settings -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 2)), - dict( - type='MinIoURandomCrop', - min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(320, 320), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(320, 320), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - train=dict(dataset=dict(pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git 
a/cv/detection/co-detr/pytorch/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py b/cv/detection/co-detr/pytorch/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py deleted file mode 100644 index 18e0622e73a1ddb9e65be53ea36595673109a72d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py +++ /dev/null @@ -1,142 +0,0 @@ -_base_ = '../_base_/default_runtime.py' -# model settings -model = dict( - type='YOLOV3', - backbone=dict( - type='MobileNetV2', - out_indices=(2, 4, 6), - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://mmdet/mobilenet_v2')), - neck=dict( - type='YOLOV3Neck', - num_scales=3, - in_channels=[320, 96, 32], - out_channels=[96, 96, 96]), - bbox_head=dict( - type='YOLOV3Head', - num_classes=80, - in_channels=[96, 96, 96], - out_channels=[96, 96, 96], - anchor_generator=dict( - type='YOLOAnchorGenerator', - base_sizes=[[(116, 90), (156, 198), (373, 326)], - [(30, 61), (62, 45), (59, 119)], - [(10, 13), (16, 30), (33, 23)]], - strides=[32, 16, 8]), - bbox_coder=dict(type='YOLOBBoxCoder'), - featmap_strides=[32, 16, 8], - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0, - reduction='sum'), - loss_conf=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0, - reduction='sum'), - loss_xy=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=2.0, - reduction='sum'), - loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='GridAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0)), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - conf_thr=0.005, - nms=dict(type='nms', iou_threshold=0.45), - max_per_img=100)) -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 2)), - dict( - type='MinIoURandomCrop', - min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), - min_crop_size=0.3), - dict( - type='Resize', - img_scale=[(320, 320), (416, 416)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(416, 416), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - samples_per_gpu=24, - workers_per_gpu=4, - train=dict( - type='RepeatDataset', # use RepeatDataset to speed up training - times=10, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', 
- pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=4000, - warmup_ratio=0.0001, - step=[24, 28]) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=30) -evaluation = dict(interval=1, metric=['bbox']) -find_unused_parameters = True - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (24 samples per GPU) -auto_scale_lr = dict(base_batch_size=192) diff --git a/cv/detection/co-detr/pytorch/configs/yolof/README.md b/cv/detection/co-detr/pytorch/configs/yolof/README.md deleted file mode 100644 index e88da02255b2d404b87e3f9d2260e798e0c1cbee..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolof/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# YOLOF - -> [You Only Look One-level Feature](https://arxiv.org/abs/2103.09460) - - - -## Abstract - -This paper revisits feature pyramids networks (FPN) for one-stage detectors and points out that the success of FPN is due to its divide-and-conquer solution to the optimization problem in object detection rather than multi-scale feature fusion. From the perspective of optimization, we introduce an alternative way to address the problem instead of adopting the complex feature pyramids - {\\em utilizing only one-level feature for detection}. Based on the simple and efficient solution, we present You Only Look One-level Feature (YOLOF). In our method, two key components, Dilated Encoder and Uniform Matching, are proposed and bring considerable improvements. Extensive experiments on the COCO benchmark prove the effectiveness of the proposed model. Our YOLOF achieves comparable results with its feature pyramids counterpart RetinaNet while being 2.5× faster. Without transformer layers, YOLOF can match the performance of DETR in a single-level feature manner with 7× less training epochs. With an image size of 608×608, YOLOF achieves 44.3 mAP running at 60 fps on 2080Ti, which is 13% faster than YOLOv4. - -
- -
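As a rough illustration of the single-level "Dilated Encoder" described in the abstract above, the sketch below projects a C5 feature map down and refines it with residual bottleneck blocks of increasing dilation. The channel widths (2048 → 512, mid 128) and dilation rates (2, 4, 6, 8) are taken from the `yolof_r50_c5_8x8_1x_coco.py` config in this folder; the layer ordering and normalization choices are assumptions, and this is not mmdet's actual `DilatedEncoder` module.

```python
import torch
import torch.nn as nn


class DilatedBottleneck(nn.Module):
    """1x1 reduce -> 3x3 dilated -> 1x1 expand, with a residual connection."""

    def __init__(self, channels, mid_channels, dilation):
        super().__init__()
        self.block = nn.Sequential(
            nn.Conv2d(channels, mid_channels, 1),
            nn.BatchNorm2d(mid_channels), nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, mid_channels, 3,
                      padding=dilation, dilation=dilation),
            nn.BatchNorm2d(mid_channels), nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, channels, 1),
            nn.BatchNorm2d(channels), nn.ReLU(inplace=True))

    def forward(self, x):
        return x + self.block(x)


class SimpleDilatedEncoder(nn.Module):
    """Single-level neck: project C5, then refine with growing dilations."""

    def __init__(self, in_channels=2048, out_channels=512,
                 mid_channels=128, dilations=(2, 4, 6, 8)):
        super().__init__()
        self.lateral = nn.Conv2d(in_channels, out_channels, 1)
        self.smooth = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.blocks = nn.Sequential(*[
            DilatedBottleneck(out_channels, mid_channels, d) for d in dilations])

    def forward(self, c5):
        return self.blocks(self.smooth(self.lateral(c5)))


# A ResNet-50 C5 map for a 640x640 input is roughly 20x20 at stride 32.
c5 = torch.randn(1, 2048, 20, 20)
print(SimpleDilatedEncoder()(c5).shape)  # -> torch.Size([1, 512, 20, 20])
```

The growing dilation rates enlarge the receptive field on the single feature level, which is the paper's substitute for multi-scale FPN fusion.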
- -## Results and Models - -| Backbone | Style | Epoch | Lr schd | Mem (GB) | box AP | Config | Download | -| :------: | :---: | :---: | :-----: | :------: | :----: | :-------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| R-50-C5 | caffe | Y | 1x | 8.3 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolof/yolof_r50_c5_8x8_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427.log.json) | - -**Note**: - -1. We find that the performance is unstable and may fluctuate by about 0.3 mAP. mAP 37.4 ~ 37.7 is acceptable in YOLOF_R_50_C5_1x. Such fluctuation can also be found in the [original implementation](https://github.com/chensnathan/YOLOF). -2. In addition to instability issues, sometimes there are large loss fluctuations and NAN, so there may still be problems with this project, which will be improved subsequently. - -## Citation - -```latex -@inproceedings{chen2021you, - title={You Only Look One-level Feature}, - author={Chen, Qiang and Wang, Yingming and Yang, Tong and Zhang, Xiangyu and Cheng, Jian and Sun, Jian}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - year={2021} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/yolof/metafile.yml b/cv/detection/co-detr/pytorch/configs/yolof/metafile.yml deleted file mode 100644 index 9436fee2d05328f5d514c09f0b2d9c42121c550b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolof/metafile.yml +++ /dev/null @@ -1,32 +0,0 @@ -Collections: - - Name: YOLOF - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Momentum - - Weight Decay - Training Resources: 8x V100 GPUs - Architecture: - - Dilated Encoder - - ResNet - Paper: - URL: https://arxiv.org/abs/2103.09460 - Title: 'You Only Look One-level Feature' - README: configs/yolof/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/yolof.py#L6 - Version: v2.12.0 - -Models: - - Name: yolof_r50_c5_8x8_1x_coco - In Collection: YOLOF - Config: configs/yolof/yolof_r50_c5_8x8_1x_coco.py - Metadata: - Training Memory (GB): 8.3 - Epochs: 12 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 37.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth diff --git a/cv/detection/co-detr/pytorch/configs/yolof/yolof_r50_c5_8x8_1x_coco.py b/cv/detection/co-detr/pytorch/configs/yolof/yolof_r50_c5_8x8_1x_coco.py deleted file mode 100644 index d0b9649cf624e0ecea88f0f7a49b4f8925d2e074..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolof/yolof_r50_c5_8x8_1x_coco.py +++ /dev/null @@ -1,111 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -model = dict( - type='YOLOF', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(3, ), - 
frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron/resnet50_caffe')), - neck=dict( - type='DilatedEncoder', - in_channels=2048, - out_channels=512, - block_mid_channels=128, - num_residual_blocks=4, - block_dilations=[2, 4, 6, 8]), - bbox_head=dict( - type='YOLOFHead', - num_classes=80, - in_channels=512, - reg_decoded_bbox=True, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[1, 2, 4, 8, 16], - strides=[32]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1., 1., 1., 1.], - add_ctr_clamp=True, - ctr_clamp=32), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=1.0)), - # training and testing settings - train_cfg=dict( - assigner=dict( - type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) -# optimizer -optimizer = dict( - type='SGD', - lr=0.12, - momentum=0.9, - weight_decay=0.0001, - paramwise_cfg=dict( - norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)})) -lr_config = dict(warmup_iters=1500, warmup_ratio=0.00066667) - -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=8, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/yolof/yolof_r50_c5_8x8_iter-1x_coco.py b/cv/detection/co-detr/pytorch/configs/yolof/yolof_r50_c5_8x8_iter-1x_coco.py deleted file mode 100644 index c95c02da103bdd499063312c36ade30601bb7380..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolof/yolof_r50_c5_8x8_iter-1x_coco.py +++ /dev/null @@ -1,14 +0,0 @@ -_base_ = './yolof_r50_c5_8x8_1x_coco.py' - -# We implemented the iter-based config according to the source code. -# COCO dataset has 117266 images after filtering. We use 8 gpu and -# 8 batch size training, so 22500 is equivalent to -# 22500/(117266/(8x8))=12.3 epoch, 15000 is equivalent to 8.2 epoch, -# 20000 is equivalent to 10.9 epoch. 
Due to lr(0.12) is large, -# the iter-based and epoch-based setting have about 0.2 difference on -# the mAP evaluation value. -lr_config = dict(step=[15000, 20000]) -runner = dict(_delete_=True, type='IterBasedRunner', max_iters=22500) -checkpoint_config = dict(interval=2500) -evaluation = dict(interval=4500) -log_config = dict(interval=20) diff --git a/cv/detection/co-detr/pytorch/configs/yolox/README.md b/cv/detection/co-detr/pytorch/configs/yolox/README.md deleted file mode 100644 index 4890fbddf3f8750e00bd32246ee614fd89d74d4f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolox/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# YOLOX - -> [YOLOX: Exceeding YOLO Series in 2021](https://arxiv.org/abs/2107.08430) - - - -## Abstract - -In this report, we present some experienced improvements to YOLO series, forming a new high-performance detector -- YOLOX. We switch the YOLO detector to an anchor-free manner and conduct other advanced detection techniques, i.e., a decoupled head and the leading label assignment strategy SimOTA to achieve state-of-the-art results across a large scale range of models: For YOLO-Nano with only 0.91M parameters and 1.08G FLOPs, we get 25.3% AP on COCO, surpassing NanoDet by 1.8% AP; for YOLOv3, one of the most widely used detectors in industry, we boost it to 47.3% AP on COCO, outperforming the current best practice by 3.0% AP; for YOLOX-L with roughly the same amount of parameters as YOLOv4-CSP, YOLOv5-L, we achieve 50.0% AP on COCO at a speed of 68.9 FPS on Tesla V100, exceeding YOLOv5-L by 1.8% AP. Further, we won the 1st Place on Streaming Perception Challenge (Workshop on Autonomous Driving at CVPR 2021) using a single YOLOX-L model. We hope this report can provide useful experience for developers and researchers in practical scenes, and we also provide deploy versions with ONNX, TensorRT, NCNN, and Openvino supported. - -
- -
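To make the "decoupled head" and anchor-free prediction mentioned in the abstract concrete, here is a small, self-contained PyTorch sketch: a 1x1 stem followed by separate classification and regression branches, predicting per-location class logits, four box offsets, and an objectness score. The 128 input/feature channels match the `yolox_s_8x8_300e_coco.py` config in this directory; the activation choice and branch depth are assumptions, and this is not mmdet's `YOLOXHead`.

```python
import torch
import torch.nn as nn


class DecoupledHead(nn.Module):
    """Separate cls / reg branches over one feature level, anchor-free outputs."""

    def __init__(self, in_channels=128, feat_channels=128, num_classes=80):
        super().__init__()
        self.stem = nn.Sequential(
            nn.Conv2d(in_channels, feat_channels, 1), nn.SiLU())

        def branch():
            return nn.Sequential(
                nn.Conv2d(feat_channels, feat_channels, 3, padding=1), nn.SiLU(),
                nn.Conv2d(feat_channels, feat_channels, 3, padding=1), nn.SiLU())

        self.cls_branch, self.reg_branch = branch(), branch()
        self.cls_out = nn.Conv2d(feat_channels, num_classes, 1)  # class logits
        self.reg_out = nn.Conv2d(feat_channels, 4, 1)            # box offsets
        self.obj_out = nn.Conv2d(feat_channels, 1, 1)            # objectness

    def forward(self, x):
        x = self.stem(x)
        cls_feat, reg_feat = self.cls_branch(x), self.reg_branch(x)
        return (self.cls_out(cls_feat),
                self.reg_out(reg_feat),
                self.obj_out(reg_feat))


# One 640x640 image at stride 8 gives an 80x80 feature map.
cls, box, obj = DecoupledHead()(torch.randn(1, 128, 80, 80))
print(cls.shape, box.shape, obj.shape)
# torch.Size([1, 80, 80, 80]) torch.Size([1, 4, 80, 80]) torch.Size([1, 1, 80, 80])
```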
- -## Results and Models - -| Backbone | size | Mem (GB) | box AP | Config | Download | -| :--------: | :--: | :------: | :----: | :-------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| YOLOX-tiny | 416 | 3.5 | 32.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_tiny_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234.log.json) | -| YOLOX-s | 640 | 7.6 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_s_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711.log.json) | -| YOLOX-l | 640 | 19.9 | 49.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_l_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236.log.json) | -| YOLOX-x | 640 | 28.1 | 50.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_x_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254.log.json) | - -**Note**: - -1. The test score threshold is 0.001, and the box AP indicates the best AP. -2. Due to the need for pre-training weights, we cannot reproduce the performance of the `yolox-nano` model. Please refer to https://github.com/Megvii-BaseDetection/YOLOX/issues/674 for more information. -3. We also trained the model by the official release of YOLOX based on [Megvii-BaseDetection/YOLOX#735](https://github.com/Megvii-BaseDetection/YOLOX/issues/735) with commit ID [38c633](https://github.com/Megvii-BaseDetection/YOLOX/tree/38c633bf176462ee42b110c70e4ffe17b5753208). We found that the best AP of `YOLOX-tiny`, `YOLOX-s`, `YOLOX-l`, and `YOLOX-x` is 31.8, 40.3, 49.2, and 50.9, respectively. The performance is consistent with that of our re-implementation (see Table above) but still has a gap (0.3~0.8 AP) in comparison with the reported performance in their [README](https://github.com/Megvii-BaseDetection/YOLOX/blob/38c633bf176462ee42b110c70e4ffe17b5753208/README.md#benchmark). 
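Following up on note 1, below is a hedged usage sketch of running one of these models at the low benchmarking threshold. `init_detector` and `inference_detector` are the helpers from `mmdet.apis` in this repository; the config path points at the file in this directory, the checkpoint filename matches the weights linked in the table above (assumed to be downloaded locally), and `demo.jpg` is a placeholder image path.

```python
from mmdet.apis import init_detector, inference_detector

config_file = 'configs/yolox/yolox_s_8x8_300e_coco.py'
# Weights from the table above, assumed to be downloaded locally.
checkpoint_file = 'yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth'

# The config's test_cfg uses score_thr=0.01 for validation; override it to the
# 0.001 used for benchmarking via cfg_options, which init_detector merges with
# Config.merge_from_dict (dot-separated keys).
model = init_detector(
    config_file,
    checkpoint_file,
    device='cuda:0',
    cfg_options={'model.test_cfg.score_thr': 0.001})

result = inference_detector(model, 'demo.jpg')  # per-class bbox arrays for one image
```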
- -## Citation - -```latex -@article{yolox2021, - title={{YOLOX}: Exceeding YOLO Series in 2021}, - author={Ge, Zheng and Liu, Songtao and Wang, Feng and Li, Zeming and Sun, Jian}, - journal={arXiv preprint arXiv:2107.08430}, - year={2021} -} -``` diff --git a/cv/detection/co-detr/pytorch/configs/yolox/metafile.yml b/cv/detection/co-detr/pytorch/configs/yolox/metafile.yml deleted file mode 100644 index 845cb0a4c45a993e18b288c7509735e984aaa5c6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolox/metafile.yml +++ /dev/null @@ -1,70 +0,0 @@ -Collections: - - Name: YOLOX - Metadata: - Training Data: COCO - Training Techniques: - - SGD with Nesterov - - Weight Decay - - Cosine Annealing Lr Updater - Training Resources: 8x TITANXp GPUs - Architecture: - - CSPDarkNet - - PAFPN - Paper: - URL: https://arxiv.org/abs/2107.08430 - Title: 'YOLOX: Exceeding YOLO Series in 2021' - README: configs/yolox/README.md - Code: - URL: https://github.com/open-mmlab/mmdetection/blob/v2.15.1/mmdet/models/detectors/yolox.py#L6 - Version: v2.15.1 - - -Models: - - Name: yolox_s_8x8_300e_coco - In Collection: YOLOX - Config: configs/yolox/yolox_s_8x8_300e_coco.py - Metadata: - Training Memory (GB): 7.6 - Epochs: 300 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 40.5 - Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth - - Name: yolox_l_8x8_300e_coco - In Collection: YOLOX - Config: configs/yolox/yolox_l_8x8_300e_coco.py - Metadata: - Training Memory (GB): 19.9 - Epochs: 300 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 49.4 - Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth - - Name: yolox_x_8x8_300e_coco - In Collection: YOLOX - Config: configs/yolox/yolox_x_8x8_300e_coco.py - Metadata: - Training Memory (GB): 28.1 - Epochs: 300 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 50.9 - Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth - - Name: yolox_tiny_8x8_300e_coco - In Collection: YOLOX - Config: configs/yolox/yolox_tiny_8x8_300e_coco.py - Metadata: - Training Memory (GB): 3.5 - Epochs: 300 - Results: - - Task: Object Detection - Dataset: COCO - Metrics: - box AP: 32.0 - Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth diff --git a/cv/detection/co-detr/pytorch/configs/yolox/yolox_l_8x8_300e_coco.py b/cv/detection/co-detr/pytorch/configs/yolox/yolox_l_8x8_300e_coco.py deleted file mode 100644 index dcbfa183a6739623553e8a0345875a707d68f2b4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolox/yolox_l_8x8_300e_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = './yolox_s_8x8_300e_coco.py' - -# model settings -model = dict( - backbone=dict(deepen_factor=1.0, widen_factor=1.0), - neck=dict( - in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3), - bbox_head=dict(in_channels=256, feat_channels=256)) diff --git a/cv/detection/co-detr/pytorch/configs/yolox/yolox_m_8x8_300e_coco.py b/cv/detection/co-detr/pytorch/configs/yolox/yolox_m_8x8_300e_coco.py deleted file mode 100644 index 3048c95c6860c0af055df9dc05d4f90f427fd371..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/configs/yolox/yolox_m_8x8_300e_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = './yolox_s_8x8_300e_coco.py' - -# model settings -model = dict( - backbone=dict(deepen_factor=0.67, widen_factor=0.75), - neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2), - bbox_head=dict(in_channels=192, feat_channels=192), -) diff --git a/cv/detection/co-detr/pytorch/configs/yolox/yolox_nano_8x8_300e_coco.py b/cv/detection/co-detr/pytorch/configs/yolox/yolox_nano_8x8_300e_coco.py deleted file mode 100644 index d33ed04bc08f3dafd327206c4bb888b9acadfd70..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolox/yolox_nano_8x8_300e_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = './yolox_tiny_8x8_300e_coco.py' - -# model settings -model = dict( - backbone=dict(deepen_factor=0.33, widen_factor=0.25, use_depthwise=True), - neck=dict( - in_channels=[64, 128, 256], - out_channels=64, - num_csp_blocks=1, - use_depthwise=True), - bbox_head=dict(in_channels=64, feat_channels=64, use_depthwise=True)) diff --git a/cv/detection/co-detr/pytorch/configs/yolox/yolox_s_8x8_300e_coco.py b/cv/detection/co-detr/pytorch/configs/yolox/yolox_s_8x8_300e_coco.py deleted file mode 100644 index 97ff23e896db0978539d57df1471c7bea6b79be8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolox/yolox_s_8x8_300e_coco.py +++ /dev/null @@ -1,165 +0,0 @@ -_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'] - -img_scale = (640, 640) # height, width - -# model settings -model = dict( - type='YOLOX', - input_size=img_scale, - random_size_range=(15, 25), - random_size_interval=10, - backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5), - neck=dict( - type='YOLOXPAFPN', - in_channels=[128, 256, 512], - out_channels=128, - num_csp_blocks=1), - bbox_head=dict( - type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128), - train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)), - # In order to align the source code, the threshold of the val phase is - # 0.01, and the threshold of the test phase is 0.001. - test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65))) - -# dataset settings -data_root = 'data/coco/' -dataset_type = 'CocoDataset' - -train_pipeline = [ - dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), - dict( - type='RandomAffine', - scaling_ratio_range=(0.1, 2), - border=(-img_scale[0] // 2, -img_scale[1] // 2)), - dict( - type='MixUp', - img_scale=img_scale, - ratio_range=(0.8, 1.6), - pad_val=114.0), - dict(type='YOLOXHSVRandomAug'), - dict(type='RandomFlip', flip_ratio=0.5), - # According to the official implementation, multi-scale - # training is not considered here but in the - # 'mmdet/models/detectors/yolox.py'. - dict(type='Resize', img_scale=img_scale, keep_ratio=True), - dict( - type='Pad', - pad_to_square=True, - # If the image is three-channel, the pad value needs - # to be set separately for each channel. 
- pad_val=dict(img=(114.0, 114.0, 114.0))), - dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] - -train_dataset = dict( - type='MultiImageMixDataset', - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True) - ], - filter_empty_gt=False, - ), - pipeline=train_pipeline) - -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=img_scale, - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Pad', - pad_to_square=True, - pad_val=dict(img=(114.0, 114.0, 114.0))), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img']) - ]) -] - -data = dict( - samples_per_gpu=8, - workers_per_gpu=4, - persistent_workers=True, - train=train_dataset, - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) - -# optimizer -# default 8 gpu -optimizer = dict( - type='SGD', - lr=0.01, - momentum=0.9, - weight_decay=5e-4, - nesterov=True, - paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.)) -optimizer_config = dict(grad_clip=None) - -max_epochs = 300 -num_last_epochs = 15 -resume_from = None -interval = 10 - -# learning policy -lr_config = dict( - _delete_=True, - policy='YOLOX', - warmup='exp', - by_epoch=False, - warmup_by_epoch=True, - warmup_ratio=1, - warmup_iters=5, # 5 epoch - num_last_epochs=num_last_epochs, - min_lr_ratio=0.05) - -runner = dict(type='EpochBasedRunner', max_epochs=max_epochs) - -custom_hooks = [ - dict( - type='YOLOXModeSwitchHook', - num_last_epochs=num_last_epochs, - priority=48), - dict( - type='SyncNormHook', - num_last_epochs=num_last_epochs, - interval=interval, - priority=48), - dict( - type='ExpMomentumEMAHook', - resume_from=resume_from, - momentum=0.0001, - priority=49) -] -checkpoint_config = dict(interval=interval) -evaluation = dict( - save_best='auto', - # The evaluation interval is 'interval' when running epoch is - # less than ‘max_epochs - num_last_epochs’. - # The evaluation interval is 1 when running epoch is greater than - # or equal to ‘max_epochs - num_last_epochs’. - interval=interval, - dynamic_intervals=[(max_epochs - num_last_epochs, 1)], - metric='bbox') -log_config = dict(interval=50) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
-# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/yolox/yolox_tiny_8x8_300e_coco.py b/cv/detection/co-detr/pytorch/configs/yolox/yolox_tiny_8x8_300e_coco.py deleted file mode 100644 index 75931bad5921c96d3f8ed0a6c63e330ba20f1df2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolox/yolox_tiny_8x8_300e_coco.py +++ /dev/null @@ -1,58 +0,0 @@ -_base_ = './yolox_s_8x8_300e_coco.py' - -# model settings -model = dict( - random_size_range=(10, 20), - backbone=dict(deepen_factor=0.33, widen_factor=0.375), - neck=dict(in_channels=[96, 192, 384], out_channels=96), - bbox_head=dict(in_channels=96, feat_channels=96)) - -img_scale = (640, 640) # height, width - -train_pipeline = [ - dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), - dict( - type='RandomAffine', - scaling_ratio_range=(0.5, 1.5), - border=(-img_scale[0] // 2, -img_scale[1] // 2)), - dict(type='YOLOXHSVRandomAug'), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Resize', img_scale=img_scale, keep_ratio=True), - dict( - type='Pad', - pad_to_square=True, - pad_val=dict(img=(114.0, 114.0, 114.0))), - dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] - -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(416, 416), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Pad', - pad_to_square=True, - pad_val=dict(img=(114.0, 114.0, 114.0))), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img']) - ]) -] - -train_dataset = dict(pipeline=train_pipeline) - -data = dict( - train=train_dataset, - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (8 samples per GPU) -auto_scale_lr = dict(base_batch_size=64) diff --git a/cv/detection/co-detr/pytorch/configs/yolox/yolox_x_8x8_300e_coco.py b/cv/detection/co-detr/pytorch/configs/yolox/yolox_x_8x8_300e_coco.py deleted file mode 100644 index 65c0b75c186c56b3dcb55db76d74e879b413f862..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/configs/yolox/yolox_x_8x8_300e_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = './yolox_s_8x8_300e_coco.py' - -# model settings -model = dict( - backbone=dict(deepen_factor=1.33, widen_factor=1.25), - neck=dict( - in_channels=[320, 640, 1280], out_channels=320, num_csp_blocks=4), - bbox_head=dict(in_channels=320, feat_channels=320)) diff --git a/cv/detection/co-detr/pytorch/mmcv_custom/__init__.py b/cv/detection/co-detr/pytorch/mmcv_custom/__init__.py deleted file mode 100644 index 7e0e39b03e2a149c33c372472b2b814a872ec55c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmcv_custom/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -*- coding: utf-8 -*- - -from .checkpoint import load_checkpoint - -__all__ = ['load_checkpoint'] diff --git a/cv/detection/co-detr/pytorch/mmcv_custom/checkpoint.py b/cv/detection/co-detr/pytorch/mmcv_custom/checkpoint.py deleted file mode 100644 index d72f05e363be42f86d86c9a8b2c81fe5bec8ba7f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmcv_custom/checkpoint.py +++ /dev/null @@ -1,487 +0,0 @@ -# Copyright (c) Open-MMLab. 
All rights reserved. -import io -import os -import os.path as osp -import pkgutil -import time -import warnings -from collections import OrderedDict -from importlib import import_module -from tempfile import TemporaryDirectory - -import torch -import torchvision -from torch.optim import Optimizer -from torch.utils import model_zoo -from torch.nn import functional as F - -import mmcv -from mmcv.fileio import FileClient -from mmcv.fileio import load as load_file -from mmcv.parallel import is_module_wrapper -from mmcv.utils import mkdir_or_exist -from mmcv.runner import get_dist_info - -ENV_MMCV_HOME = 'MMCV_HOME' -ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' -DEFAULT_CACHE_DIR = '~/.cache' - - -def _get_mmcv_home(): - mmcv_home = os.path.expanduser( - os.getenv( - ENV_MMCV_HOME, - os.path.join( - os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv'))) - - mkdir_or_exist(mmcv_home) - return mmcv_home - - -def load_state_dict(module, state_dict, strict=False, logger=None): - """Load state_dict to a module. - This method is modified from :meth:`torch.nn.Module.load_state_dict`. - Default value for ``strict`` is set to ``False`` and the message for - param mismatch will be shown even if strict is False. - Args: - module (Module): Module that receives the state_dict. - state_dict (OrderedDict): Weights. - strict (bool): whether to strictly enforce that the keys - in :attr:`state_dict` match the keys returned by this module's - :meth:`~torch.nn.Module.state_dict` function. Default: ``False``. - logger (:obj:`logging.Logger`, optional): Logger to log the error - message. If not specified, print function will be used. - """ - unexpected_keys = [] - all_missing_keys = [] - err_msg = [] - - metadata = getattr(state_dict, '_metadata', None) - state_dict = state_dict.copy() - if metadata is not None: - state_dict._metadata = metadata - - # use _load_from_state_dict to enable checkpoint version control - def load(module, prefix=''): - # recursively check parallel module in case that the model has a - # complicated structure, e.g., nn.Module(nn.Module(DDP)) - if is_module_wrapper(module): - module = module.module - local_metadata = {} if metadata is None else metadata.get( - prefix[:-1], {}) - module._load_from_state_dict(state_dict, prefix, local_metadata, True, - all_missing_keys, unexpected_keys, - err_msg) - for name, child in module._modules.items(): - if child is not None: - load(child, prefix + name + '.') - - load(module) - load = None # break load->load reference cycle - - # ignore "num_batches_tracked" of BN layers - missing_keys = [ - key for key in all_missing_keys if 'num_batches_tracked' not in key - ] - - if unexpected_keys: - err_msg.append('unexpected key in source ' - f'state_dict: {", ".join(unexpected_keys)}\n') - if missing_keys: - err_msg.append( - f'missing keys in source state_dict: {", ".join(missing_keys)}\n') - - rank, _ = get_dist_info() - if len(err_msg) > 0 and rank == 0: - err_msg.insert( - 0, 'The model and loaded state dict do not match exactly\n') - err_msg = '\n'.join(err_msg) - if strict: - raise RuntimeError(err_msg) - elif logger is not None: - logger.warning(err_msg) - else: - print(err_msg) - - -def load_url_dist(url, model_dir=None): - """In distributed setting, this function only download checkpoint at local - rank 0.""" - rank, world_size = get_dist_info() - rank = int(os.environ.get('LOCAL_RANK', rank)) - if rank == 0: - checkpoint = model_zoo.load_url(url, model_dir=model_dir) - if world_size > 1: - torch.distributed.barrier() - if rank > 0: - checkpoint = 
model_zoo.load_url(url, model_dir=model_dir) - return checkpoint - - -def load_pavimodel_dist(model_path, map_location=None): - """In distributed setting, this function only download checkpoint at local - rank 0.""" - try: - from pavi import modelcloud - except ImportError: - raise ImportError( - 'Please install pavi to load checkpoint from modelcloud.') - rank, world_size = get_dist_info() - rank = int(os.environ.get('LOCAL_RANK', rank)) - if rank == 0: - model = modelcloud.get(model_path) - with TemporaryDirectory() as tmp_dir: - downloaded_file = osp.join(tmp_dir, model.name) - model.download(downloaded_file) - checkpoint = torch.load(downloaded_file, map_location=map_location) - if world_size > 1: - torch.distributed.barrier() - if rank > 0: - model = modelcloud.get(model_path) - with TemporaryDirectory() as tmp_dir: - downloaded_file = osp.join(tmp_dir, model.name) - model.download(downloaded_file) - checkpoint = torch.load( - downloaded_file, map_location=map_location) - return checkpoint - - -def load_fileclient_dist(filename, backend, map_location): - """In distributed setting, this function only download checkpoint at local - rank 0.""" - rank, world_size = get_dist_info() - rank = int(os.environ.get('LOCAL_RANK', rank)) - allowed_backends = ['ceph', 'petrel'] - if backend not in allowed_backends: - raise ValueError(f'Load from Backend {backend} is not supported.') - if rank == 0: - fileclient = FileClient(backend=backend) - buffer = io.BytesIO(fileclient.get(filename)) - checkpoint = torch.load(buffer, map_location=map_location) - if world_size > 1: - torch.distributed.barrier() - if rank > 0: - fileclient = FileClient(backend=backend) - buffer = io.BytesIO(fileclient.get(filename)) - checkpoint = torch.load(buffer, map_location=map_location) - return checkpoint - - -def get_torchvision_models(): - model_urls = dict() - for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__): - if ispkg: - continue - _zoo = import_module(f'torchvision.models.{name}') - if hasattr(_zoo, 'model_urls'): - _urls = getattr(_zoo, 'model_urls') - model_urls.update(_urls) - return model_urls - - -def get_external_models(): - mmcv_home = _get_mmcv_home() - default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json') - default_urls = load_file(default_json_path) - assert isinstance(default_urls, dict) - external_json_path = osp.join(mmcv_home, 'open_mmlab.json') - if osp.exists(external_json_path): - external_urls = load_file(external_json_path) - assert isinstance(external_urls, dict) - default_urls.update(external_urls) - - return default_urls - - -def get_mmcls_models(): - mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json') - mmcls_urls = load_file(mmcls_json_path) - - return mmcls_urls - - -def get_deprecated_model_names(): - deprecate_json_path = osp.join(mmcv.__path__[0], - 'model_zoo/deprecated.json') - deprecate_urls = load_file(deprecate_json_path) - assert isinstance(deprecate_urls, dict) - - return deprecate_urls - - -def _process_mmcls_checkpoint(checkpoint): - state_dict = checkpoint['state_dict'] - new_state_dict = OrderedDict() - for k, v in state_dict.items(): - if k.startswith('backbone.'): - new_state_dict[k[9:]] = v - new_checkpoint = dict(state_dict=new_state_dict) - - return new_checkpoint - - -def _load_checkpoint(filename, map_location=None): - """Load checkpoint from somewhere (modelzoo, file, url). - Args: - filename (str): Accept local filepath, URL, ``torchvision://xxx``, - ``open-mmlab://xxx``. 
Please refer to ``docs/model_zoo.md`` for - details. - map_location (str | None): Same as :func:`torch.load`. Default: None. - Returns: - dict | OrderedDict: The loaded checkpoint. It can be either an - OrderedDict storing model weights or a dict containing other - information, which depends on the checkpoint. - """ - if filename.startswith('modelzoo://'): - warnings.warn('The URL scheme of "modelzoo://" is deprecated, please ' - 'use "torchvision://" instead') - model_urls = get_torchvision_models() - model_name = filename[11:] - checkpoint = load_url_dist(model_urls[model_name]) - elif filename.startswith('torchvision://'): - model_urls = get_torchvision_models() - model_name = filename[14:] - checkpoint = load_url_dist(model_urls[model_name]) - elif filename.startswith('open-mmlab://'): - model_urls = get_external_models() - model_name = filename[13:] - deprecated_urls = get_deprecated_model_names() - if model_name in deprecated_urls: - warnings.warn(f'open-mmlab://{model_name} is deprecated in favor ' - f'of open-mmlab://{deprecated_urls[model_name]}') - model_name = deprecated_urls[model_name] - model_url = model_urls[model_name] - # check if is url - if model_url.startswith(('http://', 'https://')): - checkpoint = load_url_dist(model_url) - else: - filename = osp.join(_get_mmcv_home(), model_url) - if not osp.isfile(filename): - raise IOError(f'{filename} is not a checkpoint file') - checkpoint = torch.load(filename, map_location=map_location) - elif filename.startswith('mmcls://'): - model_urls = get_mmcls_models() - model_name = filename[8:] - checkpoint = load_url_dist(model_urls[model_name]) - checkpoint = _process_mmcls_checkpoint(checkpoint) - elif filename.startswith(('http://', 'https://')): - checkpoint = load_url_dist(filename) - elif filename.startswith('pavi://'): - model_path = filename[7:] - checkpoint = load_pavimodel_dist(model_path, map_location=map_location) - elif filename.startswith('s3://'): - checkpoint = load_fileclient_dist( - filename, backend='petrel', map_location=map_location) - else: - if not osp.isfile(filename): - raise IOError(f'{filename} is not a checkpoint file') - checkpoint = torch.load(filename, map_location=map_location) - return checkpoint - - -def load_checkpoint(model, - filename, - map_location='cpu', - strict=False, - logger=None): - """Load checkpoint from a file or URI. - Args: - model (Module): Module to load checkpoint. - filename (str): Accept local filepath, URL, ``torchvision://xxx``, - ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for - details. - map_location (str): Same as :func:`torch.load`. - strict (bool): Whether to allow different params for the model and - checkpoint. - logger (:mod:`logging.Logger` or None): The logger for error message. - Returns: - dict or OrderedDict: The loaded checkpoint. 
- """ - checkpoint = _load_checkpoint(filename, map_location) - # OrderedDict is a subclass of dict - if not isinstance(checkpoint, dict): - raise RuntimeError( - f'No state_dict found in checkpoint file {filename}') - # get state_dict from checkpoint - if 'state_dict' in checkpoint: - state_dict = checkpoint['state_dict'] - elif 'model' in checkpoint: - state_dict = checkpoint['model'] - else: - state_dict = checkpoint - # strip prefix of state_dict - if list(state_dict.keys())[0].startswith('module.'): - state_dict = {k[7:]: v for k, v in state_dict.items()} - - # for MoBY, load model of online branch - if sorted(list(state_dict.keys()))[0].startswith('encoder'): - state_dict = {k.replace('encoder.', ''): v for k, v in state_dict.items() if k.startswith('encoder.')} - - # reshape absolute position embedding - if state_dict.get('absolute_pos_embed') is not None: - absolute_pos_embed = state_dict['absolute_pos_embed'] - N1, L, C1 = absolute_pos_embed.size() - N2, C2, H, W = model.absolute_pos_embed.size() - if N1 != N2 or C1 != C2 or L != H*W: - logger.warning("Error in loading absolute_pos_embed, pass") - else: - state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2) - - # interpolate position bias table if needed - relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k] - for table_key in relative_position_bias_table_keys: - table_pretrained = state_dict[table_key] - if not table_key in model.state_dict().keys(): - print(table_key) - continue - table_current = model.state_dict()[table_key] - L1, nH1 = table_pretrained.size() - L2, nH2 = table_current.size() - if nH1 != nH2: - logger.warning(f"Error in loading {table_key}, pass") - else: - if L1 != L2: - S1 = int(L1 ** 0.5) - S2 = int(L2 ** 0.5) - table_pretrained_resized = F.interpolate( - table_pretrained.permute(1, 0).view(1, nH1, S1, S1), - size=(S2, S2), mode='bicubic') - state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0) - - # load state_dict - load_state_dict(model, state_dict, strict, logger) - return checkpoint - - -def weights_to_cpu(state_dict): - """Copy a model state_dict to cpu. - Args: - state_dict (OrderedDict): Model weights on GPU. - Returns: - OrderedDict: Model weights on GPU. - """ - state_dict_cpu = OrderedDict() - for key, val in state_dict.items(): - state_dict_cpu[key] = val.cpu() - return state_dict_cpu - - -def _save_to_state_dict(module, destination, prefix, keep_vars): - """Saves module state to `destination` dictionary. - This method is modified from :meth:`torch.nn.Module._save_to_state_dict`. - Args: - module (nn.Module): The module to generate state_dict. - destination (dict): A dict where state will be stored. - prefix (str): The prefix for parameters and buffers used in this - module. - """ - for name, param in module._parameters.items(): - if param is not None: - destination[prefix + name] = param if keep_vars else param.detach() - for name, buf in module._buffers.items(): - # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d - if buf is not None: - destination[prefix + name] = buf if keep_vars else buf.detach() - - -def get_state_dict(module, destination=None, prefix='', keep_vars=False): - """Returns a dictionary containing a whole state of the module. - Both parameters and persistent buffers (e.g. running averages) are - included. Keys are corresponding parameter and buffer names. 
- This method is modified from :meth:`torch.nn.Module.state_dict` to - recursively check parallel module in case that the model has a complicated - structure, e.g., nn.Module(nn.Module(DDP)). - Args: - module (nn.Module): The module to generate state_dict. - destination (OrderedDict): Returned dict for the state of the - module. - prefix (str): Prefix of the key. - keep_vars (bool): Whether to keep the variable property of the - parameters. Default: False. - Returns: - dict: A dictionary containing a whole state of the module. - """ - # recursively check parallel module in case that the model has a - # complicated structure, e.g., nn.Module(nn.Module(DDP)) - if is_module_wrapper(module): - module = module.module - - # below is the same as torch.nn.Module.state_dict() - if destination is None: - destination = OrderedDict() - destination._metadata = OrderedDict() - destination._metadata[prefix[:-1]] = local_metadata = dict( - version=module._version) - _save_to_state_dict(module, destination, prefix, keep_vars) - for name, child in module._modules.items(): - if child is not None: - get_state_dict( - child, destination, prefix + name + '.', keep_vars=keep_vars) - for hook in module._state_dict_hooks.values(): - hook_result = hook(module, destination, prefix, local_metadata) - if hook_result is not None: - destination = hook_result - return destination - - -def save_checkpoint(model, filename, optimizer=None, meta=None): - """Save checkpoint to file. - The checkpoint will have 3 fields: ``meta``, ``state_dict`` and - ``optimizer``. By default ``meta`` will contain version and time info. - Args: - model (Module): Module whose params are to be saved. - filename (str): Checkpoint filename. - optimizer (:obj:`Optimizer`, optional): Optimizer to be saved. - meta (dict, optional): Metadata to be saved in checkpoint. 
- """ - if meta is None: - meta = {} - elif not isinstance(meta, dict): - raise TypeError(f'meta must be a dict or None, but got {type(meta)}') - meta.update(mmcv_version=mmcv.__version__, time=time.asctime()) - - if is_module_wrapper(model): - model = model.module - - if hasattr(model, 'CLASSES') and model.CLASSES is not None: - # save class name to the meta - meta.update(CLASSES=model.CLASSES) - - checkpoint = { - 'meta': meta, - 'state_dict': weights_to_cpu(get_state_dict(model)) - } - # save optimizer state dict in the checkpoint - if isinstance(optimizer, Optimizer): - checkpoint['optimizer'] = optimizer.state_dict() - elif isinstance(optimizer, dict): - checkpoint['optimizer'] = {} - for name, optim in optimizer.items(): - checkpoint['optimizer'][name] = optim.state_dict() - - if filename.startswith('pavi://'): - try: - from pavi import modelcloud - from pavi.exception import NodeNotFoundError - except ImportError: - raise ImportError( - 'Please install pavi to load checkpoint from modelcloud.') - model_path = filename[7:] - root = modelcloud.Folder() - model_dir, model_name = osp.split(model_path) - try: - model = modelcloud.get(model_dir) - except NodeNotFoundError: - model = root.create_training_model(model_dir) - with TemporaryDirectory() as tmp_dir: - checkpoint_file = osp.join(tmp_dir, model_name) - with open(checkpoint_file, 'wb') as f: - torch.save(checkpoint, f) - f.flush() - model.create_file(checkpoint_file, name=model_name) - else: - mmcv.mkdir_or_exist(osp.dirname(filename)) - # immediately flush buffer - with open(filename, 'wb') as f: - torch.save(checkpoint, f) - f.flush() diff --git a/cv/detection/co-detr/pytorch/mmdet/__init__.py b/cv/detection/co-detr/pytorch/mmdet/__init__.py deleted file mode 100644 index cd672c1a38970d416a06eeecaab4ae692a510789..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv - -from .version import __version__, short_version - - -def digit_version(version_str): - digit_version = [] - for x in version_str.split('.'): - if x.isdigit(): - digit_version.append(int(x)) - elif x.find('rc') != -1: - patch_version = x.split('rc') - digit_version.append(int(patch_version[0]) - 1) - digit_version.append(int(patch_version[1])) - return digit_version - - -mmcv_minimum_version = '1.3.17' -mmcv_maximum_version = '1.7.0' -mmcv_version = digit_version(mmcv.__version__) - - -assert (mmcv_version >= digit_version(mmcv_minimum_version) - and mmcv_version <= digit_version(mmcv_maximum_version)), \ - f'MMCV=={mmcv.__version__} is used but incompatible. ' \ - f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' - -__all__ = ['__version__', 'short_version'] diff --git a/cv/detection/co-detr/pytorch/mmdet/apis/__init__.py b/cv/detection/co-detr/pytorch/mmdet/apis/__init__.py deleted file mode 100644 index a865e942afd03ddc60ffedbabf9716e769f5bcfe..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/apis/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .inference import (async_inference_detector, inference_detector, - init_detector, show_result_pyplot) -from .test import multi_gpu_test, single_gpu_test -from .train import (get_root_logger, init_random_seed, set_random_seed, - train_detector) - -__all__ = [ - 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector', - 'async_inference_detector', 'inference_detector', 'show_result_pyplot', - 'multi_gpu_test', 'single_gpu_test', 'init_random_seed' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/apis/inference.py b/cv/detection/co-detr/pytorch/mmdet/apis/inference.py deleted file mode 100644 index a35f8c0cd72ac3acb6dadf4532ab96b88fda4dd2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/apis/inference.py +++ /dev/null @@ -1,252 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings -from pathlib import Path - -import mmcv -import numpy as np -import torch -from mmcv.ops import RoIPool -from mmcv.parallel import collate, scatter -from mmcv.runner import load_checkpoint - -from mmdet.core import get_classes, DatasetEnum -from mmdet.datasets import replace_ImageToTensor -from mmdet.datasets.pipelines import Compose -from mmdet.models import build_detector -from projects import * - - -def init_detector(config, checkpoint=None, dataset=DatasetEnum.COCO, device='cuda:0', cfg_options=None): - """Initialize a detector from config file. - - Args: - config (str, :obj:`Path`, or :obj:`mmcv.Config`): Config file path, - :obj:`Path`, or the config object. - checkpoint (str, optional): Checkpoint path. If left as None, the model - will not load any weights. - cfg_options (dict): Options to override some settings in the used - config. - - Returns: - nn.Module: The constructed detector. - """ - if isinstance(config, (str, Path)): - config = mmcv.Config.fromfile(config) - elif not isinstance(config, mmcv.Config): - raise TypeError('config must be a filename or Config object, ' - f'but got {type(config)}') - if cfg_options is not None: - config.merge_from_dict(cfg_options) - if 'pretrained' in config.model: - config.model.pretrained = None - elif 'init_cfg' in config.model.backbone: - config.model.backbone.init_cfg = None - config.model.train_cfg = None - model = build_detector(config.model, test_cfg=config.get('test_cfg')) - if checkpoint is not None: - checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') - if 'CLASSES' in checkpoint.get('meta', {}): - model.CLASSES = checkpoint['meta']['CLASSES'] - else: - warnings.simplefilter('once') - warnings.warn(f'Class names are not saved in the checkpoint\'s ' - f'meta data, use {dataset.value} classes.') - model.CLASSES = get_classes(dataset) - model.cfg = config # save the config in the model for convenience - model.to(device) - model.eval() - return model - - -class LoadImage: - """Deprecated. - - A simple pipeline to load image. - """ - - def __call__(self, results): - """Call function to load images into results. - - Args: - results (dict): A result dict contains the file name - of the image to be read. - Returns: - dict: ``results`` will be returned containing loaded image. - """ - warnings.simplefilter('once') - warnings.warn('`LoadImage` is deprecated and will be removed in ' - 'future releases. 
You may use `LoadImageFromWebcam` ' - 'from `mmdet.datasets.pipelines.` instead.') - if isinstance(results['img'], str): - results['filename'] = results['img'] - results['ori_filename'] = results['img'] - else: - results['filename'] = None - results['ori_filename'] = None - img = mmcv.imread(results['img']) - results['img'] = img - results['img_fields'] = ['img'] - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - return results - - -def inference_detector(model, imgs): - """Inference image(s) with the detector. - - Args: - model (nn.Module): The loaded detector. - imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]): - Either image files or loaded images. - - Returns: - If imgs is a list or tuple, the same length list type results - will be returned, otherwise return the detection results directly. - """ - - if isinstance(imgs, (list, tuple)): - is_batch = True - else: - imgs = [imgs] - is_batch = False - - cfg = model.cfg - device = next(model.parameters()).device # model device - - if isinstance(imgs[0], np.ndarray): - cfg = cfg.copy() - # set loading pipeline type - cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' - - cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) - test_pipeline = Compose(cfg.data.test.pipeline) - - datas = [] - for img in imgs: - # prepare data - if isinstance(img, np.ndarray): - # directly add img - data = dict(img=img) - else: - # add information into dict - data = dict(img_info=dict(filename=img), img_prefix=None) - # build the data pipeline - data = test_pipeline(data) - datas.append(data) - - data = collate(datas, samples_per_gpu=len(imgs)) - # just get the actual data from DataContainer - data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']] - data['img'] = [img.data[0] for img in data['img']] - if next(model.parameters()).is_cuda: - # scatter to specified GPU - data = scatter(data, [device])[0] - else: - for m in model.modules(): - assert not isinstance( - m, RoIPool - ), 'CPU inference with RoIPool is not supported currently.' - - # forward the model - with torch.no_grad(): - results = model(return_loss=False, rescale=True, **data) - - if not is_batch: - return results[0] - else: - return results - - -async def async_inference_detector(model, imgs): - """Async inference image(s) with the detector. - - Args: - model (nn.Module): The loaded detector. - img (str | ndarray): Either image files or loaded images. - - Returns: - Awaitable detection results. 
- """ - if not isinstance(imgs, (list, tuple)): - imgs = [imgs] - - cfg = model.cfg - device = next(model.parameters()).device # model device - - if isinstance(imgs[0], np.ndarray): - cfg = cfg.copy() - # set loading pipeline type - cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' - - cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) - test_pipeline = Compose(cfg.data.test.pipeline) - - datas = [] - for img in imgs: - # prepare data - if isinstance(img, np.ndarray): - # directly add img - data = dict(img=img) - else: - # add information into dict - data = dict(img_info=dict(filename=img), img_prefix=None) - # build the data pipeline - data = test_pipeline(data) - datas.append(data) - - data = collate(datas, samples_per_gpu=len(imgs)) - # just get the actual data from DataContainer - data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']] - data['img'] = [img.data[0] for img in data['img']] - if next(model.parameters()).is_cuda: - # scatter to specified GPU - data = scatter(data, [device])[0] - else: - for m in model.modules(): - assert not isinstance( - m, RoIPool - ), 'CPU inference with RoIPool is not supported currently.' - - # We don't restore `torch.is_grad_enabled()` value during concurrent - # inference since execution can overlap - torch.set_grad_enabled(False) - results = await model.aforward_test(rescale=True, **data) - return results - - -def show_result_pyplot(model, - img, - result, - score_thr=0.3, - title='result', - wait_time=0, - palette=None, - out_file=None): - """Visualize the detection results on the image. - - Args: - model (nn.Module): The loaded detector. - img (str or np.ndarray): Image filename or loaded image. - result (tuple[list] or list): The detection result, can be either - (bbox, segm) or just bbox. - score_thr (float): The threshold to visualize the bboxes and masks. - title (str): Title of the pyplot figure. - wait_time (float): Value of waitKey param. Default: 0. - palette (str or tuple(int) or :obj:`Color`): Color. - The tuple of color should be in BGR order. - out_file (str or None): The path to write the image. - Default: None. - """ - if hasattr(model, 'module'): - model = model.module - model.show_result( - img, - result, - score_thr=score_thr, - show=True, - wait_time=wait_time, - win_name=title, - bbox_color=palette, - text_color=(200, 200, 200), - mask_color=palette, - out_file=out_file) diff --git a/cv/detection/co-detr/pytorch/mmdet/apis/test.py b/cv/detection/co-detr/pytorch/mmdet/apis/test.py deleted file mode 100644 index 45fca20292b5dc339f1b0acac2f3d10c15c727f8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/apis/test.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import os.path as osp -import pickle -import shutil -import tempfile -import time - -import mmcv -import torch -import torch.distributed as dist -from mmcv.image import tensor2imgs -from mmcv.runner import get_dist_info - -from mmdet.core import encode_mask_results -from projects import * - -def single_gpu_test(model, - data_loader, - show=False, - out_dir=None, - show_score_thr=0.3): - model.eval() - results = [] - dataset = data_loader.dataset - PALETTE = getattr(dataset, 'PALETTE', None) - prog_bar = mmcv.ProgressBar(len(dataset)) - for i, data in enumerate(data_loader): - with torch.no_grad(): - result = model(return_loss=False, rescale=True, **data) - - batch_size = len(result) - if show or out_dir: - if batch_size == 1 and isinstance(data['img'][0], torch.Tensor): - img_tensor = data['img'][0] - else: - img_tensor = data['img'][0].data[0] - img_metas = data['img_metas'][0].data[0] - imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) - assert len(imgs) == len(img_metas) - - for i, (img, img_meta) in enumerate(zip(imgs, img_metas)): - h, w, _ = img_meta['img_shape'] - img_show = img[:h, :w, :] - - ori_h, ori_w = img_meta['ori_shape'][:-1] - img_show = mmcv.imresize(img_show, (ori_w, ori_h)) - - if out_dir: - out_file = osp.join(out_dir, img_meta['ori_filename']) - else: - out_file = None - - model.module.show_result( - img_show, - result[i], - bbox_color=PALETTE, - text_color=PALETTE, - mask_color=PALETTE, - show=show, - out_file=out_file, - score_thr=show_score_thr) - - # encode mask results - if isinstance(result[0], tuple): - result = [(bbox_results, encode_mask_results(mask_results)) - for bbox_results, mask_results in result] - # This logic is only used in panoptic segmentation test. - elif isinstance(result[0], dict) and 'ins_results' in result[0]: - for j in range(len(result)): - bbox_results, mask_results = result[j]['ins_results'] - result[j]['ins_results'] = (bbox_results, - encode_mask_results(mask_results)) - - results.extend(result) - - for _ in range(batch_size): - prog_bar.update() - return results - - -def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): - """Test model with multiple gpus. - - This method tests model with multiple gpus and collects the results - under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' - it encodes results to gpu tensors and use gpu communication for results - collection. On cpu mode it saves the results on different gpus to 'tmpdir' - and collects them by the rank 0 worker. - - Args: - model (nn.Module): Model to be tested. - data_loader (nn.Dataloader): Pytorch data loader. - tmpdir (str): Path of directory to save the temporary results from - different gpus under cpu mode. - gpu_collect (bool): Option to use either gpu or cpu to collect results. - - Returns: - list: The prediction results. - """ - model.eval() - results = [] - dataset = data_loader.dataset - rank, world_size = get_dist_info() - if rank == 0: - prog_bar = mmcv.ProgressBar(len(dataset)) - time.sleep(2) # This line can prevent deadlock problem in some cases. - for i, data in enumerate(data_loader): - with torch.no_grad(): - result = model(return_loss=False, rescale=True, **data) - # encode mask results - if isinstance(result[0], tuple): - result = [(bbox_results, encode_mask_results(mask_results)) - for bbox_results, mask_results in result] - # This logic is only used in panoptic segmentation test. 
- elif isinstance(result[0], dict) and 'ins_results' in result[0]: - for j in range(len(result)): - bbox_results, mask_results = result[j]['ins_results'] - result[j]['ins_results'] = ( - bbox_results, encode_mask_results(mask_results)) - - results.extend(result) - - if rank == 0: - batch_size = len(result) - for _ in range(batch_size * world_size): - prog_bar.update() - - # collect results from all ranks - if gpu_collect: - results = collect_results_gpu(results, len(dataset)) - else: - results = collect_results_cpu(results, len(dataset), tmpdir) - return results - - -def collect_results_cpu(result_part, size, tmpdir=None): - rank, world_size = get_dist_info() - # create a tmp dir if it is not specified - if tmpdir is None: - MAX_LEN = 512 - # 32 is whitespace - dir_tensor = torch.full((MAX_LEN, ), - 32, - dtype=torch.uint8, - device='cuda') - if rank == 0: - mmcv.mkdir_or_exist('.dist_test') - tmpdir = tempfile.mkdtemp(dir='.dist_test') - tmpdir = torch.tensor( - bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') - dir_tensor[:len(tmpdir)] = tmpdir - dist.broadcast(dir_tensor, 0) - tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() - else: - mmcv.mkdir_or_exist(tmpdir) - # dump the part result to the dir - mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl')) - dist.barrier() - # collect all parts - if rank != 0: - return None - else: - # load results of all parts from tmp dir - part_list = [] - for i in range(world_size): - part_file = osp.join(tmpdir, f'part_{i}.pkl') - part_list.append(mmcv.load(part_file)) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - # remove tmp dir - shutil.rmtree(tmpdir) - return ordered_results - - -def collect_results_gpu(result_part, size): - rank, world_size = get_dist_info() - # dump result part to tensor with pickle - part_tensor = torch.tensor( - bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') - # gather all result part tensor shape - shape_tensor = torch.tensor(part_tensor.shape, device='cuda') - shape_list = [shape_tensor.clone() for _ in range(world_size)] - dist.all_gather(shape_list, shape_tensor) - # padding result part tensor to max length - shape_max = torch.tensor(shape_list).max() - part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') - part_send[:shape_tensor[0]] = part_tensor - part_recv_list = [ - part_tensor.new_zeros(shape_max) for _ in range(world_size) - ] - # gather all result part - dist.all_gather(part_recv_list, part_send) - - if rank == 0: - part_list = [] - for recv, shape in zip(part_recv_list, shape_list): - part_list.append( - pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - return ordered_results diff --git a/cv/detection/co-detr/pytorch/mmdet/apis/train.py b/cv/detection/co-detr/pytorch/mmdet/apis/train.py deleted file mode 100644 index 0c9ecd0a7f8b309e9304aa9f598c8d1194b502b3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/apis/train.py +++ /dev/null @@ -1,245 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
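The deleted `mmdet/apis/test.py` above provides the single- and multi-GPU evaluation loops; in the distributed case, per-rank results are gathered either over GPU tensors (`gpu_collect=True`) or by dumping pickles into a shared `tmpdir` that rank 0 merges. A rough sketch of how these loops are usually driven, assuming `cfg` and `model` have already been built elsewhere (as in a typical `tools/test.py` script); only the calls into `single_gpu_test`/`multi_gpu_test` mirror the code above:

```python
# Rough sketch of driving the deleted test loops above. `cfg` and `model`
# are assumed to come from an mmdet config / build_detector call elsewhere.
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import build_dataloader, build_dataset

distributed = False  # flip to True under a torch.distributed launch

dataset = build_dataset(cfg.data.test)  # cfg: assumed, see note above
data_loader = build_dataloader(
    dataset, samples_per_gpu=1, workers_per_gpu=2,
    dist=distributed, shuffle=False)

if not distributed:
    model = MMDataParallel(model, device_ids=[0])  # model: assumed
    outputs = single_gpu_test(model, data_loader, show=False,
                              out_dir=None, show_score_thr=0.3)
else:
    model = MMDistributedDataParallel(
        model.cuda(), device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False)
    # gpu_collect=True gathers results with dist.all_gather on GPU tensors;
    # otherwise each rank pickles its part into tmpdir and rank 0 merges them.
    outputs = multi_gpu_test(model, data_loader,
                             tmpdir='.dist_test', gpu_collect=False)
```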
-import os -import random - -import numpy as np -import torch -import torch.distributed as dist -from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner, - Fp16OptimizerHook, OptimizerHook, build_runner, - get_dist_info) - -from mmdet.core import DistEvalHook, EvalHook, build_optimizer -from mmdet.datasets import (build_dataloader, build_dataset, - replace_ImageToTensor) -from mmdet.utils import (build_ddp, build_dp, compat_cfg, - find_latest_checkpoint, get_root_logger) -from projects import * - - -def init_random_seed(seed=None, device='cuda'): - """Initialize random seed. - - If the seed is not set, the seed will be automatically randomized, - and then broadcast to all processes to prevent some potential bugs. - - Args: - seed (int, Optional): The seed. Default to None. - device (str): The device where the seed will be put on. - Default to 'cuda'. - - Returns: - int: Seed to be used. - """ - if seed is not None: - return seed - - # Make sure all ranks share the same random seed to prevent - # some potential bugs. Please refer to - # https://github.com/open-mmlab/mmdetection/issues/6339 - rank, world_size = get_dist_info() - seed = np.random.randint(2**31) - if world_size == 1: - return seed - - if rank == 0: - random_num = torch.tensor(seed, dtype=torch.int32, device=device) - else: - random_num = torch.tensor(0, dtype=torch.int32, device=device) - dist.broadcast(random_num, src=0) - return random_num.item() - - -def set_random_seed(seed, deterministic=False): - """Set random seed. - - Args: - seed (int): Seed to be used. - deterministic (bool): Whether to set the deterministic option for - CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` - to True and `torch.backends.cudnn.benchmark` to False. - Default: False. - """ - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - if deterministic: - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - - -def auto_scale_lr(cfg, distributed, logger): - """Automatically scaling LR according to GPU number and sample per GPU. - - Args: - cfg (config): Training config. - distributed (bool): Using distributed or not. - logger (logging.Logger): Logger. - """ - # Get flag from config - if ('auto_scale_lr' not in cfg) or \ - (not cfg.auto_scale_lr.get('enable', False)): - logger.info('Automatic scaling of learning rate (LR)' - ' has been disabled.') - return - - # Get base batch size from config - base_batch_size = cfg.auto_scale_lr.get('base_batch_size', None) - if base_batch_size is None: - return - - # Get gpu number - if distributed: - _, world_size = get_dist_info() - num_gpus = len(range(world_size)) - else: - num_gpus = len(cfg.gpu_ids) - - # calculate the batch size - samples_per_gpu = cfg.data.train_dataloader.samples_per_gpu - batch_size = num_gpus * samples_per_gpu - logger.info(f'Training with {num_gpus} GPU(s) with {samples_per_gpu} ' - f'samples per GPU. 
The total batch size is {batch_size}.') - - if batch_size != base_batch_size: - # scale LR with - # [linear scaling rule](https://arxiv.org/abs/1706.02677) - scaled_lr = (batch_size / base_batch_size) * cfg.optimizer.lr - logger.info('LR has been automatically scaled ' - f'from {cfg.optimizer.lr} to {scaled_lr}') - cfg.optimizer.lr = scaled_lr - else: - logger.info('The batch size match the ' - f'base batch size: {base_batch_size}, ' - f'will not scaling the LR ({cfg.optimizer.lr}).') - - -def train_detector(model, - dataset, - cfg, - distributed=False, - validate=False, - timestamp=None, - meta=None): - - cfg = compat_cfg(cfg) - logger = get_root_logger(log_level=cfg.log_level) - - # prepare data loaders - dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] - - runner_type = 'EpochBasedRunner' if 'runner' not in cfg else cfg.runner[ - 'type'] - - train_dataloader_default_args = dict( - samples_per_gpu=2, - workers_per_gpu=2, - # `num_gpus` will be ignored if distributed - num_gpus=len(cfg.gpu_ids), - dist=distributed, - seed=cfg.seed, - runner_type=runner_type, - persistent_workers=False) - - train_loader_cfg = { - **train_dataloader_default_args, - **cfg.data.get('train_dataloader', {}) - } - - data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset] - - # put model on gpus - if distributed: - find_unused_parameters = cfg.get('find_unused_parameters', False) - # Sets the `find_unused_parameters` parameter in - # torch.nn.parallel.DistributedDataParallel - model = build_ddp( - model, - cfg.device, - device_ids=[int(os.environ['LOCAL_RANK'])], - broadcast_buffers=False, - find_unused_parameters=find_unused_parameters) - else: - model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids) - - # build optimizer - auto_scale_lr(cfg, distributed, logger) - optimizer = build_optimizer(model, cfg.optimizer) - - runner = build_runner( - cfg.runner, - default_args=dict( - model=model, - optimizer=optimizer, - work_dir=cfg.work_dir, - logger=logger, - meta=meta)) - - # an ugly workaround to make .log and .log.json filenames the same - runner.timestamp = timestamp - - # fp16 setting - fp16_cfg = cfg.get('fp16', None) - if fp16_cfg is not None: - optimizer_config = Fp16OptimizerHook( - **cfg.optimizer_config, **fp16_cfg, distributed=distributed) - elif distributed and 'type' not in cfg.optimizer_config: - optimizer_config = OptimizerHook(**cfg.optimizer_config) - else: - optimizer_config = cfg.optimizer_config - - # register hooks - runner.register_training_hooks( - cfg.lr_config, - optimizer_config, - cfg.checkpoint_config, - cfg.log_config, - cfg.get('momentum_config', None), - custom_hooks_config=cfg.get('custom_hooks', None)) - - if distributed: - if isinstance(runner, EpochBasedRunner): - runner.register_hook(DistSamplerSeedHook()) - - # register eval hooks - if validate: - val_dataloader_default_args = dict( - samples_per_gpu=1, - workers_per_gpu=2, - dist=distributed, - shuffle=False, - persistent_workers=False) - - val_dataloader_args = { - **val_dataloader_default_args, - **cfg.data.get('val_dataloader', {}) - } - # Support batch_size > 1 in validation - - if val_dataloader_args['samples_per_gpu'] > 1: - # Replace 'ImageToTensor' to 'DefaultFormatBundle' - cfg.data.val.pipeline = replace_ImageToTensor( - cfg.data.val.pipeline) - val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) - - val_dataloader = build_dataloader(val_dataset, **val_dataloader_args) - eval_cfg = cfg.get('evaluation', {}) - eval_cfg['by_epoch'] = cfg.runner['type'] != 
'IterBasedRunner' - eval_hook = DistEvalHook if distributed else EvalHook - # In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the - # priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'. - runner.register_hook( - eval_hook(val_dataloader, **eval_cfg), priority='LOW') - - resume_from = None - if cfg.resume_from is None and cfg.get('auto_resume'): - resume_from = find_latest_checkpoint(cfg.work_dir) - if resume_from is not None: - cfg.resume_from = resume_from - - if cfg.resume_from: - runner.resume(cfg.resume_from) - elif cfg.load_from: - runner.load_checkpoint(cfg.load_from) - runner.run(data_loaders, cfg.workflow) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/__init__.py deleted file mode 100644 index 2a6203879840c80c7f89b348f02e4d45b33e5de4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .anchor import * # noqa: F401, F403 -from .bbox import * # noqa: F401, F403 -from .data_structures import * # noqa: F401, F403 -from .evaluation import * # noqa: F401, F403 -from .hook import * # noqa: F401, F403 -from .mask import * # noqa: F401, F403 -from .optimizers import * # noqa: F401, F403 -from .post_processing import * # noqa: F401, F403 -from .utils import * # noqa: F401, F403 diff --git a/cv/detection/co-detr/pytorch/mmdet/core/anchor/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/anchor/__init__.py deleted file mode 100644 index fcc7e4af36fd12a7c9de6ffe07f77aafad5731ba..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/anchor/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator, - YOLOAnchorGenerator) -from .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS, - build_anchor_generator, build_prior_generator) -from .point_generator import MlvlPointGenerator, PointGenerator -from .utils import anchor_inside_flags, calc_region, images_to_levels - -__all__ = [ - 'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags', - 'PointGenerator', 'images_to_levels', 'calc_region', - 'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator', - 'build_prior_generator', 'PRIOR_GENERATORS', 'MlvlPointGenerator' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/anchor/anchor_generator.py b/cv/detection/co-detr/pytorch/mmdet/core/anchor/anchor_generator.py deleted file mode 100644 index 20886fbda65dbf0737565ec6dba59e9fc7bb73ff..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/anchor/anchor_generator.py +++ /dev/null @@ -1,866 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import mmcv -import numpy as np -import torch -from torch.nn.modules.utils import _pair - -from .builder import PRIOR_GENERATORS - - -@PRIOR_GENERATORS.register_module() -class AnchorGenerator: - """Standard anchor generator for 2D anchor-based detectors. - - Args: - strides (list[int] | list[tuple[int, int]]): Strides of anchors - in multiple feature levels in order (w, h). - ratios (list[float]): The list of ratios between the height and width - of anchors in a single level. - scales (list[int] | None): Anchor scales for anchors in a single level. - It cannot be set at the same time if `octave_base_scale` and - `scales_per_octave` are set. 
- base_sizes (list[int] | None): The basic sizes - of anchors in multiple levels. - If None is given, strides will be used as base_sizes. - (If strides are non square, the shortest stride is taken.) - scale_major (bool): Whether to multiply scales first when generating - base anchors. If true, the anchors in the same row will have the - same scales. By default it is True in V2.0 - octave_base_scale (int): The base scale of octave. - scales_per_octave (int): Number of scales for each octave. - `octave_base_scale` and `scales_per_octave` are usually used in - retinanet and the `scales` should be None when they are set. - centers (list[tuple[float, float]] | None): The centers of the anchor - relative to the feature grid center in multiple feature levels. - By default it is set to be None and not used. If a list of tuple of - float is given, they will be used to shift the centers of anchors. - center_offset (float): The offset of center in proportion to anchors' - width and height. By default it is 0 in V2.0. - - Examples: - >>> from mmdet.core import AnchorGenerator - >>> self = AnchorGenerator([16], [1.], [1.], [9]) - >>> all_anchors = self.grid_priors([(2, 2)], device='cpu') - >>> print(all_anchors) - [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], - [11.5000, -4.5000, 20.5000, 4.5000], - [-4.5000, 11.5000, 4.5000, 20.5000], - [11.5000, 11.5000, 20.5000, 20.5000]])] - >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18]) - >>> all_anchors = self.grid_priors([(2, 2), (1, 1)], device='cpu') - >>> print(all_anchors) - [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], - [11.5000, -4.5000, 20.5000, 4.5000], - [-4.5000, 11.5000, 4.5000, 20.5000], - [11.5000, 11.5000, 20.5000, 20.5000]]), \ - tensor([[-9., -9., 9., 9.]])] - """ - - def __init__(self, - strides, - ratios, - scales=None, - base_sizes=None, - scale_major=True, - octave_base_scale=None, - scales_per_octave=None, - centers=None, - center_offset=0.): - # check center and center_offset - if center_offset != 0: - assert centers is None, 'center cannot be set when center_offset' \ - f'!=0, {centers} is given.' 
- if not (0 <= center_offset <= 1): - raise ValueError('center_offset should be in range [0, 1], ' - f'{center_offset} is given.') - if centers is not None: - assert len(centers) == len(strides), \ - 'The number of strides should be the same as centers, got ' \ - f'{strides} and {centers}' - - # calculate base sizes of anchors - self.strides = [_pair(stride) for stride in strides] - self.base_sizes = [min(stride) for stride in self.strides - ] if base_sizes is None else base_sizes - assert len(self.base_sizes) == len(self.strides), \ - 'The number of strides should be the same as base sizes, got ' \ - f'{self.strides} and {self.base_sizes}' - - # calculate scales of anchors - assert ((octave_base_scale is not None - and scales_per_octave is not None) ^ (scales is not None)), \ - 'scales and octave_base_scale with scales_per_octave cannot' \ - ' be set at the same time' - if scales is not None: - self.scales = torch.Tensor(scales) - elif octave_base_scale is not None and scales_per_octave is not None: - octave_scales = np.array( - [2**(i / scales_per_octave) for i in range(scales_per_octave)]) - scales = octave_scales * octave_base_scale - self.scales = torch.Tensor(scales) - else: - raise ValueError('Either scales or octave_base_scale with ' - 'scales_per_octave should be set') - - self.octave_base_scale = octave_base_scale - self.scales_per_octave = scales_per_octave - self.ratios = torch.Tensor(ratios) - self.scale_major = scale_major - self.centers = centers - self.center_offset = center_offset - self.base_anchors = self.gen_base_anchors() - - @property - def num_base_anchors(self): - """list[int]: total number of base anchors in a feature grid""" - return self.num_base_priors - - @property - def num_base_priors(self): - """list[int]: The number of priors (anchors) at a point - on the feature grid""" - return [base_anchors.size(0) for base_anchors in self.base_anchors] - - @property - def num_levels(self): - """int: number of feature levels that the generator will be applied""" - return len(self.strides) - - def gen_base_anchors(self): - """Generate base anchors. - - Returns: - list(torch.Tensor): Base anchors of a feature grid in multiple \ - feature levels. - """ - multi_level_base_anchors = [] - for i, base_size in enumerate(self.base_sizes): - center = None - if self.centers is not None: - center = self.centers[i] - multi_level_base_anchors.append( - self.gen_single_level_base_anchors( - base_size, - scales=self.scales, - ratios=self.ratios, - center=center)) - return multi_level_base_anchors - - def gen_single_level_base_anchors(self, - base_size, - scales, - ratios, - center=None): - """Generate base anchors of a single level. - - Args: - base_size (int | float): Basic size of an anchor. - scales (torch.Tensor): Scales of the anchor. - ratios (torch.Tensor): The ratio between between the height - and width of anchors in a single level. - center (tuple[float], optional): The center of the base anchor - related to a single feature grid. Defaults to None. - - Returns: - torch.Tensor: Anchors in a single-level feature maps. 
- """ - w = base_size - h = base_size - if center is None: - x_center = self.center_offset * w - y_center = self.center_offset * h - else: - x_center, y_center = center - - h_ratios = torch.sqrt(ratios) - w_ratios = 1 / h_ratios - if self.scale_major: - ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) - hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) - else: - ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) - hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) - - # use float anchor and the anchor's center is aligned with the - # pixel center - base_anchors = [ - x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws, - y_center + 0.5 * hs - ] - base_anchors = torch.stack(base_anchors, dim=-1) - - return base_anchors - - def _meshgrid(self, x, y, row_major=True): - """Generate mesh grid of x and y. - - Args: - x (torch.Tensor): Grids of x dimension. - y (torch.Tensor): Grids of y dimension. - row_major (bool, optional): Whether to return y grids first. - Defaults to True. - - Returns: - tuple[torch.Tensor]: The mesh grids of x and y. - """ - # use shape instead of len to keep tracing while exporting to onnx - xx = x.repeat(y.shape[0]) - yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1) - if row_major: - return xx, yy - else: - return yy, xx - - def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda'): - """Generate grid anchors in multiple feature levels. - - Args: - featmap_sizes (list[tuple]): List of feature map sizes in - multiple feature levels. - dtype (:obj:`torch.dtype`): Dtype of priors. - Default: torch.float32. - device (str): The device where the anchors will be put on. - - Return: - list[torch.Tensor]: Anchors in multiple feature levels. \ - The sizes of each tensor should be [N, 4], where \ - N = width * height * num_base_anchors, width and height \ - are the sizes of the corresponding feature level, \ - num_base_anchors is the number of anchors for that level. - """ - assert self.num_levels == len(featmap_sizes) - multi_level_anchors = [] - for i in range(self.num_levels): - anchors = self.single_level_grid_priors( - featmap_sizes[i], level_idx=i, dtype=dtype, device=device) - multi_level_anchors.append(anchors) - return multi_level_anchors - - def single_level_grid_priors(self, - featmap_size, - level_idx, - dtype=torch.float32, - device='cuda'): - """Generate grid anchors of a single level. - - Note: - This function is usually called by method ``self.grid_priors``. - - Args: - featmap_size (tuple[int]): Size of the feature maps. - level_idx (int): The index of corresponding feature map level. - dtype (obj:`torch.dtype`): Date type of points.Defaults to - ``torch.float32``. - device (str, optional): The device the tensor will be put on. - Defaults to 'cuda'. - - Returns: - torch.Tensor: Anchors in the overall feature maps. - """ - - base_anchors = self.base_anchors[level_idx].to(device).to(dtype) - feat_h, feat_w = featmap_size - stride_w, stride_h = self.strides[level_idx] - # First create Range with the default dtype, than convert to - # target `dtype` for onnx exporting. 
- shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w - shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h - - shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) - shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) - # first feat_w elements correspond to the first row of shifts - # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get - # shifted anchors (K, A, 4), reshape to (K*A, 4) - - all_anchors = base_anchors[None, :, :] + shifts[:, None, :] - all_anchors = all_anchors.view(-1, 4) - # first A rows correspond to A anchors of (0, 0) in feature map, - # then (0, 1), (0, 2), ... - return all_anchors - - def sparse_priors(self, - prior_idxs, - featmap_size, - level_idx, - dtype=torch.float32, - device='cuda'): - """Generate sparse anchors according to the ``prior_idxs``. - - Args: - prior_idxs (Tensor): The index of corresponding anchors - in the feature map. - featmap_size (tuple[int]): feature map size arrange as (h, w). - level_idx (int): The level index of corresponding feature - map. - dtype (obj:`torch.dtype`): Date type of points.Defaults to - ``torch.float32``. - device (obj:`torch.device`): The device where the points is - located. - Returns: - Tensor: Anchor with shape (N, 4), N should be equal to - the length of ``prior_idxs``. - """ - - height, width = featmap_size - num_base_anchors = self.num_base_anchors[level_idx] - base_anchor_id = prior_idxs % num_base_anchors - x = (prior_idxs // - num_base_anchors) % width * self.strides[level_idx][0] - y = (prior_idxs // width // - num_base_anchors) % height * self.strides[level_idx][1] - priors = torch.stack([x, y, x, y], 1).to(dtype).to(device) + \ - self.base_anchors[level_idx][base_anchor_id, :].to(device) - - return priors - - def grid_anchors(self, featmap_sizes, device='cuda'): - """Generate grid anchors in multiple feature levels. - - Args: - featmap_sizes (list[tuple]): List of feature map sizes in - multiple feature levels. - device (str): Device where the anchors will be put on. - - Return: - list[torch.Tensor]: Anchors in multiple feature levels. \ - The sizes of each tensor should be [N, 4], where \ - N = width * height * num_base_anchors, width and height \ - are the sizes of the corresponding feature level, \ - num_base_anchors is the number of anchors for that level. - """ - warnings.warn('``grid_anchors`` would be deprecated soon. ' - 'Please use ``grid_priors`` ') - - assert self.num_levels == len(featmap_sizes) - multi_level_anchors = [] - for i in range(self.num_levels): - anchors = self.single_level_grid_anchors( - self.base_anchors[i].to(device), - featmap_sizes[i], - self.strides[i], - device=device) - multi_level_anchors.append(anchors) - return multi_level_anchors - - def single_level_grid_anchors(self, - base_anchors, - featmap_size, - stride=(16, 16), - device='cuda'): - """Generate grid anchors of a single level. - - Note: - This function is usually called by method ``self.grid_anchors``. - - Args: - base_anchors (torch.Tensor): The base anchors of a feature grid. - featmap_size (tuple[int]): Size of the feature maps. - stride (tuple[int], optional): Stride of the feature map in order - (w, h). Defaults to (16, 16). - device (str, optional): Device the tensor will be put on. - Defaults to 'cuda'. - - Returns: - torch.Tensor: Anchors in the overall feature maps. - """ - - warnings.warn( - '``single_level_grid_anchors`` would be deprecated soon. 
' - 'Please use ``single_level_grid_priors`` ') - - # keep featmap_size as Tensor instead of int, so that we - # can convert to ONNX correctly - feat_h, feat_w = featmap_size - shift_x = torch.arange(0, feat_w, device=device) * stride[0] - shift_y = torch.arange(0, feat_h, device=device) * stride[1] - - shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) - shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) - shifts = shifts.type_as(base_anchors) - # first feat_w elements correspond to the first row of shifts - # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get - # shifted anchors (K, A, 4), reshape to (K*A, 4) - - all_anchors = base_anchors[None, :, :] + shifts[:, None, :] - all_anchors = all_anchors.view(-1, 4) - # first A rows correspond to A anchors of (0, 0) in feature map, - # then (0, 1), (0, 2), ... - return all_anchors - - def valid_flags(self, featmap_sizes, pad_shape, device='cuda'): - """Generate valid flags of anchors in multiple feature levels. - - Args: - featmap_sizes (list(tuple)): List of feature map sizes in - multiple feature levels. - pad_shape (tuple): The padded shape of the image. - device (str): Device where the anchors will be put on. - - Return: - list(torch.Tensor): Valid flags of anchors in multiple levels. - """ - assert self.num_levels == len(featmap_sizes) - multi_level_flags = [] - for i in range(self.num_levels): - anchor_stride = self.strides[i] - feat_h, feat_w = featmap_sizes[i] - h, w = pad_shape[:2] - valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h) - valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w) - flags = self.single_level_valid_flags((feat_h, feat_w), - (valid_feat_h, valid_feat_w), - self.num_base_anchors[i], - device=device) - multi_level_flags.append(flags) - return multi_level_flags - - def single_level_valid_flags(self, - featmap_size, - valid_size, - num_base_anchors, - device='cuda'): - """Generate the valid flags of anchor in a single feature map. - - Args: - featmap_size (tuple[int]): The size of feature maps, arrange - as (h, w). - valid_size (tuple[int]): The valid size of the feature maps. - num_base_anchors (int): The number of base anchors. - device (str, optional): Device where the flags will be put on. - Defaults to 'cuda'. - - Returns: - torch.Tensor: The valid flags of each anchor in a single level \ - feature map. 
- """ - feat_h, feat_w = featmap_size - valid_h, valid_w = valid_size - assert valid_h <= feat_h and valid_w <= feat_w - valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) - valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) - valid_x[:valid_w] = 1 - valid_y[:valid_h] = 1 - valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) - valid = valid_xx & valid_yy - valid = valid[:, None].expand(valid.size(0), - num_base_anchors).contiguous().view(-1) - return valid - - def __repr__(self): - """str: a string that describes the module""" - indent_str = ' ' - repr_str = self.__class__.__name__ + '(\n' - repr_str += f'{indent_str}strides={self.strides},\n' - repr_str += f'{indent_str}ratios={self.ratios},\n' - repr_str += f'{indent_str}scales={self.scales},\n' - repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' - repr_str += f'{indent_str}scale_major={self.scale_major},\n' - repr_str += f'{indent_str}octave_base_scale=' - repr_str += f'{self.octave_base_scale},\n' - repr_str += f'{indent_str}scales_per_octave=' - repr_str += f'{self.scales_per_octave},\n' - repr_str += f'{indent_str}num_levels={self.num_levels}\n' - repr_str += f'{indent_str}centers={self.centers},\n' - repr_str += f'{indent_str}center_offset={self.center_offset})' - return repr_str - - -@PRIOR_GENERATORS.register_module() -class SSDAnchorGenerator(AnchorGenerator): - """Anchor generator for SSD. - - Args: - strides (list[int] | list[tuple[int, int]]): Strides of anchors - in multiple feature levels. - ratios (list[float]): The list of ratios between the height and width - of anchors in a single level. - min_sizes (list[float]): The list of minimum anchor sizes on each - level. - max_sizes (list[float]): The list of maximum anchor sizes on each - level. - basesize_ratio_range (tuple(float)): Ratio range of anchors. Being - used when not setting min_sizes and max_sizes. - input_size (int): Size of feature map, 300 for SSD300, 512 for - SSD512. Being used when not setting min_sizes and max_sizes. - scale_major (bool): Whether to multiply scales first when generating - base anchors. If true, the anchors in the same row will have the - same scales. It is always set to be False in SSD. - """ - - def __init__(self, - strides, - ratios, - min_sizes=None, - max_sizes=None, - basesize_ratio_range=(0.15, 0.9), - input_size=300, - scale_major=True): - assert len(strides) == len(ratios) - assert not (min_sizes is None) ^ (max_sizes is None) - self.strides = [_pair(stride) for stride in strides] - self.centers = [(stride[0] / 2., stride[1] / 2.) 
- for stride in self.strides] - - if min_sizes is None and max_sizes is None: - # use hard code to generate SSD anchors - self.input_size = input_size - assert mmcv.is_tuple_of(basesize_ratio_range, float) - self.basesize_ratio_range = basesize_ratio_range - # calculate anchor ratios and sizes - min_ratio, max_ratio = basesize_ratio_range - min_ratio = int(min_ratio * 100) - max_ratio = int(max_ratio * 100) - step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2)) - min_sizes = [] - max_sizes = [] - for ratio in range(int(min_ratio), int(max_ratio) + 1, step): - min_sizes.append(int(self.input_size * ratio / 100)) - max_sizes.append(int(self.input_size * (ratio + step) / 100)) - if self.input_size == 300: - if basesize_ratio_range[0] == 0.15: # SSD300 COCO - min_sizes.insert(0, int(self.input_size * 7 / 100)) - max_sizes.insert(0, int(self.input_size * 15 / 100)) - elif basesize_ratio_range[0] == 0.2: # SSD300 VOC - min_sizes.insert(0, int(self.input_size * 10 / 100)) - max_sizes.insert(0, int(self.input_size * 20 / 100)) - else: - raise ValueError( - 'basesize_ratio_range[0] should be either 0.15' - 'or 0.2 when input_size is 300, got ' - f'{basesize_ratio_range[0]}.') - elif self.input_size == 512: - if basesize_ratio_range[0] == 0.1: # SSD512 COCO - min_sizes.insert(0, int(self.input_size * 4 / 100)) - max_sizes.insert(0, int(self.input_size * 10 / 100)) - elif basesize_ratio_range[0] == 0.15: # SSD512 VOC - min_sizes.insert(0, int(self.input_size * 7 / 100)) - max_sizes.insert(0, int(self.input_size * 15 / 100)) - else: - raise ValueError( - 'When not setting min_sizes and max_sizes,' - 'basesize_ratio_range[0] should be either 0.1' - 'or 0.15 when input_size is 512, got' - f' {basesize_ratio_range[0]}.') - else: - raise ValueError( - 'Only support 300 or 512 in SSDAnchorGenerator when ' - 'not setting min_sizes and max_sizes, ' - f'got {self.input_size}.') - - assert len(min_sizes) == len(max_sizes) == len(strides) - - anchor_ratios = [] - anchor_scales = [] - for k in range(len(self.strides)): - scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])] - anchor_ratio = [1.] - for r in ratios[k]: - anchor_ratio += [1 / r, r] # 4 or 6 ratio - anchor_ratios.append(torch.Tensor(anchor_ratio)) - anchor_scales.append(torch.Tensor(scales)) - - self.base_sizes = min_sizes - self.scales = anchor_scales - self.ratios = anchor_ratios - self.scale_major = scale_major - self.center_offset = 0 - self.base_anchors = self.gen_base_anchors() - - def gen_base_anchors(self): - """Generate base anchors. - - Returns: - list(torch.Tensor): Base anchors of a feature grid in multiple \ - feature levels. 
- """ - multi_level_base_anchors = [] - for i, base_size in enumerate(self.base_sizes): - base_anchors = self.gen_single_level_base_anchors( - base_size, - scales=self.scales[i], - ratios=self.ratios[i], - center=self.centers[i]) - indices = list(range(len(self.ratios[i]))) - indices.insert(1, len(indices)) - base_anchors = torch.index_select(base_anchors, 0, - torch.LongTensor(indices)) - multi_level_base_anchors.append(base_anchors) - return multi_level_base_anchors - - def __repr__(self): - """str: a string that describes the module""" - indent_str = ' ' - repr_str = self.__class__.__name__ + '(\n' - repr_str += f'{indent_str}strides={self.strides},\n' - repr_str += f'{indent_str}scales={self.scales},\n' - repr_str += f'{indent_str}scale_major={self.scale_major},\n' - repr_str += f'{indent_str}input_size={self.input_size},\n' - repr_str += f'{indent_str}scales={self.scales},\n' - repr_str += f'{indent_str}ratios={self.ratios},\n' - repr_str += f'{indent_str}num_levels={self.num_levels},\n' - repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' - repr_str += f'{indent_str}basesize_ratio_range=' - repr_str += f'{self.basesize_ratio_range})' - return repr_str - - -@PRIOR_GENERATORS.register_module() -class LegacyAnchorGenerator(AnchorGenerator): - """Legacy anchor generator used in MMDetection V1.x. - - Note: - Difference to the V2.0 anchor generator: - - 1. The center offset of V1.x anchors are set to be 0.5 rather than 0. - 2. The width/height are minused by 1 when calculating the anchors' \ - centers and corners to meet the V1.x coordinate system. - 3. The anchors' corners are quantized. - - Args: - strides (list[int] | list[tuple[int]]): Strides of anchors - in multiple feature levels. - ratios (list[float]): The list of ratios between the height and width - of anchors in a single level. - scales (list[int] | None): Anchor scales for anchors in a single level. - It cannot be set at the same time if `octave_base_scale` and - `scales_per_octave` are set. - base_sizes (list[int]): The basic sizes of anchors in multiple levels. - If None is given, strides will be used to generate base_sizes. - scale_major (bool): Whether to multiply scales first when generating - base anchors. If true, the anchors in the same row will have the - same scales. By default it is True in V2.0 - octave_base_scale (int): The base scale of octave. - scales_per_octave (int): Number of scales for each octave. - `octave_base_scale` and `scales_per_octave` are usually used in - retinanet and the `scales` should be None when they are set. - centers (list[tuple[float, float]] | None): The centers of the anchor - relative to the feature grid center in multiple feature levels. - By default it is set to be None and not used. It a list of float - is given, this list will be used to shift the centers of anchors. - center_offset (float): The offset of center in proportion to anchors' - width and height. By default it is 0.5 in V2.0 but it should be 0.5 - in v1.x models. - - Examples: - >>> from mmdet.core import LegacyAnchorGenerator - >>> self = LegacyAnchorGenerator( - >>> [16], [1.], [1.], [9], center_offset=0.5) - >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu') - >>> print(all_anchors) - [tensor([[ 0., 0., 8., 8.], - [16., 0., 24., 8.], - [ 0., 16., 8., 24.], - [16., 16., 24., 24.]])] - """ - - def gen_single_level_base_anchors(self, - base_size, - scales, - ratios, - center=None): - """Generate base anchors of a single level. 
- - Note: - The width/height of anchors are minused by 1 when calculating \ - the centers and corners to meet the V1.x coordinate system. - - Args: - base_size (int | float): Basic size of an anchor. - scales (torch.Tensor): Scales of the anchor. - ratios (torch.Tensor): The ratio between between the height. - and width of anchors in a single level. - center (tuple[float], optional): The center of the base anchor - related to a single feature grid. Defaults to None. - - Returns: - torch.Tensor: Anchors in a single-level feature map. - """ - w = base_size - h = base_size - if center is None: - x_center = self.center_offset * (w - 1) - y_center = self.center_offset * (h - 1) - else: - x_center, y_center = center - - h_ratios = torch.sqrt(ratios) - w_ratios = 1 / h_ratios - if self.scale_major: - ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) - hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) - else: - ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) - hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) - - # use float anchor and the anchor's center is aligned with the - # pixel center - base_anchors = [ - x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1), - x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1) - ] - base_anchors = torch.stack(base_anchors, dim=-1).round() - - return base_anchors - - -@PRIOR_GENERATORS.register_module() -class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator): - """Legacy anchor generator used in MMDetection V1.x. - - The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator` - can be found in `LegacyAnchorGenerator`. - """ - - def __init__(self, - strides, - ratios, - basesize_ratio_range, - input_size=300, - scale_major=True): - super(LegacySSDAnchorGenerator, self).__init__( - strides=strides, - ratios=ratios, - basesize_ratio_range=basesize_ratio_range, - input_size=input_size, - scale_major=scale_major) - self.centers = [((stride - 1) / 2., (stride - 1) / 2.) - for stride in strides] - self.base_anchors = self.gen_base_anchors() - - -@PRIOR_GENERATORS.register_module() -class YOLOAnchorGenerator(AnchorGenerator): - """Anchor generator for YOLO. - - Args: - strides (list[int] | list[tuple[int, int]]): Strides of anchors - in multiple feature levels. - base_sizes (list[list[tuple[int, int]]]): The basic sizes - of anchors in multiple levels. - """ - - def __init__(self, strides, base_sizes): - self.strides = [_pair(stride) for stride in strides] - self.centers = [(stride[0] / 2., stride[1] / 2.) - for stride in self.strides] - self.base_sizes = [] - num_anchor_per_level = len(base_sizes[0]) - for base_sizes_per_level in base_sizes: - assert num_anchor_per_level == len(base_sizes_per_level) - self.base_sizes.append( - [_pair(base_size) for base_size in base_sizes_per_level]) - self.base_anchors = self.gen_base_anchors() - - @property - def num_levels(self): - """int: number of feature levels that the generator will be applied""" - return len(self.base_sizes) - - def gen_base_anchors(self): - """Generate base anchors. - - Returns: - list(torch.Tensor): Base anchors of a feature grid in multiple \ - feature levels. 
- """ - multi_level_base_anchors = [] - for i, base_sizes_per_level in enumerate(self.base_sizes): - center = None - if self.centers is not None: - center = self.centers[i] - multi_level_base_anchors.append( - self.gen_single_level_base_anchors(base_sizes_per_level, - center)) - return multi_level_base_anchors - - def gen_single_level_base_anchors(self, base_sizes_per_level, center=None): - """Generate base anchors of a single level. - - Args: - base_sizes_per_level (list[tuple[int, int]]): Basic sizes of - anchors. - center (tuple[float], optional): The center of the base anchor - related to a single feature grid. Defaults to None. - - Returns: - torch.Tensor: Anchors in a single-level feature maps. - """ - x_center, y_center = center - base_anchors = [] - for base_size in base_sizes_per_level: - w, h = base_size - - # use float anchor and the anchor's center is aligned with the - # pixel center - base_anchor = torch.Tensor([ - x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w, - y_center + 0.5 * h - ]) - base_anchors.append(base_anchor) - base_anchors = torch.stack(base_anchors, dim=0) - - return base_anchors - - def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'): - """Generate responsible anchor flags of grid cells in multiple scales. - - Args: - featmap_sizes (list(tuple)): List of feature map sizes in multiple - feature levels. - gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). - device (str): Device where the anchors will be put on. - - Return: - list(torch.Tensor): responsible flags of anchors in multiple level - """ - assert self.num_levels == len(featmap_sizes) - multi_level_responsible_flags = [] - for i in range(self.num_levels): - anchor_stride = self.strides[i] - flags = self.single_level_responsible_flags( - featmap_sizes[i], - gt_bboxes, - anchor_stride, - self.num_base_anchors[i], - device=device) - multi_level_responsible_flags.append(flags) - return multi_level_responsible_flags - - def single_level_responsible_flags(self, - featmap_size, - gt_bboxes, - stride, - num_base_anchors, - device='cuda'): - """Generate the responsible flags of anchor in a single feature map. - - Args: - featmap_size (tuple[int]): The size of feature maps. - gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). - stride (tuple(int)): stride of current level - num_base_anchors (int): The number of base anchors. - device (str, optional): Device where the flags will be put on. - Defaults to 'cuda'. - - Returns: - torch.Tensor: The valid flags of each anchor in a single level \ - feature map. 
- """ - feat_h, feat_w = featmap_size - gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device) - gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device) - gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long() - gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long() - - # row major indexing - gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x - - responsible_grid = torch.zeros( - feat_h * feat_w, dtype=torch.uint8, device=device) - responsible_grid[gt_bboxes_grid_idx] = 1 - - responsible_grid = responsible_grid[:, None].expand( - responsible_grid.size(0), num_base_anchors).contiguous().view(-1) - return responsible_grid diff --git a/cv/detection/co-detr/pytorch/mmdet/core/anchor/builder.py b/cv/detection/co-detr/pytorch/mmdet/core/anchor/builder.py deleted file mode 100644 index ddb25ad37937bcf227832e37469a0e31cae77826..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/anchor/builder.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -from mmcv.utils import Registry, build_from_cfg - -PRIOR_GENERATORS = Registry('Generator for anchors and points') - -ANCHOR_GENERATORS = PRIOR_GENERATORS - - -def build_prior_generator(cfg, default_args=None): - return build_from_cfg(cfg, PRIOR_GENERATORS, default_args) - - -def build_anchor_generator(cfg, default_args=None): - warnings.warn( - '``build_anchor_generator`` would be deprecated soon, please use ' - '``build_prior_generator`` ') - return build_prior_generator(cfg, default_args=default_args) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/anchor/point_generator.py b/cv/detection/co-detr/pytorch/mmdet/core/anchor/point_generator.py deleted file mode 100644 index cc9c3887dd7c1d3afe30b705f16162d1d03c9b5d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/anchor/point_generator.py +++ /dev/null @@ -1,263 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch -from torch.nn.modules.utils import _pair - -from .builder import PRIOR_GENERATORS - - -@PRIOR_GENERATORS.register_module() -class PointGenerator: - - def _meshgrid(self, x, y, row_major=True): - xx = x.repeat(len(y)) - yy = y.view(-1, 1).repeat(1, len(x)).view(-1) - if row_major: - return xx, yy - else: - return yy, xx - - def grid_points(self, featmap_size, stride=16, device='cuda'): - feat_h, feat_w = featmap_size - shift_x = torch.arange(0., feat_w, device=device) * stride - shift_y = torch.arange(0., feat_h, device=device) * stride - shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) - stride = shift_x.new_full((shift_xx.shape[0], ), stride) - shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1) - all_points = shifts.to(device) - return all_points - - def valid_flags(self, featmap_size, valid_size, device='cuda'): - feat_h, feat_w = featmap_size - valid_h, valid_w = valid_size - assert valid_h <= feat_h and valid_w <= feat_w - valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) - valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) - valid_x[:valid_w] = 1 - valid_y[:valid_h] = 1 - valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) - valid = valid_xx & valid_yy - return valid - - -@PRIOR_GENERATORS.register_module() -class MlvlPointGenerator: - """Standard points generator for multi-level (Mlvl) feature maps in 2D - points-based detectors. 
- - Args: - strides (list[int] | list[tuple[int, int]]): Strides of anchors - in multiple feature levels in order (w, h). - offset (float): The offset of points, the value is normalized with - corresponding stride. Defaults to 0.5. - """ - - def __init__(self, strides, offset=0.5): - self.strides = [_pair(stride) for stride in strides] - self.offset = offset - - @property - def num_levels(self): - """int: number of feature levels that the generator will be applied""" - return len(self.strides) - - @property - def num_base_priors(self): - """list[int]: The number of priors (points) at a point - on the feature grid""" - return [1 for _ in range(len(self.strides))] - - def _meshgrid(self, x, y, row_major=True): - yy, xx = torch.meshgrid(y, x) - if row_major: - # warning .flatten() would cause error in ONNX exporting - # have to use reshape here - return xx.reshape(-1), yy.reshape(-1) - - else: - return yy.reshape(-1), xx.reshape(-1) - - def grid_priors(self, - featmap_sizes, - dtype=torch.float32, - device='cuda', - with_stride=False): - """Generate grid points of multiple feature levels. - - Args: - featmap_sizes (list[tuple]): List of feature map sizes in - multiple feature levels, each size arrange as - as (h, w). - dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32. - device (str): The device where the anchors will be put on. - with_stride (bool): Whether to concatenate the stride to - the last dimension of points. - - Return: - list[torch.Tensor]: Points of multiple feature levels. - The sizes of each tensor should be (N, 2) when with stride is - ``False``, where N = width * height, width and height - are the sizes of the corresponding feature level, - and the last dimension 2 represent (coord_x, coord_y), - otherwise the shape should be (N, 4), - and the last dimension 4 represent - (coord_x, coord_y, stride_w, stride_h). - """ - - assert self.num_levels == len(featmap_sizes) - multi_level_priors = [] - for i in range(self.num_levels): - priors = self.single_level_grid_priors( - featmap_sizes[i], - level_idx=i, - dtype=dtype, - device=device, - with_stride=with_stride) - multi_level_priors.append(priors) - return multi_level_priors - - def single_level_grid_priors(self, - featmap_size, - level_idx, - dtype=torch.float32, - device='cuda', - with_stride=False): - """Generate grid Points of a single level. - - Note: - This function is usually called by method ``self.grid_priors``. - - Args: - featmap_size (tuple[int]): Size of the feature maps, arrange as - (h, w). - level_idx (int): The index of corresponding feature map level. - dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32. - device (str, optional): The device the tensor will be put on. - Defaults to 'cuda'. - with_stride (bool): Concatenate the stride to the last dimension - of points. - - Return: - Tensor: Points of single feature levels. - The shape of tensor should be (N, 2) when with stride is - ``False``, where N = width * height, width and height - are the sizes of the corresponding feature level, - and the last dimension 2 represent (coord_x, coord_y), - otherwise the shape should be (N, 4), - and the last dimension 4 represent - (coord_x, coord_y, stride_w, stride_h). 
- """ - feat_h, feat_w = featmap_size - stride_w, stride_h = self.strides[level_idx] - shift_x = (torch.arange(0, feat_w, device=device) + - self.offset) * stride_w - # keep featmap_size as Tensor instead of int, so that we - # can convert to ONNX correctly - shift_x = shift_x.to(dtype) - - shift_y = (torch.arange(0, feat_h, device=device) + - self.offset) * stride_h - # keep featmap_size as Tensor instead of int, so that we - # can convert to ONNX correctly - shift_y = shift_y.to(dtype) - shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) - if not with_stride: - shifts = torch.stack([shift_xx, shift_yy], dim=-1) - else: - # use `shape[0]` instead of `len(shift_xx)` for ONNX export - stride_w = shift_xx.new_full((shift_xx.shape[0], ), - stride_w).to(dtype) - stride_h = shift_xx.new_full((shift_yy.shape[0], ), - stride_h).to(dtype) - shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], - dim=-1) - all_points = shifts.to(device) - return all_points - - def valid_flags(self, featmap_sizes, pad_shape, device='cuda'): - """Generate valid flags of points of multiple feature levels. - - Args: - featmap_sizes (list(tuple)): List of feature map sizes in - multiple feature levels, each size arrange as - as (h, w). - pad_shape (tuple(int)): The padded shape of the image, - arrange as (h, w). - device (str): The device where the anchors will be put on. - - Return: - list(torch.Tensor): Valid flags of points of multiple levels. - """ - assert self.num_levels == len(featmap_sizes) - multi_level_flags = [] - for i in range(self.num_levels): - point_stride = self.strides[i] - feat_h, feat_w = featmap_sizes[i] - h, w = pad_shape[:2] - valid_feat_h = min(int(np.ceil(h / point_stride[1])), feat_h) - valid_feat_w = min(int(np.ceil(w / point_stride[0])), feat_w) - flags = self.single_level_valid_flags((feat_h, feat_w), - (valid_feat_h, valid_feat_w), - device=device) - multi_level_flags.append(flags) - return multi_level_flags - - def single_level_valid_flags(self, - featmap_size, - valid_size, - device='cuda'): - """Generate the valid flags of points of a single feature map. - - Args: - featmap_size (tuple[int]): The size of feature maps, arrange as - as (h, w). - valid_size (tuple[int]): The valid size of the feature maps. - The size arrange as as (h, w). - device (str, optional): The device where the flags will be put on. - Defaults to 'cuda'. - - Returns: - torch.Tensor: The valid flags of each points in a single level \ - feature map. - """ - feat_h, feat_w = featmap_size - valid_h, valid_w = valid_size - assert valid_h <= feat_h and valid_w <= feat_w - valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) - valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) - valid_x[:valid_w] = 1 - valid_y[:valid_h] = 1 - valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) - valid = valid_xx & valid_yy - return valid - - def sparse_priors(self, - prior_idxs, - featmap_size, - level_idx, - dtype=torch.float32, - device='cuda'): - """Generate sparse points according to the ``prior_idxs``. - - Args: - prior_idxs (Tensor): The index of corresponding anchors - in the feature map. - featmap_size (tuple[int]): feature map size arrange as (w, h). - level_idx (int): The level index of corresponding feature - map. - dtype (obj:`torch.dtype`): Date type of points. Defaults to - ``torch.float32``. - device (obj:`torch.device`): The device where the points is - located. - Returns: - Tensor: Anchor with shape (N, 2), N should be equal to - the length of ``prior_idxs``. 
And last dimension - 2 represent (coord_x, coord_y). - """ - height, width = featmap_size - x = (prior_idxs % width + self.offset) * self.strides[level_idx][0] - y = ((prior_idxs // width) % height + - self.offset) * self.strides[level_idx][1] - prioris = torch.stack([x, y], 1).to(dtype) - prioris = prioris.to(device) - return prioris diff --git a/cv/detection/co-detr/pytorch/mmdet/core/anchor/utils.py b/cv/detection/co-detr/pytorch/mmdet/core/anchor/utils.py deleted file mode 100644 index c2f202476ca4413efbca191150719d68777e2be3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/anchor/utils.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - - -def images_to_levels(target, num_levels): - """Convert targets by image to targets by feature level. - - [target_img0, target_img1] -> [target_level0, target_level1, ...] - """ - target = torch.stack(target, 0) - level_targets = [] - start = 0 - for n in num_levels: - end = start + n - # level_targets.append(target[:, start:end].squeeze(0)) - level_targets.append(target[:, start:end]) - start = end - return level_targets - - -def anchor_inside_flags(flat_anchors, - valid_flags, - img_shape, - allowed_border=0): - """Check whether the anchors are inside the border. - - Args: - flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4). - valid_flags (torch.Tensor): An existing valid flags of anchors. - img_shape (tuple(int)): Shape of current image. - allowed_border (int, optional): The border to allow the valid anchor. - Defaults to 0. - - Returns: - torch.Tensor: Flags indicating whether the anchors are inside a \ - valid range. - """ - img_h, img_w = img_shape[:2] - if allowed_border >= 0: - inside_flags = valid_flags & \ - (flat_anchors[:, 0] >= -allowed_border) & \ - (flat_anchors[:, 1] >= -allowed_border) & \ - (flat_anchors[:, 2] < img_w + allowed_border) & \ - (flat_anchors[:, 3] < img_h + allowed_border) - else: - inside_flags = valid_flags - return inside_flags - - -def calc_region(bbox, ratio, featmap_size=None): - """Calculate a proportional bbox region. - - The bbox center are fixed and the new h' and w' is h * ratio and w * ratio. - - Args: - bbox (Tensor): Bboxes to calculate regions, shape (n, 4). - ratio (float): Ratio of the output region. - featmap_size (tuple): Feature map size used for clipping the boundary. - - Returns: - tuple: x1, y1, x2, y2 - """ - x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long() - y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long() - x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long() - y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long() - if featmap_size is not None: - x1 = x1.clamp(min=0, max=featmap_size[1]) - y1 = y1.clamp(min=0, max=featmap_size[0]) - x2 = x2.clamp(min=0, max=featmap_size[1]) - y2 = y2.clamp(min=0, max=featmap_size[0]) - return (x1, y1, x2, y2) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/__init__.py deleted file mode 100644 index 371eba198e9fad1b0c3697d6c9f250c930f844d7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
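
As a quick aside on the border filtering performed by `anchor_inside_flags` above: the sketch below applies the same predicate with plain `torch`, so it does not depend on the deleted `mmdet` tree. The anchor coordinates and image size are made-up illustrative values, not anything from this repository.

```python
import torch

# Hypothetical flat anchors in (x1, y1, x2, y2) format and a 100x100 image.
flat_anchors = torch.tensor([
    [10., 10., 50., 50.],    # fully inside the image
    [-5., 20., 30., 60.],    # crosses the left border
    [80., 80., 120., 120.],  # crosses the bottom-right corner
])
valid_flags = torch.ones(flat_anchors.size(0), dtype=torch.bool)
img_h, img_w, allowed_border = 100, 100, 0

# Same check as anchor_inside_flags: keep anchors whose corners stay
# within the (optionally enlarged) image rectangle.
inside_flags = valid_flags & \
    (flat_anchors[:, 0] >= -allowed_border) & \
    (flat_anchors[:, 1] >= -allowed_border) & \
    (flat_anchors[:, 2] < img_w + allowed_border) & \
    (flat_anchors[:, 3] < img_h + allowed_border)

print(inside_flags)  # tensor([ True, False, False])
```

Anchors flagged `False` are typically dropped before target assignment so that partially out-of-image priors do not receive labels.
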
-from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner, - MaxIoUAssigner, RegionAssigner) -from .builder import build_assigner, build_bbox_coder, build_sampler -from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder, - PseudoBBoxCoder, TBLRBBoxCoder) -from .iou_calculators import BboxOverlaps2D, bbox_overlaps -from .samplers import (BaseSampler, CombinedSampler, - InstanceBalancedPosSampler, IoUBalancedNegSampler, - OHEMSampler, PseudoSampler, RandomSampler, - SamplingResult, ScoreHLRSampler) -from .transforms import (bbox2distance, bbox2result, bbox2roi, - bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping, - bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh, - distance2bbox, find_inside_bboxes, roi2bbox) - -__all__ = [ - 'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner', - 'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler', - 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', - 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner', - 'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back', - 'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance', - 'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder', - 'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder', - 'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy', - 'bbox_xyxy_to_cxcywh', 'RegionAssigner', 'find_inside_bboxes' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/__init__.py deleted file mode 100644 index 5eaf7fa3af66edb34fc13da54706873cebcb8cf8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .approx_max_iou_assigner import ApproxMaxIoUAssigner -from .assign_result import AssignResult -from .atss_assigner import ATSSAssigner -from .base_assigner import BaseAssigner -from .center_region_assigner import CenterRegionAssigner -from .grid_assigner import GridAssigner -from .hungarian_assigner import HungarianAssigner -from .mask_hungarian_assigner import MaskHungarianAssigner -from .max_iou_assigner import MaxIoUAssigner -from .point_assigner import PointAssigner -from .region_assigner import RegionAssigner -from .sim_ota_assigner import SimOTAAssigner -from .task_aligned_assigner import TaskAlignedAssigner -from .uniform_assigner import UniformAssigner - -__all__ = [ - 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult', - 'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner', - 'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner', - 'TaskAlignedAssigner', 'MaskHungarianAssigner' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/approx_max_iou_assigner.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/approx_max_iou_assigner.py deleted file mode 100644 index 304d09c3fba3def3fb0320eaba67d3b967cf5f11..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/approx_max_iou_assigner.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
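
The assigners exported above are registry components, so downstream configs normally refer to them by their `type` name and instantiate them through `build_assigner`, following the same `Registry`/`build_from_cfg` pattern as the anchor builder shown earlier. A minimal sketch, assuming the deleted `mmdet` package is still importable and using illustrative (not tuned) threshold values:

```python
from mmdet.core.bbox import build_assigner

# Illustrative config: the thresholds are example values only.
assigner_cfg = dict(
    type='MaxIoUAssigner',
    pos_iou_thr=0.7,
    neg_iou_thr=0.3,
    min_pos_iou=0.3)

# build_assigner looks up 'MaxIoUAssigner' in the BBOX_ASSIGNERS registry
# and instantiates it with the remaining keyword arguments.
assigner = build_assigner(assigner_cfg)
```

The same pattern applies to `build_bbox_coder` and `build_sampler`, which are exported alongside it in the `__init__` above.
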
-import torch - -from ..builder import BBOX_ASSIGNERS -from ..iou_calculators import build_iou_calculator -from .max_iou_assigner import MaxIoUAssigner - - -@BBOX_ASSIGNERS.register_module() -class ApproxMaxIoUAssigner(MaxIoUAssigner): - """Assign a corresponding gt bbox or background to each bbox. - - Each proposals will be assigned with an integer indicating the ground truth - index. (semi-positive index: gt label (0-based), -1: background) - - - -1: negative sample, no assigned gt - - semi-positive integer: positive sample, index (0-based) of assigned gt - - Args: - pos_iou_thr (float): IoU threshold for positive bboxes. - neg_iou_thr (float or tuple): IoU threshold for negative bboxes. - min_pos_iou (float): Minimum iou for a bbox to be considered as a - positive bbox. Positive samples can have smaller IoU than - pos_iou_thr due to the 4th step (assign max IoU sample to each gt). - gt_max_assign_all (bool): Whether to assign all bboxes with the same - highest overlap with some gt to that gt. - ignore_iof_thr (float): IoF threshold for ignoring bboxes (if - `gt_bboxes_ignore` is specified). Negative values mean not - ignoring any bboxes. - ignore_wrt_candidates (bool): Whether to compute the iof between - `bboxes` and `gt_bboxes_ignore`, or the contrary. - match_low_quality (bool): Whether to allow quality matches. This is - usually allowed for RPN and single stage detectors, but not allowed - in the second stage. - gpu_assign_thr (int): The upper bound of the number of GT for GPU - assign. When the number of gt is above this threshold, will assign - on CPU device. Negative values mean not assign on CPU. - """ - - def __init__(self, - pos_iou_thr, - neg_iou_thr, - min_pos_iou=.0, - gt_max_assign_all=True, - ignore_iof_thr=-1, - ignore_wrt_candidates=True, - match_low_quality=True, - gpu_assign_thr=-1, - iou_calculator=dict(type='BboxOverlaps2D')): - self.pos_iou_thr = pos_iou_thr - self.neg_iou_thr = neg_iou_thr - self.min_pos_iou = min_pos_iou - self.gt_max_assign_all = gt_max_assign_all - self.ignore_iof_thr = ignore_iof_thr - self.ignore_wrt_candidates = ignore_wrt_candidates - self.gpu_assign_thr = gpu_assign_thr - self.match_low_quality = match_low_quality - self.iou_calculator = build_iou_calculator(iou_calculator) - - def assign(self, - approxs, - squares, - approxs_per_octave, - gt_bboxes, - gt_bboxes_ignore=None, - gt_labels=None): - """Assign gt to approxs. - - This method assign a gt bbox to each group of approxs (bboxes), - each group of approxs is represent by a base approx (bbox) and - will be assigned with -1, or a semi-positive number. - background_label (-1) means negative sample, - semi-positive number is the index (0-based) of assigned gt. - The assignment is done in following steps, the order matters. - - 1. assign every bbox to background_label (-1) - 2. use the max IoU of each group of approxs to assign - 2. assign proposals whose iou with all gts < neg_iou_thr to background - 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr, - assign it to that bbox - 4. for each gt bbox, assign its nearest proposals (may be more than - one) to itself - - Args: - approxs (Tensor): Bounding boxes to be assigned, - shape(approxs_per_octave*n, 4). - squares (Tensor): Base Bounding boxes to be assigned, - shape(n, 4). - approxs_per_octave (int): number of approxs per octave - gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. 
- gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). - - Returns: - :obj:`AssignResult`: The assign result. - """ - num_squares = squares.size(0) - num_gts = gt_bboxes.size(0) - - if num_squares == 0 or num_gts == 0: - # No predictions and/or truth, return empty assignment - overlaps = approxs.new(num_gts, num_squares) - assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) - return assign_result - - # re-organize anchors by approxs_per_octave x num_squares - approxs = torch.transpose( - approxs.view(num_squares, approxs_per_octave, 4), 0, - 1).contiguous().view(-1, 4) - assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( - num_gts > self.gpu_assign_thr) else False - # compute overlap and assign gt on CPU when number of GT is large - if assign_on_cpu: - device = approxs.device - approxs = approxs.cpu() - gt_bboxes = gt_bboxes.cpu() - if gt_bboxes_ignore is not None: - gt_bboxes_ignore = gt_bboxes_ignore.cpu() - if gt_labels is not None: - gt_labels = gt_labels.cpu() - all_overlaps = self.iou_calculator(approxs, gt_bboxes) - - overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares, - num_gts).max(dim=0) - overlaps = torch.transpose(overlaps, 0, 1) - - if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None - and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0): - if self.ignore_wrt_candidates: - ignore_overlaps = self.iou_calculator( - squares, gt_bboxes_ignore, mode='iof') - ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) - else: - ignore_overlaps = self.iou_calculator( - gt_bboxes_ignore, squares, mode='iof') - ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) - overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 - - assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) - if assign_on_cpu: - assign_result.gt_inds = assign_result.gt_inds.to(device) - assign_result.max_overlaps = assign_result.max_overlaps.to(device) - if assign_result.labels is not None: - assign_result.labels = assign_result.labels.to(device) - return assign_result diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/assign_result.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/assign_result.py deleted file mode 100644 index 488010b5d903d0f51ada89a472d6843de1412116..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/assign_result.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from mmdet.utils import util_mixins - - -class AssignResult(util_mixins.NiceRepr): - """Stores assignments between predicted and truth boxes. - - Attributes: - num_gts (int): the number of truth boxes considered when computing this - assignment - - gt_inds (LongTensor): for each predicted box indicates the 1-based - index of the assigned truth box. 0 means unassigned and -1 means - ignore. - - max_overlaps (FloatTensor): the iou between the predicted box and its - assigned truth box. - - labels (None | LongTensor): If specified, for each predicted box - indicates the category label of the assigned truth box. - - Example: - >>> # An assign result between 4 predicted boxes and 9 true boxes - >>> # where only two boxes were assigned. 
- >>> num_gts = 9 - >>> max_overlaps = torch.LongTensor([0, .5, .9, 0]) - >>> gt_inds = torch.LongTensor([-1, 1, 2, 0]) - >>> labels = torch.LongTensor([0, 3, 4, 0]) - >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels) - >>> print(str(self)) # xdoctest: +IGNORE_WANT - - >>> # Force addition of gt labels (when adding gt as proposals) - >>> new_labels = torch.LongTensor([3, 4, 5]) - >>> self.add_gt_(new_labels) - >>> print(str(self)) # xdoctest: +IGNORE_WANT - - """ - - def __init__(self, num_gts, gt_inds, max_overlaps, labels=None): - self.num_gts = num_gts - self.gt_inds = gt_inds - self.max_overlaps = max_overlaps - self.labels = labels - # Interface for possible user-defined properties - self._extra_properties = {} - - @property - def num_preds(self): - """int: the number of predictions in this assignment""" - return len(self.gt_inds) - - def set_extra_property(self, key, value): - """Set user-defined new property.""" - assert key not in self.info - self._extra_properties[key] = value - - def get_extra_property(self, key): - """Get user-defined property.""" - return self._extra_properties.get(key, None) - - @property - def info(self): - """dict: a dictionary of info about the object""" - basic_info = { - 'num_gts': self.num_gts, - 'num_preds': self.num_preds, - 'gt_inds': self.gt_inds, - 'max_overlaps': self.max_overlaps, - 'labels': self.labels, - } - basic_info.update(self._extra_properties) - return basic_info - - def __nice__(self): - """str: a "nice" summary string describing this assign result""" - parts = [] - parts.append(f'num_gts={self.num_gts!r}') - if self.gt_inds is None: - parts.append(f'gt_inds={self.gt_inds!r}') - else: - parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}') - if self.max_overlaps is None: - parts.append(f'max_overlaps={self.max_overlaps!r}') - else: - parts.append('max_overlaps.shape=' - f'{tuple(self.max_overlaps.shape)!r}') - if self.labels is None: - parts.append(f'labels={self.labels!r}') - else: - parts.append(f'labels.shape={tuple(self.labels.shape)!r}') - return ', '.join(parts) - - @classmethod - def random(cls, **kwargs): - """Create random AssignResult for tests or debugging. - - Args: - num_preds: number of predicted boxes - num_gts: number of true boxes - p_ignore (float): probability of a predicted box assigned to an - ignored truth - p_assigned (float): probability of a predicted box not being - assigned - p_use_label (float | bool): with labels or not - rng (None | int | numpy.random.RandomState): seed or state - - Returns: - :obj:`AssignResult`: Randomly generated assign results. 
- - Example: - >>> from mmdet.core.bbox.assigners.assign_result import * # NOQA - >>> self = AssignResult.random() - >>> print(self.info) - """ - from mmdet.core.bbox import demodata - rng = demodata.ensure_rng(kwargs.get('rng', None)) - - num_gts = kwargs.get('num_gts', None) - num_preds = kwargs.get('num_preds', None) - p_ignore = kwargs.get('p_ignore', 0.3) - p_assigned = kwargs.get('p_assigned', 0.7) - p_use_label = kwargs.get('p_use_label', 0.5) - num_classes = kwargs.get('p_use_label', 3) - - if num_gts is None: - num_gts = rng.randint(0, 8) - if num_preds is None: - num_preds = rng.randint(0, 16) - - if num_gts == 0: - max_overlaps = torch.zeros(num_preds, dtype=torch.float32) - gt_inds = torch.zeros(num_preds, dtype=torch.int64) - if p_use_label is True or p_use_label < rng.rand(): - labels = torch.zeros(num_preds, dtype=torch.int64) - else: - labels = None - else: - import numpy as np - - # Create an overlap for each predicted box - max_overlaps = torch.from_numpy(rng.rand(num_preds)) - - # Construct gt_inds for each predicted box - is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned) - # maximum number of assignments constraints - n_assigned = min(num_preds, min(num_gts, is_assigned.sum())) - - assigned_idxs = np.where(is_assigned)[0] - rng.shuffle(assigned_idxs) - assigned_idxs = assigned_idxs[0:n_assigned] - assigned_idxs.sort() - - is_assigned[:] = 0 - is_assigned[assigned_idxs] = True - - is_ignore = torch.from_numpy( - rng.rand(num_preds) < p_ignore) & is_assigned - - gt_inds = torch.zeros(num_preds, dtype=torch.int64) - - true_idxs = np.arange(num_gts) - rng.shuffle(true_idxs) - true_idxs = torch.from_numpy(true_idxs) - gt_inds[is_assigned] = true_idxs[:n_assigned].long() - - gt_inds = torch.from_numpy( - rng.randint(1, num_gts + 1, size=num_preds)) - gt_inds[is_ignore] = -1 - gt_inds[~is_assigned] = 0 - max_overlaps[~is_assigned] = 0 - - if p_use_label is True or p_use_label < rng.rand(): - if num_classes == 0: - labels = torch.zeros(num_preds, dtype=torch.int64) - else: - labels = torch.from_numpy( - # remind that we set FG labels to [0, num_class-1] - # since mmdet v2.0 - # BG cat_id: num_class - rng.randint(0, num_classes, size=num_preds)) - labels[~is_assigned] = 0 - else: - labels = None - - self = cls(num_gts, gt_inds, max_overlaps, labels) - return self - - def add_gt_(self, gt_labels): - """Add ground truth as assigned results. - - Args: - gt_labels (torch.Tensor): Labels of gt boxes - """ - self_inds = torch.arange( - 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device) - self.gt_inds = torch.cat([self_inds, self.gt_inds]) - - self.max_overlaps = torch.cat( - [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps]) - - if self.labels is not None: - self.labels = torch.cat([gt_labels, self.labels]) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/atss_assigner.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/atss_assigner.py deleted file mode 100644 index 79c8281e50b38df5a663ef183ff75e8cf7b0b195..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/atss_assigner.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
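
Before the ATSS assigner that follows, it may help to show how an `AssignResult` is usually consumed. Per the class documentation above, `gt_inds == 0` marks negatives, `gt_inds == -1` marks ignored boxes, and positive values are 1-based indices of the assigned gt. The sketch below builds a small result by hand with made-up numbers and extracts positive/negative indices in a way similar to how the assigners in this module compute `pos_inds`:

```python
import torch
from mmdet.core.bbox.assigners import AssignResult

# Hand-crafted example: 2 gt boxes, 4 predictions (all values are made up).
num_gts = 2
gt_inds = torch.tensor([0, 2, 1, -1])            # 0: negative, -1: ignored, >0: 1-based gt index
max_overlaps = torch.tensor([0.1, 0.8, 0.6, 0.3])
labels = torch.tensor([-1, 5, 3, -1])            # class label of the assigned gt, -1 otherwise

result = AssignResult(num_gts, gt_inds, max_overlaps, labels=labels)

# Typical downstream split into positive and negative samples.
pos_inds = torch.nonzero(result.gt_inds > 0, as_tuple=False).squeeze(-1)
neg_inds = torch.nonzero(result.gt_inds == 0, as_tuple=False).squeeze(-1)
print(pos_inds.tolist(), neg_inds.tolist())  # [1, 2] [0]
```
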
-import warnings - -import torch - -from ..builder import BBOX_ASSIGNERS -from ..iou_calculators import build_iou_calculator -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - - -@BBOX_ASSIGNERS.register_module() -class ATSSAssigner(BaseAssigner): - """Assign a corresponding gt bbox or background to each bbox. - - Each proposals will be assigned with `0` or a positive integer - indicating the ground truth index. - - - 0: negative sample, no assigned gt - - positive integer: positive sample, index (1-based) of assigned gt - - If ``alpha`` is not None, it means that the dynamic cost - ATSSAssigner is adopted, which is currently only used in the DDOD. - - Args: - topk (float): number of bbox selected in each level - """ - - def __init__(self, - topk, - alpha=None, - iou_calculator=dict(type='BboxOverlaps2D'), - ignore_iof_thr=-1): - self.topk = topk - self.alpha = alpha - self.iou_calculator = build_iou_calculator(iou_calculator) - self.ignore_iof_thr = ignore_iof_thr - - """Assign a corresponding gt bbox or background to each bbox. - - Args: - topk (int): number of bbox selected in each level. - alpha (float): param of cost rate for each proposal only in DDOD. - Default None. - iou_calculator (dict): builder of IoU calculator. - Default dict(type='BboxOverlaps2D'). - ignore_iof_thr (int): whether ignore max overlaps or not. - Default -1 (1 or -1). - """ - - # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py - def assign(self, - bboxes, - num_level_bboxes, - gt_bboxes, - gt_bboxes_ignore=None, - gt_labels=None, - cls_scores=None, - bbox_preds=None): - """Assign gt to bboxes. - - The assignment is done in following steps - - 1. compute iou between all bbox (bbox of all pyramid levels) and gt - 2. compute center distance between all bbox and gt - 3. on each pyramid level, for each gt, select k bbox whose center - are closest to the gt center, so we total select k*l bbox as - candidates for each gt - 4. get corresponding iou for the these candidates, and compute the - mean and std, set mean + std as the iou threshold - 5. select these candidates whose iou are greater than or equal to - the threshold as positive - 6. limit the positive sample's center in gt - - If ``alpha`` is not None, and ``cls_scores`` and `bbox_preds` - are not None, the overlaps calculation in the first step - will also include dynamic cost, which is currently only used in - the DDOD. - - Args: - bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). - num_level_bboxes (List): num of bboxes in each level - gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. Default None. - gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). - cls_scores (list[Tensor]): Classification scores for all scale - levels, each is a 4D-tensor, the channels number is - num_base_priors * num_classes. Default None. - bbox_preds (list[Tensor]): Box energies / deltas for all scale - levels, each is a 4D-tensor, the channels number is - num_base_priors * 4. Default None. - - Returns: - :obj:`AssignResult`: The assign result. - """ - INF = 100000000 - bboxes = bboxes[:, :4] - num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0) - - message = 'Invalid alpha parameter because cls_scores or ' \ - 'bbox_preds are None. If you want to use the ' \ - 'cost-based ATSSAssigner, please set cls_scores, ' \ - 'bbox_preds and self.alpha at the same time. 
' - - if self.alpha is None: - # ATSSAssigner - overlaps = self.iou_calculator(bboxes, gt_bboxes) - if cls_scores is not None or bbox_preds is not None: - warnings.warn(message) - else: - # Dynamic cost ATSSAssigner in DDOD - assert cls_scores is not None and bbox_preds is not None, message - - # compute cls cost for bbox and GT - cls_cost = torch.sigmoid(cls_scores[:, gt_labels]) - - # compute iou between all bbox and gt - overlaps = self.iou_calculator(bbox_preds, gt_bboxes) - - # make sure that we are in element-wise multiplication - assert cls_cost.shape == overlaps.shape - - # overlaps is actually a cost matrix - overlaps = cls_cost**(1 - self.alpha) * overlaps**self.alpha - - # assign 0 by default - assigned_gt_inds = overlaps.new_full((num_bboxes, ), - 0, - dtype=torch.long) - - if num_gt == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - max_overlaps = overlaps.new_zeros((num_bboxes, )) - if num_gt == 0: - # No truth, assign everything to background - assigned_gt_inds[:] = 0 - if gt_labels is None: - assigned_labels = None - else: - assigned_labels = overlaps.new_full((num_bboxes, ), - -1, - dtype=torch.long) - return AssignResult( - num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) - - # compute center distance between all bbox and gt - gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 - gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 - gt_points = torch.stack((gt_cx, gt_cy), dim=1) - - bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0 - bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0 - bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1) - - distances = (bboxes_points[:, None, :] - - gt_points[None, :, :]).pow(2).sum(-1).sqrt() - - if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None - and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0): - ignore_overlaps = self.iou_calculator( - bboxes, gt_bboxes_ignore, mode='iof') - ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) - ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr - distances[ignore_idxs, :] = INF - assigned_gt_inds[ignore_idxs] = -1 - - # Selecting candidates based on the center distance - candidate_idxs = [] - start_idx = 0 - for level, bboxes_per_level in enumerate(num_level_bboxes): - # on each pyramid level, for each gt, - # select k bbox whose center are closest to the gt center - end_idx = start_idx + bboxes_per_level - distances_per_level = distances[start_idx:end_idx, :] - selectable_k = min(self.topk, bboxes_per_level) - - _, topk_idxs_per_level = distances_per_level.topk( - selectable_k, dim=0, largest=False) - candidate_idxs.append(topk_idxs_per_level + start_idx) - start_idx = end_idx - candidate_idxs = torch.cat(candidate_idxs, dim=0) - - # get corresponding iou for the these candidates, and compute the - # mean and std, set mean + std as the iou threshold - candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)] - overlaps_mean_per_gt = candidate_overlaps.mean(0) - overlaps_std_per_gt = candidate_overlaps.std(0) - overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt - - is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :] - - # limit the positive sample's center in gt - for gt_idx in range(num_gt): - candidate_idxs[:, gt_idx] += gt_idx * num_bboxes - ep_bboxes_cx = bboxes_cx.view(1, -1).expand( - num_gt, num_bboxes).contiguous().view(-1) - ep_bboxes_cy = bboxes_cy.view(1, -1).expand( - num_gt, num_bboxes).contiguous().view(-1) - candidate_idxs = candidate_idxs.view(-1) - - # calculate the left, top, right, 
bottom distance between positive - # bbox center and gt side - l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] - t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] - r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 - - is_pos = is_pos & is_in_gts - - # if an anchor box is assigned to multiple gts, - # the one with the highest IoU will be selected. - overlaps_inf = torch.full_like(overlaps, - -INF).t().contiguous().view(-1) - index = candidate_idxs.view(-1)[is_pos.view(-1)] - overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] - overlaps_inf = overlaps_inf.view(num_gt, -1).t() - - max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) - assigned_gt_inds[ - max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 - - if gt_labels is not None: - assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) - pos_inds = torch.nonzero( - assigned_gt_inds > 0, as_tuple=False).squeeze() - if pos_inds.numel() > 0: - assigned_labels[pos_inds] = gt_labels[ - assigned_gt_inds[pos_inds] - 1] - else: - assigned_labels = None - return AssignResult( - num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/base_assigner.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/base_assigner.py deleted file mode 100644 index 3c2d597a5b12275a8941a5d87c56f05dbc955071..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/base_assigner.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta, abstractmethod - - -class BaseAssigner(metaclass=ABCMeta): - """Base assigner that assigns boxes to ground truth boxes.""" - - @abstractmethod - def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): - """Assign boxes to either a ground truth boxes or a negative boxes.""" diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/center_region_assigner.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/center_region_assigner.py deleted file mode 100644 index 86e78597d8efa3313d126cc4707d9c6ef1d16e85..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/center_region_assigner.py +++ /dev/null @@ -1,336 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from ..builder import BBOX_ASSIGNERS -from ..iou_calculators import build_iou_calculator -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - - -def scale_boxes(bboxes, scale): - """Expand an array of boxes by a given scale. - - Args: - bboxes (Tensor): Shape (m, 4) - scale (float): The scale factor of bboxes - - Returns: - (Tensor): Shape (m, 4). Scaled bboxes - """ - assert bboxes.size(1) == 4 - w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5 - h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5 - x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5 - y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5 - - w_half *= scale - h_half *= scale - - boxes_scaled = torch.zeros_like(bboxes) - boxes_scaled[:, 0] = x_c - w_half - boxes_scaled[:, 2] = x_c + w_half - boxes_scaled[:, 1] = y_c - h_half - boxes_scaled[:, 3] = y_c + h_half - return boxes_scaled - - -def is_located_in(points, bboxes): - """Are points located in bboxes. - - Args: - points (Tensor): Points, shape: (m, 2). 
- bboxes (Tensor): Bounding boxes, shape: (n, 4). - - Return: - Tensor: Flags indicating if points are located in bboxes, shape: (m, n). - """ - assert points.size(1) == 2 - assert bboxes.size(1) == 4 - return (points[:, 0].unsqueeze(1) > bboxes[:, 0].unsqueeze(0)) & \ - (points[:, 0].unsqueeze(1) < bboxes[:, 2].unsqueeze(0)) & \ - (points[:, 1].unsqueeze(1) > bboxes[:, 1].unsqueeze(0)) & \ - (points[:, 1].unsqueeze(1) < bboxes[:, 3].unsqueeze(0)) - - -def bboxes_area(bboxes): - """Compute the area of an array of bboxes. - - Args: - bboxes (Tensor): The coordinates ox bboxes. Shape: (m, 4) - - Returns: - Tensor: Area of the bboxes. Shape: (m, ) - """ - assert bboxes.size(1) == 4 - w = (bboxes[:, 2] - bboxes[:, 0]) - h = (bboxes[:, 3] - bboxes[:, 1]) - areas = w * h - return areas - - -@BBOX_ASSIGNERS.register_module() -class CenterRegionAssigner(BaseAssigner): - """Assign pixels at the center region of a bbox as positive. - - Each proposals will be assigned with `-1`, `0`, or a positive integer - indicating the ground truth index. - - -1: negative samples - - semi-positive numbers: positive sample, index (0-based) of assigned gt - - Args: - pos_scale (float): Threshold within which pixels are - labelled as positive. - neg_scale (float): Threshold above which pixels are - labelled as positive. - min_pos_iof (float): Minimum iof of a pixel with a gt to be - labelled as positive. Default: 1e-2 - ignore_gt_scale (float): Threshold within which the pixels - are ignored when the gt is labelled as shadowed. Default: 0.5 - foreground_dominate (bool): If True, the bbox will be assigned as - positive when a gt's kernel region overlaps with another's shadowed - (ignored) region, otherwise it is set as ignored. Default to False. - """ - - def __init__(self, - pos_scale, - neg_scale, - min_pos_iof=1e-2, - ignore_gt_scale=0.5, - foreground_dominate=False, - iou_calculator=dict(type='BboxOverlaps2D')): - self.pos_scale = pos_scale - self.neg_scale = neg_scale - self.min_pos_iof = min_pos_iof - self.ignore_gt_scale = ignore_gt_scale - self.foreground_dominate = foreground_dominate - self.iou_calculator = build_iou_calculator(iou_calculator) - - def get_gt_priorities(self, gt_bboxes): - """Get gt priorities according to their areas. - - Smaller gt has higher priority. - - Args: - gt_bboxes (Tensor): Ground truth boxes, shape (k, 4). - - Returns: - Tensor: The priority of gts so that gts with larger priority is \ - more likely to be assigned. Shape (k, ) - """ - gt_areas = bboxes_area(gt_bboxes) - # Rank all gt bbox areas. Smaller objects has larger priority - _, sort_idx = gt_areas.sort(descending=True) - sort_idx = sort_idx.argsort() - return sort_idx - - def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): - """Assign gt to bboxes. - - This method assigns gts to every bbox (proposal/anchor), each bbox \ - will be assigned with -1, or a semi-positive number. -1 means \ - negative sample, semi-positive number is the index (0-based) of \ - assigned gt. - - Args: - bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). - gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). - gt_bboxes_ignore (tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. - gt_labels (tensor, optional): Label of gt_bboxes, shape (num_gts,). - - Returns: - :obj:`AssignResult`: The assigned result. Note that \ - shadowed_labels of shape (N, 2) is also added as an \ - `assign_result` attribute. 
`shadowed_labels` is a tensor \ - composed of N pairs of anchor_ind, class_label], where N \ - is the number of anchors that lie in the outer region of a \ - gt, anchor_ind is the shadowed anchor index and class_label \ - is the shadowed class label. - - Example: - >>> self = CenterRegionAssigner(0.2, 0.2) - >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]]) - >>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]]) - >>> assign_result = self.assign(bboxes, gt_bboxes) - >>> expected_gt_inds = torch.LongTensor([1, 0]) - >>> assert torch.all(assign_result.gt_inds == expected_gt_inds) - """ - # There are in total 5 steps in the pixel assignment - # 1. Find core (the center region, say inner 0.2) - # and shadow (the relatively ourter part, say inner 0.2-0.5) - # regions of every gt. - # 2. Find all prior bboxes that lie in gt_core and gt_shadow regions - # 3. Assign prior bboxes in gt_core with a one-hot id of the gt in - # the image. - # 3.1. For overlapping objects, the prior bboxes in gt_core is - # assigned with the object with smallest area - # 4. Assign prior bboxes with class label according to its gt id. - # 4.1. Assign -1 to prior bboxes lying in shadowed gts - # 4.2. Assign positive prior boxes with the corresponding label - # 5. Find pixels lying in the shadow of an object and assign them with - # background label, but set the loss weight of its corresponding - # gt to zero. - assert bboxes.size(1) == 4, 'bboxes must have size of 4' - # 1. Find core positive and shadow region of every gt - gt_core = scale_boxes(gt_bboxes, self.pos_scale) - gt_shadow = scale_boxes(gt_bboxes, self.neg_scale) - - # 2. Find prior bboxes that lie in gt_core and gt_shadow regions - bbox_centers = (bboxes[:, 2:4] + bboxes[:, 0:2]) / 2 - # The center points lie within the gt boxes - is_bbox_in_gt = is_located_in(bbox_centers, gt_bboxes) - # Only calculate bbox and gt_core IoF. This enables small prior bboxes - # to match large gts - bbox_and_gt_core_overlaps = self.iou_calculator( - bboxes, gt_core, mode='iof') - # The center point of effective priors should be within the gt box - is_bbox_in_gt_core = is_bbox_in_gt & ( - bbox_and_gt_core_overlaps > self.min_pos_iof) # shape (n, k) - - is_bbox_in_gt_shadow = ( - self.iou_calculator(bboxes, gt_shadow, mode='iof') > - self.min_pos_iof) - # Rule out center effective positive pixels - is_bbox_in_gt_shadow &= (~is_bbox_in_gt_core) - - num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0) - if num_gts == 0 or num_bboxes == 0: - # If no gts exist, assign all pixels to negative - assigned_gt_ids = \ - is_bbox_in_gt_core.new_zeros((num_bboxes,), - dtype=torch.long) - pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2)) - else: - # Step 3: assign a one-hot gt id to each pixel, and smaller objects - # have high priority to assign the pixel. - sort_idx = self.get_gt_priorities(gt_bboxes) - assigned_gt_ids, pixels_in_gt_shadow = \ - self.assign_one_hot_gt_indices(is_bbox_in_gt_core, - is_bbox_in_gt_shadow, - gt_priority=sort_idx) - - if gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0: - # No ground truth or boxes, return empty assignment - gt_bboxes_ignore = scale_boxes( - gt_bboxes_ignore, scale=self.ignore_gt_scale) - is_bbox_in_ignored_gts = is_located_in(bbox_centers, - gt_bboxes_ignore) - is_bbox_in_ignored_gts = is_bbox_in_ignored_gts.any(dim=1) - assigned_gt_ids[is_bbox_in_ignored_gts] = -1 - - # 4. Assign prior bboxes with class label according to its gt id. 
- assigned_labels = None - shadowed_pixel_labels = None - if gt_labels is not None: - # Default assigned label is the background (-1) - assigned_labels = assigned_gt_ids.new_full((num_bboxes, ), -1) - pos_inds = torch.nonzero( - assigned_gt_ids > 0, as_tuple=False).squeeze() - if pos_inds.numel() > 0: - assigned_labels[pos_inds] = gt_labels[assigned_gt_ids[pos_inds] - - 1] - # 5. Find pixels lying in the shadow of an object - shadowed_pixel_labels = pixels_in_gt_shadow.clone() - if pixels_in_gt_shadow.numel() > 0: - pixel_idx, gt_idx =\ - pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1] - assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \ - 'Some pixels are dually assigned to ignore and gt!' - shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1] - override = ( - assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1]) - if self.foreground_dominate: - # When a pixel is both positive and shadowed, set it as pos - shadowed_pixel_labels = shadowed_pixel_labels[~override] - else: - # When a pixel is both pos and shadowed, set it as shadowed - assigned_labels[pixel_idx[override]] = -1 - assigned_gt_ids[pixel_idx[override]] = 0 - - assign_result = AssignResult( - num_gts, assigned_gt_ids, None, labels=assigned_labels) - # Add shadowed_labels as assign_result property. Shape: (num_shadow, 2) - assign_result.set_extra_property('shadowed_labels', - shadowed_pixel_labels) - return assign_result - - def assign_one_hot_gt_indices(self, - is_bbox_in_gt_core, - is_bbox_in_gt_shadow, - gt_priority=None): - """Assign only one gt index to each prior box. - - Gts with large gt_priority are more likely to be assigned. - - Args: - is_bbox_in_gt_core (Tensor): Bool tensor indicating the bbox center - is in the core area of a gt (e.g. 0-0.2). - Shape: (num_prior, num_gt). - is_bbox_in_gt_shadow (Tensor): Bool tensor indicating the bbox - center is in the shadowed area of a gt (e.g. 0.2-0.5). - Shape: (num_prior, num_gt). - gt_priority (Tensor): Priorities of gts. The gt with a higher - priority is more likely to be assigned to the bbox when the bbox - match with multiple gts. Shape: (num_gt, ). - - Returns: - tuple: Returns (assigned_gt_inds, shadowed_gt_inds). - - - assigned_gt_inds: The assigned gt index of each prior bbox \ - (i.e. index from 1 to num_gts). Shape: (num_prior, ). - - shadowed_gt_inds: shadowed gt indices. It is a tensor of \ - shape (num_ignore, 2) with first column being the \ - shadowed prior bbox indices and the second column the \ - shadowed gt indices (1-based). - """ - num_bboxes, num_gts = is_bbox_in_gt_core.shape - - if gt_priority is None: - gt_priority = torch.arange( - num_gts, device=is_bbox_in_gt_core.device) - assert gt_priority.size(0) == num_gts - # The bigger gt_priority, the more preferable to be assigned - # The assigned inds are by default 0 (background) - assigned_gt_inds = is_bbox_in_gt_core.new_zeros((num_bboxes, ), - dtype=torch.long) - # Shadowed bboxes are assigned to be background. But the corresponding - # label is ignored during loss calculation, which is done through - # shadowed_gt_inds - shadowed_gt_inds = torch.nonzero(is_bbox_in_gt_shadow, as_tuple=False) - if is_bbox_in_gt_core.sum() == 0: # No gt match - shadowed_gt_inds[:, 1] += 1 # 1-based. For consistency issue - return assigned_gt_inds, shadowed_gt_inds - - # The priority of each prior box and gt pair. If one prior box is - # matched bo multiple gts. 
Only the pair with the highest priority - # is saved - pair_priority = is_bbox_in_gt_core.new_full((num_bboxes, num_gts), - -1, - dtype=torch.long) - - # Each bbox could match with multiple gts. - # The following codes deal with this situation - # Matched bboxes (to any gt). Shape: (num_pos_anchor, ) - inds_of_match = torch.any(is_bbox_in_gt_core, dim=1) - # The matched gt index of each positive bbox. Length >= num_pos_anchor - # , since one bbox could match multiple gts - matched_bbox_gt_inds = torch.nonzero( - is_bbox_in_gt_core, as_tuple=False)[:, 1] - # Assign priority to each bbox-gt pair. - pair_priority[is_bbox_in_gt_core] = gt_priority[matched_bbox_gt_inds] - _, argmax_priority = pair_priority[inds_of_match].max(dim=1) - assigned_gt_inds[inds_of_match] = argmax_priority + 1 # 1-based - # Zero-out the assigned anchor box to filter the shadowed gt indices - is_bbox_in_gt_core[inds_of_match, argmax_priority] = 0 - # Concat the shadowed indices due to overlapping with that out side of - # effective scale. shape: (total_num_ignore, 2) - shadowed_gt_inds = torch.cat( - (shadowed_gt_inds, torch.nonzero( - is_bbox_in_gt_core, as_tuple=False)), - dim=0) - # `is_bbox_in_gt_core` should be changed back to keep arguments intact. - is_bbox_in_gt_core[inds_of_match, argmax_priority] = 1 - # 1-based shadowed gt indices, to be consistent with `assigned_gt_inds` - if shadowed_gt_inds.numel() > 0: - shadowed_gt_inds[:, 1] += 1 - return assigned_gt_inds, shadowed_gt_inds diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/grid_assigner.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/grid_assigner.py deleted file mode 100644 index a0c814e782ebc79600cae4ca4e66b4ebaf47c81e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/grid_assigner.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from ..builder import BBOX_ASSIGNERS -from ..iou_calculators import build_iou_calculator -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - - -@BBOX_ASSIGNERS.register_module() -class GridAssigner(BaseAssigner): - """Assign a corresponding gt bbox or background to each bbox. - - Each proposals will be assigned with `-1`, `0`, or a positive integer - indicating the ground truth index. - - - -1: don't care - - 0: negative sample, no assigned gt - - positive integer: positive sample, index (1-based) of assigned gt - - Args: - pos_iou_thr (float): IoU threshold for positive bboxes. - neg_iou_thr (float or tuple): IoU threshold for negative bboxes. - min_pos_iou (float): Minimum iou for a bbox to be considered as a - positive bbox. Positive samples can have smaller IoU than - pos_iou_thr due to the 4th step (assign max IoU sample to each gt). - gt_max_assign_all (bool): Whether to assign all bboxes with the same - highest overlap with some gt to that gt. - """ - - def __init__(self, - pos_iou_thr, - neg_iou_thr, - min_pos_iou=.0, - gt_max_assign_all=True, - iou_calculator=dict(type='BboxOverlaps2D')): - self.pos_iou_thr = pos_iou_thr - self.neg_iou_thr = neg_iou_thr - self.min_pos_iou = min_pos_iou - self.gt_max_assign_all = gt_max_assign_all - self.iou_calculator = build_iou_calculator(iou_calculator) - - def assign(self, bboxes, box_responsible_flags, gt_bboxes, gt_labels=None): - """Assign gt to bboxes. The process is very much like the max iou - assigner, except that positive samples are constrained within the cell - that the gt boxes fell in. 
- - This method assign a gt bbox to every bbox (proposal/anchor), each bbox - will be assigned with -1, 0, or a positive number. -1 means don't care, - 0 means negative sample, positive number is the index (1-based) of - assigned gt. - The assignment is done in following steps, the order matters. - - 1. assign every bbox to -1 - 2. assign proposals whose iou with all gts <= neg_iou_thr to 0 - 3. for each bbox within a cell, if the iou with its nearest gt > - pos_iou_thr and the center of that gt falls inside the cell, - assign it to that bbox - 4. for each gt bbox, assign its nearest proposals within the cell the - gt bbox falls in to itself. - - Args: - bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). - box_responsible_flags (Tensor): flag to indicate whether box is - responsible for prediction, shape(n, ) - gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). - gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). - - Returns: - :obj:`AssignResult`: The assign result. - """ - num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0) - - # compute iou between all gt and bboxes - overlaps = self.iou_calculator(gt_bboxes, bboxes) - - # 1. assign -1 by default - assigned_gt_inds = overlaps.new_full((num_bboxes, ), - -1, - dtype=torch.long) - - if num_gts == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - max_overlaps = overlaps.new_zeros((num_bboxes, )) - if num_gts == 0: - # No truth, assign everything to background - assigned_gt_inds[:] = 0 - if gt_labels is None: - assigned_labels = None - else: - assigned_labels = overlaps.new_full((num_bboxes, ), - -1, - dtype=torch.long) - return AssignResult( - num_gts, - assigned_gt_inds, - max_overlaps, - labels=assigned_labels) - - # 2. assign negative: below - # for each anchor, which gt best overlaps with it - # for each anchor, the max iou of all gts - # shape of max_overlaps == argmax_overlaps == num_bboxes - max_overlaps, argmax_overlaps = overlaps.max(dim=0) - - if isinstance(self.neg_iou_thr, float): - assigned_gt_inds[(max_overlaps >= 0) - & (max_overlaps <= self.neg_iou_thr)] = 0 - elif isinstance(self.neg_iou_thr, (tuple, list)): - assert len(self.neg_iou_thr) == 2 - assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0]) - & (max_overlaps <= self.neg_iou_thr[1])] = 0 - - # 3. assign positive: falls into responsible cell and above - # positive IOU threshold, the order matters. - # the prior condition of comparison is to filter out all - # unrelated anchors, i.e. not box_responsible_flags - overlaps[:, ~box_responsible_flags.type(torch.bool)] = -1. - - # calculate max_overlaps again, but this time we only consider IOUs - # for anchors responsible for prediction - max_overlaps, argmax_overlaps = overlaps.max(dim=0) - - # for each gt, which anchor best overlaps with it - # for each gt, the max iou of all proposals - # shape of gt_max_overlaps == gt_argmax_overlaps == num_gts - gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) - - pos_inds = (max_overlaps > - self.pos_iou_thr) & box_responsible_flags.type(torch.bool) - assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 - - # 4. 
assign positive to max overlapped anchors within responsible cell - for i in range(num_gts): - if gt_max_overlaps[i] > self.min_pos_iou: - if self.gt_max_assign_all: - max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \ - box_responsible_flags.type(torch.bool) - assigned_gt_inds[max_iou_inds] = i + 1 - elif box_responsible_flags[gt_argmax_overlaps[i]]: - assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 - - # assign labels of positive anchors - if gt_labels is not None: - assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) - pos_inds = torch.nonzero( - assigned_gt_inds > 0, as_tuple=False).squeeze() - if pos_inds.numel() > 0: - assigned_labels[pos_inds] = gt_labels[ - assigned_gt_inds[pos_inds] - 1] - - else: - assigned_labels = None - - return AssignResult( - num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/hungarian_assigner.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/hungarian_assigner.py deleted file mode 100644 index 4105fb5c431ace8551ac130f79cb178e62aaaf0b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/hungarian_assigner.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from ..builder import BBOX_ASSIGNERS -from ..match_costs import build_match_cost -from ..transforms import bbox_cxcywh_to_xyxy -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - -try: - from scipy.optimize import linear_sum_assignment -except ImportError: - linear_sum_assignment = None - - -@BBOX_ASSIGNERS.register_module() -class HungarianAssigner(BaseAssigner): - """Computes one-to-one matching between predictions and ground truth. - - This class computes an assignment between the targets and the predictions - based on the costs. The costs are weighted sum of three components: - classification cost, regression L1 cost and regression iou cost. The - targets don't include the no_object, so generally there are more - predictions than targets. After the one-to-one matching, the un-matched - are treated as backgrounds. Thus each query prediction will be assigned - with `0` or a positive integer indicating the ground truth index: - - - 0: negative sample, no assigned gt - - positive integer: positive sample, index (1-based) of assigned gt - - Args: - cls_weight (int | float, optional): The scale factor for classification - cost. Default 1.0. - bbox_weight (int | float, optional): The scale factor for regression - L1 cost. Default 1.0. - iou_weight (int | float, optional): The scale factor for regression - iou cost. Default 1.0. - iou_calculator (dict | optional): The config for the iou calculation. - Default type `BboxOverlaps2D`. - iou_mode (str | optional): "iou" (intersection over union), "iof" - (intersection over foreground), or "giou" (generalized - intersection over union). Default "giou". - """ - - def __init__(self, - cls_cost=dict(type='ClassificationCost', weight=1.), - reg_cost=dict(type='BBoxL1Cost', weight=1.0), - iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)): - self.cls_cost = build_match_cost(cls_cost) - self.reg_cost = build_match_cost(reg_cost) - self.iou_cost = build_match_cost(iou_cost) - - def assign(self, - bbox_pred, - cls_pred, - gt_bboxes, - gt_labels, - img_meta, - gt_bboxes_ignore=None, - eps=1e-7): - """Computes one-to-one matching based on the weighted costs. 
- - This method assign each query prediction to a ground truth or - background. The `assigned_gt_inds` with -1 means don't care, - 0 means negative sample, and positive number is the index (1-based) - of assigned gt. - The assignment is done in the following steps, the order matters. - - 1. assign every prediction to -1 - 2. compute the weighted costs - 3. do Hungarian matching on CPU based on the costs - 4. assign all to 0 (background) first, then for each matched pair - between predictions and gts, treat this prediction as foreground - and assign the corresponding gt index (plus 1) to it. - - Args: - bbox_pred (Tensor): Predicted boxes with normalized coordinates - (cx, cy, w, h), which are all in range [0, 1]. Shape - [num_query, 4]. - cls_pred (Tensor): Predicted classification logits, shape - [num_query, num_class]. - gt_bboxes (Tensor): Ground truth boxes with unnormalized - coordinates (x1, y1, x2, y2). Shape [num_gt, 4]. - gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). - img_meta (dict): Meta information for current image. - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`. Default None. - eps (int | float, optional): A value added to the denominator for - numerical stability. Default 1e-7. - - Returns: - :obj:`AssignResult`: The assigned result. - """ - assert gt_bboxes_ignore is None, \ - 'Only case when gt_bboxes_ignore is None is supported.' - num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) - - # 1. assign -1 by default - assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), - -1, - dtype=torch.long) - assigned_labels = bbox_pred.new_full((num_bboxes, ), - -1, - dtype=torch.long) - if num_gts == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - if num_gts == 0: - # No ground truth, assign all to background - assigned_gt_inds[:] = 0 - return AssignResult( - num_gts, assigned_gt_inds, None, labels=assigned_labels) - img_h, img_w, _ = img_meta['img_shape'] - factor = gt_bboxes.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0) - - # 2. compute the weighted costs - # classification and bboxcost. - cls_cost = self.cls_cost(cls_pred, gt_labels) - # regression L1 cost - normalize_gt_bboxes = gt_bboxes / factor - reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes) - # regression iou cost, defaultly giou is used in official DETR. - bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor - iou_cost = self.iou_cost(bboxes, gt_bboxes) - # weighted sum of above three costs - cost = cls_cost + reg_cost + iou_cost - - # 3. do Hungarian matching on CPU using linear_sum_assignment - cost = cost.detach().cpu() - if linear_sum_assignment is None: - raise ImportError('Please run "pip install scipy" ' - 'to install scipy first.') - matched_row_inds, matched_col_inds = linear_sum_assignment(cost) - matched_row_inds = torch.from_numpy(matched_row_inds).to( - bbox_pred.device) - matched_col_inds = torch.from_numpy(matched_col_inds).to( - bbox_pred.device) - - # 4. 
assign backgrounds and foregrounds - # assign all indices to backgrounds first - assigned_gt_inds[:] = 0 - # assign foregrounds based on matching results - assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 - assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] - return AssignResult( - num_gts, assigned_gt_inds, None, labels=assigned_labels) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/mask_hungarian_assigner.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/mask_hungarian_assigner.py deleted file mode 100644 index f5f27f3f5f0cd37c05ef29bd9c26973f9ea26dfd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/mask_hungarian_assigner.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from mmdet.core.bbox.builder import BBOX_ASSIGNERS -from mmdet.core.bbox.match_costs.builder import build_match_cost -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - -try: - from scipy.optimize import linear_sum_assignment -except ImportError: - linear_sum_assignment = None - - -@BBOX_ASSIGNERS.register_module() -class MaskHungarianAssigner(BaseAssigner): - """Computes one-to-one matching between predictions and ground truth for - mask. - - This class computes an assignment between the targets and the predictions - based on the costs. The costs are weighted sum of three components: - classification cost, mask focal cost and mask dice cost. The - targets don't include the no_object, so generally there are more - predictions than targets. After the one-to-one matching, the un-matched - are treated as backgrounds. Thus each query prediction will be assigned - with `0` or a positive integer indicating the ground truth index: - - - 0: negative sample, no assigned gt - - positive integer: positive sample, index (1-based) of assigned gt - - Args: - cls_cost (:obj:`mmcv.ConfigDict` | dict): Classification cost config. - mask_cost (:obj:`mmcv.ConfigDict` | dict): Mask cost config. - dice_cost (:obj:`mmcv.ConfigDict` | dict): Dice cost config. - """ - - def __init__(self, - cls_cost=dict(type='ClassificationCost', weight=1.0), - mask_cost=dict( - type='FocalLossCost', weight=1.0, binary_input=True), - dice_cost=dict(type='DiceCost', weight=1.0)): - self.cls_cost = build_match_cost(cls_cost) - self.mask_cost = build_match_cost(mask_cost) - self.dice_cost = build_match_cost(dice_cost) - - def assign(self, - cls_pred, - mask_pred, - gt_labels, - gt_mask, - img_meta, - gt_bboxes_ignore=None, - eps=1e-7): - """Computes one-to-one matching based on the weighted costs. - - Args: - cls_pred (Tensor | None): Class prediction in shape - (num_query, cls_out_channels). - mask_pred (Tensor): Mask prediction in shape (num_query, H, W). - gt_labels (Tensor): Label of 'gt_mask'in shape = (num_gt, ). - gt_mask (Tensor): Ground truth mask in shape = (num_gt, H, W). - img_meta (dict): Meta information for current image. - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`. Default None. - eps (int | float, optional): A value added to the denominator for - numerical stability. Default 1e-7. - - Returns: - :obj:`AssignResult`: The assigned result. - """ - assert gt_bboxes_ignore is None, \ - 'Only case when gt_bboxes_ignore is None is supported.' - # K-Net sometimes passes cls_pred=None to this assigner. - # So we should use the shape of mask_pred - num_gt, num_query = gt_labels.shape[0], mask_pred.shape[0] - - # 1. 
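
The `HungarianAssigner` above reduces label assignment to a linear-sum-assignment problem over a `(num_query, num_gt)` cost matrix, then maps matched pairs to the 0/1-based convention described in its docstring. A minimal standalone sketch of that flow with toy numbers (plain SciPy/PyTorch, not the deleted mmdet API):

```python
# Minimal sketch of Hungarian-style one-to-one assignment on a toy cost matrix.
import torch
from scipy.optimize import linear_sum_assignment

num_query, num_gt = 5, 2
# Stand-in for cls_cost + reg_cost + iou_cost over every (query, gt) pair.
cost = torch.rand(num_query, num_gt)

row_ind, col_ind = linear_sum_assignment(cost.numpy())   # matching runs on CPU

# Same convention as above: 0 = background, i + 1 = index of the matched gt.
assigned_gt_inds = torch.zeros(num_query, dtype=torch.long)
assigned_gt_inds[torch.as_tensor(row_ind, dtype=torch.long)] = \
    torch.as_tensor(col_ind, dtype=torch.long) + 1
print(assigned_gt_inds)   # e.g. tensor([0, 2, 0, 1, 0]); unmatched queries stay 0
```

Queries left at 0 are treated as background, exactly as the assigner's step 4 describes.
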
assign -1 by default - assigned_gt_inds = mask_pred.new_full((num_query, ), - -1, - dtype=torch.long) - assigned_labels = mask_pred.new_full((num_query, ), - -1, - dtype=torch.long) - if num_gt == 0 or num_query == 0: - # No ground truth or boxes, return empty assignment - if num_gt == 0: - # No ground truth, assign all to background - assigned_gt_inds[:] = 0 - return AssignResult( - num_gt, assigned_gt_inds, None, labels=assigned_labels) - - # 2. compute the weighted costs - # classification and maskcost. - if self.cls_cost.weight != 0 and cls_pred is not None: - cls_cost = self.cls_cost(cls_pred, gt_labels) - else: - cls_cost = 0 - - if self.mask_cost.weight != 0: - # mask_pred shape = [num_query, h, w] - # gt_mask shape = [num_gt, h, w] - # mask_cost shape = [num_query, num_gt] - mask_cost = self.mask_cost(mask_pred, gt_mask) - else: - mask_cost = 0 - - if self.dice_cost.weight != 0: - dice_cost = self.dice_cost(mask_pred, gt_mask) - else: - dice_cost = 0 - cost = cls_cost + mask_cost + dice_cost - - # 3. do Hungarian matching on CPU using linear_sum_assignment - cost = cost.detach().cpu() - if linear_sum_assignment is None: - raise ImportError('Please run "pip install scipy" ' - 'to install scipy first.') - - matched_row_inds, matched_col_inds = linear_sum_assignment(cost) - matched_row_inds = torch.from_numpy(matched_row_inds).to( - mask_pred.device) - matched_col_inds = torch.from_numpy(matched_col_inds).to( - mask_pred.device) - - # 4. assign backgrounds and foregrounds - # assign all indices to backgrounds first - assigned_gt_inds[:] = 0 - # assign foregrounds based on matching results - assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 - assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] - return AssignResult( - num_gt, assigned_gt_inds, None, labels=assigned_labels) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/max_iou_assigner.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/max_iou_assigner.py deleted file mode 100644 index 676421f7653f37e936c7152ed64bebe80564d147..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/max_iou_assigner.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from ..builder import BBOX_ASSIGNERS -from ..iou_calculators import build_iou_calculator -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - - -@BBOX_ASSIGNERS.register_module() -class MaxIoUAssigner(BaseAssigner): - """Assign a corresponding gt bbox or background to each bbox. - - Each proposals will be assigned with `-1`, or a semi-positive integer - indicating the ground truth index. - - - -1: negative sample, no assigned gt - - semi-positive integer: positive sample, index (0-based) of assigned gt - - Args: - pos_iou_thr (float): IoU threshold for positive bboxes. - neg_iou_thr (float or tuple): IoU threshold for negative bboxes. - min_pos_iou (float): Minimum iou for a bbox to be considered as a - positive bbox. Positive samples can have smaller IoU than - pos_iou_thr due to the 4th step (assign max IoU sample to each gt). - `min_pos_iou` is set to avoid assigning bboxes that have extremely - small iou with GT as positive samples. It brings about 0.3 mAP - improvements in 1x schedule but does not affect the performance of - 3x schedule. More comparisons can be found in - `PR #7464 `_. - gt_max_assign_all (bool): Whether to assign all bboxes with the same - highest overlap with some gt to that gt. 
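
`MaskHungarianAssigner` builds its cost from classification, focal-mask and Dice terms before the same Hungarian matching. A self-contained sketch of one common pairwise Dice-cost formulation (the helper name is hypothetical; the real `DiceCost` is configured via `..match_costs`):

```python
import torch

def pairwise_dice_cost(mask_pred, gt_mask, eps=1e-3):
    """Negative Dice similarity between every (prediction, gt) mask pair.

    mask_pred: (num_query, H, W) probabilities, gt_mask: (num_gt, H, W) in {0, 1}.
    Returns a (num_query, num_gt) cost matrix where lower means a better match.
    """
    pred = mask_pred.flatten(1)           # (num_query, H*W)
    gt = gt_mask.flatten(1).float()       # (num_gt, H*W)
    numerator = 2 * torch.einsum('qc,gc->qg', pred, gt)
    denominator = pred.sum(-1)[:, None] + gt.sum(-1)[None, :]
    return -(numerator + eps) / (denominator + eps)

cost = pairwise_dice_cost(torch.rand(10, 32, 32), torch.randint(0, 2, (3, 32, 32)))
print(cost.shape)  # torch.Size([10, 3])
```
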
- ignore_iof_thr (float): IoF threshold for ignoring bboxes (if - `gt_bboxes_ignore` is specified). Negative values mean not - ignoring any bboxes. - ignore_wrt_candidates (bool): Whether to compute the iof between - `bboxes` and `gt_bboxes_ignore`, or the contrary. - match_low_quality (bool): Whether to allow low quality matches. This is - usually allowed for RPN and single stage detectors, but not allowed - in the second stage. Details are demonstrated in Step 4. - gpu_assign_thr (int): The upper bound of the number of GT for GPU - assign. When the number of gt is above this threshold, will assign - on CPU device. Negative values mean not assign on CPU. - """ - - def __init__(self, - pos_iou_thr, - neg_iou_thr, - min_pos_iou=.0, - gt_max_assign_all=True, - ignore_iof_thr=-1, - ignore_wrt_candidates=True, - match_low_quality=True, - gpu_assign_thr=-1, - iou_calculator=dict(type='BboxOverlaps2D')): - self.pos_iou_thr = pos_iou_thr - self.neg_iou_thr = neg_iou_thr - self.min_pos_iou = min_pos_iou - self.gt_max_assign_all = gt_max_assign_all - self.ignore_iof_thr = ignore_iof_thr - self.ignore_wrt_candidates = ignore_wrt_candidates - self.gpu_assign_thr = gpu_assign_thr - self.match_low_quality = match_low_quality - self.iou_calculator = build_iou_calculator(iou_calculator) - - def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): - """Assign gt to bboxes. - - This method assign a gt bbox to every bbox (proposal/anchor), each bbox - will be assigned with -1, or a semi-positive number. -1 means negative - sample, semi-positive number is the index (0-based) of assigned gt. - The assignment is done in following steps, the order matters. - - 1. assign every bbox to the background - 2. assign proposals whose iou with all gts < neg_iou_thr to 0 - 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr, - assign it to that bbox - 4. for each gt bbox, assign its nearest proposals (may be more than - one) to itself - - Args: - bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). - gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. - gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). - - Returns: - :obj:`AssignResult`: The assign result. 
- - Example: - >>> self = MaxIoUAssigner(0.5, 0.5) - >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]]) - >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]]) - >>> assign_result = self.assign(bboxes, gt_bboxes) - >>> expected_gt_inds = torch.LongTensor([1, 0]) - >>> assert torch.all(assign_result.gt_inds == expected_gt_inds) - """ - assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( - gt_bboxes.shape[0] > self.gpu_assign_thr) else False - # compute overlap and assign gt on CPU when number of GT is large - if assign_on_cpu: - device = bboxes.device - bboxes = bboxes.cpu() - gt_bboxes = gt_bboxes.cpu() - if gt_bboxes_ignore is not None: - gt_bboxes_ignore = gt_bboxes_ignore.cpu() - if gt_labels is not None: - gt_labels = gt_labels.cpu() - - overlaps = self.iou_calculator(gt_bboxes, bboxes) - - if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None - and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0): - if self.ignore_wrt_candidates: - ignore_overlaps = self.iou_calculator( - bboxes, gt_bboxes_ignore, mode='iof') - ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) - else: - ignore_overlaps = self.iou_calculator( - gt_bboxes_ignore, bboxes, mode='iof') - ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) - overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 - - assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) - if assign_on_cpu: - assign_result.gt_inds = assign_result.gt_inds.to(device) - assign_result.max_overlaps = assign_result.max_overlaps.to(device) - if assign_result.labels is not None: - assign_result.labels = assign_result.labels.to(device) - return assign_result - - def assign_wrt_overlaps(self, overlaps, gt_labels=None): - """Assign w.r.t. the overlaps of bboxes with gts. - - Args: - overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes, - shape(k, n). - gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ). - - Returns: - :obj:`AssignResult`: The assign result. - """ - num_gts, num_bboxes = overlaps.size(0), overlaps.size(1) - - # 1. assign -1 by default - assigned_gt_inds = overlaps.new_full((num_bboxes, ), - -1, - dtype=torch.long) - - if num_gts == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - max_overlaps = overlaps.new_zeros((num_bboxes, )) - if num_gts == 0: - # No truth, assign everything to background - assigned_gt_inds[:] = 0 - if gt_labels is None: - assigned_labels = None - else: - assigned_labels = overlaps.new_full((num_bboxes, ), - -1, - dtype=torch.long) - return AssignResult( - num_gts, - assigned_gt_inds, - max_overlaps, - labels=assigned_labels) - - # for each anchor, which gt best overlaps with it - # for each anchor, the max iou of all gts - max_overlaps, argmax_overlaps = overlaps.max(dim=0) - # for each gt, which anchor best overlaps with it - # for each gt, the max iou of all proposals - gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) - - # 2. assign negative: below - # the negative inds are set to be 0 - if isinstance(self.neg_iou_thr, float): - assigned_gt_inds[(max_overlaps >= 0) - & (max_overlaps < self.neg_iou_thr)] = 0 - elif isinstance(self.neg_iou_thr, tuple): - assert len(self.neg_iou_thr) == 2 - assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0]) - & (max_overlaps < self.neg_iou_thr[1])] = 0 - - # 3. 
assign positive: above positive IoU threshold - pos_inds = max_overlaps >= self.pos_iou_thr - assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 - - if self.match_low_quality: - # Low-quality matching will overwrite the assigned_gt_inds assigned - # in Step 3. Thus, the assigned gt might not be the best one for - # prediction. - # For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2, - # bbox 1 will be assigned as the best target for bbox A in step 3. - # However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's - # assigned_gt_inds will be overwritten to be bbox 2. - # This might be the reason that it is not used in ROI Heads. - for i in range(num_gts): - if gt_max_overlaps[i] >= self.min_pos_iou: - if self.gt_max_assign_all: - max_iou_inds = overlaps[i, :] == gt_max_overlaps[i] - assigned_gt_inds[max_iou_inds] = i + 1 - else: - assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 - - if gt_labels is not None: - assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) - pos_inds = torch.nonzero( - assigned_gt_inds > 0, as_tuple=False).squeeze() - if pos_inds.numel() > 0: - assigned_labels[pos_inds] = gt_labels[ - assigned_gt_inds[pos_inds] - 1] - else: - assigned_labels = None - - return AssignResult( - num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/point_assigner.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/point_assigner.py deleted file mode 100644 index b0dc2246320bd271af644992a4309077bc537076..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/point_assigner.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from ..builder import BBOX_ASSIGNERS -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - - -@BBOX_ASSIGNERS.register_module() -class PointAssigner(BaseAssigner): - """Assign a corresponding gt bbox or background to each point. - - Each proposals will be assigned with `0`, or a positive integer - indicating the ground truth index. - - - 0: negative sample, no assigned gt - - positive integer: positive sample, index (1-based) of assigned gt - """ - - def __init__(self, scale=4, pos_num=3): - self.scale = scale - self.pos_num = pos_num - - def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): - """Assign gt to points. - - This method assign a gt bbox to every points set, each points set - will be assigned with the background_label (-1), or a label number. - -1 is background, and semi-positive number is the index (0-based) of - assigned gt. - The assignment is done in following steps, the order matters. - - 1. assign every points to the background_label (-1) - 2. A point is assigned to some gt bbox if - (i) the point is within the k closest points to the gt bbox - (ii) the distance between this point and the gt is smaller than - other gt bboxes - - Args: - points (Tensor): points to be assigned, shape(n, 3) while last - dimension stands for (x, y, stride). - gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. - NOTE: currently unused. - gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). - - Returns: - :obj:`AssignResult`: The assign result. 
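
The four assignment steps of `MaxIoUAssigner` can be traced on a toy IoU matrix. A simplified sketch (it ignores `gt_max_assign_all`, the tuple form of `neg_iou_thr`, and label handling; thresholds are chosen only for illustration):

```python
import torch

# overlaps[k, n]: IoU between gt k and proposal n (toy values).
overlaps = torch.tensor([[0.9, 0.3, 0.0],
                         [0.1, 0.6, 0.2]])
pos_iou_thr, neg_iou_thr, min_pos_iou = 0.7, 0.3, 0.3

assigned = overlaps.new_full((3,), -1, dtype=torch.long)   # 1. everything "don't care"
max_ov, argmax_ov = overlaps.max(dim=0)
assigned[max_ov < neg_iou_thr] = 0                          # 2. negatives
pos = max_ov >= pos_iou_thr
assigned[pos] = argmax_ov[pos] + 1                          # 3. positives above threshold
gt_max_ov, gt_argmax_ov = overlaps.max(dim=1)
for i in range(2):                                          # 4. rescue each gt's best proposal
    if gt_max_ov[i] >= min_pos_iou:
        assigned[gt_argmax_ov[i]] = i + 1
print(assigned)  # tensor([1, 2, 0]): proposal 1 is rescued by step 4
```
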
- """ - num_points = points.shape[0] - num_gts = gt_bboxes.shape[0] - - if num_gts == 0 or num_points == 0: - # If no truth assign everything to the background - assigned_gt_inds = points.new_full((num_points, ), - 0, - dtype=torch.long) - if gt_labels is None: - assigned_labels = None - else: - assigned_labels = points.new_full((num_points, ), - -1, - dtype=torch.long) - return AssignResult( - num_gts, assigned_gt_inds, None, labels=assigned_labels) - - points_xy = points[:, :2] - points_stride = points[:, 2] - points_lvl = torch.log2( - points_stride).int() # [3...,4...,5...,6...,7...] - lvl_min, lvl_max = points_lvl.min(), points_lvl.max() - - # assign gt box - gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2 - gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6) - scale = self.scale - gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) + - torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int() - gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max) - - # stores the assigned gt index of each point - assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long) - # stores the assigned gt dist (to this point) of each point - assigned_gt_dist = points.new_full((num_points, ), float('inf')) - points_range = torch.arange(points.shape[0]) - - for idx in range(num_gts): - gt_lvl = gt_bboxes_lvl[idx] - # get the index of points in this level - lvl_idx = gt_lvl == points_lvl - points_index = points_range[lvl_idx] - # get the points in this level - lvl_points = points_xy[lvl_idx, :] - # get the center point of gt - gt_point = gt_bboxes_xy[[idx], :] - # get width and height of gt - gt_wh = gt_bboxes_wh[[idx], :] - # compute the distance between gt center and - # all points in this level - points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1) - # find the nearest k points to gt center in this level - min_dist, min_dist_index = torch.topk( - points_gt_dist, self.pos_num, largest=False) - # the index of nearest k points to gt center in this level - min_dist_points_index = points_index[min_dist_index] - # The less_than_recorded_index stores the index - # of min_dist that is less then the assigned_gt_dist. Where - # assigned_gt_dist stores the dist from previous assigned gt - # (if exist) to each point. - less_than_recorded_index = min_dist < assigned_gt_dist[ - min_dist_points_index] - # The min_dist_points_index stores the index of points satisfy: - # (1) it is k nearest to current gt center in this level. - # (2) it is closer to current gt center than other gt center. 
- min_dist_points_index = min_dist_points_index[ - less_than_recorded_index] - # assign the result - assigned_gt_inds[min_dist_points_index] = idx + 1 - assigned_gt_dist[min_dist_points_index] = min_dist[ - less_than_recorded_index] - - if gt_labels is not None: - assigned_labels = assigned_gt_inds.new_full((num_points, ), -1) - pos_inds = torch.nonzero( - assigned_gt_inds > 0, as_tuple=False).squeeze() - if pos_inds.numel() > 0: - assigned_labels[pos_inds] = gt_labels[ - assigned_gt_inds[pos_inds] - 1] - else: - assigned_labels = None - - return AssignResult( - num_gts, assigned_gt_inds, None, labels=assigned_labels) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/region_assigner.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/region_assigner.py deleted file mode 100644 index 1833b89418820562333c7abfc2acea57deba4893..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/region_assigner.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from mmdet.core import anchor_inside_flags -from ..builder import BBOX_ASSIGNERS -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - - -def calc_region(bbox, ratio, stride, featmap_size=None): - """Calculate region of the box defined by the ratio, the ratio is from the - center of the box to every edge.""" - # project bbox on the feature - f_bbox = bbox / stride - x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2]) - y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3]) - x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2]) - y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3]) - if featmap_size is not None: - x1 = x1.clamp(min=0, max=featmap_size[1]) - y1 = y1.clamp(min=0, max=featmap_size[0]) - x2 = x2.clamp(min=0, max=featmap_size[1]) - y2 = y2.clamp(min=0, max=featmap_size[0]) - return (x1, y1, x2, y2) - - -def anchor_ctr_inside_region_flags(anchors, stride, region): - """Get the flag indicate whether anchor centers are inside regions.""" - x1, y1, x2, y2 = region - f_anchors = anchors / stride - x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5 - y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5 - flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2) - return flags - - -@BBOX_ASSIGNERS.register_module() -class RegionAssigner(BaseAssigner): - """Assign a corresponding gt bbox or background to each bbox. - - Each proposals will be assigned with `-1`, `0`, or a positive integer - indicating the ground truth index. - - - -1: don't care - - 0: negative sample, no assigned gt - - positive integer: positive sample, index (1-based) of assigned gt - - Args: - center_ratio: ratio of the region in the center of the bbox to - define positive sample. - ignore_ratio: ratio of the region to define ignore samples. - """ - - def __init__(self, center_ratio=0.2, ignore_ratio=0.5): - self.center_ratio = center_ratio - self.ignore_ratio = ignore_ratio - - def assign(self, - mlvl_anchors, - mlvl_valid_flags, - gt_bboxes, - img_meta, - featmap_sizes, - anchor_scale, - anchor_strides, - gt_bboxes_ignore=None, - gt_labels=None, - allowed_border=0): - """Assign gt to anchors. - - This method assign a gt bbox to every bbox (proposal/anchor), each bbox - will be assigned with -1, 0, or a positive number. -1 means don't care, - 0 means negative sample, positive number is the index (1-based) of - assigned gt. - - The assignment is done in following steps, and the order matters. - - 1. 
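
`PointAssigner` buckets points and gt boxes into pyramid levels via log2 of the stride and of the gt size, then keeps only the `pos_num` nearest same-level points. A small numeric sketch of that selection with toy points and a single gt:

```python
import torch

scale, pos_num = 4, 3
points = torch.tensor([[ 8.,  8.,  8.],
                       [16., 16.,  8.],
                       [32., 32., 16.]])            # each row: (x, y, stride)
gt_bbox = torch.tensor([0., 0., 32., 24.])

points_lvl = torch.log2(points[:, 2]).int()         # stride 8 -> lvl 3, stride 16 -> lvl 4
lvl_min, lvl_max = int(points_lvl.min()), int(points_lvl.max())

gt_wh = (gt_bbox[2:] - gt_bbox[:2]).clamp(min=1e-6)
gt_lvl = ((torch.log2(gt_wh[0] / scale) +
           torch.log2(gt_wh[1] / scale)) / 2).int()
gt_lvl = int(gt_lvl.clamp(min=lvl_min, max=lvl_max))

# normalized distance from same-level points to the gt center, keep the k nearest
gt_center = (gt_bbox[:2] + gt_bbox[2:]) / 2
lvl_points = points[points_lvl == gt_lvl, :2]
dists = ((lvl_points - gt_center) / gt_wh).norm(dim=1)
k = min(pos_num, dists.numel())
print(torch.topk(dists, k, largest=False).indices)  # nearest points within that level
```
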
Assign every anchor to 0 (negative) - 2. (For each gt_bboxes) Compute ignore flags based on ignore_region - then assign -1 to anchors w.r.t. ignore flags - 3. (For each gt_bboxes) Compute pos flags based on center_region then - assign gt_bboxes to anchors w.r.t. pos flags - 4. (For each gt_bboxes) Compute ignore flags based on adjacent anchor - level then assign -1 to anchors w.r.t. ignore flags - 5. Assign anchor outside of image to -1 - - Args: - mlvl_anchors (list[Tensor]): Multi level anchors. - mlvl_valid_flags (list[Tensor]): Multi level valid flags. - gt_bboxes (Tensor): Ground truth bboxes of image - img_meta (dict): Meta info of image. - featmap_sizes (list[Tensor]): Feature mapsize each level - anchor_scale (int): Scale of the anchor. - anchor_strides (list[int]): Stride of the anchor. - gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. - gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). - allowed_border (int, optional): The border to allow the valid - anchor. Defaults to 0. - - Returns: - :obj:`AssignResult`: The assign result. - """ - if gt_bboxes_ignore is not None: - raise NotImplementedError - - num_gts = gt_bboxes.shape[0] - num_bboxes = sum(x.shape[0] for x in mlvl_anchors) - - if num_gts == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - max_overlaps = gt_bboxes.new_zeros((num_bboxes, )) - assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ), - dtype=torch.long) - if gt_labels is None: - assigned_labels = None - else: - assigned_labels = gt_bboxes.new_full((num_bboxes, ), - -1, - dtype=torch.long) - return AssignResult( - num_gts, - assigned_gt_inds, - max_overlaps, - labels=assigned_labels) - - num_lvls = len(mlvl_anchors) - r1 = (1 - self.center_ratio) / 2 - r2 = (1 - self.ignore_ratio) / 2 - - scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * - (gt_bboxes[:, 3] - gt_bboxes[:, 1])) - min_anchor_size = scale.new_full( - (1, ), float(anchor_scale * anchor_strides[0])) - target_lvls = torch.floor( - torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) - target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() - - # 1. assign 0 (negative) by default - mlvl_assigned_gt_inds = [] - mlvl_ignore_flags = [] - for lvl in range(num_lvls): - h, w = featmap_sizes[lvl] - assert h * w == mlvl_anchors[lvl].shape[0] - assigned_gt_inds = gt_bboxes.new_full((h * w, ), - 0, - dtype=torch.long) - ignore_flags = torch.zeros_like(assigned_gt_inds) - mlvl_assigned_gt_inds.append(assigned_gt_inds) - mlvl_ignore_flags.append(ignore_flags) - - for gt_id in range(num_gts): - lvl = target_lvls[gt_id].item() - featmap_size = featmap_sizes[lvl] - stride = anchor_strides[lvl] - anchors = mlvl_anchors[lvl] - gt_bbox = gt_bboxes[gt_id, :4] - - # Compute regions - ignore_region = calc_region(gt_bbox, r2, stride, featmap_size) - ctr_region = calc_region(gt_bbox, r1, stride, featmap_size) - - # 2. Assign -1 to ignore flags - ignore_flags = anchor_ctr_inside_region_flags( - anchors, stride, ignore_region) - mlvl_assigned_gt_inds[lvl][ignore_flags] = -1 - - # 3. Assign gt_bboxes to pos flags - pos_flags = anchor_ctr_inside_region_flags(anchors, stride, - ctr_region) - mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1 - - # 4. 
Assign -1 to ignore adjacent lvl - if lvl > 0: - d_lvl = lvl - 1 - d_anchors = mlvl_anchors[d_lvl] - d_featmap_size = featmap_sizes[d_lvl] - d_stride = anchor_strides[d_lvl] - d_ignore_region = calc_region(gt_bbox, r2, d_stride, - d_featmap_size) - ignore_flags = anchor_ctr_inside_region_flags( - d_anchors, d_stride, d_ignore_region) - mlvl_ignore_flags[d_lvl][ignore_flags] = 1 - if lvl < num_lvls - 1: - u_lvl = lvl + 1 - u_anchors = mlvl_anchors[u_lvl] - u_featmap_size = featmap_sizes[u_lvl] - u_stride = anchor_strides[u_lvl] - u_ignore_region = calc_region(gt_bbox, r2, u_stride, - u_featmap_size) - ignore_flags = anchor_ctr_inside_region_flags( - u_anchors, u_stride, u_ignore_region) - mlvl_ignore_flags[u_lvl][ignore_flags] = 1 - - # 4. (cont.) Assign -1 to ignore adjacent lvl - for lvl in range(num_lvls): - ignore_flags = mlvl_ignore_flags[lvl] - mlvl_assigned_gt_inds[lvl][ignore_flags] = -1 - - # 5. Assign -1 to anchor outside of image - flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds) - flat_anchors = torch.cat(mlvl_anchors) - flat_valid_flags = torch.cat(mlvl_valid_flags) - assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] == - flat_valid_flags.shape[0]) - inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags, - img_meta['img_shape'], - allowed_border) - outside_flags = ~inside_flags - flat_assigned_gt_inds[outside_flags] = -1 - - if gt_labels is not None: - assigned_labels = torch.zeros_like(flat_assigned_gt_inds) - pos_flags = assigned_gt_inds > 0 - assigned_labels[pos_flags] = gt_labels[ - flat_assigned_gt_inds[pos_flags] - 1] - else: - assigned_labels = None - - return AssignResult( - num_gts, flat_assigned_gt_inds, None, labels=assigned_labels) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/sim_ota_assigner.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/sim_ota_assigner.py deleted file mode 100644 index 58bfef433bad3f1c43df0b950ad92a0619db7641..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/sim_ota_assigner.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch -import torch.nn.functional as F - -from ..builder import BBOX_ASSIGNERS -from ..iou_calculators import bbox_overlaps -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - - -@BBOX_ASSIGNERS.register_module() -class SimOTAAssigner(BaseAssigner): - """Computes matching between predictions and ground truth. - - Args: - center_radius (int | float, optional): Ground truth center size - to judge whether a prior is in center. Default 2.5. - candidate_topk (int, optional): The candidate top-k which used to - get top-k ious to calculate dynamic-k. Default 10. - iou_weight (int | float, optional): The scale factor for regression - iou cost. Default 3.0. - cls_weight (int | float, optional): The scale factor for classification - cost. Default 1.0. - """ - - def __init__(self, - center_radius=2.5, - candidate_topk=10, - iou_weight=3.0, - cls_weight=1.0): - self.center_radius = center_radius - self.candidate_topk = candidate_topk - self.iou_weight = iou_weight - self.cls_weight = cls_weight - - def assign(self, - pred_scores, - priors, - decoded_bboxes, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - eps=1e-7): - """Assign gt to priors using SimOTA. It will switch to CPU mode when - GPU is out of memory. 
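
`RegionAssigner`'s positive and ignore decisions boil down to shrinking the gt box toward its center by a ratio, projecting it onto the feature map, and testing anchor centers against the result. A numeric sketch of that `calc_region` arithmetic on a toy box (clamping to the feature-map size is omitted):

```python
import torch

def calc_region(bbox, ratio, stride):
    """Shrink `bbox` toward its center by `ratio` and project it onto the feature map."""
    f = bbox / stride
    x1 = torch.round((1 - ratio) * f[0] + ratio * f[2])
    y1 = torch.round((1 - ratio) * f[1] + ratio * f[3])
    x2 = torch.round(ratio * f[0] + (1 - ratio) * f[2])
    y2 = torch.round(ratio * f[1] + (1 - ratio) * f[3])
    return x1, y1, x2, y2

gt_bbox = torch.tensor([64., 64., 192., 192.])
center_ratio, ignore_ratio, stride = 0.2, 0.5, 16
r1, r2 = (1 - center_ratio) / 2, (1 - ignore_ratio) / 2
print(calc_region(gt_bbox, r1, stride))  # (7, 7, 9, 9): small central positive region
print(calc_region(gt_bbox, r2, stride))  # (6, 6, 10, 10): larger ignore region around it
```
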
- Args: - pred_scores (Tensor): Classification scores of one image, - a 2D-Tensor with shape [num_priors, num_classes] - priors (Tensor): All priors of one image, a 2D-Tensor with shape - [num_priors, 4] in [cx, xy, stride_w, stride_y] format. - decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape - [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. - gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor - with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format. - gt_labels (Tensor): Ground truth labels of one image, a Tensor - with shape [num_gts]. - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. - eps (float): A value added to the denominator for numerical - stability. Default 1e-7. - Returns: - assign_result (obj:`AssignResult`): The assigned result. - """ - try: - assign_result = self._assign(pred_scores, priors, decoded_bboxes, - gt_bboxes, gt_labels, - gt_bboxes_ignore, eps) - return assign_result - except RuntimeError: - origin_device = pred_scores.device - warnings.warn('OOM RuntimeError is raised due to the huge memory ' - 'cost during label assignment. CPU mode is applied ' - 'in this batch. If you want to avoid this issue, ' - 'try to reduce the batch size or image size.') - torch.cuda.empty_cache() - - pred_scores = pred_scores.cpu() - priors = priors.cpu() - decoded_bboxes = decoded_bboxes.cpu() - gt_bboxes = gt_bboxes.cpu().float() - gt_labels = gt_labels.cpu() - - assign_result = self._assign(pred_scores, priors, decoded_bboxes, - gt_bboxes, gt_labels, - gt_bboxes_ignore, eps) - assign_result.gt_inds = assign_result.gt_inds.to(origin_device) - assign_result.max_overlaps = assign_result.max_overlaps.to( - origin_device) - assign_result.labels = assign_result.labels.to(origin_device) - - return assign_result - - def _assign(self, - pred_scores, - priors, - decoded_bboxes, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - eps=1e-7): - """Assign gt to priors using SimOTA. - Args: - pred_scores (Tensor): Classification scores of one image, - a 2D-Tensor with shape [num_priors, num_classes] - priors (Tensor): All priors of one image, a 2D-Tensor with shape - [num_priors, 4] in [cx, xy, stride_w, stride_y] format. - decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape - [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. - gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor - with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format. - gt_labels (Tensor): Ground truth labels of one image, a Tensor - with shape [num_gts]. - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. - eps (float): A value added to the denominator for numerical - stability. Default 1e-7. - Returns: - :obj:`AssignResult`: The assigned result. 
- """ - INF = 100000.0 - num_gt = gt_bboxes.size(0) - num_bboxes = decoded_bboxes.size(0) - - # assign 0 by default - assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ), - 0, - dtype=torch.long) - valid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info( - priors, gt_bboxes) - valid_decoded_bbox = decoded_bboxes[valid_mask] - valid_pred_scores = pred_scores[valid_mask] - num_valid = valid_decoded_bbox.size(0) - - if num_gt == 0 or num_bboxes == 0 or num_valid == 0: - # No ground truth or boxes, return empty assignment - max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) - if num_gt == 0: - # No truth, assign everything to background - assigned_gt_inds[:] = 0 - if gt_labels is None: - assigned_labels = None - else: - assigned_labels = decoded_bboxes.new_full((num_bboxes, ), - -1, - dtype=torch.long) - return AssignResult( - num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) - - pairwise_ious = bbox_overlaps(valid_decoded_bbox, gt_bboxes) - iou_cost = -torch.log(pairwise_ious + eps) - - gt_onehot_label = ( - F.one_hot(gt_labels.to(torch.int64), - pred_scores.shape[-1]).float().unsqueeze(0).repeat( - num_valid, 1, 1)) - - valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1) - cls_cost = ( - F.binary_cross_entropy( - valid_pred_scores.to(dtype=torch.float32).sqrt_(), - gt_onehot_label, - reduction='none', - ).sum(-1).to(dtype=valid_pred_scores.dtype)) - - cost_matrix = ( - cls_cost * self.cls_weight + iou_cost * self.iou_weight + - (~is_in_boxes_and_center) * INF) - - matched_pred_ious, matched_gt_inds = \ - self.dynamic_k_matching( - cost_matrix, pairwise_ious, num_gt, valid_mask) - - # convert to AssignResult format - assigned_gt_inds[valid_mask] = matched_gt_inds + 1 - assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) - assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long() - max_overlaps = assigned_gt_inds.new_full((num_bboxes, ), - -INF, - dtype=torch.float32) - max_overlaps[valid_mask] = matched_pred_ious - return AssignResult( - num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) - - def get_in_gt_and_in_center_info(self, priors, gt_bboxes): - num_gt = gt_bboxes.size(0) - - repeated_x = priors[:, 0].unsqueeze(1).repeat(1, num_gt) - repeated_y = priors[:, 1].unsqueeze(1).repeat(1, num_gt) - repeated_stride_x = priors[:, 2].unsqueeze(1).repeat(1, num_gt) - repeated_stride_y = priors[:, 3].unsqueeze(1).repeat(1, num_gt) - - # is prior centers in gt bboxes, shape: [n_prior, n_gt] - l_ = repeated_x - gt_bboxes[:, 0] - t_ = repeated_y - gt_bboxes[:, 1] - r_ = gt_bboxes[:, 2] - repeated_x - b_ = gt_bboxes[:, 3] - repeated_y - - deltas = torch.stack([l_, t_, r_, b_], dim=1) - is_in_gts = deltas.min(dim=1).values > 0 - is_in_gts_all = is_in_gts.sum(dim=1) > 0 - - # is prior centers in gt centers - gt_cxs = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 - gt_cys = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 - ct_box_l = gt_cxs - self.center_radius * repeated_stride_x - ct_box_t = gt_cys - self.center_radius * repeated_stride_y - ct_box_r = gt_cxs + self.center_radius * repeated_stride_x - ct_box_b = gt_cys + self.center_radius * repeated_stride_y - - cl_ = repeated_x - ct_box_l - ct_ = repeated_y - ct_box_t - cr_ = ct_box_r - repeated_x - cb_ = ct_box_b - repeated_y - - ct_deltas = torch.stack([cl_, ct_, cr_, cb_], dim=1) - is_in_cts = ct_deltas.min(dim=1).values > 0 - is_in_cts_all = is_in_cts.sum(dim=1) > 0 - - # in boxes or in centers, shape: [num_priors] - is_in_gts_or_centers = is_in_gts_all | is_in_cts_all 
- - # both in boxes and centers, shape: [num_fg, num_gt] - is_in_boxes_and_centers = ( - is_in_gts[is_in_gts_or_centers, :] - & is_in_cts[is_in_gts_or_centers, :]) - return is_in_gts_or_centers, is_in_boxes_and_centers - - def dynamic_k_matching(self, cost, pairwise_ious, num_gt, valid_mask): - matching_matrix = torch.zeros_like(cost, dtype=torch.uint8) - # select candidate topk ious for dynamic-k calculation - candidate_topk = min(self.candidate_topk, pairwise_ious.size(0)) - topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0) - # calculate dynamic k for each gt - dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1) - for gt_idx in range(num_gt): - _, pos_idx = torch.topk( - cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False) - matching_matrix[:, gt_idx][pos_idx] = 1 - - del topk_ious, dynamic_ks, pos_idx - - prior_match_gt_mask = matching_matrix.sum(1) > 1 - if prior_match_gt_mask.sum() > 0: - cost_min, cost_argmin = torch.min( - cost[prior_match_gt_mask, :], dim=1) - matching_matrix[prior_match_gt_mask, :] *= 0 - matching_matrix[prior_match_gt_mask, cost_argmin] = 1 - # get foreground mask inside box and center prior - fg_mask_inboxes = matching_matrix.sum(1) > 0 - valid_mask[valid_mask.clone()] = fg_mask_inboxes - - matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1) - matched_pred_ious = (matching_matrix * - pairwise_ious).sum(1)[fg_mask_inboxes] - return matched_pred_ious, matched_gt_inds diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/task_aligned_assigner.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/task_aligned_assigner.py deleted file mode 100644 index 1872de4a780ab1e7c6b4632e576f8e0644743ca2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/task_aligned_assigner.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from ..builder import BBOX_ASSIGNERS -from ..iou_calculators import build_iou_calculator -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - -INF = 100000000 - - -@BBOX_ASSIGNERS.register_module() -class TaskAlignedAssigner(BaseAssigner): - """Task aligned assigner used in the paper: - `TOOD: Task-aligned One-stage Object Detection. - `_. - - Assign a corresponding gt bbox or background to each predicted bbox. - Each bbox will be assigned with `0` or a positive integer - indicating the ground truth index. - - - 0: negative sample, no assigned gt - - positive integer: positive sample, index (1-based) of assigned gt - - Args: - topk (int): number of bbox selected in each level - iou_calculator (dict): Config dict for iou calculator. - Default: dict(type='BboxOverlaps2D') - """ - - def __init__(self, topk, iou_calculator=dict(type='BboxOverlaps2D')): - assert topk >= 1 - self.topk = topk - self.iou_calculator = build_iou_calculator(iou_calculator) - - def assign(self, - pred_scores, - decode_bboxes, - anchors, - gt_bboxes, - gt_bboxes_ignore=None, - gt_labels=None, - alpha=1, - beta=6): - """Assign gt to bboxes. - - The assignment is done in following steps - - 1. compute alignment metric between all bbox (bbox of all pyramid - levels) and gt - 2. select top-k bbox as candidates for each gt - 3. 
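
The core of SimOTA is the dynamic-k step shown above: each gt's budget is the clamped sum of its top candidate IoUs, and it then takes that many lowest-cost priors; a prior claimed by several gts keeps only its cheapest one. A compact standalone sketch with random toy matrices:

```python
import torch

torch.manual_seed(0)
num_priors, num_gt, candidate_topk = 8, 2, 10
cost = torch.rand(num_priors, num_gt)            # stand-in for cls + iou cost
pairwise_ious = torch.rand(num_priors, num_gt)

matching = torch.zeros_like(cost, dtype=torch.uint8)
topk = min(candidate_topk, num_priors)
topk_ious, _ = torch.topk(pairwise_ious, topk, dim=0)
dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)  # per-gt number of positives
for g in range(num_gt):
    _, pos_idx = torch.topk(cost[:, g], k=int(dynamic_ks[g]), largest=False)
    matching[pos_idx, g] = 1

# a prior matched to several gts keeps only its cheapest gt
multi = matching.sum(1) > 1
if multi.any():
    cheapest = cost[multi].argmin(1)
    matching[multi] = 0
    matching[multi, cheapest] = 1
print(matching.sum(0))  # how many priors each gt finally receives
```
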
limit the positive sample's center in gt (because the anchor-free - detector only can predict positive distance) - - - Args: - pred_scores (Tensor): predicted class probability, - shape(n, num_classes) - decode_bboxes (Tensor): predicted bounding boxes, shape(n, 4) - anchors (Tensor): pre-defined anchors, shape(n, 4). - gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. - gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). - - Returns: - :obj:`TaskAlignedAssignResult`: The assign result. - """ - anchors = anchors[:, :4] - num_gt, num_bboxes = gt_bboxes.size(0), anchors.size(0) - # compute alignment metric between all bbox and gt - overlaps = self.iou_calculator(decode_bboxes, gt_bboxes).detach() - bbox_scores = pred_scores[:, gt_labels].detach() - # assign 0 by default - assigned_gt_inds = anchors.new_full((num_bboxes, ), - 0, - dtype=torch.long) - assign_metrics = anchors.new_zeros((num_bboxes, )) - - if num_gt == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - max_overlaps = anchors.new_zeros((num_bboxes, )) - if num_gt == 0: - # No gt boxes, assign everything to background - assigned_gt_inds[:] = 0 - if gt_labels is None: - assigned_labels = None - else: - assigned_labels = anchors.new_full((num_bboxes, ), - -1, - dtype=torch.long) - assign_result = AssignResult( - num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) - assign_result.assign_metrics = assign_metrics - return assign_result - - # select top-k bboxes as candidates for each gt - alignment_metrics = bbox_scores**alpha * overlaps**beta - topk = min(self.topk, alignment_metrics.size(0)) - _, candidate_idxs = alignment_metrics.topk(topk, dim=0, largest=True) - candidate_metrics = alignment_metrics[candidate_idxs, - torch.arange(num_gt)] - is_pos = candidate_metrics > 0 - - # limit the positive sample's center in gt - anchors_cx = (anchors[:, 0] + anchors[:, 2]) / 2.0 - anchors_cy = (anchors[:, 1] + anchors[:, 3]) / 2.0 - for gt_idx in range(num_gt): - candidate_idxs[:, gt_idx] += gt_idx * num_bboxes - ep_anchors_cx = anchors_cx.view(1, -1).expand( - num_gt, num_bboxes).contiguous().view(-1) - ep_anchors_cy = anchors_cy.view(1, -1).expand( - num_gt, num_bboxes).contiguous().view(-1) - candidate_idxs = candidate_idxs.view(-1) - - # calculate the left, top, right, bottom distance between positive - # bbox center and gt side - l_ = ep_anchors_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] - t_ = ep_anchors_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] - r_ = gt_bboxes[:, 2] - ep_anchors_cx[candidate_idxs].view(-1, num_gt) - b_ = gt_bboxes[:, 3] - ep_anchors_cy[candidate_idxs].view(-1, num_gt) - is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 - is_pos = is_pos & is_in_gts - - # if an anchor box is assigned to multiple gts, - # the one with the highest iou will be selected. 
- overlaps_inf = torch.full_like(overlaps, - -INF).t().contiguous().view(-1) - index = candidate_idxs.view(-1)[is_pos.view(-1)] - overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] - overlaps_inf = overlaps_inf.view(num_gt, -1).t() - - max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) - assigned_gt_inds[ - max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 - assign_metrics[max_overlaps != -INF] = alignment_metrics[ - max_overlaps != -INF, argmax_overlaps[max_overlaps != -INF]] - - if gt_labels is not None: - assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) - pos_inds = torch.nonzero( - assigned_gt_inds > 0, as_tuple=False).squeeze() - if pos_inds.numel() > 0: - assigned_labels[pos_inds] = gt_labels[ - assigned_gt_inds[pos_inds] - 1] - else: - assigned_labels = None - assign_result = AssignResult( - num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) - assign_result.assign_metrics = assign_metrics - return assign_result diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/uniform_assigner.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/uniform_assigner.py deleted file mode 100644 index 70294fc45f32b2611c6c1521de14f57e4ec446f0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/assigners/uniform_assigner.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from ..builder import BBOX_ASSIGNERS -from ..iou_calculators import build_iou_calculator -from ..transforms import bbox_xyxy_to_cxcywh -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - - -@BBOX_ASSIGNERS.register_module() -class UniformAssigner(BaseAssigner): - """Uniform Matching between the anchors and gt boxes, which can achieve - balance in positive anchors, and gt_bboxes_ignore was not considered for - now. - - Args: - pos_ignore_thr (float): the threshold to ignore positive anchors - neg_ignore_thr (float): the threshold to ignore negative anchors - match_times(int): Number of positive anchors for each gt box. - Default 4. - iou_calculator (dict): iou_calculator config - """ - - def __init__(self, - pos_ignore_thr, - neg_ignore_thr, - match_times=4, - iou_calculator=dict(type='BboxOverlaps2D')): - self.match_times = match_times - self.pos_ignore_thr = pos_ignore_thr - self.neg_ignore_thr = neg_ignore_thr - self.iou_calculator = build_iou_calculator(iou_calculator) - - def assign(self, - bbox_pred, - anchor, - gt_bboxes, - gt_bboxes_ignore=None, - gt_labels=None): - num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) - - # 1. assign -1 by default - assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), - 0, - dtype=torch.long) - assigned_labels = bbox_pred.new_full((num_bboxes, ), - -1, - dtype=torch.long) - if num_gts == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - if num_gts == 0: - # No ground truth, assign all to background - assigned_gt_inds[:] = 0 - assign_result = AssignResult( - num_gts, assigned_gt_inds, None, labels=assigned_labels) - assign_result.set_extra_property( - 'pos_idx', bbox_pred.new_empty(0, dtype=torch.bool)) - assign_result.set_extra_property('pos_predicted_boxes', - bbox_pred.new_empty((0, 4))) - assign_result.set_extra_property('target_boxes', - bbox_pred.new_empty((0, 4))) - return assign_result - - # 2. 
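
`TaskAlignedAssigner` ranks candidates by the alignment metric `score**alpha * iou**beta` evaluated at the gt's class and keeps the top-k. A tiny sketch of the metric and candidate selection with toy scores and IoUs (alpha=1, beta=6 as in the defaults above):

```python
import torch

alpha, beta, topk = 1, 6, 2
pred_scores = torch.tensor([[0.9, 0.1],
                            [0.6, 0.4],
                            [0.2, 0.8]])          # (num_priors, num_classes)
overlaps = torch.tensor([[0.7], [0.8], [0.3]])    # IoU of each prior with the single gt
gt_labels = torch.tensor([0])

bbox_scores = pred_scores[:, gt_labels]           # score of the gt's class, (num_priors, 1)
alignment = bbox_scores**alpha * overlaps**beta
_, candidate_idxs = alignment.topk(min(topk, alignment.size(0)), dim=0, largest=True)
print(alignment.squeeze(1), candidate_idxs.squeeze(1))  # prior 1 wins despite a lower score
```
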
Compute the L1 cost between boxes - # Note that we use anchors and predict boxes both - cost_bbox = torch.cdist( - bbox_xyxy_to_cxcywh(bbox_pred), - bbox_xyxy_to_cxcywh(gt_bboxes), - p=1) - cost_bbox_anchors = torch.cdist( - bbox_xyxy_to_cxcywh(anchor), bbox_xyxy_to_cxcywh(gt_bboxes), p=1) - - # We found that topk function has different results in cpu and - # cuda mode. In order to ensure consistency with the source code, - # we also use cpu mode. - # TODO: Check whether the performance of cpu and cuda are the same. - C = cost_bbox.cpu() - C1 = cost_bbox_anchors.cpu() - - # self.match_times x n - index = torch.topk( - C, # c=b,n,x c[i]=n,x - k=self.match_times, - dim=0, - largest=False)[1] - - # self.match_times x n - index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1] - # (self.match_times*2) x n - indexes = torch.cat((index, index1), - dim=1).reshape(-1).to(bbox_pred.device) - - pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes) - anchor_overlaps = self.iou_calculator(anchor, gt_bboxes) - pred_max_overlaps, _ = pred_overlaps.max(dim=1) - anchor_max_overlaps, _ = anchor_overlaps.max(dim=0) - - # 3. Compute the ignore indexes use gt_bboxes and predict boxes - ignore_idx = pred_max_overlaps > self.neg_ignore_thr - assigned_gt_inds[ignore_idx] = -1 - - # 4. Compute the ignore indexes of positive sample use anchors - # and predict boxes - pos_gt_index = torch.arange( - 0, C1.size(1), - device=bbox_pred.device).repeat(self.match_times * 2) - pos_ious = anchor_overlaps[indexes, pos_gt_index] - pos_ignore_idx = pos_ious < self.pos_ignore_thr - - pos_gt_index_with_ignore = pos_gt_index + 1 - pos_gt_index_with_ignore[pos_ignore_idx] = -1 - assigned_gt_inds[indexes] = pos_gt_index_with_ignore - - if gt_labels is not None: - assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) - pos_inds = torch.nonzero( - assigned_gt_inds > 0, as_tuple=False).squeeze() - if pos_inds.numel() > 0: - assigned_labels[pos_inds] = gt_labels[ - assigned_gt_inds[pos_inds] - 1] - else: - assigned_labels = None - - assign_result = AssignResult( - num_gts, - assigned_gt_inds, - anchor_max_overlaps, - labels=assigned_labels) - assign_result.set_extra_property('pos_idx', ~pos_ignore_idx) - assign_result.set_extra_property('pos_predicted_boxes', - bbox_pred[indexes]) - assign_result.set_extra_property('target_boxes', - gt_bboxes[pos_gt_index]) - return assign_result diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/builder.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/builder.py deleted file mode 100644 index 9cfa055b5df8cb73d84580ea1f23b82f5393ca8e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/builder.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
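
`UniformAssigner` matches each gt to the `match_times` predictions (and anchors) with the smallest L1 distance in (cx, cy, w, h) space. A minimal sketch of that cost and top-k step on toy boxes, with the cxcywh conversion re-stated inline rather than imported:

```python
import torch

def xyxy_to_cxcywh(b):
    cx, cy = (b[:, 0] + b[:, 2]) / 2, (b[:, 1] + b[:, 3]) / 2
    return torch.stack([cx, cy, b[:, 2] - b[:, 0], b[:, 3] - b[:, 1]], dim=1)

match_times = 2
bbox_pred = torch.tensor([[0., 0., 10., 10.],
                          [5., 5., 20., 20.],
                          [30., 30., 50., 50.]])
gt_bboxes = torch.tensor([[0., 0., 12., 12.]])

cost = torch.cdist(xyxy_to_cxcywh(bbox_pred), xyxy_to_cxcywh(gt_bboxes), p=1)
index = torch.topk(cost, k=match_times, dim=0, largest=False)[1]   # match_times x num_gt
print(index.squeeze(1))  # tensor([0, 1]): the two closest predictions for the gt
```
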
-from mmcv.utils import Registry, build_from_cfg - -BBOX_ASSIGNERS = Registry('bbox_assigner') -BBOX_SAMPLERS = Registry('bbox_sampler') -BBOX_CODERS = Registry('bbox_coder') - - -def build_assigner(cfg, **default_args): - """Builder of box assigner.""" - return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args) - - -def build_sampler(cfg, **default_args): - """Builder of box sampler.""" - return build_from_cfg(cfg, BBOX_SAMPLERS, default_args) - - -def build_bbox_coder(cfg, **default_args): - """Builder of box coder.""" - return build_from_cfg(cfg, BBOX_CODERS, default_args) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/__init__.py deleted file mode 100644 index e12fd64e12b5e76a014da9bd724f1b6f50b488c4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .base_bbox_coder import BaseBBoxCoder -from .bucketing_bbox_coder import BucketingBBoxCoder -from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder -from .distance_point_bbox_coder import DistancePointBBoxCoder -from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder -from .pseudo_bbox_coder import PseudoBBoxCoder -from .tblr_bbox_coder import TBLRBBoxCoder -from .yolo_bbox_coder import YOLOBBoxCoder - -__all__ = [ - 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder', - 'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder', - 'BucketingBBoxCoder', 'DistancePointBBoxCoder' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/base_bbox_coder.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/base_bbox_coder.py deleted file mode 100644 index a7ed041a456e59282c1bf72eaec76bc2c0d1b990..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/base_bbox_coder.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta, abstractmethod - - -class BaseBBoxCoder(metaclass=ABCMeta): - """Base bounding box coder.""" - - def __init__(self, **kwargs): - pass - - @abstractmethod - def encode(self, bboxes, gt_bboxes): - """Encode deltas between bboxes and ground truth boxes.""" - - @abstractmethod - def decode(self, bboxes, bboxes_pred): - """Decode the predicted bboxes according to prediction and base - boxes.""" diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/bucketing_bbox_coder.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/bucketing_bbox_coder.py deleted file mode 100644 index 4be0ada04b410017035443fdfc15d898ed9a0e4b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/bucketing_bbox_coder.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import numpy as np -import torch -import torch.nn.functional as F - -from ..builder import BBOX_CODERS -from ..transforms import bbox_rescale -from .base_bbox_coder import BaseBBoxCoder - - -@BBOX_CODERS.register_module() -class BucketingBBoxCoder(BaseBBoxCoder): - """Bucketing BBox Coder for Side-Aware Boundary Localization (SABL). - - Boundary Localization with Bucketing and Bucketing Guided Rescoring - are implemented here. - - Please refer to https://arxiv.org/abs/1912.04260 for more details. - - Args: - num_buckets (int): Number of buckets. - scale_factor (int): Scale factor of proposals to generate buckets. 
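
With the `builder.py` registries above, assigners are normally instantiated from config dicts rather than constructed directly. A usage sketch, assuming the mmdet package these deleted files belong to is still importable:

```python
from mmdet.core.bbox import build_assigner

# Typical config-driven construction; values here are illustrative only.
assigner_cfg = dict(
    type='MaxIoUAssigner',
    pos_iou_thr=0.5,
    neg_iou_thr=0.4,
    min_pos_iou=0.3,
    match_low_quality=True)
assigner = build_assigner(assigner_cfg)
# assign_result = assigner.assign(proposals, gt_bboxes, gt_labels=gt_labels)
```
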
- offset_topk (int): Topk buckets are used to generate - bucket fine regression targets. Defaults to 2. - offset_upperbound (float): Offset upperbound to generate - bucket fine regression targets. - To avoid too large offset displacements. Defaults to 1.0. - cls_ignore_neighbor (bool): Ignore second nearest bucket or Not. - Defaults to True. - clip_border (bool, optional): Whether clip the objects outside the - border of the image. Defaults to True. - """ - - def __init__(self, - num_buckets, - scale_factor, - offset_topk=2, - offset_upperbound=1.0, - cls_ignore_neighbor=True, - clip_border=True): - super(BucketingBBoxCoder, self).__init__() - self.num_buckets = num_buckets - self.scale_factor = scale_factor - self.offset_topk = offset_topk - self.offset_upperbound = offset_upperbound - self.cls_ignore_neighbor = cls_ignore_neighbor - self.clip_border = clip_border - - def encode(self, bboxes, gt_bboxes): - """Get bucketing estimation and fine regression targets during - training. - - Args: - bboxes (torch.Tensor): source boxes, e.g., object proposals. - gt_bboxes (torch.Tensor): target of the transformation, e.g., - ground truth boxes. - - Returns: - encoded_bboxes(tuple[Tensor]): bucketing estimation - and fine regression targets and weights - """ - - assert bboxes.size(0) == gt_bboxes.size(0) - assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 - encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets, - self.scale_factor, self.offset_topk, - self.offset_upperbound, - self.cls_ignore_neighbor) - return encoded_bboxes - - def decode(self, bboxes, pred_bboxes, max_shape=None): - """Apply transformation `pred_bboxes` to `boxes`. - Args: - boxes (torch.Tensor): Basic boxes. - pred_bboxes (torch.Tensor): Predictions for bucketing estimation - and fine regression - max_shape (tuple[int], optional): Maximum shape of boxes. - Defaults to None. - - Returns: - torch.Tensor: Decoded boxes. - """ - assert len(pred_bboxes) == 2 - cls_preds, offset_preds = pred_bboxes - assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size( - 0) == bboxes.size(0) - decoded_bboxes = bucket2bbox(bboxes, cls_preds, offset_preds, - self.num_buckets, self.scale_factor, - max_shape, self.clip_border) - - return decoded_bboxes - - -@mmcv.jit(coderize=True) -def generat_buckets(proposals, num_buckets, scale_factor=1.0): - """Generate buckets w.r.t bucket number and scale factor of proposals. - - Args: - proposals (Tensor): Shape (n, 4) - num_buckets (int): Number of buckets. - scale_factor (float): Scale factor to rescale proposals. - - Returns: - tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets, - t_buckets, d_buckets) - - - bucket_w: Width of buckets on x-axis. Shape (n, ). - - bucket_h: Height of buckets on y-axis. Shape (n, ). - - l_buckets: Left buckets. Shape (n, ceil(side_num/2)). - - r_buckets: Right buckets. Shape (n, ceil(side_num/2)). - - t_buckets: Top buckets. Shape (n, ceil(side_num/2)). - - d_buckets: Down buckets. Shape (n, ceil(side_num/2)). 
- """ - proposals = bbox_rescale(proposals, scale_factor) - - # number of buckets in each side - side_num = int(np.ceil(num_buckets / 2.0)) - pw = proposals[..., 2] - proposals[..., 0] - ph = proposals[..., 3] - proposals[..., 1] - px1 = proposals[..., 0] - py1 = proposals[..., 1] - px2 = proposals[..., 2] - py2 = proposals[..., 3] - - bucket_w = pw / num_buckets - bucket_h = ph / num_buckets - - # left buckets - l_buckets = px1[:, None] + (0.5 + torch.arange( - 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None] - # right buckets - r_buckets = px2[:, None] - (0.5 + torch.arange( - 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None] - # top buckets - t_buckets = py1[:, None] + (0.5 + torch.arange( - 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None] - # down buckets - d_buckets = py2[:, None] - (0.5 + torch.arange( - 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None] - return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets - - -@mmcv.jit(coderize=True) -def bbox2bucket(proposals, - gt, - num_buckets, - scale_factor, - offset_topk=2, - offset_upperbound=1.0, - cls_ignore_neighbor=True): - """Generate buckets estimation and fine regression targets. - - Args: - proposals (Tensor): Shape (n, 4) - gt (Tensor): Shape (n, 4) - num_buckets (int): Number of buckets. - scale_factor (float): Scale factor to rescale proposals. - offset_topk (int): Topk buckets are used to generate - bucket fine regression targets. Defaults to 2. - offset_upperbound (float): Offset allowance to generate - bucket fine regression targets. - To avoid too large offset displacements. Defaults to 1.0. - cls_ignore_neighbor (bool): Ignore second nearest bucket or Not. - Defaults to True. - - Returns: - tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights). - - - offsets: Fine regression targets. \ - Shape (n, num_buckets*2). - - offsets_weights: Fine regression weights. \ - Shape (n, num_buckets*2). - - bucket_labels: Bucketing estimation labels. \ - Shape (n, num_buckets*2). - - cls_weights: Bucketing estimation weights. \ - Shape (n, num_buckets*2). 
- """ - assert proposals.size() == gt.size() - - # generate buckets - proposals = proposals.float() - gt = gt.float() - (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, - d_buckets) = generat_buckets(proposals, num_buckets, scale_factor) - - gx1 = gt[..., 0] - gy1 = gt[..., 1] - gx2 = gt[..., 2] - gy2 = gt[..., 3] - - # generate offset targets and weights - # offsets from buckets to gts - l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None] - r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None] - t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None] - d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None] - - # select top-k nearest buckets - l_topk, l_label = l_offsets.abs().topk( - offset_topk, dim=1, largest=False, sorted=True) - r_topk, r_label = r_offsets.abs().topk( - offset_topk, dim=1, largest=False, sorted=True) - t_topk, t_label = t_offsets.abs().topk( - offset_topk, dim=1, largest=False, sorted=True) - d_topk, d_label = d_offsets.abs().topk( - offset_topk, dim=1, largest=False, sorted=True) - - offset_l_weights = l_offsets.new_zeros(l_offsets.size()) - offset_r_weights = r_offsets.new_zeros(r_offsets.size()) - offset_t_weights = t_offsets.new_zeros(t_offsets.size()) - offset_d_weights = d_offsets.new_zeros(d_offsets.size()) - inds = torch.arange(0, proposals.size(0)).to(proposals).long() - - # generate offset weights of top-k nearest buckets - for k in range(offset_topk): - if k >= 1: - offset_l_weights[inds, l_label[:, - k]] = (l_topk[:, k] < - offset_upperbound).float() - offset_r_weights[inds, r_label[:, - k]] = (r_topk[:, k] < - offset_upperbound).float() - offset_t_weights[inds, t_label[:, - k]] = (t_topk[:, k] < - offset_upperbound).float() - offset_d_weights[inds, d_label[:, - k]] = (d_topk[:, k] < - offset_upperbound).float() - else: - offset_l_weights[inds, l_label[:, k]] = 1.0 - offset_r_weights[inds, r_label[:, k]] = 1.0 - offset_t_weights[inds, t_label[:, k]] = 1.0 - offset_d_weights[inds, d_label[:, k]] = 1.0 - - offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1) - offsets_weights = torch.cat([ - offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights - ], - dim=-1) - - # generate bucket labels and weight - side_num = int(np.ceil(num_buckets / 2.0)) - labels = torch.stack( - [l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1) - - batch_size = labels.size(0) - bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size, - -1).float() - bucket_cls_l_weights = (l_offsets.abs() < 1).float() - bucket_cls_r_weights = (r_offsets.abs() < 1).float() - bucket_cls_t_weights = (t_offsets.abs() < 1).float() - bucket_cls_d_weights = (d_offsets.abs() < 1).float() - bucket_cls_weights = torch.cat([ - bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights, - bucket_cls_d_weights - ], - dim=-1) - # ignore second nearest buckets for cls if necessary - if cls_ignore_neighbor: - bucket_cls_weights = (~((bucket_cls_weights == 1) & - (bucket_labels == 0))).float() - else: - bucket_cls_weights[:] = 1.0 - return offsets, offsets_weights, bucket_labels, bucket_cls_weights - - -@mmcv.jit(coderize=True) -def bucket2bbox(proposals, - cls_preds, - offset_preds, - num_buckets, - scale_factor=1.0, - max_shape=None, - clip_border=True): - """Apply bucketing estimation (cls preds) and fine regression (offset - preds) to generate det bboxes. - - Args: - proposals (Tensor): Boxes to be transformed. Shape (n, 4) - cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2). 
- offset_preds (Tensor): fine regression. Shape (n, num_buckets*2). - num_buckets (int): Number of buckets. - scale_factor (float): Scale factor to rescale proposals. - max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W) - clip_border (bool, optional): Whether clip the objects outside the - border of the image. Defaults to True. - - Returns: - tuple[Tensor]: (bboxes, loc_confidence). - - - bboxes: predicted bboxes. Shape (n, 4) - - loc_confidence: localization confidence of predicted bboxes. - Shape (n,). - """ - - side_num = int(np.ceil(num_buckets / 2.0)) - cls_preds = cls_preds.view(-1, side_num) - offset_preds = offset_preds.view(-1, side_num) - - scores = F.softmax(cls_preds, dim=1) - score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True) - - rescaled_proposals = bbox_rescale(proposals, scale_factor) - - pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0] - ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1] - px1 = rescaled_proposals[..., 0] - py1 = rescaled_proposals[..., 1] - px2 = rescaled_proposals[..., 2] - py2 = rescaled_proposals[..., 3] - - bucket_w = pw / num_buckets - bucket_h = ph / num_buckets - - score_inds_l = score_label[0::4, 0] - score_inds_r = score_label[1::4, 0] - score_inds_t = score_label[2::4, 0] - score_inds_d = score_label[3::4, 0] - l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w - r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w - t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h - d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h - - offsets = offset_preds.view(-1, 4, side_num) - inds = torch.arange(proposals.size(0)).to(proposals).long() - l_offsets = offsets[:, 0, :][inds, score_inds_l] - r_offsets = offsets[:, 1, :][inds, score_inds_r] - t_offsets = offsets[:, 2, :][inds, score_inds_t] - d_offsets = offsets[:, 3, :][inds, score_inds_d] - - x1 = l_buckets - l_offsets * bucket_w - x2 = r_buckets - r_offsets * bucket_w - y1 = t_buckets - t_offsets * bucket_h - y2 = d_buckets - d_offsets * bucket_h - - if clip_border and max_shape is not None: - x1 = x1.clamp(min=0, max=max_shape[1] - 1) - y1 = y1.clamp(min=0, max=max_shape[0] - 1) - x2 = x2.clamp(min=0, max=max_shape[1] - 1) - y2 = y2.clamp(min=0, max=max_shape[0] - 1) - bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]], - dim=-1) - - # bucketing guided rescoring - loc_confidence = score_topk[:, 0] - top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1 - loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float() - loc_confidence = loc_confidence.view(-1, 4).mean(dim=1) - - return bboxes, loc_confidence diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py deleted file mode 100644 index a7f1c62fa7bde9280f9edcb4926cd77bfdd3a0b4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py +++ /dev/null @@ -1,392 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import mmcv -import numpy as np -import torch - -from ..builder import BBOX_CODERS -from .base_bbox_coder import BaseBBoxCoder - - -@BBOX_CODERS.register_module() -class DeltaXYWHBBoxCoder(BaseBBoxCoder): - """Delta XYWH BBox coder. 
- - Following the practice in `R-CNN `_, - this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and - decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2). - - Args: - target_means (Sequence[float]): Denormalizing means of target for - delta coordinates - target_stds (Sequence[float]): Denormalizing standard deviation of - target for delta coordinates - clip_border (bool, optional): Whether clip the objects outside the - border of the image. Defaults to True. - add_ctr_clamp (bool): Whether to add center clamp, when added, the - predicted box is clamped is its center is too far away from - the original anchor's center. Only used by YOLOF. Default False. - ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. - Default 32. - """ - - def __init__(self, - target_means=(0., 0., 0., 0.), - target_stds=(1., 1., 1., 1.), - clip_border=True, - add_ctr_clamp=False, - ctr_clamp=32): - super(BaseBBoxCoder, self).__init__() - self.means = target_means - self.stds = target_stds - self.clip_border = clip_border - self.add_ctr_clamp = add_ctr_clamp - self.ctr_clamp = ctr_clamp - - def encode(self, bboxes, gt_bboxes): - """Get box regression transformation deltas that can be used to - transform the ``bboxes`` into the ``gt_bboxes``. - - Args: - bboxes (torch.Tensor): Source boxes, e.g., object proposals. - gt_bboxes (torch.Tensor): Target of the transformation, e.g., - ground-truth boxes. - - Returns: - torch.Tensor: Box transformation deltas - """ - - assert bboxes.size(0) == gt_bboxes.size(0) - assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 - encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds) - return encoded_bboxes - - def decode(self, - bboxes, - pred_bboxes, - max_shape=None, - wh_ratio_clip=16 / 1000): - """Apply transformation `pred_bboxes` to `boxes`. - - Args: - bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4) - pred_bboxes (Tensor): Encoded offsets with respect to each roi. - Has shape (B, N, num_classes * 4) or (B, N, 4) or - (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H - when rois is a grid of anchors.Offset encoding follows [1]_. - max_shape (Sequence[int] or torch.Tensor or Sequence[ - Sequence[int]],optional): Maximum bounds for boxes, specifies - (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then - the max_shape should be a Sequence[Sequence[int]] - and the length of max_shape should also be B. - wh_ratio_clip (float, optional): The allowed ratio between - width and height. - - Returns: - torch.Tensor: Decoded boxes. - """ - - assert pred_bboxes.size(0) == bboxes.size(0) - if pred_bboxes.ndim == 3: - assert pred_bboxes.size(1) == bboxes.size(1) - - if pred_bboxes.ndim == 2 and not torch.onnx.is_in_onnx_export(): - # single image decode - decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means, - self.stds, max_shape, wh_ratio_clip, - self.clip_border, self.add_ctr_clamp, - self.ctr_clamp) - else: - if pred_bboxes.ndim == 3 and not torch.onnx.is_in_onnx_export(): - warnings.warn( - 'DeprecationWarning: onnx_delta2bbox is deprecated ' - 'in the case of batch decoding and non-ONNX, ' - 'please use “delta2bbox” instead. In order to improve ' - 'the decoding speed, the batch function will no ' - 'longer be supported. 
') - decoded_bboxes = onnx_delta2bbox(bboxes, pred_bboxes, self.means, - self.stds, max_shape, - wh_ratio_clip, self.clip_border, - self.add_ctr_clamp, - self.ctr_clamp) - - return decoded_bboxes - - -@mmcv.jit(coderize=True) -def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)): - """Compute deltas of proposals w.r.t. gt. - - We usually compute the deltas of x, y, w, h of proposals w.r.t ground - truth bboxes to get regression target. - This is the inverse function of :func:`delta2bbox`. - - Args: - proposals (Tensor): Boxes to be transformed, shape (N, ..., 4) - gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4) - means (Sequence[float]): Denormalizing means for delta coordinates - stds (Sequence[float]): Denormalizing standard deviation for delta - coordinates - - Returns: - Tensor: deltas with shape (N, 4), where columns represent dx, dy, - dw, dh. - """ - assert proposals.size() == gt.size() - - proposals = proposals.float() - gt = gt.float() - px = (proposals[..., 0] + proposals[..., 2]) * 0.5 - py = (proposals[..., 1] + proposals[..., 3]) * 0.5 - pw = proposals[..., 2] - proposals[..., 0] - ph = proposals[..., 3] - proposals[..., 1] - - gx = (gt[..., 0] + gt[..., 2]) * 0.5 - gy = (gt[..., 1] + gt[..., 3]) * 0.5 - gw = gt[..., 2] - gt[..., 0] - gh = gt[..., 3] - gt[..., 1] - - dx = (gx - px) / pw - dy = (gy - py) / ph - dw = torch.log(gw / pw) - dh = torch.log(gh / ph) - deltas = torch.stack([dx, dy, dw, dh], dim=-1) - - means = deltas.new_tensor(means).unsqueeze(0) - stds = deltas.new_tensor(stds).unsqueeze(0) - deltas = deltas.sub_(means).div_(stds) - - return deltas - - -@mmcv.jit(coderize=True) -def delta2bbox(rois, - deltas, - means=(0., 0., 0., 0.), - stds=(1., 1., 1., 1.), - max_shape=None, - wh_ratio_clip=16 / 1000, - clip_border=True, - add_ctr_clamp=False, - ctr_clamp=32): - """Apply deltas to shift/scale base boxes. - - Typically the rois are anchor or proposed bounding boxes and the deltas are - network outputs used to shift/scale those boxes. - This is the inverse function of :func:`bbox2delta`. - - Args: - rois (Tensor): Boxes to be transformed. Has shape (N, 4). - deltas (Tensor): Encoded offsets relative to each roi. - Has shape (N, num_classes * 4) or (N, 4). Note - N = num_base_anchors * W * H, when rois is a grid of - anchors. Offset encoding follows [1]_. - means (Sequence[float]): Denormalizing means for delta coordinates. - Default (0., 0., 0., 0.). - stds (Sequence[float]): Denormalizing standard deviation for delta - coordinates. Default (1., 1., 1., 1.). - max_shape (tuple[int, int]): Maximum bounds for boxes, specifies - (H, W). Default None. - wh_ratio_clip (float): Maximum aspect ratio for boxes. Default - 16 / 1000. - clip_border (bool, optional): Whether clip the objects outside the - border of the image. Default True. - add_ctr_clamp (bool): Whether to add center clamp. When set to True, - the center of the prediction bounding box will be clamped to - avoid being too far away from the center of the anchor. - Only used by YOLOF. Default False. - ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. - Default 32. - - Returns: - Tensor: Boxes with shape (N, num_classes * 4) or (N, 4), where 4 - represent tl_x, tl_y, br_x, br_y. - - References: - .. 
[1] https://arxiv.org/abs/1311.2524 - - Example: - >>> rois = torch.Tensor([[ 0., 0., 1., 1.], - >>> [ 0., 0., 1., 1.], - >>> [ 0., 0., 1., 1.], - >>> [ 5., 5., 5., 5.]]) - >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], - >>> [ 1., 1., 1., 1.], - >>> [ 0., 0., 2., -1.], - >>> [ 0.7, -1.9, -0.5, 0.3]]) - >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3)) - tensor([[0.0000, 0.0000, 1.0000, 1.0000], - [0.1409, 0.1409, 2.8591, 2.8591], - [0.0000, 0.3161, 4.1945, 0.6839], - [5.0000, 5.0000, 5.0000, 5.0000]]) - """ - num_bboxes, num_classes = deltas.size(0), deltas.size(1) // 4 - if num_bboxes == 0: - return deltas - - deltas = deltas.reshape(-1, 4) - - means = deltas.new_tensor(means).view(1, -1) - stds = deltas.new_tensor(stds).view(1, -1) - denorm_deltas = deltas * stds + means - - dxy = denorm_deltas[:, :2] - dwh = denorm_deltas[:, 2:] - - # Compute width/height of each roi - rois_ = rois.repeat(1, num_classes).reshape(-1, 4) - pxy = ((rois_[:, :2] + rois_[:, 2:]) * 0.5) - pwh = (rois_[:, 2:] - rois_[:, :2]) - - dxy_wh = pwh * dxy - - max_ratio = np.abs(np.log(wh_ratio_clip)) - if add_ctr_clamp: - dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp) - dwh = torch.clamp(dwh, max=max_ratio) - else: - dwh = dwh.clamp(min=-max_ratio, max=max_ratio) - - gxy = pxy + dxy_wh - gwh = pwh * dwh.exp() - x1y1 = gxy - (gwh * 0.5) - x2y2 = gxy + (gwh * 0.5) - bboxes = torch.cat([x1y1, x2y2], dim=-1) - if clip_border and max_shape is not None: - bboxes[..., 0::2].clamp_(min=0, max=max_shape[1]) - bboxes[..., 1::2].clamp_(min=0, max=max_shape[0]) - bboxes = bboxes.reshape(num_bboxes, -1) - return bboxes - - -def onnx_delta2bbox(rois, - deltas, - means=(0., 0., 0., 0.), - stds=(1., 1., 1., 1.), - max_shape=None, - wh_ratio_clip=16 / 1000, - clip_border=True, - add_ctr_clamp=False, - ctr_clamp=32): - """Apply deltas to shift/scale base boxes. - - Typically the rois are anchor or proposed bounding boxes and the deltas are - network outputs used to shift/scale those boxes. - This is the inverse function of :func:`bbox2delta`. - - Args: - rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4) - deltas (Tensor): Encoded offsets with respect to each roi. - Has shape (B, N, num_classes * 4) or (B, N, 4) or - (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H - when rois is a grid of anchors.Offset encoding follows [1]_. - means (Sequence[float]): Denormalizing means for delta coordinates. - Default (0., 0., 0., 0.). - stds (Sequence[float]): Denormalizing standard deviation for delta - coordinates. Default (1., 1., 1., 1.). - max_shape (Sequence[int] or torch.Tensor or Sequence[ - Sequence[int]],optional): Maximum bounds for boxes, specifies - (H, W, C) or (H, W). If rois shape is (B, N, 4), then - the max_shape should be a Sequence[Sequence[int]] - and the length of max_shape should also be B. Default None. - wh_ratio_clip (float): Maximum aspect ratio for boxes. - Default 16 / 1000. - clip_border (bool, optional): Whether clip the objects outside the - border of the image. Default True. - add_ctr_clamp (bool): Whether to add center clamp, when added, the - predicted box is clamped is its center is too far away from - the original anchor's center. Only used by YOLOF. Default False. - ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. - Default 32. - - Returns: - Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or - (N, num_classes * 4) or (N, 4), where 4 represent - tl_x, tl_y, br_x, br_y. - - References: - .. 
[1] https://arxiv.org/abs/1311.2524 - - Example: - >>> rois = torch.Tensor([[ 0., 0., 1., 1.], - >>> [ 0., 0., 1., 1.], - >>> [ 0., 0., 1., 1.], - >>> [ 5., 5., 5., 5.]]) - >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], - >>> [ 1., 1., 1., 1.], - >>> [ 0., 0., 2., -1.], - >>> [ 0.7, -1.9, -0.5, 0.3]]) - >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3)) - tensor([[0.0000, 0.0000, 1.0000, 1.0000], - [0.1409, 0.1409, 2.8591, 2.8591], - [0.0000, 0.3161, 4.1945, 0.6839], - [5.0000, 5.0000, 5.0000, 5.0000]]) - """ - means = deltas.new_tensor(means).view(1, - -1).repeat(1, - deltas.size(-1) // 4) - stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4) - denorm_deltas = deltas * stds + means - dx = denorm_deltas[..., 0::4] - dy = denorm_deltas[..., 1::4] - dw = denorm_deltas[..., 2::4] - dh = denorm_deltas[..., 3::4] - - x1, y1 = rois[..., 0], rois[..., 1] - x2, y2 = rois[..., 2], rois[..., 3] - # Compute center of each roi - px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx) - py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy) - # Compute width/height of each roi - pw = (x2 - x1).unsqueeze(-1).expand_as(dw) - ph = (y2 - y1).unsqueeze(-1).expand_as(dh) - - dx_width = pw * dx - dy_height = ph * dy - - max_ratio = np.abs(np.log(wh_ratio_clip)) - if add_ctr_clamp: - dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp) - dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp) - dw = torch.clamp(dw, max=max_ratio) - dh = torch.clamp(dh, max=max_ratio) - else: - dw = dw.clamp(min=-max_ratio, max=max_ratio) - dh = dh.clamp(min=-max_ratio, max=max_ratio) - # Use exp(network energy) to enlarge/shrink each roi - gw = pw * dw.exp() - gh = ph * dh.exp() - # Use network energy to shift the center of each roi - gx = px + dx_width - gy = py + dy_height - # Convert center-xy/width/height to top-left, bottom-right - x1 = gx - gw * 0.5 - y1 = gy - gh * 0.5 - x2 = gx + gw * 0.5 - y2 = gy + gh * 0.5 - - bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) - - if clip_border and max_shape is not None: - # clip bboxes with dynamic `min` and `max` for onnx - if torch.onnx.is_in_onnx_export(): - from mmdet.core.export import dynamic_clip_for_onnx - x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape) - bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) - return bboxes - if not isinstance(max_shape, torch.Tensor): - max_shape = x1.new_tensor(max_shape) - max_shape = max_shape[..., :2].type_as(x1) - if max_shape.ndim == 2: - assert bboxes.ndim == 3 - assert max_shape.size(0) == bboxes.size(0) - - min_xy = x1.new_tensor(0) - max_xy = torch.cat( - [max_shape] * (deltas.size(-1) // 2), - dim=-1).flip(-1).unsqueeze(-2) - bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) - bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) - - return bboxes diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/distance_point_bbox_coder.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/distance_point_bbox_coder.py deleted file mode 100644 index 9f308a8419c8ec1c483784599deaf04beae6aa7e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/distance_point_bbox_coder.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import BBOX_CODERS -from ..transforms import bbox2distance, distance2bbox -from .base_bbox_coder import BaseBBoxCoder - - -@BBOX_CODERS.register_module() -class DistancePointBBoxCoder(BaseBBoxCoder): - """Distance Point BBox coder. 
- - This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left, - right) and decode it back to the original. - - Args: - clip_border (bool, optional): Whether clip the objects outside the - border of the image. Defaults to True. - """ - - def __init__(self, clip_border=True): - super(BaseBBoxCoder, self).__init__() - self.clip_border = clip_border - - def encode(self, points, gt_bboxes, max_dis=None, eps=0.1): - """Encode bounding box to distances. - - Args: - points (Tensor): Shape (N, 2), The format is [x, y]. - gt_bboxes (Tensor): Shape (N, 4), The format is "xyxy" - max_dis (float): Upper bound of the distance. Default None. - eps (float): a small value to ensure target < max_dis, instead <=. - Default 0.1. - - Returns: - Tensor: Box transformation deltas. The shape is (N, 4). - """ - assert points.size(0) == gt_bboxes.size(0) - assert points.size(-1) == 2 - assert gt_bboxes.size(-1) == 4 - return bbox2distance(points, gt_bboxes, max_dis, eps) - - def decode(self, points, pred_bboxes, max_shape=None): - """Decode distance prediction to bounding box. - - Args: - points (Tensor): Shape (B, N, 2) or (N, 2). - pred_bboxes (Tensor): Distance from the given point to 4 - boundaries (left, top, right, bottom). Shape (B, N, 4) - or (N, 4) - max_shape (Sequence[int] or torch.Tensor or Sequence[ - Sequence[int]],optional): Maximum bounds for boxes, specifies - (H, W, C) or (H, W). If priors shape is (B, N, 4), then - the max_shape should be a Sequence[Sequence[int]], - and the length of max_shape should also be B. - Default None. - Returns: - Tensor: Boxes with shape (N, 4) or (B, N, 4) - """ - assert points.size(0) == pred_bboxes.size(0) - assert points.size(-1) == 2 - assert pred_bboxes.size(-1) == 4 - if self.clip_border is False: - max_shape = None - return distance2bbox(points, pred_bboxes, max_shape) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py deleted file mode 100644 index 7fa348b2d1868342a16c13b7a93a2d7d01007bd4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import numpy as np -import torch - -from ..builder import BBOX_CODERS -from .base_bbox_coder import BaseBBoxCoder - - -@BBOX_CODERS.register_module() -class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder): - """Legacy Delta XYWH BBox coder used in MMDet V1.x. - - Following the practice in R-CNN [1]_, this coder encodes bbox (x1, y1, x2, - y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh) - back to original bbox (x1, y1, x2, y2). - - Note: - The main difference between :class`LegacyDeltaXYWHBBoxCoder` and - :class:`DeltaXYWHBBoxCoder` is whether ``+ 1`` is used during width and - height calculation. We suggest to only use this coder when testing with - MMDet V1.x models. - - References: - .. 
[1] https://arxiv.org/abs/1311.2524 - - Args: - target_means (Sequence[float]): denormalizing means of target for - delta coordinates - target_stds (Sequence[float]): denormalizing standard deviation of - target for delta coordinates - """ - - def __init__(self, - target_means=(0., 0., 0., 0.), - target_stds=(1., 1., 1., 1.)): - super(BaseBBoxCoder, self).__init__() - self.means = target_means - self.stds = target_stds - - def encode(self, bboxes, gt_bboxes): - """Get box regression transformation deltas that can be used to - transform the ``bboxes`` into the ``gt_bboxes``. - - Args: - bboxes (torch.Tensor): source boxes, e.g., object proposals. - gt_bboxes (torch.Tensor): target of the transformation, e.g., - ground-truth boxes. - - Returns: - torch.Tensor: Box transformation deltas - """ - assert bboxes.size(0) == gt_bboxes.size(0) - assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 - encoded_bboxes = legacy_bbox2delta(bboxes, gt_bboxes, self.means, - self.stds) - return encoded_bboxes - - def decode(self, - bboxes, - pred_bboxes, - max_shape=None, - wh_ratio_clip=16 / 1000): - """Apply transformation `pred_bboxes` to `boxes`. - - Args: - boxes (torch.Tensor): Basic boxes. - pred_bboxes (torch.Tensor): Encoded boxes with shape - max_shape (tuple[int], optional): Maximum shape of boxes. - Defaults to None. - wh_ratio_clip (float, optional): The allowed ratio between - width and height. - - Returns: - torch.Tensor: Decoded boxes. - """ - assert pred_bboxes.size(0) == bboxes.size(0) - decoded_bboxes = legacy_delta2bbox(bboxes, pred_bboxes, self.means, - self.stds, max_shape, wh_ratio_clip) - - return decoded_bboxes - - -@mmcv.jit(coderize=True) -def legacy_bbox2delta(proposals, - gt, - means=(0., 0., 0., 0.), - stds=(1., 1., 1., 1.)): - """Compute deltas of proposals w.r.t. gt in the MMDet V1.x manner. - - We usually compute the deltas of x, y, w, h of proposals w.r.t ground - truth bboxes to get regression target. - This is the inverse function of `delta2bbox()` - - Args: - proposals (Tensor): Boxes to be transformed, shape (N, ..., 4) - gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4) - means (Sequence[float]): Denormalizing means for delta coordinates - stds (Sequence[float]): Denormalizing standard deviation for delta - coordinates - - Returns: - Tensor: deltas with shape (N, 4), where columns represent dx, dy, - dw, dh. - """ - assert proposals.size() == gt.size() - - proposals = proposals.float() - gt = gt.float() - px = (proposals[..., 0] + proposals[..., 2]) * 0.5 - py = (proposals[..., 1] + proposals[..., 3]) * 0.5 - pw = proposals[..., 2] - proposals[..., 0] + 1.0 - ph = proposals[..., 3] - proposals[..., 1] + 1.0 - - gx = (gt[..., 0] + gt[..., 2]) * 0.5 - gy = (gt[..., 1] + gt[..., 3]) * 0.5 - gw = gt[..., 2] - gt[..., 0] + 1.0 - gh = gt[..., 3] - gt[..., 1] + 1.0 - - dx = (gx - px) / pw - dy = (gy - py) / ph - dw = torch.log(gw / pw) - dh = torch.log(gh / ph) - deltas = torch.stack([dx, dy, dw, dh], dim=-1) - - means = deltas.new_tensor(means).unsqueeze(0) - stds = deltas.new_tensor(stds).unsqueeze(0) - deltas = deltas.sub_(means).div_(stds) - - return deltas - - -@mmcv.jit(coderize=True) -def legacy_delta2bbox(rois, - deltas, - means=(0., 0., 0., 0.), - stds=(1., 1., 1., 1.), - max_shape=None, - wh_ratio_clip=16 / 1000): - """Apply deltas to shift/scale base boxes in the MMDet V1.x manner. - - Typically the rois are anchor or proposed bounding boxes and the deltas are - network outputs used to shift/scale those boxes. 
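[Editor's note] Both the current coder above and this legacy variant share the same shift/scale idea: the network predicts (dx, dy, dw, dh) relative to a proposal, and decoding inverts that. A compact round trip in plain torch, using the standard width/height convention (no "+1") and identity means/stds, purely as an illustration of the parameterization rather than the library code:

```
import torch

def encode(proposal, gt):
    px, py = (proposal[0] + proposal[2]) / 2, (proposal[1] + proposal[3]) / 2
    pw, ph = proposal[2] - proposal[0], proposal[3] - proposal[1]
    gx, gy = (gt[0] + gt[2]) / 2, (gt[1] + gt[3]) / 2
    gw, gh = gt[2] - gt[0], gt[3] - gt[1]
    return torch.stack([(gx - px) / pw, (gy - py) / ph,
                        torch.log(gw / pw), torch.log(gh / ph)])

def decode(proposal, delta):
    px, py = (proposal[0] + proposal[2]) / 2, (proposal[1] + proposal[3]) / 2
    pw, ph = proposal[2] - proposal[0], proposal[3] - proposal[1]
    gx, gy = px + pw * delta[0], py + ph * delta[1]
    gw, gh = pw * delta[2].exp(), ph * delta[3].exp()
    return torch.stack([gx - gw / 2, gy - gh / 2, gx + gw / 2, gy + gh / 2])

proposal = torch.tensor([10., 10., 50., 30.])   # hypothetical boxes
gt = torch.tensor([12., 8., 60., 34.])
delta = encode(proposal, gt)
assert torch.allclose(decode(proposal, delta), gt, atol=1e-5)
```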
- This is the inverse function of `bbox2delta()` - - Args: - rois (Tensor): Boxes to be transformed. Has shape (N, 4) - deltas (Tensor): Encoded offsets with respect to each roi. - Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when - rois is a grid of anchors. Offset encoding follows [1]_. - means (Sequence[float]): Denormalizing means for delta coordinates - stds (Sequence[float]): Denormalizing standard deviation for delta - coordinates - max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W) - wh_ratio_clip (float): Maximum aspect ratio for boxes. - - Returns: - Tensor: Boxes with shape (N, 4), where columns represent - tl_x, tl_y, br_x, br_y. - - References: - .. [1] https://arxiv.org/abs/1311.2524 - - Example: - >>> rois = torch.Tensor([[ 0., 0., 1., 1.], - >>> [ 0., 0., 1., 1.], - >>> [ 0., 0., 1., 1.], - >>> [ 5., 5., 5., 5.]]) - >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], - >>> [ 1., 1., 1., 1.], - >>> [ 0., 0., 2., -1.], - >>> [ 0.7, -1.9, -0.5, 0.3]]) - >>> legacy_delta2bbox(rois, deltas, max_shape=(32, 32)) - tensor([[0.0000, 0.0000, 1.5000, 1.5000], - [0.0000, 0.0000, 5.2183, 5.2183], - [0.0000, 0.1321, 7.8891, 0.8679], - [5.3967, 2.4251, 6.0033, 3.7749]]) - """ - means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4) - stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4) - denorm_deltas = deltas * stds + means - dx = denorm_deltas[:, 0::4] - dy = denorm_deltas[:, 1::4] - dw = denorm_deltas[:, 2::4] - dh = denorm_deltas[:, 3::4] - max_ratio = np.abs(np.log(wh_ratio_clip)) - dw = dw.clamp(min=-max_ratio, max=max_ratio) - dh = dh.clamp(min=-max_ratio, max=max_ratio) - # Compute center of each roi - px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx) - py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy) - # Compute width/height of each roi - pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw) - ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh) - # Use exp(network energy) to enlarge/shrink each roi - gw = pw * dw.exp() - gh = ph * dh.exp() - # Use network energy to shift the center of each roi - gx = px + pw * dx - gy = py + ph * dy - # Convert center-xy/width/height to top-left, bottom-right - - # The true legacy box coder should +- 0.5 here. - # However, current implementation improves the performance when testing - # the models trained in MMDetection 1.X (~0.5 bbox AP, 0.2 mask AP) - x1 = gx - gw * 0.5 - y1 = gy - gh * 0.5 - x2 = gx + gw * 0.5 - y2 = gy + gh * 0.5 - if max_shape is not None: - x1 = x1.clamp(min=0, max=max_shape[1] - 1) - y1 = y1.clamp(min=0, max=max_shape[0] - 1) - x2 = x2.clamp(min=0, max=max_shape[1] - 1) - y2 = y2.clamp(min=0, max=max_shape[0] - 1) - bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas) - return bboxes diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/pseudo_bbox_coder.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/pseudo_bbox_coder.py deleted file mode 100644 index fe71f369cf18dc06ce2a81c9d23f32d7e9d93449..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/pseudo_bbox_coder.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
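[Editor's note] Before moving on to the pseudo coder, the practical difference between the legacy coder above and the current DeltaXYWHBBoxCoder is worth pinning down: it is the "+1" in the width/height computation. A two-line illustration with hypothetical coordinates:

```
x1, y1, x2, y2 = 10.0, 10.0, 19.0, 29.0
w_current, h_current = x2 - x1, y2 - y1            #  9.0, 19.0  (current coder)
w_legacy,  h_legacy  = x2 - x1 + 1, y2 - y1 + 1    # 10.0, 20.0  (MMDet V1.x coder)
print(w_current, h_current, w_legacy, h_legacy)
```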
-from ..builder import BBOX_CODERS -from .base_bbox_coder import BaseBBoxCoder - - -@BBOX_CODERS.register_module() -class PseudoBBoxCoder(BaseBBoxCoder): - """Pseudo bounding box coder.""" - - def __init__(self, **kwargs): - super(BaseBBoxCoder, self).__init__(**kwargs) - - def encode(self, bboxes, gt_bboxes): - """torch.Tensor: return the given ``bboxes``""" - return gt_bboxes - - def decode(self, bboxes, pred_bboxes): - """torch.Tensor: return the given ``pred_bboxes``""" - return pred_bboxes diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/tblr_bbox_coder.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/tblr_bbox_coder.py deleted file mode 100644 index cb4206636f5b3704b465c5507d1f25492f11cf5c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/tblr_bbox_coder.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import torch - -from ..builder import BBOX_CODERS -from .base_bbox_coder import BaseBBoxCoder - - -@BBOX_CODERS.register_module() -class TBLRBBoxCoder(BaseBBoxCoder): - """TBLR BBox coder. - - Following the practice in `FSAF `_, - this coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left, - right) and decode it back to the original. - - Args: - normalizer (list | float): Normalization factor to be - divided with when coding the coordinates. If it is a list, it should - have length of 4 indicating normalization factor in tblr dims. - Otherwise it is a unified float factor for all dims. Default: 4.0 - clip_border (bool, optional): Whether clip the objects outside the - border of the image. Defaults to True. - """ - - def __init__(self, normalizer=4.0, clip_border=True): - super(BaseBBoxCoder, self).__init__() - self.normalizer = normalizer - self.clip_border = clip_border - - def encode(self, bboxes, gt_bboxes): - """Get box regression transformation deltas that can be used to - transform the ``bboxes`` into the ``gt_bboxes`` in the (top, left, - bottom, right) order. - - Args: - bboxes (torch.Tensor): source boxes, e.g., object proposals. - gt_bboxes (torch.Tensor): target of the transformation, e.g., - ground truth boxes. - - Returns: - torch.Tensor: Box transformation deltas - """ - assert bboxes.size(0) == gt_bboxes.size(0) - assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 - encoded_bboxes = bboxes2tblr( - bboxes, gt_bboxes, normalizer=self.normalizer) - return encoded_bboxes - - def decode(self, bboxes, pred_bboxes, max_shape=None): - """Apply transformation `pred_bboxes` to `boxes`. - - Args: - bboxes (torch.Tensor): Basic boxes.Shape (B, N, 4) or (N, 4) - pred_bboxes (torch.Tensor): Encoded boxes with shape - (B, N, 4) or (N, 4) - max_shape (Sequence[int] or torch.Tensor or Sequence[ - Sequence[int]],optional): Maximum bounds for boxes, specifies - (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then - the max_shape should be a Sequence[Sequence[int]] - and the length of max_shape should also be B. - - Returns: - torch.Tensor: Decoded boxes. - """ - decoded_bboxes = tblr2bboxes( - bboxes, - pred_bboxes, - normalizer=self.normalizer, - max_shape=max_shape, - clip_border=self.clip_border) - - return decoded_bboxes - - -@mmcv.jit(coderize=True) -def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True): - """Encode ground truth boxes to tblr coordinate. - - It first convert the gt coordinate to tblr format, - (top, bottom, left, right), relative to prior box centers. 
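[Editor's note] The tblr encoding just described, including the width/height normalization and the scalar normalizer covered next, can be reproduced in a few lines. A plain-torch sketch with hypothetical boxes, not the library function:

```
import torch

prior = torch.tensor([10., 10., 30., 50.])   # x1, y1, x2, y2
gt = torch.tensor([12., 16., 34., 44.])
normalizer = 4.0

cx, cy = (prior[0] + prior[2]) / 2, (prior[1] + prior[3]) / 2   # 20, 30
w, h = prior[2] - prior[0], prior[3] - prior[1]                 # 20, 40

top = (cy - gt[1]) / h          # (30 - 16) / 40 = 0.35
bottom = (gt[3] - cy) / h       # (44 - 30) / 40 = 0.35
left = (cx - gt[0]) / w         # (20 - 12) / 20 = 0.40
right = (gt[2] - cx) / w        # (34 - 20) / 20 = 0.70

tblr = torch.stack([top, bottom, left, right]) / normalizer
print(tblr)                     # tensor([0.0875, 0.0875, 0.1000, 0.1750])
```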
- The tblr coordinate may be normalized by the side length of prior bboxes - if `normalize_by_wh` is specified as True, and it is then normalized by - the `normalizer` factor. - - Args: - priors (Tensor): Prior boxes in point form - Shape: (num_proposals,4). - gts (Tensor): Coords of ground truth for each prior in point-form - Shape: (num_proposals, 4). - normalizer (Sequence[float] | float): normalization parameter of - encoded boxes. If it is a list, it has to have length = 4. - Default: 4.0 - normalize_by_wh (bool): Whether to normalize tblr coordinate by the - side length (wh) of prior bboxes. - - Return: - encoded boxes (Tensor), Shape: (num_proposals, 4) - """ - - # dist b/t match center and prior's center - if not isinstance(normalizer, float): - normalizer = torch.tensor(normalizer, device=priors.device) - assert len(normalizer) == 4, 'Normalizer must have length = 4' - assert priors.size(0) == gts.size(0) - prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2 - xmin, ymin, xmax, ymax = gts.split(1, dim=1) - top = prior_centers[:, 1].unsqueeze(1) - ymin - bottom = ymax - prior_centers[:, 1].unsqueeze(1) - left = prior_centers[:, 0].unsqueeze(1) - xmin - right = xmax - prior_centers[:, 0].unsqueeze(1) - loc = torch.cat((top, bottom, left, right), dim=1) - if normalize_by_wh: - # Normalize tblr by anchor width and height - wh = priors[:, 2:4] - priors[:, 0:2] - w, h = torch.split(wh, 1, dim=1) - loc[:, :2] /= h # tb is normalized by h - loc[:, 2:] /= w # lr is normalized by w - # Normalize tblr by the given normalization factor - return loc / normalizer - - -@mmcv.jit(coderize=True) -def tblr2bboxes(priors, - tblr, - normalizer=4.0, - normalize_by_wh=True, - max_shape=None, - clip_border=True): - """Decode tblr outputs to prediction boxes. - - The process includes 3 steps: 1) De-normalize tblr coordinates by - multiplying it with `normalizer`; 2) De-normalize tblr coordinates by the - prior bbox width and height if `normalize_by_wh` is `True`; 3) Convert - tblr (top, bottom, left, right) pair relative to the center of priors back - to (xmin, ymin, xmax, ymax) coordinate. - - Args: - priors (Tensor): Prior boxes in point form (x0, y0, x1, y1) - Shape: (N,4) or (B, N, 4). - tblr (Tensor): Coords of network output in tblr form - Shape: (N, 4) or (B, N, 4). - normalizer (Sequence[float] | float): Normalization parameter of - encoded boxes. By list, it represents the normalization factors at - tblr dims. By float, it is the unified normalization factor at all - dims. Default: 4.0 - normalize_by_wh (bool): Whether the tblr coordinates have been - normalized by the side length (wh) of prior bboxes. - max_shape (Sequence[int] or torch.Tensor or Sequence[ - Sequence[int]],optional): Maximum bounds for boxes, specifies - (H, W, C) or (H, W). If priors shape is (B, N, 4), then - the max_shape should be a Sequence[Sequence[int]] - and the length of max_shape should also be B. - clip_border (bool, optional): Whether clip the objects outside the - border of the image. Defaults to True. 
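[Editor's note] The three decode steps listed above (de-normalize, re-scale by the prior's width/height, convert back to corners) invert the encoding exactly. A small round-trip check in plain torch, again only a sketch under the same hypothetical boxes as before:

```
import torch

prior = torch.tensor([10., 10., 30., 50.])
gt = torch.tensor([12., 16., 34., 44.])
normalizer = 4.0

cx, cy = (prior[0] + prior[2]) / 2, (prior[1] + prior[3]) / 2
w, h = prior[2] - prior[0], prior[3] - prior[1]

# encode (as in the previous sketch)
tblr = torch.stack([(cy - gt[1]) / h, (gt[3] - cy) / h,
                    (cx - gt[0]) / w, (gt[2] - cx) / w]) / normalizer

# decode: undo the normalizer, undo the wh scaling, rebuild the corners
t, b, l, r = tblr * normalizer * torch.stack([h, h, w, w])
decoded = torch.stack([cx - l, cy - t, cx + r, cy + b])
assert torch.allclose(decoded, gt)
```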
- - Return: - encoded boxes (Tensor): Boxes with shape (N, 4) or (B, N, 4) - """ - if not isinstance(normalizer, float): - normalizer = torch.tensor(normalizer, device=priors.device) - assert len(normalizer) == 4, 'Normalizer must have length = 4' - assert priors.size(0) == tblr.size(0) - if priors.ndim == 3: - assert priors.size(1) == tblr.size(1) - - loc_decode = tblr * normalizer - prior_centers = (priors[..., 0:2] + priors[..., 2:4]) / 2 - if normalize_by_wh: - wh = priors[..., 2:4] - priors[..., 0:2] - w, h = torch.split(wh, 1, dim=-1) - # Inplace operation with slice would failed for exporting to ONNX - th = h * loc_decode[..., :2] # tb - tw = w * loc_decode[..., 2:] # lr - loc_decode = torch.cat([th, tw], dim=-1) - # Cannot be exported using onnx when loc_decode.split(1, dim=-1) - top, bottom, left, right = loc_decode.split((1, 1, 1, 1), dim=-1) - xmin = prior_centers[..., 0].unsqueeze(-1) - left - xmax = prior_centers[..., 0].unsqueeze(-1) + right - ymin = prior_centers[..., 1].unsqueeze(-1) - top - ymax = prior_centers[..., 1].unsqueeze(-1) + bottom - - bboxes = torch.cat((xmin, ymin, xmax, ymax), dim=-1) - - if clip_border and max_shape is not None: - # clip bboxes with dynamic `min` and `max` for onnx - if torch.onnx.is_in_onnx_export(): - from mmdet.core.export import dynamic_clip_for_onnx - xmin, ymin, xmax, ymax = dynamic_clip_for_onnx( - xmin, ymin, xmax, ymax, max_shape) - bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1) - return bboxes - if not isinstance(max_shape, torch.Tensor): - max_shape = priors.new_tensor(max_shape) - max_shape = max_shape[..., :2].type_as(priors) - if max_shape.ndim == 2: - assert bboxes.ndim == 3 - assert max_shape.size(0) == bboxes.size(0) - - min_xy = priors.new_tensor(0) - max_xy = torch.cat([max_shape, max_shape], - dim=-1).flip(-1).unsqueeze(-2) - bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) - bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) - - return bboxes diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/yolo_bbox_coder.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/yolo_bbox_coder.py deleted file mode 100644 index 2852eca7541769cc2dff872665bc1d54a5b81b5a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/coder/yolo_bbox_coder.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import torch - -from ..builder import BBOX_CODERS -from .base_bbox_coder import BaseBBoxCoder - - -@BBOX_CODERS.register_module() -class YOLOBBoxCoder(BaseBBoxCoder): - """YOLO BBox coder. - - Following `YOLO `_, this coder divide - image into grids, and encode bbox (x1, y1, x2, y2) into (cx, cy, dw, dh). - cx, cy in [0., 1.], denotes relative center position w.r.t the center of - bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`. - - Args: - eps (float): Min value of cx, cy when encoding. - """ - - def __init__(self, eps=1e-6): - super(BaseBBoxCoder, self).__init__() - self.eps = eps - - @mmcv.jit(coderize=True) - def encode(self, bboxes, gt_bboxes, stride): - """Get box regression transformation deltas that can be used to - transform the ``bboxes`` into the ``gt_bboxes``. - - Args: - bboxes (torch.Tensor): Source boxes, e.g., anchors. - gt_bboxes (torch.Tensor): Target of the transformation, e.g., - ground-truth boxes. - stride (torch.Tensor | int): Stride of bboxes. 
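[Editor's note] The YOLO-style target computation performed by this encode method (center offsets in units of the stride, shifted into [0, 1], plus log width/height ratios) can be reproduced directly. A sketch with a hypothetical anchor and ground-truth box:

```
import torch

anchor = torch.tensor([96., 64., 128., 96.])   # hypothetical anchor, stride 32
gt = torch.tensor([100., 70., 140., 110.])
stride = 32.0
eps = 1e-6

ax, ay = (anchor[0] + anchor[2]) / 2, (anchor[1] + anchor[3]) / 2   # 112, 80
aw, ah = anchor[2] - anchor[0], anchor[3] - anchor[1]               # 32, 32
gx, gy = (gt[0] + gt[2]) / 2, (gt[1] + gt[3]) / 2                   # 120, 90
gw, gh = gt[2] - gt[0], gt[3] - gt[1]                               # 40, 40

cx = ((gx - ax) / stride + 0.5).clamp(eps, 1 - eps)   # 0.75
cy = ((gy - ay) / stride + 0.5).clamp(eps, 1 - eps)   # 0.8125
dw = torch.log((gw / aw).clamp(min=eps))              # log(1.25)
dh = torch.log((gh / ah).clamp(min=eps))              # log(1.25)
print(torch.stack([cx, cy, dw, dh]))
```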
- - Returns: - torch.Tensor: Box transformation deltas - """ - - assert bboxes.size(0) == gt_bboxes.size(0) - assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 - x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5 - y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5 - w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0] - h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1] - x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5 - y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5 - w = bboxes[..., 2] - bboxes[..., 0] - h = bboxes[..., 3] - bboxes[..., 1] - w_target = torch.log((w_gt / w).clamp(min=self.eps)) - h_target = torch.log((h_gt / h).clamp(min=self.eps)) - x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp( - self.eps, 1 - self.eps) - y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp( - self.eps, 1 - self.eps) - encoded_bboxes = torch.stack( - [x_center_target, y_center_target, w_target, h_target], dim=-1) - return encoded_bboxes - - @mmcv.jit(coderize=True) - def decode(self, bboxes, pred_bboxes, stride): - """Apply transformation `pred_bboxes` to `boxes`. - - Args: - boxes (torch.Tensor): Basic boxes, e.g. anchors. - pred_bboxes (torch.Tensor): Encoded boxes with shape - stride (torch.Tensor | int): Strides of bboxes. - - Returns: - torch.Tensor: Decoded boxes. - """ - assert pred_bboxes.size(-1) == bboxes.size(-1) == 4 - xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + ( - pred_bboxes[..., :2] - 0.5) * stride - whs = (bboxes[..., 2:] - - bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp() - decoded_bboxes = torch.stack( - (xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] - - whs[..., 1], xy_centers[..., 0] + whs[..., 0], - xy_centers[..., 1] + whs[..., 1]), - dim=-1) - return decoded_bboxes diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/demodata.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/demodata.py deleted file mode 100644 index eb24b34b640d3f333c1ec568f96ec795b560ab86..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/demodata.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch - -from mmdet.utils.util_random import ensure_rng - - -def random_boxes(num=1, scale=1, rng=None): - """Simple version of ``kwimage.Boxes.random`` - - Returns: - Tensor: shape (n, 4) in x1, y1, x2, y2 format. 
- - References: - https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390 - - Example: - >>> num = 3 - >>> scale = 512 - >>> rng = 0 - >>> boxes = random_boxes(num, scale, rng) - >>> print(boxes) - tensor([[280.9925, 278.9802, 308.6148, 366.1769], - [216.9113, 330.6978, 224.0446, 456.5878], - [405.3632, 196.3221, 493.3953, 270.7942]]) - """ - rng = ensure_rng(rng) - - tlbr = rng.rand(num, 4).astype(np.float32) - - tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2]) - tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3]) - br_x = np.maximum(tlbr[:, 0], tlbr[:, 2]) - br_y = np.maximum(tlbr[:, 1], tlbr[:, 3]) - - tlbr[:, 0] = tl_x * scale - tlbr[:, 1] = tl_y * scale - tlbr[:, 2] = br_x * scale - tlbr[:, 3] = br_y * scale - - boxes = torch.from_numpy(tlbr) - return boxes diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/iou_calculators/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/iou_calculators/__init__.py deleted file mode 100644 index 04ba925b448d8e4c99ac1434a7d7b909ace1d65f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/iou_calculators/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .builder import build_iou_calculator -from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps - -__all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps'] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/iou_calculators/builder.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/iou_calculators/builder.py deleted file mode 100644 index 378ee269f3616d40e6687ad1a2d27ad5234e1784..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/iou_calculators/builder.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmcv.utils import Registry, build_from_cfg - -IOU_CALCULATORS = Registry('IoU calculator') - - -def build_iou_calculator(cfg, default_args=None): - """Builder of IoU calculator.""" - return build_from_cfg(cfg, IOU_CALCULATORS, default_args) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/iou_calculators/iou2d_calculator.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/iou_calculators/iou2d_calculator.py deleted file mode 100644 index b71a5557ea129aaf72e39305524236e4419c3327..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/iou_calculators/iou2d_calculator.py +++ /dev/null @@ -1,260 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from .builder import IOU_CALCULATORS - - -def cast_tensor_type(x, scale=1., dtype=None): - if dtype == 'fp16': - # scale is for preventing overflows - x = (x / scale).half() - return x - - -def fp16_clamp(x, min=None, max=None): - if not x.is_cuda and x.dtype == torch.float16: - # clamp for cpu float16, tensor fp16 has no clamp implementation - return x.float().clamp(min, max).half() - - return x.clamp(min, max) - - -@IOU_CALCULATORS.register_module() -class BboxOverlaps2D: - """2D Overlaps (e.g. IoUs, GIoUs) Calculator.""" - - def __init__(self, scale=1., dtype=None): - self.scale = scale - self.dtype = dtype - - def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False): - """Calculate IoU between 2D bboxes. - - Args: - bboxes1 (Tensor): bboxes have shape (m, 4) in - format, or shape (m, 5) in format. - bboxes2 (Tensor): bboxes have shape (n, 4) in - format, shape (n, 5) in format, or be - empty. 
- mode (str): "iou" (intersection over union), "iof" (intersection - over foreground), or "giou" (generalized intersection over - union). - is_aligned (bool, optional): If True, then m and n must be equal. - Default False. - - Returns: - Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,) - """ - assert bboxes1.size(-1) in [0, 4, 5] - assert bboxes2.size(-1) in [0, 4, 5] - if bboxes2.size(-1) == 5: - bboxes2 = bboxes2[..., :4] - if bboxes1.size(-1) == 5: - bboxes1 = bboxes1[..., :4] - - if self.dtype == 'fp16': - # change tensor type to save cpu and cuda memory and keep speed - bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype) - bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype) - overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned) - if not overlaps.is_cuda and overlaps.dtype == torch.float16: - # resume cpu float32 - overlaps = overlaps.float() - return overlaps - - return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned) - - def __repr__(self): - """str: a string describing the module""" - repr_str = self.__class__.__name__ + f'(' \ - f'scale={self.scale}, dtype={self.dtype})' - return repr_str - - -def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6): - """Calculate overlap between two set of bboxes. - - FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889 - Note: - Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou', - there are some new generated variable when calculating IOU - using bbox_overlaps function: - - 1) is_aligned is False - area1: M x 1 - area2: N x 1 - lt: M x N x 2 - rb: M x N x 2 - wh: M x N x 2 - overlap: M x N x 1 - union: M x N x 1 - ious: M x N x 1 - - Total memory: - S = (9 x N x M + N + M) * 4 Byte, - - When using FP16, we can reduce: - R = (9 x N x M + N + M) * 4 / 2 Byte - R large than (N + M) * 4 * 2 is always true when N and M >= 1. - Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2, - N + 1 < 3 * N, when N or M is 1. - - Given M = 40 (ground truth), N = 400000 (three anchor boxes - in per grid, FPN, R-CNNs), - R = 275 MB (one times) - - A special case (dense detection), M = 512 (ground truth), - R = 3516 MB = 3.43 GB - - When the batch size is B, reduce: - B x R - - Therefore, CUDA memory runs out frequently. - - Experiments on GeForce RTX 2080Ti (11019 MiB): - - | dtype | M | N | Use | Real | Ideal | - |:----:|:----:|:----:|:----:|:----:|:----:| - | FP32 | 512 | 400000 | 8020 MiB | -- | -- | - | FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB | - | FP32 | 40 | 400000 | 1540 MiB | -- | -- | - | FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB | - - 2) is_aligned is True - area1: N x 1 - area2: N x 1 - lt: N x 2 - rb: N x 2 - wh: N x 2 - overlap: N x 1 - union: N x 1 - ious: N x 1 - - Total memory: - S = 11 x N * 4 Byte - - When using FP16, we can reduce: - R = 11 x N * 4 / 2 Byte - - So do the 'giou' (large than 'iou'). - - Time-wise, FP16 is generally faster than FP32. - - When gpu_assign_thr is not -1, it takes more time on cpu - but not reduce memory. - There, we can reduce half the memory and keep the speed. - - If ``is_aligned`` is ``False``, then calculate the overlaps between each - bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned - pair of bboxes1 and bboxes2. - - Args: - bboxes1 (Tensor): shape (B, m, 4) in format or empty. - bboxes2 (Tensor): shape (B, n, 4) in format or empty. - B indicates the batch dim, in shape (B1, B2, ..., Bn). - If ``is_aligned`` is ``True``, then m and n must be equal. 
- mode (str): "iou" (intersection over union), "iof" (intersection over - foreground) or "giou" (generalized intersection over union). - Default "iou". - is_aligned (bool, optional): If True, then m and n must be equal. - Default False. - eps (float, optional): A value added to the denominator for numerical - stability. Default 1e-6. - - Returns: - Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) - - Example: - >>> bboxes1 = torch.FloatTensor([ - >>> [0, 0, 10, 10], - >>> [10, 10, 20, 20], - >>> [32, 32, 38, 42], - >>> ]) - >>> bboxes2 = torch.FloatTensor([ - >>> [0, 0, 10, 20], - >>> [0, 10, 10, 19], - >>> [10, 10, 20, 20], - >>> ]) - >>> overlaps = bbox_overlaps(bboxes1, bboxes2) - >>> assert overlaps.shape == (3, 3) - >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True) - >>> assert overlaps.shape == (3, ) - - Example: - >>> empty = torch.empty(0, 4) - >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]]) - >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) - >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) - >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0) - """ - - assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}' - # Either the boxes are empty or the length of boxes' last dimension is 4 - assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0) - assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0) - - # Batch dim must be the same - # Batch dim: (B1, B2, ... Bn) - assert bboxes1.shape[:-2] == bboxes2.shape[:-2] - batch_shape = bboxes1.shape[:-2] - - rows = bboxes1.size(-2) - cols = bboxes2.size(-2) - if is_aligned: - assert rows == cols - - if rows * cols == 0: - if is_aligned: - return bboxes1.new(batch_shape + (rows, )) - else: - return bboxes1.new(batch_shape + (rows, cols)) - - area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * ( - bboxes1[..., 3] - bboxes1[..., 1]) - area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * ( - bboxes2[..., 3] - bboxes2[..., 1]) - - if is_aligned: - lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2] - rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2] - - wh = fp16_clamp(rb - lt, min=0) - overlap = wh[..., 0] * wh[..., 1] - - if mode in ['iou', 'giou']: - union = area1 + area2 - overlap - else: - union = area1 - if mode == 'giou': - enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2]) - enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:]) - else: - lt = torch.max(bboxes1[..., :, None, :2], - bboxes2[..., None, :, :2]) # [B, rows, cols, 2] - rb = torch.min(bboxes1[..., :, None, 2:], - bboxes2[..., None, :, 2:]) # [B, rows, cols, 2] - - wh = fp16_clamp(rb - lt, min=0) - overlap = wh[..., 0] * wh[..., 1] - - if mode in ['iou', 'giou']: - union = area1[..., None] + area2[..., None, :] - overlap - else: - union = area1[..., None] - if mode == 'giou': - enclosed_lt = torch.min(bboxes1[..., :, None, :2], - bboxes2[..., None, :, :2]) - enclosed_rb = torch.max(bboxes1[..., :, None, 2:], - bboxes2[..., None, :, 2:]) - - eps = union.new_tensor([eps]) - union = torch.max(union, eps) - ious = overlap / union - if mode in ['iou', 'iof']: - return ious - # calculate gious - enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0) - enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1] - enclose_area = torch.max(enclose_area, eps) - gious = ious - (enclose_area - union) / enclose_area - return gious diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/match_costs/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/match_costs/__init__.py 
deleted file mode 100644 index 1b636795082cf7b731e3125f7ae36b51e4bfb5a3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/match_costs/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .builder import build_match_cost -from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost, - DiceCost, FocalLossCost, IoUCost) - -__all__ = [ - 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost', - 'FocalLossCost', 'DiceCost', 'CrossEntropyLossCost' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/match_costs/builder.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/match_costs/builder.py deleted file mode 100644 index ea086adff23c5adbc35d448d5a93daf1a04bdc53..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/match_costs/builder.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmcv.utils import Registry, build_from_cfg - -MATCH_COST = Registry('Match Cost') - - -def build_match_cost(cfg, default_args=None): - """Builder of IoU calculator.""" - return build_from_cfg(cfg, MATCH_COST, default_args) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/match_costs/match_cost.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/match_costs/match_cost.py deleted file mode 100644 index 4342b024588663b602d7dc1b82a1e708cc8aea91..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/match_costs/match_cost.py +++ /dev/null @@ -1,359 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn.functional as F - -from mmdet.core.bbox.iou_calculators import bbox_overlaps -from mmdet.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh -from .builder import MATCH_COST - - -@MATCH_COST.register_module() -class BBoxL1Cost: - """BBoxL1Cost. - - Args: - weight (int | float, optional): loss_weight - box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN - - Examples: - >>> from mmdet.core.bbox.match_costs.match_cost import BBoxL1Cost - >>> import torch - >>> self = BBoxL1Cost() - >>> bbox_pred = torch.rand(1, 4) - >>> gt_bboxes= torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) - >>> factor = torch.tensor([10, 8, 10, 8]) - >>> self(bbox_pred, gt_bboxes, factor) - tensor([[1.6172, 1.6422]]) - """ - - def __init__(self, weight=1., box_format='xyxy'): - self.weight = weight - assert box_format in ['xyxy', 'xywh'] - self.box_format = box_format - - def __call__(self, bbox_pred, gt_bboxes): - """ - Args: - bbox_pred (Tensor): Predicted boxes with normalized coordinates - (cx, cy, w, h), which are all in range [0, 1]. Shape - (num_query, 4). - gt_bboxes (Tensor): Ground truth boxes with normalized - coordinates (x1, y1, x2, y2). Shape (num_gt, 4). - - Returns: - torch.Tensor: bbox_cost value with weight - """ - if self.box_format == 'xywh': - gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes) - elif self.box_format == 'xyxy': - bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred) - bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1) - return bbox_cost * self.weight - - -@MATCH_COST.register_module() -class FocalLossCost: - """FocalLossCost. - - Args: - weight (int | float, optional): loss_weight - alpha (int | float, optional): focal_loss alpha - gamma (int | float, optional): focal_loss gamma - eps (float, optional): default 1e-12 - binary_input (bool, optional): Whether the input is binary, - default False. 
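[Editor's note] The focal matching cost computed by this class has a short closed form: for sigmoid logits, entry [i, j] of the cost matrix is the positive focal term minus the negative focal term of query i at gt j's class. A minimal re-derivation with hypothetical shapes and values:

```
import torch

alpha, gamma, eps = 0.25, 2.0, 1e-12
cls_logits = torch.randn(5, 3)            # 5 queries, 3 classes
gt_labels = torch.tensor([0, 2])          # 2 ground-truth boxes

p = cls_logits.sigmoid()
neg_cost = -(1 - p + eps).log() * (1 - alpha) * p.pow(gamma)
pos_cost = -(p + eps).log() * alpha * (1 - p).pow(gamma)
cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels]   # (num_query, num_gt)
print(cost.shape)                                        # torch.Size([5, 2])
```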
- - Examples: - >>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost - >>> import torch - >>> self = FocalLossCost() - >>> cls_pred = torch.rand(4, 3) - >>> gt_labels = torch.tensor([0, 1, 2]) - >>> factor = torch.tensor([10, 8, 10, 8]) - >>> self(cls_pred, gt_labels) - tensor([[-0.3236, -0.3364, -0.2699], - [-0.3439, -0.3209, -0.4807], - [-0.4099, -0.3795, -0.2929], - [-0.1950, -0.1207, -0.2626]]) - """ - - def __init__(self, - weight=1., - alpha=0.25, - gamma=2, - eps=1e-12, - binary_input=False): - self.weight = weight - self.alpha = alpha - self.gamma = gamma - self.eps = eps - self.binary_input = binary_input - - def _focal_loss_cost(self, cls_pred, gt_labels): - """ - Args: - cls_pred (Tensor): Predicted classification logits, shape - (num_query, num_class). - gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). - - Returns: - torch.Tensor: cls_cost value with weight - """ - cls_pred = cls_pred.sigmoid() - neg_cost = -(1 - cls_pred + self.eps).log() * ( - 1 - self.alpha) * cls_pred.pow(self.gamma) - pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( - 1 - cls_pred).pow(self.gamma) - - cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] - return cls_cost * self.weight - - def _mask_focal_loss_cost(self, cls_pred, gt_labels): - """ - Args: - cls_pred (Tensor): Predicted classfication logits - in shape (num_query, d1, ..., dn), dtype=torch.float32. - gt_labels (Tensor): Ground truth in shape (num_gt, d1, ..., dn), - dtype=torch.long. Labels should be binary. - - Returns: - Tensor: Focal cost matrix with weight in shape\ - (num_query, num_gt). - """ - cls_pred = cls_pred.flatten(1) - gt_labels = gt_labels.flatten(1).float() - n = cls_pred.shape[1] - cls_pred = cls_pred.sigmoid() - neg_cost = -(1 - cls_pred + self.eps).log() * ( - 1 - self.alpha) * cls_pred.pow(self.gamma) - pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( - 1 - cls_pred).pow(self.gamma) - - cls_cost = torch.einsum('nc,mc->nm', pos_cost, gt_labels) + \ - torch.einsum('nc,mc->nm', neg_cost, (1 - gt_labels)) - return cls_cost / n * self.weight - - def __call__(self, cls_pred, gt_labels): - """ - Args: - cls_pred (Tensor): Predicted classfication logits. - gt_labels (Tensor)): Labels. - - Returns: - Tensor: Focal cost matrix with weight in shape\ - (num_query, num_gt). - """ - if self.binary_input: - return self._mask_focal_loss_cost(cls_pred, gt_labels) - else: - return self._focal_loss_cost(cls_pred, gt_labels) - - -@MATCH_COST.register_module() -class ClassificationCost: - """ClsSoftmaxCost. - - Args: - weight (int | float, optional): loss_weight - - Examples: - >>> from mmdet.core.bbox.match_costs.match_cost import \ - ... ClassificationCost - >>> import torch - >>> self = ClassificationCost() - >>> cls_pred = torch.rand(4, 3) - >>> gt_labels = torch.tensor([0, 1, 2]) - >>> factor = torch.tensor([10, 8, 10, 8]) - >>> self(cls_pred, gt_labels) - tensor([[-0.3430, -0.3525, -0.3045], - [-0.3077, -0.2931, -0.3992], - [-0.3664, -0.3455, -0.2881], - [-0.3343, -0.2701, -0.3956]]) - """ - - def __init__(self, weight=1.): - self.weight = weight - - def __call__(self, cls_pred, gt_labels): - """ - Args: - cls_pred (Tensor): Predicted classification logits, shape - (num_query, num_class). - gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). - - Returns: - torch.Tensor: cls_cost value with weight - """ - # Following the official DETR repo, contrary to the loss that - # NLL is used, we approximate it in 1 - cls_score[gt_label]. 
- # The 1 is a constant that doesn't change the matching, - # so it can be omitted. - cls_score = cls_pred.softmax(-1) - cls_cost = -cls_score[:, gt_labels] - return cls_cost * self.weight - - -@MATCH_COST.register_module() -class IoUCost: - """IoUCost. - - Args: - iou_mode (str, optional): iou mode such as 'iou' | 'giou' - weight (int | float, optional): loss weight - - Examples: - >>> from mmdet.core.bbox.match_costs.match_cost import IoUCost - >>> import torch - >>> self = IoUCost() - >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]]) - >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) - >>> self(bboxes, gt_bboxes) - tensor([[-0.1250, 0.1667], - [ 0.1667, -0.5000]]) - """ - - def __init__(self, iou_mode='giou', weight=1.): - self.weight = weight - self.iou_mode = iou_mode - - def __call__(self, bboxes, gt_bboxes): - """ - Args: - bboxes (Tensor): Predicted boxes with unnormalized coordinates - (x1, y1, x2, y2). Shape (num_query, 4). - gt_bboxes (Tensor): Ground truth boxes with unnormalized - coordinates (x1, y1, x2, y2). Shape (num_gt, 4). - - Returns: - torch.Tensor: iou_cost value with weight - """ - # overlaps: [num_bboxes, num_gt] - overlaps = bbox_overlaps( - bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False) - # The 1 is a constant that doesn't change the matching, so omitted. - iou_cost = -overlaps - return iou_cost * self.weight - - -@MATCH_COST.register_module() -class DiceCost: - """Cost of mask assignments based on dice losses. - - Args: - weight (int | float, optional): loss_weight. Defaults to 1. - pred_act (bool, optional): Whether to apply sigmoid to mask_pred. - Defaults to False. - eps (float, optional): default 1e-12. - naive_dice (bool, optional): If True, use the naive dice loss - in which the power of the number in the denominator is - the first power. If Flase, use the second power that - is adopted by K-Net and SOLO. - Defaults to True. - """ - - def __init__(self, weight=1., pred_act=False, eps=1e-3, naive_dice=True): - self.weight = weight - self.pred_act = pred_act - self.eps = eps - self.naive_dice = naive_dice - - def binary_mask_dice_loss(self, mask_preds, gt_masks): - """ - Args: - mask_preds (Tensor): Mask prediction in shape (num_query, *). - gt_masks (Tensor): Ground truth in shape (num_gt, *) - store 0 or 1, 0 for negative class and 1 for - positive class. - - Returns: - Tensor: Dice cost matrix in shape (num_query, num_gt). - """ - mask_preds = mask_preds.flatten(1) - gt_masks = gt_masks.flatten(1).float() - numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks) - if self.naive_dice: - denominator = mask_preds.sum(-1)[:, None] + \ - gt_masks.sum(-1)[None, :] - else: - denominator = mask_preds.pow(2).sum(1)[:, None] + \ - gt_masks.pow(2).sum(1)[None, :] - loss = 1 - (numerator + self.eps) / (denominator + self.eps) - return loss - - def __call__(self, mask_preds, gt_masks): - """ - Args: - mask_preds (Tensor): Mask prediction logits in shape (num_query, *) - gt_masks (Tensor): Ground truth in shape (num_gt, *) - - Returns: - Tensor: Dice cost matrix with weight in shape (num_query, num_gt). - """ - if self.pred_act: - mask_preds = mask_preds.sigmoid() - dice_cost = self.binary_mask_dice_loss(mask_preds, gt_masks) - return dice_cost * self.weight - - -@MATCH_COST.register_module() -class CrossEntropyLossCost: - """CrossEntropyLossCost. - - Args: - weight (int | float, optional): loss weight. Defaults to 1. - use_sigmoid (bool, optional): Whether the prediction uses sigmoid - of softmax. Defaults to True. 
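[Editor's note] The cost modules in this file are typically summed, with per-term weights, into one (num_query, num_gt) matrix that is then solved one-to-one with the Hungarian algorithm. A schematic sketch using scipy with hypothetical weights and inputs; in mmdet this logic lives in the HungarianAssigner rather than being called directly like this:

```
import torch
from scipy.optimize import linear_sum_assignment

num_query, num_gt, num_cls = 6, 2, 3
cls_logits = torch.randn(num_query, num_cls)
bbox_pred = torch.rand(num_query, 4)               # normalized cx, cy, w, h
gt_labels = torch.tensor([1, 2])
gt_bboxes_norm = torch.rand(num_gt, 4)             # normalized cx, cy, w, h

# classification cost: negative softmax score of the gt class
cls_cost = -cls_logits.softmax(-1)[:, gt_labels]
# L1 cost between box parameterizations
reg_cost = torch.cdist(bbox_pred, gt_bboxes_norm, p=1)

cost = 1.0 * cls_cost + 5.0 * reg_cost             # weights are illustrative
row_ind, col_ind = linear_sum_assignment(cost.numpy())
print(list(zip(row_ind.tolist(), col_ind.tolist())))   # matched (query, gt) pairs
```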
- Examples: - >>> from mmdet.core.bbox.match_costs import CrossEntropyLossCost - >>> import torch - >>> bce = CrossEntropyLossCost(use_sigmoid=True) - >>> cls_pred = torch.tensor([[7.6, 1.2], [-1.3, 10]]) - >>> gt_labels = torch.tensor([[1, 1], [1, 0]]) - >>> print(bce(cls_pred, gt_labels)) - """ - - def __init__(self, weight=1., use_sigmoid=True): - assert use_sigmoid, 'use_sigmoid = False is not supported yet.' - self.weight = weight - self.use_sigmoid = use_sigmoid - - def _binary_cross_entropy(self, cls_pred, gt_labels): - """ - Args: - cls_pred (Tensor): The prediction with shape (num_query, 1, *) or - (num_query, *). - gt_labels (Tensor): The learning label of prediction with - shape (num_gt, *). - - Returns: - Tensor: Cross entropy cost matrix in shape (num_query, num_gt). - """ - cls_pred = cls_pred.flatten(1).float() - gt_labels = gt_labels.flatten(1).float() - n = cls_pred.shape[1] - pos = F.binary_cross_entropy_with_logits( - cls_pred, torch.ones_like(cls_pred), reduction='none') - neg = F.binary_cross_entropy_with_logits( - cls_pred, torch.zeros_like(cls_pred), reduction='none') - cls_cost = torch.einsum('nc,mc->nm', pos, gt_labels) + \ - torch.einsum('nc,mc->nm', neg, 1 - gt_labels) - cls_cost = cls_cost / n - - return cls_cost - - def __call__(self, cls_pred, gt_labels): - """ - Args: - cls_pred (Tensor): Predicted classification logits. - gt_labels (Tensor): Labels. - - Returns: - Tensor: Cross entropy cost matrix with weight in - shape (num_query, num_gt). - """ - if self.use_sigmoid: - cls_cost = self._binary_cross_entropy(cls_pred, gt_labels) - else: - raise NotImplementedError - - return cls_cost * self.weight diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/__init__.py deleted file mode 100644 index f58505b59dca744e489328a39fdabb02a893fb51..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .base_sampler import BaseSampler -from .combined_sampler import CombinedSampler -from .instance_balanced_pos_sampler import InstanceBalancedPosSampler -from .iou_balanced_neg_sampler import IoUBalancedNegSampler -from .mask_pseudo_sampler import MaskPseudoSampler -from .mask_sampling_result import MaskSamplingResult -from .ohem_sampler import OHEMSampler -from .pseudo_sampler import PseudoSampler -from .random_sampler import RandomSampler -from .sampling_result import SamplingResult -from .score_hlr_sampler import ScoreHLRSampler - -__all__ = [ - 'BaseSampler', 'PseudoSampler', 'RandomSampler', - 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', - 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'MaskPseudoSampler', - 'MaskSamplingResult' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/base_sampler.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/base_sampler.py deleted file mode 100644 index bd15c7c643bdf52a39fd2f35e8d26a64de813b4b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/base_sampler.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
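`CrossEntropyLossCost._binary_cross_entropy` above builds the query-to-gt cost matrix from per-element BCE terms. A self-contained sketch of that computation; the tiny 2x2 inputs reproduce the docstring example and are otherwise arbitrary.

```python
import torch
import torch.nn.functional as F

def binary_cross_entropy_cost(cls_pred, gt_labels):
    """cls_pred: (num_query, *) logits; gt_labels: (num_gt, *) binary targets."""
    cls_pred = cls_pred.flatten(1).float()
    gt_labels = gt_labels.flatten(1).float()
    n = cls_pred.shape[1]
    pos = F.binary_cross_entropy_with_logits(cls_pred, torch.ones_like(cls_pred), reduction='none')
    neg = F.binary_cross_entropy_with_logits(cls_pred, torch.zeros_like(cls_pred), reduction='none')
    cost = torch.einsum('nc,mc->nm', pos, gt_labels) + torch.einsum('nc,mc->nm', neg, 1 - gt_labels)
    return cost / n

cls_pred = torch.tensor([[7.6, 1.2], [-1.3, 10.]])
gt_labels = torch.tensor([[1., 1.], [1., 0.]])
print(binary_cross_entropy_cost(cls_pred, gt_labels))
```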
-from abc import ABCMeta, abstractmethod - -import torch - -from .sampling_result import SamplingResult - - -class BaseSampler(metaclass=ABCMeta): - """Base class of samplers.""" - - def __init__(self, - num, - pos_fraction, - neg_pos_ub=-1, - add_gt_as_proposals=True, - **kwargs): - self.num = num - self.pos_fraction = pos_fraction - self.neg_pos_ub = neg_pos_ub - self.add_gt_as_proposals = add_gt_as_proposals - self.pos_sampler = self - self.neg_sampler = self - - @abstractmethod - def _sample_pos(self, assign_result, num_expected, **kwargs): - """Sample positive samples.""" - pass - - @abstractmethod - def _sample_neg(self, assign_result, num_expected, **kwargs): - """Sample negative samples.""" - pass - - def sample(self, - assign_result, - bboxes, - gt_bboxes, - gt_labels=None, - **kwargs): - """Sample positive and negative bboxes. - - This is a simple implementation of bbox sampling given candidates, - assigning results and ground truth bboxes. - - Args: - assign_result (:obj:`AssignResult`): Bbox assigning results. - bboxes (Tensor): Boxes to be sampled from. - gt_bboxes (Tensor): Ground truth bboxes. - gt_labels (Tensor, optional): Class labels of ground truth bboxes. - - Returns: - :obj:`SamplingResult`: Sampling result. - - Example: - >>> from mmdet.core.bbox import RandomSampler - >>> from mmdet.core.bbox import AssignResult - >>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes - >>> rng = ensure_rng(None) - >>> assign_result = AssignResult.random(rng=rng) - >>> bboxes = random_boxes(assign_result.num_preds, rng=rng) - >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng) - >>> gt_labels = None - >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1, - >>> add_gt_as_proposals=False) - >>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels) - """ - if len(bboxes.shape) < 2: - bboxes = bboxes[None, :] - - bboxes = bboxes[:, :4] - - gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) - if self.add_gt_as_proposals and len(gt_bboxes) > 0: - if gt_labels is None: - raise ValueError( - 'gt_labels must be given when add_gt_as_proposals is True') - bboxes = torch.cat([gt_bboxes, bboxes], dim=0) - assign_result.add_gt_(gt_labels) - gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) - gt_flags = torch.cat([gt_ones, gt_flags]) - - num_expected_pos = int(self.num * self.pos_fraction) - pos_inds = self.pos_sampler._sample_pos( - assign_result, num_expected_pos, bboxes=bboxes, **kwargs) - # We found that sampled indices have duplicated items occasionally. 
- # (may be a bug of PyTorch) - pos_inds = pos_inds.unique() - num_sampled_pos = pos_inds.numel() - num_expected_neg = self.num - num_sampled_pos - if self.neg_pos_ub >= 0: - _pos = max(1, num_sampled_pos) - neg_upper_bound = int(self.neg_pos_ub * _pos) - if num_expected_neg > neg_upper_bound: - num_expected_neg = neg_upper_bound - neg_inds = self.neg_sampler._sample_neg( - assign_result, num_expected_neg, bboxes=bboxes, **kwargs) - neg_inds = neg_inds.unique() - - sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, - assign_result, gt_flags) - return sampling_result diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/combined_sampler.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/combined_sampler.py deleted file mode 100644 index 4f6d86ff26e1fbcecb31a671bf18a40e362feb57..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/combined_sampler.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import BBOX_SAMPLERS, build_sampler -from .base_sampler import BaseSampler - - -@BBOX_SAMPLERS.register_module() -class CombinedSampler(BaseSampler): - """A sampler that combines positive sampler and negative sampler.""" - - def __init__(self, pos_sampler, neg_sampler, **kwargs): - super(CombinedSampler, self).__init__(**kwargs) - self.pos_sampler = build_sampler(pos_sampler, **kwargs) - self.neg_sampler = build_sampler(neg_sampler, **kwargs) - - def _sample_pos(self, **kwargs): - """Sample positive samples.""" - raise NotImplementedError - - def _sample_neg(self, **kwargs): - """Sample negative samples.""" - raise NotImplementedError diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py deleted file mode 100644 index 5e0d9cc0e0a2dcd687d23c2f08c94fe4bf127d3a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch - -from ..builder import BBOX_SAMPLERS -from .random_sampler import RandomSampler - - -@BBOX_SAMPLERS.register_module() -class InstanceBalancedPosSampler(RandomSampler): - """Instance balanced sampler that samples equal number of positive samples - for each instance.""" - - def _sample_pos(self, assign_result, num_expected, **kwargs): - """Sample positive boxes. - - Args: - assign_result (:obj:`AssignResult`): The assigned results of boxes. - num_expected (int): The number of expected positive samples - - Returns: - Tensor or ndarray: sampled indices. 
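The positive/negative budget in `BaseSampler.sample` above is simple arithmetic; a toy walk-through with hypothetical numbers (256 RoIs, 25% positive fraction, only 20 positives available, `neg_pos_ub=3`):

```python
# Hypothetical configuration, just to illustrate the budget computation.
num, pos_fraction, neg_pos_ub = 256, 0.25, 3
num_expected_pos = int(num * pos_fraction)        # 64 positives requested
num_sampled_pos = 20                              # but only 20 exist after sampling
num_expected_neg = num - num_sampled_pos          # 236 negatives would fill the batch
if neg_pos_ub >= 0:
    neg_upper_bound = int(neg_pos_ub * max(1, num_sampled_pos))  # cap at 3x positives = 60
    num_expected_neg = min(num_expected_neg, neg_upper_bound)
print(num_expected_pos, num_expected_neg)         # 64 60
```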
- """ - pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) - if pos_inds.numel() != 0: - pos_inds = pos_inds.squeeze(1) - if pos_inds.numel() <= num_expected: - return pos_inds - else: - unique_gt_inds = assign_result.gt_inds[pos_inds].unique() - num_gts = len(unique_gt_inds) - num_per_gt = int(round(num_expected / float(num_gts)) + 1) - sampled_inds = [] - for i in unique_gt_inds: - inds = torch.nonzero( - assign_result.gt_inds == i.item(), as_tuple=False) - if inds.numel() != 0: - inds = inds.squeeze(1) - else: - continue - if len(inds) > num_per_gt: - inds = self.random_choice(inds, num_per_gt) - sampled_inds.append(inds) - sampled_inds = torch.cat(sampled_inds) - if len(sampled_inds) < num_expected: - num_extra = num_expected - len(sampled_inds) - extra_inds = np.array( - list(set(pos_inds.cpu()) - set(sampled_inds.cpu()))) - if len(extra_inds) > num_extra: - extra_inds = self.random_choice(extra_inds, num_extra) - extra_inds = torch.from_numpy(extra_inds).to( - assign_result.gt_inds.device).long() - sampled_inds = torch.cat([sampled_inds, extra_inds]) - elif len(sampled_inds) > num_expected: - sampled_inds = self.random_choice(sampled_inds, num_expected) - return sampled_inds diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py deleted file mode 100644 index 56e2874a47566b740899b0cdc3f311c02f83ad50..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch - -from ..builder import BBOX_SAMPLERS -from .random_sampler import RandomSampler - - -@BBOX_SAMPLERS.register_module() -class IoUBalancedNegSampler(RandomSampler): - """IoU Balanced Sampling. - - arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) - - Sampling proposals according to their IoU. `floor_fraction` of needed RoIs - are sampled from proposals whose IoU are lower than `floor_thr` randomly. - The others are sampled from proposals whose IoU are higher than - `floor_thr`. These proposals are sampled from some bins evenly, which are - split by `num_bins` via IoU evenly. - - Args: - num (int): number of proposals. - pos_fraction (float): fraction of positive proposals. - floor_thr (float): threshold (minimum) IoU for IoU balanced sampling, - set to -1 if all using IoU balanced sampling. - floor_fraction (float): sampling fraction of proposals under floor_thr. - num_bins (int): number of bins in IoU balanced sampling. - """ - - def __init__(self, - num, - pos_fraction, - floor_thr=-1, - floor_fraction=0, - num_bins=3, - **kwargs): - super(IoUBalancedNegSampler, self).__init__(num, pos_fraction, - **kwargs) - assert floor_thr >= 0 or floor_thr == -1 - assert 0 <= floor_fraction <= 1 - assert num_bins >= 1 - - self.floor_thr = floor_thr - self.floor_fraction = floor_fraction - self.num_bins = num_bins - - def sample_via_interval(self, max_overlaps, full_set, num_expected): - """Sample according to the iou interval. - - Args: - max_overlaps (torch.Tensor): IoU between bounding boxes and ground - truth boxes. 
- full_set (set(int)): A full set of indices of boxes。 - num_expected (int): Number of expected samples。 - - Returns: - np.ndarray: Indices of samples - """ - max_iou = max_overlaps.max() - iou_interval = (max_iou - self.floor_thr) / self.num_bins - per_num_expected = int(num_expected / self.num_bins) - - sampled_inds = [] - for i in range(self.num_bins): - start_iou = self.floor_thr + i * iou_interval - end_iou = self.floor_thr + (i + 1) * iou_interval - tmp_set = set( - np.where( - np.logical_and(max_overlaps >= start_iou, - max_overlaps < end_iou))[0]) - tmp_inds = list(tmp_set & full_set) - if len(tmp_inds) > per_num_expected: - tmp_sampled_set = self.random_choice(tmp_inds, - per_num_expected) - else: - tmp_sampled_set = np.array(tmp_inds, dtype=np.int) - sampled_inds.append(tmp_sampled_set) - - sampled_inds = np.concatenate(sampled_inds) - if len(sampled_inds) < num_expected: - num_extra = num_expected - len(sampled_inds) - extra_inds = np.array(list(full_set - set(sampled_inds))) - if len(extra_inds) > num_extra: - extra_inds = self.random_choice(extra_inds, num_extra) - sampled_inds = np.concatenate([sampled_inds, extra_inds]) - - return sampled_inds - - def _sample_neg(self, assign_result, num_expected, **kwargs): - """Sample negative boxes. - - Args: - assign_result (:obj:`AssignResult`): The assigned results of boxes. - num_expected (int): The number of expected negative samples - - Returns: - Tensor or ndarray: sampled indices. - """ - neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) - if neg_inds.numel() != 0: - neg_inds = neg_inds.squeeze(1) - if len(neg_inds) <= num_expected: - return neg_inds - else: - max_overlaps = assign_result.max_overlaps.cpu().numpy() - # balance sampling for negative samples - neg_set = set(neg_inds.cpu().numpy()) - - if self.floor_thr > 0: - floor_set = set( - np.where( - np.logical_and(max_overlaps >= 0, - max_overlaps < self.floor_thr))[0]) - iou_sampling_set = set( - np.where(max_overlaps >= self.floor_thr)[0]) - elif self.floor_thr == 0: - floor_set = set(np.where(max_overlaps == 0)[0]) - iou_sampling_set = set( - np.where(max_overlaps > self.floor_thr)[0]) - else: - floor_set = set() - iou_sampling_set = set( - np.where(max_overlaps > self.floor_thr)[0]) - # for sampling interval calculation - self.floor_thr = 0 - - floor_neg_inds = list(floor_set & neg_set) - iou_sampling_neg_inds = list(iou_sampling_set & neg_set) - num_expected_iou_sampling = int(num_expected * - (1 - self.floor_fraction)) - if len(iou_sampling_neg_inds) > num_expected_iou_sampling: - if self.num_bins >= 2: - iou_sampled_inds = self.sample_via_interval( - max_overlaps, set(iou_sampling_neg_inds), - num_expected_iou_sampling) - else: - iou_sampled_inds = self.random_choice( - iou_sampling_neg_inds, num_expected_iou_sampling) - else: - iou_sampled_inds = np.array( - iou_sampling_neg_inds, dtype=np.int) - num_expected_floor = num_expected - len(iou_sampled_inds) - if len(floor_neg_inds) > num_expected_floor: - sampled_floor_inds = self.random_choice( - floor_neg_inds, num_expected_floor) - else: - sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int) - sampled_inds = np.concatenate( - (sampled_floor_inds, iou_sampled_inds)) - if len(sampled_inds) < num_expected: - num_extra = num_expected - len(sampled_inds) - extra_inds = np.array(list(neg_set - set(sampled_inds))) - if len(extra_inds) > num_extra: - extra_inds = self.random_choice(extra_inds, num_extra) - sampled_inds = np.concatenate((sampled_inds, extra_inds)) - sampled_inds = 
torch.from_numpy(sampled_inds).long().to( - assign_result.gt_inds.device) - return sampled_inds diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/mask_pseudo_sampler.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/mask_pseudo_sampler.py deleted file mode 100644 index b5f69658d02808fd67adf54d2acf5f7fc28d2e6e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/mask_pseudo_sampler.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -"""copy from -https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" - -import torch - -from mmdet.core.bbox.builder import BBOX_SAMPLERS -from .base_sampler import BaseSampler -from .mask_sampling_result import MaskSamplingResult - - -@BBOX_SAMPLERS.register_module() -class MaskPseudoSampler(BaseSampler): - """A pseudo sampler that does not perform actual sampling.""" - - def __init__(self, **kwargs): - pass - - def _sample_pos(self, **kwargs): - """Sample positive samples.""" - raise NotImplementedError - - def _sample_neg(self, **kwargs): - """Sample negative samples.""" - raise NotImplementedError - - def sample(self, assign_result, masks, gt_masks, **kwargs): - """Directly returns the positive and negative indices of samples. - - Args: - assign_result (:obj:`AssignResult`): Assigned results - masks (torch.Tensor): Predicted masks - gt_masks (torch.Tensor): Ground truth masks - Returns: - :obj:`MaskSamplingResult`: Sampling result - """ - pos_inds = torch.nonzero( - assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() - neg_inds = torch.nonzero( - assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() - gt_flags = masks.new_zeros(masks.shape[0], dtype=torch.uint8) - sampling_result = MaskSamplingResult(pos_inds, neg_inds, masks, - gt_masks, assign_result, gt_flags) - return sampling_result diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/mask_sampling_result.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/mask_sampling_result.py deleted file mode 100644 index 3d109432260089b8f494d0e5b78bab7280cc2e0d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/mask_sampling_result.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
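The interval sampling in `IoUBalancedNegSampler.sample_via_interval` above splits the hard negatives (IoU above `floor_thr`) into `num_bins` equal IoU ranges and draws evenly from each. A toy illustration with made-up IoU values; proposals below `floor_thr` would instead go to the separately sampled floor set.

```python
import numpy as np

max_overlaps = np.array([0.0, 0.05, 0.12, 0.22, 0.31, 0.45, 0.49])  # hypothetical IoUs
floor_thr, num_bins = 0.1, 3
iou_interval = (max_overlaps.max() - floor_thr) / num_bins
for i in range(num_bins):
    start = floor_thr + i * iou_interval
    end = floor_thr + (i + 1) * iou_interval
    inds = np.where((max_overlaps >= start) & (max_overlaps < end))[0]
    print(f'bin {i}: IoU in [{start:.2f}, {end:.2f}) -> proposals {inds.tolist()}')
```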
-"""copy from -https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" - -import torch - -from .sampling_result import SamplingResult - - -class MaskSamplingResult(SamplingResult): - """Mask sampling result.""" - - def __init__(self, pos_inds, neg_inds, masks, gt_masks, assign_result, - gt_flags): - self.pos_inds = pos_inds - self.neg_inds = neg_inds - self.pos_masks = masks[pos_inds] - self.neg_masks = masks[neg_inds] - self.pos_is_gt = gt_flags[pos_inds] - - self.num_gts = gt_masks.shape[0] - self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 - - if gt_masks.numel() == 0: - # hack for index error case - assert self.pos_assigned_gt_inds.numel() == 0 - self.pos_gt_masks = torch.empty_like(gt_masks) - else: - self.pos_gt_masks = gt_masks[self.pos_assigned_gt_inds, :] - - if assign_result.labels is not None: - self.pos_gt_labels = assign_result.labels[pos_inds] - else: - self.pos_gt_labels = None - - @property - def masks(self): - """torch.Tensor: concatenated positive and negative boxes""" - return torch.cat([self.pos_masks, self.neg_masks]) - - def __nice__(self): - data = self.info.copy() - data['pos_masks'] = data.pop('pos_masks').shape - data['neg_masks'] = data.pop('neg_masks').shape - parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] - body = ' ' + ',\n '.join(parts) - return '{\n' + body + '\n}' - - @property - def info(self): - """Returns a dictionary of info about the object.""" - return { - 'pos_inds': self.pos_inds, - 'neg_inds': self.neg_inds, - 'pos_masks': self.pos_masks, - 'neg_masks': self.neg_masks, - 'pos_is_gt': self.pos_is_gt, - 'num_gts': self.num_gts, - 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, - } diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/ohem_sampler.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/ohem_sampler.py deleted file mode 100644 index 7eb066633809ff8d70240062c2dacd0e7283a1c5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/ohem_sampler.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from ..builder import BBOX_SAMPLERS -from ..transforms import bbox2roi -from .base_sampler import BaseSampler - - -@BBOX_SAMPLERS.register_module() -class OHEMSampler(BaseSampler): - r"""Online Hard Example Mining Sampler described in `Training Region-based - Object Detectors with Online Hard Example Mining - `_. 
- """ - - def __init__(self, - num, - pos_fraction, - context, - neg_pos_ub=-1, - add_gt_as_proposals=True, - loss_key='loss_cls', - **kwargs): - super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, - add_gt_as_proposals) - self.context = context - if not hasattr(self.context, 'num_stages'): - self.bbox_head = self.context.bbox_head - else: - self.bbox_head = self.context.bbox_head[self.context.current_stage] - - self.loss_key = loss_key - - def hard_mining(self, inds, num_expected, bboxes, labels, feats): - with torch.no_grad(): - rois = bbox2roi([bboxes]) - if not hasattr(self.context, 'num_stages'): - bbox_results = self.context._bbox_forward(feats, rois) - else: - bbox_results = self.context._bbox_forward( - self.context.current_stage, feats, rois) - cls_score = bbox_results['cls_score'] - loss = self.bbox_head.loss( - cls_score=cls_score, - bbox_pred=None, - rois=rois, - labels=labels, - label_weights=cls_score.new_ones(cls_score.size(0)), - bbox_targets=None, - bbox_weights=None, - reduction_override='none')[self.loss_key] - _, topk_loss_inds = loss.topk(num_expected) - return inds[topk_loss_inds] - - def _sample_pos(self, - assign_result, - num_expected, - bboxes=None, - feats=None, - **kwargs): - """Sample positive boxes. - - Args: - assign_result (:obj:`AssignResult`): Assigned results - num_expected (int): Number of expected positive samples - bboxes (torch.Tensor, optional): Boxes. Defaults to None. - feats (list[torch.Tensor], optional): Multi-level features. - Defaults to None. - - Returns: - torch.Tensor: Indices of positive samples - """ - # Sample some hard positive samples - pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) - if pos_inds.numel() != 0: - pos_inds = pos_inds.squeeze(1) - if pos_inds.numel() <= num_expected: - return pos_inds - else: - return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], - assign_result.labels[pos_inds], feats) - - def _sample_neg(self, - assign_result, - num_expected, - bboxes=None, - feats=None, - **kwargs): - """Sample negative boxes. - - Args: - assign_result (:obj:`AssignResult`): Assigned results - num_expected (int): Number of expected negative samples - bboxes (torch.Tensor, optional): Boxes. Defaults to None. - feats (list[torch.Tensor], optional): Multi-level features. - Defaults to None. - - Returns: - torch.Tensor: Indices of negative samples - """ - # Sample some hard negative samples - neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) - if neg_inds.numel() != 0: - neg_inds = neg_inds.squeeze(1) - if len(neg_inds) <= num_expected: - return neg_inds - else: - neg_labels = assign_result.labels.new_empty( - neg_inds.size(0)).fill_(self.bbox_head.num_classes) - return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], - neg_labels, feats) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/pseudo_sampler.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/pseudo_sampler.py deleted file mode 100644 index b5ce298ed01a327daa12167a20cb14b48c14d4e0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/pseudo_sampler.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import torch - -from ..builder import BBOX_SAMPLERS -from .base_sampler import BaseSampler -from .sampling_result import SamplingResult - - -@BBOX_SAMPLERS.register_module() -class PseudoSampler(BaseSampler): - """A pseudo sampler that does not do sampling actually.""" - - def __init__(self, **kwargs): - pass - - def _sample_pos(self, **kwargs): - """Sample positive samples.""" - raise NotImplementedError - - def _sample_neg(self, **kwargs): - """Sample negative samples.""" - raise NotImplementedError - - def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwargs): - """Directly returns the positive and negative indices of samples. - - Args: - assign_result (:obj:`AssignResult`): Assigned results - bboxes (torch.Tensor): Bounding boxes - gt_bboxes (torch.Tensor): Ground truth boxes - - Returns: - :obj:`SamplingResult`: sampler results - """ - pos_inds = torch.nonzero( - assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() - neg_inds = torch.nonzero( - assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() - gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) - sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, - assign_result, gt_flags) - return sampling_result diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/random_sampler.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/random_sampler.py deleted file mode 100644 index 8d3effcb7802df98aeff4282594d2b7464643621..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/random_sampler.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from ..builder import BBOX_SAMPLERS -from .base_sampler import BaseSampler - - -@BBOX_SAMPLERS.register_module() -class RandomSampler(BaseSampler): - """Random sampler. - - Args: - num (int): Number of samples - pos_fraction (float): Fraction of positive samples - neg_pos_ub (int, optional): Upper bound number of negative and - positive samples. Defaults to -1. - add_gt_as_proposals (bool, optional): Whether to add ground truth - boxes as proposals. Defaults to True. - """ - - def __init__(self, - num, - pos_fraction, - neg_pos_ub=-1, - add_gt_as_proposals=True, - **kwargs): - from mmdet.core.bbox import demodata - super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub, - add_gt_as_proposals) - self.rng = demodata.ensure_rng(kwargs.get('rng', None)) - - def random_choice(self, gallery, num): - """Random select some elements from the gallery. - - If `gallery` is a Tensor, the returned indices will be a Tensor; - If `gallery` is a ndarray or list, the returned indices will be a - ndarray. - - Args: - gallery (Tensor | ndarray | list): indices pool. - num (int): expected sample num. - - Returns: - Tensor or ndarray: sampled indices. - """ - assert len(gallery) >= num - - is_tensor = isinstance(gallery, torch.Tensor) - if not is_tensor: - if torch.cuda.is_available(): - device = torch.cuda.current_device() - else: - device = 'cpu' - gallery = torch.tensor(gallery, dtype=torch.long, device=device) - # This is a temporary fix. We can revert the following code - # when PyTorch fixes the abnormal return of torch.randperm. 
- # See: https://github.com/open-mmlab/mmdetection/pull/5014 - perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device) - rand_inds = gallery[perm] - if not is_tensor: - rand_inds = rand_inds.cpu().numpy() - return rand_inds - - def _sample_pos(self, assign_result, num_expected, **kwargs): - """Randomly sample some positive samples.""" - pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) - if pos_inds.numel() != 0: - pos_inds = pos_inds.squeeze(1) - if pos_inds.numel() <= num_expected: - return pos_inds - else: - return self.random_choice(pos_inds, num_expected) - - def _sample_neg(self, assign_result, num_expected, **kwargs): - """Randomly sample some negative samples.""" - neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) - if neg_inds.numel() != 0: - neg_inds = neg_inds.squeeze(1) - if len(neg_inds) <= num_expected: - return neg_inds - else: - return self.random_choice(neg_inds, num_expected) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/sampling_result.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/sampling_result.py deleted file mode 100644 index 11a02c5d95a4d633dfea26df7fb3e440494a8be7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/sampling_result.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from mmdet.utils import util_mixins - - -class SamplingResult(util_mixins.NiceRepr): - """Bbox sampling result. - - Example: - >>> # xdoctest: +IGNORE_WANT - >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA - >>> self = SamplingResult.random(rng=10) - >>> print(f'self = {self}') - self = - """ - - def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, - gt_flags): - self.pos_inds = pos_inds - self.neg_inds = neg_inds - self.pos_bboxes = bboxes[pos_inds] - self.neg_bboxes = bboxes[neg_inds] - self.pos_is_gt = gt_flags[pos_inds] - - self.num_gts = gt_bboxes.shape[0] - self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 - - if gt_bboxes.numel() == 0: - # hack for index error case - assert self.pos_assigned_gt_inds.numel() == 0 - self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4) - else: - if len(gt_bboxes.shape) < 2: - gt_bboxes = gt_bboxes.view(-1, 4) - - self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds.long(), :] - - if assign_result.labels is not None: - self.pos_gt_labels = assign_result.labels[pos_inds] - else: - self.pos_gt_labels = None - - @property - def bboxes(self): - """torch.Tensor: concatenated positive and negative boxes""" - return torch.cat([self.pos_bboxes, self.neg_bboxes]) - - def to(self, device): - """Change the device of the data inplace. 
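`RandomSampler.random_choice` above boils down to indexing the gallery with a truncated random permutation. A condensed sketch of the tensor branch; it builds the permutation directly on the gallery's device rather than using the CPU workaround mentioned in the original comment.

```python
import torch

def random_choice(gallery, num):
    """Draw `num` distinct elements from a 1-D index tensor."""
    assert len(gallery) >= num
    perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
    return gallery[perm]

pos_inds = torch.arange(10, 20)
print(random_choice(pos_inds, 4))  # e.g. tensor([17, 12, 10, 19])
```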
- - Example: - >>> self = SamplingResult.random() - >>> print(f'self = {self.to(None)}') - >>> # xdoctest: +REQUIRES(--gpu) - >>> print(f'self = {self.to(0)}') - """ - _dict = self.__dict__ - for key, value in _dict.items(): - if isinstance(value, torch.Tensor): - _dict[key] = value.to(device) - return self - - def __nice__(self): - data = self.info.copy() - data['pos_bboxes'] = data.pop('pos_bboxes').shape - data['neg_bboxes'] = data.pop('neg_bboxes').shape - parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] - body = ' ' + ',\n '.join(parts) - return '{\n' + body + '\n}' - - @property - def info(self): - """Returns a dictionary of info about the object.""" - return { - 'pos_inds': self.pos_inds, - 'neg_inds': self.neg_inds, - 'pos_bboxes': self.pos_bboxes, - 'neg_bboxes': self.neg_bboxes, - 'pos_is_gt': self.pos_is_gt, - 'num_gts': self.num_gts, - 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, - } - - @classmethod - def random(cls, rng=None, **kwargs): - """ - Args: - rng (None | int | numpy.random.RandomState): seed or state. - kwargs (keyword arguments): - - num_preds: number of predicted boxes - - num_gts: number of true boxes - - p_ignore (float): probability of a predicted box assigned to \ - an ignored truth. - - p_assigned (float): probability of a predicted box not being \ - assigned. - - p_use_label (float | bool): with labels or not. - - Returns: - :obj:`SamplingResult`: Randomly generated sampling result. - - Example: - >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA - >>> self = SamplingResult.random() - >>> print(self.__dict__) - """ - from mmdet.core.bbox import demodata - from mmdet.core.bbox.assigners.assign_result import AssignResult - from mmdet.core.bbox.samplers.random_sampler import RandomSampler - rng = demodata.ensure_rng(rng) - - # make probabilistic? - num = 32 - pos_fraction = 0.5 - neg_pos_ub = -1 - - assign_result = AssignResult.random(rng=rng, **kwargs) - - # Note we could just compute an assignment - bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng) - gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng) - - if rng.rand() > 0.2: - # sometimes algorithms squeeze their data, be robust to that - gt_bboxes = gt_bboxes.squeeze() - bboxes = bboxes.squeeze() - - if assign_result.labels is None: - gt_labels = None - else: - gt_labels = None # todo - - if gt_labels is None: - add_gt_as_proposals = False - else: - add_gt_as_proposals = True # make probabilistic? - - sampler = RandomSampler( - num, - pos_fraction, - neg_pos_ub=neg_pos_ub, - add_gt_as_proposals=add_gt_as_proposals, - rng=rng) - self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels) - return self diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/score_hlr_sampler.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/score_hlr_sampler.py deleted file mode 100644 index f4be9b8cfefff7bd59242de1ab5b6a9e37fa7943..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/samplers/score_hlr_sampler.py +++ /dev/null @@ -1,265 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from mmcv.ops import nms_match - -from ..builder import BBOX_SAMPLERS -from ..transforms import bbox2roi -from .base_sampler import BaseSampler -from .sampling_result import SamplingResult - - -@BBOX_SAMPLERS.register_module() -class ScoreHLRSampler(BaseSampler): - r"""Importance-based Sample Reweighting (ISR_N), described in `Prime Sample - Attention in Object Detection `_. 
- - Score hierarchical local rank (HLR) differentiates with RandomSampler in - negative part. It firstly computes Score-HLR in a two-step way, - then linearly maps score hlr to the loss weights. - - Args: - num (int): Total number of sampled RoIs. - pos_fraction (float): Fraction of positive samples. - context (:class:`BaseRoIHead`): RoI head that the sampler belongs to. - neg_pos_ub (int): Upper bound of the ratio of num negative to num - positive, -1 means no upper bound. - add_gt_as_proposals (bool): Whether to add ground truth as proposals. - k (float): Power of the non-linear mapping. - bias (float): Shift of the non-linear mapping. - score_thr (float): Minimum score that a negative sample is to be - considered as valid bbox. - """ - - def __init__(self, - num, - pos_fraction, - context, - neg_pos_ub=-1, - add_gt_as_proposals=True, - k=0.5, - bias=0, - score_thr=0.05, - iou_thr=0.5, - **kwargs): - super().__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals) - self.k = k - self.bias = bias - self.score_thr = score_thr - self.iou_thr = iou_thr - self.context = context - # context of cascade detectors is a list, so distinguish them here. - if not hasattr(context, 'num_stages'): - self.bbox_roi_extractor = context.bbox_roi_extractor - self.bbox_head = context.bbox_head - self.with_shared_head = context.with_shared_head - if self.with_shared_head: - self.shared_head = context.shared_head - else: - self.bbox_roi_extractor = context.bbox_roi_extractor[ - context.current_stage] - self.bbox_head = context.bbox_head[context.current_stage] - - @staticmethod - def random_choice(gallery, num): - """Randomly select some elements from the gallery. - - If `gallery` is a Tensor, the returned indices will be a Tensor; - If `gallery` is a ndarray or list, the returned indices will be a - ndarray. - - Args: - gallery (Tensor | ndarray | list): indices pool. - num (int): expected sample num. - - Returns: - Tensor or ndarray: sampled indices. - """ - assert len(gallery) >= num - - is_tensor = isinstance(gallery, torch.Tensor) - if not is_tensor: - if torch.cuda.is_available(): - device = torch.cuda.current_device() - else: - device = 'cpu' - gallery = torch.tensor(gallery, dtype=torch.long, device=device) - perm = torch.randperm(gallery.numel(), device=gallery.device)[:num] - rand_inds = gallery[perm] - if not is_tensor: - rand_inds = rand_inds.cpu().numpy() - return rand_inds - - def _sample_pos(self, assign_result, num_expected, **kwargs): - """Randomly sample some positive samples.""" - pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten() - if pos_inds.numel() <= num_expected: - return pos_inds - else: - return self.random_choice(pos_inds, num_expected) - - def _sample_neg(self, - assign_result, - num_expected, - bboxes, - feats=None, - img_meta=None, - **kwargs): - """Sample negative samples. - - Score-HLR sampler is done in the following steps: - 1. Take the maximum positive score prediction of each negative samples - as s_i. - 2. Filter out negative samples whose s_i <= score_thr, the left samples - are called valid samples. - 3. Use NMS-Match to divide valid samples into different groups, - samples in the same group will greatly overlap with each other - 4. Rank the matched samples in two-steps to get Score-HLR. - (1) In the same group, rank samples with their scores. - (2) In the same score rank across different groups, - rank samples with their scores again. - 5. Linearly map Score-HLR to the final label weights. - - Args: - assign_result (:obj:`AssignResult`): result of assigner. 
- num_expected (int): Expected number of samples. - bboxes (Tensor): bbox to be sampled. - feats (Tensor): Features come from FPN. - img_meta (dict): Meta information dictionary. - """ - neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten() - num_neg = neg_inds.size(0) - if num_neg == 0: - return neg_inds, None - with torch.no_grad(): - neg_bboxes = bboxes[neg_inds] - neg_rois = bbox2roi([neg_bboxes]) - bbox_result = self.context._bbox_forward(feats, neg_rois) - cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[ - 'bbox_pred'] - - ori_loss = self.bbox_head.loss( - cls_score=cls_score, - bbox_pred=None, - rois=None, - labels=neg_inds.new_full((num_neg, ), - self.bbox_head.num_classes), - label_weights=cls_score.new_ones(num_neg), - bbox_targets=None, - bbox_weights=None, - reduction_override='none')['loss_cls'] - - # filter out samples with the max score lower than score_thr - max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1) - valid_inds = (max_score > self.score_thr).nonzero().view(-1) - invalid_inds = (max_score <= self.score_thr).nonzero().view(-1) - num_valid = valid_inds.size(0) - num_invalid = invalid_inds.size(0) - - num_expected = min(num_neg, num_expected) - num_hlr = min(num_valid, num_expected) - num_rand = num_expected - num_hlr - if num_valid > 0: - valid_rois = neg_rois[valid_inds] - valid_max_score = max_score[valid_inds] - valid_argmax_score = argmax_score[valid_inds] - valid_bbox_pred = bbox_pred[valid_inds] - - # valid_bbox_pred shape: [num_valid, #num_classes, 4] - valid_bbox_pred = valid_bbox_pred.view( - valid_bbox_pred.size(0), -1, 4) - selected_bbox_pred = valid_bbox_pred[range(num_valid), - valid_argmax_score] - pred_bboxes = self.bbox_head.bbox_coder.decode( - valid_rois[:, 1:], selected_bbox_pred) - pred_bboxes_with_score = torch.cat( - [pred_bboxes, valid_max_score[:, None]], -1) - group = nms_match(pred_bboxes_with_score, self.iou_thr) - - # imp: importance - imp = cls_score.new_zeros(num_valid) - for g in group: - g_score = valid_max_score[g] - # g_score has already sorted - rank = g_score.new_tensor(range(g_score.size(0))) - imp[g] = num_valid - rank + g_score - _, imp_rank_inds = imp.sort(descending=True) - _, imp_rank = imp_rank_inds.sort() - hlr_inds = imp_rank_inds[:num_expected] - - if num_rand > 0: - rand_inds = torch.randperm(num_invalid)[:num_rand] - select_inds = torch.cat( - [valid_inds[hlr_inds], invalid_inds[rand_inds]]) - else: - select_inds = valid_inds[hlr_inds] - - neg_label_weights = cls_score.new_ones(num_expected) - - up_bound = max(num_expected, num_valid) - imp_weights = (up_bound - - imp_rank[hlr_inds].float()) / up_bound - neg_label_weights[:num_hlr] = imp_weights - neg_label_weights[num_hlr:] = imp_weights.min() - neg_label_weights = (self.bias + - (1 - self.bias) * neg_label_weights).pow( - self.k) - ori_selected_loss = ori_loss[select_inds] - new_loss = ori_selected_loss * neg_label_weights - norm_ratio = ori_selected_loss.sum() / new_loss.sum() - neg_label_weights *= norm_ratio - else: - neg_label_weights = cls_score.new_ones(num_expected) - select_inds = torch.randperm(num_neg)[:num_expected] - - return neg_inds[select_inds], neg_label_weights - - def sample(self, - assign_result, - bboxes, - gt_bboxes, - gt_labels=None, - img_meta=None, - **kwargs): - """Sample positive and negative bboxes. - - This is a simple implementation of bbox sampling given candidates, - assigning results and ground truth bboxes. - - Args: - assign_result (:obj:`AssignResult`): Bbox assigning results. 
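Step 5 of the Score-HLR procedure (mapping the hierarchical rank to a label weight) is the part that actually re-weights the negatives. A toy sketch with hypothetical ranks, using the same `bias`/`k` mapping as the code above:

```python
import torch

num_expected, num_valid, bias, k = 4, 6, 0.0, 0.5   # hypothetical values
imp_rank = torch.tensor([0., 2., 4., 5.])           # Score-HLR rank of the kept negatives
up_bound = max(num_expected, num_valid)
weights = (up_bound - imp_rank) / up_bound          # smaller rank (harder) -> larger weight
weights = (bias + (1 - bias) * weights).pow(k)
print(weights)  # tensor([1.0000, 0.8165, 0.5774, 0.4082])
```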
- bboxes (Tensor): Boxes to be sampled from. - gt_bboxes (Tensor): Ground truth bboxes. - gt_labels (Tensor, optional): Class labels of ground truth bboxes. - - Returns: - tuple[:obj:`SamplingResult`, Tensor]: Sampling result and negative - label weights. - """ - bboxes = bboxes[:, :4] - - gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) - if self.add_gt_as_proposals: - bboxes = torch.cat([gt_bboxes, bboxes], dim=0) - assign_result.add_gt_(gt_labels) - gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) - gt_flags = torch.cat([gt_ones, gt_flags]) - - num_expected_pos = int(self.num * self.pos_fraction) - pos_inds = self.pos_sampler._sample_pos( - assign_result, num_expected_pos, bboxes=bboxes, **kwargs) - num_sampled_pos = pos_inds.numel() - num_expected_neg = self.num - num_sampled_pos - if self.neg_pos_ub >= 0: - _pos = max(1, num_sampled_pos) - neg_upper_bound = int(self.neg_pos_ub * _pos) - if num_expected_neg > neg_upper_bound: - num_expected_neg = neg_upper_bound - neg_inds, neg_label_weights = self.neg_sampler._sample_neg( - assign_result, - num_expected_neg, - bboxes, - img_meta=img_meta, - **kwargs) - - return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, - assign_result, gt_flags), neg_label_weights diff --git a/cv/detection/co-detr/pytorch/mmdet/core/bbox/transforms.py b/cv/detection/co-detr/pytorch/mmdet/core/bbox/transforms.py deleted file mode 100644 index 6d72076a5621c5b59c081a8a190b4c8d167c26a5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/bbox/transforms.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch - - -def find_inside_bboxes(bboxes, img_h, img_w): - """Find bboxes as long as a part of bboxes is inside the image. - - Args: - bboxes (Tensor): Shape (N, 4). - img_h (int): Image height. - img_w (int): Image width. - - Returns: - Tensor: Index of the remaining bboxes. - """ - inside_inds = (bboxes[:, 0] < img_w) & (bboxes[:, 2] > 0) \ - & (bboxes[:, 1] < img_h) & (bboxes[:, 3] > 0) - return inside_inds - - -def bbox_flip(bboxes, img_shape, direction='horizontal'): - """Flip bboxes horizontally or vertically. - - Args: - bboxes (Tensor): Shape (..., 4*k) - img_shape (tuple): Image shape. - direction (str): Flip direction, options are "horizontal", "vertical", - "diagonal". Default: "horizontal" - - Returns: - Tensor: Flipped bboxes. 
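Horizontal flipping in `bbox_flip` (whose implementation follows below) mirrors and swaps the x coordinates while leaving y untouched; a quick check with one hypothetical box:

```python
import torch

img_shape = (480, 640)                           # (H, W), made up
boxes = torch.tensor([[10., 20., 110., 220.]])   # x1, y1, x2, y2
flipped = boxes.clone()
flipped[..., 0] = img_shape[1] - boxes[..., 2]   # new x1 = W - x2
flipped[..., 2] = img_shape[1] - boxes[..., 0]   # new x2 = W - x1
print(flipped)  # tensor([[530.,  20., 630., 220.]])
```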
- """ - assert bboxes.shape[-1] % 4 == 0 - assert direction in ['horizontal', 'vertical', 'diagonal'] - flipped = bboxes.clone() - if direction == 'horizontal': - flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] - flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] - elif direction == 'vertical': - flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] - flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] - else: - flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] - flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] - flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] - flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] - return flipped - - -def bbox_mapping(bboxes, - img_shape, - scale_factor, - flip, - flip_direction='horizontal'): - """Map bboxes from the original image scale to testing scale.""" - new_bboxes = bboxes * bboxes.new_tensor(scale_factor) - if flip: - new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction) - return new_bboxes - - -def bbox_mapping_back(bboxes, - img_shape, - scale_factor, - flip, - flip_direction='horizontal'): - """Map bboxes from testing scale to original image scale.""" - new_bboxes = bbox_flip(bboxes, img_shape, - flip_direction) if flip else bboxes - new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor) - return new_bboxes.view(bboxes.shape) - - -def bbox2roi(bbox_list): - """Convert a list of bboxes to roi format. - - Args: - bbox_list (list[Tensor]): a list of bboxes corresponding to a batch - of images. - - Returns: - Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2] - """ - rois_list = [] - for img_id, bboxes in enumerate(bbox_list): - if bboxes.size(0) > 0: - img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) - rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1) - else: - rois = bboxes.new_zeros((0, 5)) - rois_list.append(rois) - rois = torch.cat(rois_list, 0) - return rois - - -def roi2bbox(rois): - """Convert rois to bounding box format. - - Args: - rois (torch.Tensor): RoIs with the shape (n, 5) where the first - column indicates batch id of each RoI. - - Returns: - list[torch.Tensor]: Converted boxes of corresponding rois. - """ - bbox_list = [] - img_ids = torch.unique(rois[:, 0].cpu(), sorted=True) - for img_id in img_ids: - inds = (rois[:, 0] == img_id.item()) - bbox = rois[inds, 1:] - bbox_list.append(bbox) - return bbox_list - - -def bbox2result(bboxes, labels, num_classes): - """Convert detection results to a list of numpy arrays. - - Args: - bboxes (torch.Tensor | np.ndarray): shape (n, 5) - labels (torch.Tensor | np.ndarray): shape (n, ) - num_classes (int): class number, including background class - - Returns: - list(ndarray): bbox results of each class - """ - if bboxes.shape[0] == 0: - return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)] - else: - if isinstance(bboxes, torch.Tensor): - bboxes = bboxes.detach().cpu().numpy() - labels = labels.detach().cpu().numpy() - return [bboxes[labels == i, :] for i in range(num_classes)] - - -def distance2bbox(points, distance, max_shape=None): - """Decode distance prediction to bounding box. - - Args: - points (Tensor): Shape (B, N, 2) or (N, 2). - distance (Tensor): Distance from the given point to 4 - boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) - max_shape (Sequence[int] or torch.Tensor or Sequence[ - Sequence[int]],optional): Maximum bounds for boxes, specifies - (H, W, C) or (H, W). 
If priors shape is (B, N, 4), then - the max_shape should be a Sequence[Sequence[int]] - and the length of max_shape should also be B. - - Returns: - Tensor: Boxes with shape (N, 4) or (B, N, 4) - """ - - x1 = points[..., 0] - distance[..., 0] - y1 = points[..., 1] - distance[..., 1] - x2 = points[..., 0] + distance[..., 2] - y2 = points[..., 1] + distance[..., 3] - - bboxes = torch.stack([x1, y1, x2, y2], -1) - - if max_shape is not None: - if bboxes.dim() == 2 and not torch.onnx.is_in_onnx_export(): - # speed up - bboxes[:, 0::2].clamp_(min=0, max=max_shape[1]) - bboxes[:, 1::2].clamp_(min=0, max=max_shape[0]) - return bboxes - - # clip bboxes with dynamic `min` and `max` for onnx - if torch.onnx.is_in_onnx_export(): - from mmdet.core.export import dynamic_clip_for_onnx - x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape) - bboxes = torch.stack([x1, y1, x2, y2], dim=-1) - return bboxes - if not isinstance(max_shape, torch.Tensor): - max_shape = x1.new_tensor(max_shape) - max_shape = max_shape[..., :2].type_as(x1) - if max_shape.ndim == 2: - assert bboxes.ndim == 3 - assert max_shape.size(0) == bboxes.size(0) - - min_xy = x1.new_tensor(0) - max_xy = torch.cat([max_shape, max_shape], - dim=-1).flip(-1).unsqueeze(-2) - bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) - bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) - - return bboxes - - -def bbox2distance(points, bbox, max_dis=None, eps=0.1): - """Decode bounding box based on distances. - - Args: - points (Tensor): Shape (n, 2), [x, y]. - bbox (Tensor): Shape (n, 4), "xyxy" format - max_dis (float): Upper bound of the distance. - eps (float): a small value to ensure target < max_dis, instead <= - - Returns: - Tensor: Decoded distances. - """ - left = points[:, 0] - bbox[:, 0] - top = points[:, 1] - bbox[:, 1] - right = bbox[:, 2] - points[:, 0] - bottom = bbox[:, 3] - points[:, 1] - if max_dis is not None: - left = left.clamp(min=0, max=max_dis - eps) - top = top.clamp(min=0, max=max_dis - eps) - right = right.clamp(min=0, max=max_dis - eps) - bottom = bottom.clamp(min=0, max=max_dis - eps) - return torch.stack([left, top, right, bottom], -1) - - -def bbox_rescale(bboxes, scale_factor=1.0): - """Rescale bounding box w.r.t. scale_factor. - - Args: - bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois - scale_factor (float): rescale factor - - Returns: - Tensor: Rescaled bboxes. - """ - if bboxes.size(1) == 5: - bboxes_ = bboxes[:, 1:] - inds_ = bboxes[:, 0] - else: - bboxes_ = bboxes - cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5 - cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5 - w = bboxes_[:, 2] - bboxes_[:, 0] - h = bboxes_[:, 3] - bboxes_[:, 1] - w = w * scale_factor - h = h * scale_factor - x1 = cx - 0.5 * w - x2 = cx + 0.5 * w - y1 = cy - 0.5 * h - y2 = cy + 0.5 * h - if bboxes.size(1) == 5: - rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1) - else: - rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) - return rescaled_bboxes - - -def bbox_cxcywh_to_xyxy(bbox): - """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2). - - Args: - bbox (Tensor): Shape (n, 4) for bboxes. - - Returns: - Tensor: Converted bboxes. - """ - cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1) - bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)] - return torch.cat(bbox_new, dim=-1) - - -def bbox_xyxy_to_cxcywh(bbox): - """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h). - - Args: - bbox (Tensor): Shape (n, 4) for bboxes. - - Returns: - Tensor: Converted bboxes. 
- """ - x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1) - bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)] - return torch.cat(bbox_new, dim=-1) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/data_structures/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/data_structures/__init__.py deleted file mode 100644 index 11ab96c565da484ad11533c3535e25abcc212c32..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/data_structures/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .general_data import GeneralData -from .instance_data import InstanceData - -__all__ = ['GeneralData', 'InstanceData'] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/data_structures/general_data.py b/cv/detection/co-detr/pytorch/mmdet/core/data_structures/general_data.py deleted file mode 100644 index 99316e41bc5b38f6beb4408cdd11de6304722e33..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/data_structures/general_data.py +++ /dev/null @@ -1,326 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import numpy as np -import torch - -from mmdet.utils.util_mixins import NiceRepr - - -class GeneralData(NiceRepr): - """A general data structure of OpenMMlab. - - A data structure that stores the meta information, - the annotations of the images or the model predictions, - which can be used in communication between components. - - The attributes in `GeneralData` are divided into two parts, - the `meta_info_fields` and the `data_fields` respectively. - - - `meta_info_fields`: Usually contains the - information about the image such as filename, - image_shape, pad_shape, etc. All attributes in - it are immutable once set, - but the user can add new meta information with - `set_meta_info` function, all information can be accessed - with methods `meta_info_keys`, `meta_info_values`, - `meta_info_items`. - - - `data_fields`: Annotations or model predictions are - stored. The attributes can be accessed or modified by - dict-like or object-like operations, such as - `.` , `[]`, `in`, `del`, `pop(str)` `get(str)`, `keys()`, - `values()`, `items()`. Users can also apply tensor-like methods - to all obj:`torch.Tensor` in the `data_fileds`, - such as `.cuda()`, `.cpu()`, `.numpy()`, `device`, `.to()` - `.detach()`, `.numpy()` - - Args: - meta_info (dict, optional): A dict contains the meta information - of single image. such as `img_shape`, `scale_factor`, etc. - Default: None. - data (dict, optional): A dict contains annotations of single image or - model predictions. Default: None. 
- - Examples: - >>> from mmdet.core import GeneralData - >>> img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3)) - >>> instance_data = GeneralData(meta_info=img_meta) - >>> img_shape in instance_data - True - >>> instance_data.det_labels = torch.LongTensor([0, 1, 2, 3]) - >>> instance_data["det_scores"] = torch.Tensor([0.01, 0.1, 0.2, 0.3]) - >>> print(results) - - >>> instance_data.det_scores - tensor([0.0100, 0.1000, 0.2000, 0.3000]) - >>> instance_data.det_labels - tensor([0, 1, 2, 3]) - >>> instance_data['det_labels'] - tensor([0, 1, 2, 3]) - >>> 'det_labels' in instance_data - True - >>> instance_data.img_shape - (800, 1196, 3) - >>> 'det_scores' in instance_data - True - >>> del instance_data.det_scores - >>> 'det_scores' in instance_data - False - >>> det_labels = instance_data.pop('det_labels', None) - >>> det_labels - tensor([0, 1, 2, 3]) - >>> 'det_labels' in instance_data - >>> False - """ - - def __init__(self, meta_info=None, data=None): - - self._meta_info_fields = set() - self._data_fields = set() - - if meta_info is not None: - self.set_meta_info(meta_info=meta_info) - if data is not None: - self.set_data(data) - - def set_meta_info(self, meta_info): - """Add meta information. - - Args: - meta_info (dict): A dict contains the meta information - of image. such as `img_shape`, `scale_factor`, etc. - Default: None. - """ - assert isinstance(meta_info, - dict), f'meta should be a `dict` but get {meta_info}' - meta = copy.deepcopy(meta_info) - for k, v in meta.items(): - # should be consistent with original meta_info - if k in self._meta_info_fields: - ori_value = getattr(self, k) - if isinstance(ori_value, (torch.Tensor, np.ndarray)): - if (ori_value == v).all(): - continue - else: - raise KeyError( - f'img_meta_info {k} has been set as ' - f'{getattr(self, k)} before, which is immutable ') - elif ori_value == v: - continue - else: - raise KeyError( - f'img_meta_info {k} has been set as ' - f'{getattr(self, k)} before, which is immutable ') - else: - self._meta_info_fields.add(k) - self.__dict__[k] = v - - def set_data(self, data): - """Update a dict to `data_fields`. - - Args: - data (dict): A dict contains annotations of image or - model predictions. Default: None. - """ - assert isinstance(data, - dict), f'meta should be a `dict` but get {data}' - for k, v in data.items(): - self.__setattr__(k, v) - - def new(self, meta_info=None, data=None): - """Return a new results with same image meta information. - - Args: - meta_info (dict, optional): A dict contains the meta information - of image. such as `img_shape`, `scale_factor`, etc. - Default: None. - data (dict, optional): A dict contains annotations of image or - model predictions. Default: None. - """ - new_data = self.__class__() - new_data.set_meta_info(dict(self.meta_info_items())) - if meta_info is not None: - new_data.set_meta_info(meta_info) - if data is not None: - new_data.set_data(data) - return new_data - - def keys(self): - """ - Returns: - list: Contains all keys in data_fields. - """ - return [key for key in self._data_fields] - - def meta_info_keys(self): - """ - Returns: - list: Contains all keys in meta_info_fields. - """ - return [key for key in self._meta_info_fields] - - def values(self): - """ - Returns: - list: Contains all values in data_fields. - """ - return [getattr(self, k) for k in self.keys()] - - def meta_info_values(self): - """ - Returns: - list: Contains all values in meta_info_fields. 
- """ - return [getattr(self, k) for k in self.meta_info_keys()] - - def items(self): - for k in self.keys(): - yield (k, getattr(self, k)) - - def meta_info_items(self): - for k in self.meta_info_keys(): - yield (k, getattr(self, k)) - - def __setattr__(self, name, val): - if name in ('_meta_info_fields', '_data_fields'): - if not hasattr(self, name): - super().__setattr__(name, val) - else: - raise AttributeError( - f'{name} has been used as a ' - f'private attribute, which is immutable. ') - else: - if name in self._meta_info_fields: - raise AttributeError(f'`{name}` is used in meta information,' - f'which is immutable') - - self._data_fields.add(name) - super().__setattr__(name, val) - - def __delattr__(self, item): - - if item in ('_meta_info_fields', '_data_fields'): - raise AttributeError(f'{item} has been used as a ' - f'private attribute, which is immutable. ') - - if item in self._meta_info_fields: - raise KeyError(f'{item} is used in meta information, ' - f'which is immutable.') - super().__delattr__(item) - if item in self._data_fields: - self._data_fields.remove(item) - - # dict-like methods - __setitem__ = __setattr__ - __delitem__ = __delattr__ - - def __getitem__(self, name): - return getattr(self, name) - - def get(self, *args): - assert len(args) < 3, '`get` get more than 2 arguments' - return self.__dict__.get(*args) - - def pop(self, *args): - assert len(args) < 3, '`pop` get more than 2 arguments' - name = args[0] - if name in self._meta_info_fields: - raise KeyError(f'{name} is a key in meta information, ' - f'which is immutable') - - if args[0] in self._data_fields: - self._data_fields.remove(args[0]) - return self.__dict__.pop(*args) - - # with default value - elif len(args) == 2: - return args[1] - else: - raise KeyError(f'{args[0]}') - - def __contains__(self, item): - return item in self._data_fields or \ - item in self._meta_info_fields - - # Tensor-like methods - def to(self, *args, **kwargs): - """Apply same name function to all tensors in data_fields.""" - new_data = self.new() - for k, v in self.items(): - if hasattr(v, 'to'): - v = v.to(*args, **kwargs) - new_data[k] = v - return new_data - - # Tensor-like methods - def cpu(self): - """Apply same name function to all tensors in data_fields.""" - new_data = self.new() - for k, v in self.items(): - if isinstance(v, torch.Tensor): - v = v.cpu() - new_data[k] = v - return new_data - - # Tensor-like methods - def mlu(self): - """Apply same name function to all tensors in data_fields.""" - new_data = self.new() - for k, v in self.items(): - if isinstance(v, torch.Tensor): - v = v.mlu() - new_data[k] = v - return new_data - - # Tensor-like methods - def cuda(self): - """Apply same name function to all tensors in data_fields.""" - new_data = self.new() - for k, v in self.items(): - if isinstance(v, torch.Tensor): - v = v.cuda() - new_data[k] = v - return new_data - - # Tensor-like methods - def detach(self): - """Apply same name function to all tensors in data_fields.""" - new_data = self.new() - for k, v in self.items(): - if isinstance(v, torch.Tensor): - v = v.detach() - new_data[k] = v - return new_data - - # Tensor-like methods - def numpy(self): - """Apply same name function to all tensors in data_fields.""" - new_data = self.new() - for k, v in self.items(): - if isinstance(v, torch.Tensor): - v = v.detach().cpu().numpy() - new_data[k] = v - return new_data - - def __nice__(self): - repr = '\n \n META INFORMATION \n' - for k, v in self.meta_info_items(): - repr += f'{k}: {v} \n' - repr += '\n DATA FIELDS \n' 
- for k, v in self.items(): - if isinstance(v, (torch.Tensor, np.ndarray)): - repr += f'shape of {k}: {v.shape} \n' - else: - repr += f'{k}: {v} \n' - return repr + '\n' diff --git a/cv/detection/co-detr/pytorch/mmdet/core/data_structures/instance_data.py b/cv/detection/co-detr/pytorch/mmdet/core/data_structures/instance_data.py deleted file mode 100644 index eef2065c831541f1eea723a54c93bb551f9d7579..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/data_structures/instance_data.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import itertools - -import numpy as np -import torch - -from .general_data import GeneralData - - -class InstanceData(GeneralData): - """Data structure for instance-level annnotations or predictions. - - Subclass of :class:`GeneralData`. All value in `data_fields` - should have the same length. This design refer to - https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/instances.py # noqa E501 - - Examples: - >>> from mmdet.core import InstanceData - >>> import numpy as np - >>> img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3)) - >>> results = InstanceData(img_meta) - >>> img_shape in results - True - >>> results.det_labels = torch.LongTensor([0, 1, 2, 3]) - >>> results["det_scores"] = torch.Tensor([0.01, 0.7, 0.6, 0.3]) - >>> results["det_masks"] = np.ndarray(4, 2, 2) - >>> len(results) - 4 - >>> print(resutls) - - >>> sorted_results = results[results.det_scores.sort().indices] - >>> sorted_results.det_scores - tensor([0.0100, 0.3000, 0.6000, 0.7000]) - >>> sorted_results.det_labels - tensor([0, 3, 2, 1]) - >>> print(results[results.scores > 0.5]) - - >>> results[results.det_scores > 0.5].det_labels - tensor([1, 2]) - >>> results[results.det_scores > 0.5].det_scores - tensor([0.7000, 0.6000]) - """ - - def __setattr__(self, name, value): - - if name in ('_meta_info_fields', '_data_fields'): - if not hasattr(self, name): - super().__setattr__(name, value) - else: - raise AttributeError( - f'{name} has been used as a ' - f'private attribute, which is immutable. ') - - else: - assert isinstance(value, (torch.Tensor, np.ndarray, list)), \ - f'Can set {type(value)}, only support' \ - f' {(torch.Tensor, np.ndarray, list)}' - - if self._data_fields: - assert len(value) == len(self), f'the length of ' \ - f'values {len(value)} is ' \ - f'not consistent with' \ - f' the length ' \ - f'of this :obj:`InstanceData` ' \ - f'{len(self)} ' - super().__setattr__(name, value) - - def __getitem__(self, item): - """ - Args: - item (str, obj:`slice`, - obj`torch.LongTensor`, obj:`torch.BoolTensor`): - get the corresponding values according to item. - - Returns: - obj:`InstanceData`: Corresponding values. - """ - assert len(self), ' This is a empty instance' - - assert isinstance( - item, (str, slice, int, torch.LongTensor, torch.BoolTensor)) - - if isinstance(item, str): - return getattr(self, item) - - if type(item) == int: - if item >= len(self) or item < -len(self): - raise IndexError(f'Index {item} out of range!') - else: - # keep the dimension - item = slice(item, None, len(self)) - - new_data = self.new() - if isinstance(item, (torch.Tensor)): - assert item.dim() == 1, 'Only support to get the' \ - ' values along the first dimension.' 
- if isinstance(item, torch.BoolTensor): - assert len(item) == len(self), f'The shape of the' \ - f' input(BoolTensor)) ' \ - f'{len(item)} ' \ - f' does not match the shape ' \ - f'of the indexed tensor ' \ - f'in results_filed ' \ - f'{len(self)} at ' \ - f'first dimension. ' - - for k, v in self.items(): - if isinstance(v, torch.Tensor): - new_data[k] = v[item] - elif isinstance(v, np.ndarray): - new_data[k] = v[item.cpu().numpy()] - elif isinstance(v, list): - r_list = [] - # convert to indexes from boolTensor - if isinstance(item, torch.BoolTensor): - indexes = torch.nonzero(item).view(-1) - else: - indexes = item - for index in indexes: - r_list.append(v[index]) - new_data[k] = r_list - else: - # item is a slice - for k, v in self.items(): - new_data[k] = v[item] - return new_data - - @staticmethod - def cat(instances_list): - """Concat the predictions of all :obj:`InstanceData` in the list. - - Args: - instances_list (list[:obj:`InstanceData`]): A list - of :obj:`InstanceData`. - - Returns: - obj:`InstanceData` - """ - assert all( - isinstance(results, InstanceData) for results in instances_list) - assert len(instances_list) > 0 - if len(instances_list) == 1: - return instances_list[0] - - new_data = instances_list[0].new() - for k in instances_list[0]._data_fields: - values = [results[k] for results in instances_list] - v0 = values[0] - if isinstance(v0, torch.Tensor): - values = torch.cat(values, dim=0) - elif isinstance(v0, np.ndarray): - values = np.concatenate(values, axis=0) - elif isinstance(v0, list): - values = list(itertools.chain(*values)) - else: - raise ValueError( - f'Can not concat the {k} which is a {type(v0)}') - new_data[k] = values - return new_data - - def __len__(self): - if len(self._data_fields): - for v in self.values(): - return len(v) - else: - raise AssertionError('This is an empty `InstanceData`.') diff --git a/cv/detection/co-detr/pytorch/mmdet/core/evaluation/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/evaluation/__init__.py deleted file mode 100644 index ef7c0ca5dc5c048125d59a0b2e917a2501d04d34..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/evaluation/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .class_names import (cityscapes_classes, coco_classes, dataset_aliases, - get_classes, imagenet_det_classes, - imagenet_vid_classes, oid_challenge_classes, - oid_v6_classes, voc_classes, DatasetEnum) -from .eval_hooks import DistEvalHook, EvalHook -from .mean_ap import average_precision, eval_map, print_map_summary -from .panoptic_utils import INSTANCE_OFFSET -from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, - print_recall_summary) - -__all__ = [ - 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', - 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes', - 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map', - 'print_map_summary', 'eval_recalls', 'print_recall_summary', - 'plot_num_recall', 'plot_iou_recall', 'oid_v6_classes', - 'oid_challenge_classes', 'INSTANCE_OFFSET', 'DatasetEnum', -] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/evaluation/bbox_overlaps.py b/cv/detection/co-detr/pytorch/mmdet/core/evaluation/bbox_overlaps.py deleted file mode 100644 index 5d6eb82fcfc8d5444dd2a13b7d95b978f8206a55..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/evaluation/bbox_overlaps.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
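A minimal usage sketch of the removed `InstanceData` class above: dict-style field assignment, boolean-mask indexing, and `cat`. It assumes an mmdet 2.x-style installation where `mmdet.core.InstanceData` is importable (the import path is the one given in the class's own docstring); the field names `det_scores` and `det_labels` are illustrative only.

```python
# Sketch of the removed InstanceData API (assumes mmdet 2.x-style
# `mmdet.core.InstanceData`; field names are illustrative, not prescribed).
import torch
from mmdet.core import InstanceData

img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3))
results = InstanceData(img_meta)

# All data fields must share the same first-dimension length.
results.det_scores = torch.tensor([0.01, 0.7, 0.6, 0.3])
results.det_labels = torch.LongTensor([0, 1, 2, 3])

# Boolean-mask indexing keeps only the selected instances in every field.
kept = results[results.det_scores > 0.5]
print(kept.det_labels)   # tensor([1, 2])

# `cat` concatenates the data fields of several InstanceData objects.
merged = InstanceData.cat([kept, kept])
print(len(merged))       # 4
```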
-import numpy as np - - -def bbox_overlaps(bboxes1, - bboxes2, - mode='iou', - eps=1e-6, - use_legacy_coordinate=False): - """Calculate the ious between each bbox of bboxes1 and bboxes2. - - Args: - bboxes1 (ndarray): Shape (n, 4) - bboxes2 (ndarray): Shape (k, 4) - mode (str): IOU (intersection over union) or IOF (intersection - over foreground) - use_legacy_coordinate (bool): Whether to use coordinate system in - mmdet v1.x. which means width, height should be - calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. - Note when function is used in `VOCDataset`, it should be - True to align with the official implementation - `http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar` - Default: False. - - Returns: - ious (ndarray): Shape (n, k) - """ - - assert mode in ['iou', 'iof'] - if not use_legacy_coordinate: - extra_length = 0. - else: - extra_length = 1. - bboxes1 = bboxes1.astype(np.float32) - bboxes2 = bboxes2.astype(np.float32) - rows = bboxes1.shape[0] - cols = bboxes2.shape[0] - ious = np.zeros((rows, cols), dtype=np.float32) - if rows * cols == 0: - return ious - exchange = False - if bboxes1.shape[0] > bboxes2.shape[0]: - bboxes1, bboxes2 = bboxes2, bboxes1 - ious = np.zeros((cols, rows), dtype=np.float32) - exchange = True - area1 = (bboxes1[:, 2] - bboxes1[:, 0] + extra_length) * ( - bboxes1[:, 3] - bboxes1[:, 1] + extra_length) - area2 = (bboxes2[:, 2] - bboxes2[:, 0] + extra_length) * ( - bboxes2[:, 3] - bboxes2[:, 1] + extra_length) - for i in range(bboxes1.shape[0]): - x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0]) - y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1]) - x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2]) - y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3]) - overlap = np.maximum(x_end - x_start + extra_length, 0) * np.maximum( - y_end - y_start + extra_length, 0) - if mode == 'iou': - union = area1[i] + area2 - overlap - else: - union = area1[i] if not exchange else area2 - union = np.maximum(union, eps) - ious[i, :] = overlap / union - if exchange: - ious = ious.T - return ious diff --git a/cv/detection/co-detr/pytorch/mmdet/core/evaluation/class_names.py b/cv/detection/co-detr/pytorch/mmdet/core/evaluation/class_names.py deleted file mode 100644 index f10b95f6e19a7f21dc7a732813c5707d53cb9443..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/evaluation/class_names.py +++ /dev/null @@ -1,594 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
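A small numeric sketch of the removed `bbox_overlaps` helper above. It assumes the module path shown in this diff (`mmdet/core/evaluation/bbox_overlaps.py`) is importable, as in upstream mmdet 2.x; the boxes are arbitrary illustrative values.

```python
# Sketch of the removed bbox_overlaps helper; boxes are illustrative values.
import numpy as np
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps

bboxes1 = np.array([[0, 0, 10, 10]], dtype=np.float32)
bboxes2 = np.array([[0, 0, 10, 10],
                    [5, 5, 15, 15]], dtype=np.float32)

ious = bbox_overlaps(bboxes1, bboxes2, mode='iou')
# Identical boxes -> IoU 1.0; the shifted box overlaps 25 / (100 + 100 - 25).
print(ious)  # approx [[1.0, 0.1429]]
```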
-import mmcv -from enum import Enum - -class DatasetEnum(Enum): - VOC = 'voc' - IMAGENET_DET = 'imagenet_det' - IMAGENET_VID = 'imagenet_vid' - COCO = 'coco' - LVIS = 'lvis' - WIDER_FACE = 'wider_face' - CITYSCAPES = 'cityscapes' - OID_CHALLENGE = 'oid_challenge' - OID_V6 = 'oid_v6' - -def wider_face_classes(): - return ['face'] - - -def voc_classes(): - return [ - 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', - 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', - 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' - ] - - -def imagenet_det_classes(): - return [ - 'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', - 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', - 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', - 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', - 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito', - 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle', - 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker', - 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew', - 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', - 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly', - 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig', - 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog', - 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart', - 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', - 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim', - 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse', - 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle', - 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', - 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can', - 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace', - 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume', - 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', - 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine', - 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse', - 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator', - 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler', - 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver', - 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', - 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', - 'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer', - 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine', - 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', - 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet', - 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin', - 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', - 'whale', 'wine_bottle', 'zebra' - ] - - -def imagenet_vid_classes(): - return [ - 'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car', - 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda', - 'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit', - 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle', - 'watercraft', 'whale', 'zebra' - ] - - -def coco_classes(): - return [ - 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', - 
'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', - 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', - 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', - 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', - 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard', - 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', - 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', - 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair', - 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', - 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', - 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', - 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush' - ] - - -def cityscapes_classes(): - return [ - 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', - 'bicycle' - ] - - -def oid_challenge_classes(): - return [ - 'Footwear', 'Jeans', 'House', 'Tree', 'Woman', 'Man', 'Land vehicle', - 'Person', 'Wheel', 'Bus', 'Human face', 'Bird', 'Dress', 'Girl', - 'Vehicle', 'Building', 'Cat', 'Car', 'Belt', 'Elephant', 'Dessert', - 'Butterfly', 'Train', 'Guitar', 'Poster', 'Book', 'Boy', 'Bee', - 'Flower', 'Window', 'Hat', 'Human head', 'Dog', 'Human arm', 'Drink', - 'Human mouth', 'Human hair', 'Human nose', 'Human hand', 'Table', - 'Marine invertebrates', 'Fish', 'Sculpture', 'Rose', 'Street light', - 'Glasses', 'Fountain', 'Skyscraper', 'Swimwear', 'Brassiere', 'Drum', - 'Duck', 'Countertop', 'Furniture', 'Ball', 'Human leg', 'Boat', - 'Balloon', 'Bicycle helmet', 'Goggles', 'Door', 'Human eye', 'Shirt', - 'Toy', 'Teddy bear', 'Pasta', 'Tomato', 'Human ear', - 'Vehicle registration plate', 'Microphone', 'Musical keyboard', - 'Tower', 'Houseplant', 'Flowerpot', 'Fruit', 'Vegetable', - 'Musical instrument', 'Suit', 'Motorcycle', 'Bagel', 'French fries', - 'Hamburger', 'Chair', 'Salt and pepper shakers', 'Snail', 'Airplane', - 'Horse', 'Laptop', 'Computer keyboard', 'Football helmet', 'Cocktail', - 'Juice', 'Tie', 'Computer monitor', 'Human beard', 'Bottle', - 'Saxophone', 'Lemon', 'Mouse', 'Sock', 'Cowboy hat', 'Sun hat', - 'Football', 'Porch', 'Sunglasses', 'Lobster', 'Crab', 'Picture frame', - 'Van', 'Crocodile', 'Surfboard', 'Shorts', 'Helicopter', 'Helmet', - 'Sports uniform', 'Taxi', 'Swan', 'Goose', 'Coat', 'Jacket', 'Handbag', - 'Flag', 'Skateboard', 'Television', 'Tire', 'Spoon', 'Palm tree', - 'Stairs', 'Salad', 'Castle', 'Oven', 'Microwave oven', 'Wine', - 'Ceiling fan', 'Mechanical fan', 'Cattle', 'Truck', 'Box', 'Ambulance', - 'Desk', 'Wine glass', 'Reptile', 'Tank', 'Traffic light', 'Billboard', - 'Tent', 'Insect', 'Spider', 'Treadmill', 'Cupboard', 'Shelf', - 'Seat belt', 'Human foot', 'Bicycle', 'Bicycle wheel', 'Couch', - 'Bookcase', 'Fedora', 'Backpack', 'Bench', 'Oyster', - 'Moths and butterflies', 'Lavender', 'Waffle', 'Fork', 'Animal', - 'Accordion', 'Mobile phone', 'Plate', 'Coffee cup', 'Saucer', - 'Platter', 'Dagger', 'Knife', 'Bull', 'Tortoise', 'Sea turtle', 'Deer', - 'Weapon', 'Apple', 'Ski', 'Taco', 'Traffic sign', 'Beer', 'Necklace', - 'Sunflower', 'Piano', 'Organ', 'Harpsichord', 'Bed', 'Cabinetry', - 'Nightstand', 'Curtain', 'Chest of drawers', 'Drawer', 'Parrot', - 'Sandal', 'High heels', 'Tableware', 'Cart', 'Mushroom', 'Kite', - 'Missile', 'Seafood', 'Camera', 'Paper towel', 'Toilet paper', - 'Sombrero', 'Radish', 'Lighthouse', 'Segway', 'Pig', 'Watercraft', - 'Golf cart', 'studio couch', 'Dolphin', 'Whale', 
'Earrings', 'Otter', - 'Sea lion', 'Whiteboard', 'Monkey', 'Gondola', 'Zebra', - 'Baseball glove', 'Scarf', 'Adhesive tape', 'Trousers', 'Scoreboard', - 'Lily', 'Carnivore', 'Power plugs and sockets', 'Office building', - 'Sandwich', 'Swimming pool', 'Headphones', 'Tin can', 'Crown', 'Doll', - 'Cake', 'Frog', 'Beetle', 'Ant', 'Gas stove', 'Canoe', 'Falcon', - 'Blue jay', 'Egg', 'Fire hydrant', 'Raccoon', 'Muffin', 'Wall clock', - 'Coffee', 'Mug', 'Tea', 'Bear', 'Waste container', 'Home appliance', - 'Candle', 'Lion', 'Mirror', 'Starfish', 'Marine mammal', 'Wheelchair', - 'Umbrella', 'Alpaca', 'Violin', 'Cello', 'Brown bear', 'Canary', 'Bat', - 'Ruler', 'Plastic bag', 'Penguin', 'Watermelon', 'Harbor seal', 'Pen', - 'Pumpkin', 'Harp', 'Kitchen appliance', 'Roller skates', 'Bust', - 'Coffee table', 'Tennis ball', 'Tennis racket', 'Ladder', 'Boot', - 'Bowl', 'Stop sign', 'Volleyball', 'Eagle', 'Paddle', 'Chicken', - 'Skull', 'Lamp', 'Beehive', 'Maple', 'Sink', 'Goldfish', 'Tripod', - 'Coconut', 'Bidet', 'Tap', 'Bathroom cabinet', 'Toilet', - 'Filing cabinet', 'Pretzel', 'Table tennis racket', 'Bronze sculpture', - 'Rocket', 'Mouse', 'Hamster', 'Lizard', 'Lifejacket', 'Goat', - 'Washing machine', 'Trumpet', 'Horn', 'Trombone', 'Sheep', - 'Tablet computer', 'Pillow', 'Kitchen & dining room table', - 'Parachute', 'Raven', 'Glove', 'Loveseat', 'Christmas tree', - 'Shellfish', 'Rifle', 'Shotgun', 'Sushi', 'Sparrow', 'Bread', - 'Toaster', 'Watch', 'Asparagus', 'Artichoke', 'Suitcase', 'Antelope', - 'Broccoli', 'Ice cream', 'Racket', 'Banana', 'Cookie', 'Cucumber', - 'Dragonfly', 'Lynx', 'Caterpillar', 'Light bulb', 'Office supplies', - 'Miniskirt', 'Skirt', 'Fireplace', 'Potato', 'Light switch', - 'Croissant', 'Cabbage', 'Ladybug', 'Handgun', 'Luggage and bags', - 'Window blind', 'Snowboard', 'Baseball bat', 'Digital clock', - 'Serving tray', 'Infant bed', 'Sofa bed', 'Guacamole', 'Fox', 'Pizza', - 'Snowplow', 'Jet ski', 'Refrigerator', 'Lantern', 'Convenience store', - 'Sword', 'Rugby ball', 'Owl', 'Ostrich', 'Pancake', 'Strawberry', - 'Carrot', 'Tart', 'Dice', 'Turkey', 'Rabbit', 'Invertebrate', 'Vase', - 'Stool', 'Swim cap', 'Shower', 'Clock', 'Jellyfish', 'Aircraft', - 'Chopsticks', 'Orange', 'Snake', 'Sewing machine', 'Kangaroo', 'Mixer', - 'Food processor', 'Shrimp', 'Towel', 'Porcupine', 'Jaguar', 'Cannon', - 'Limousine', 'Mule', 'Squirrel', 'Kitchen knife', 'Tiara', 'Tiger', - 'Bow and arrow', 'Candy', 'Rhinoceros', 'Shark', 'Cricket ball', - 'Doughnut', 'Plumbing fixture', 'Camel', 'Polar bear', 'Coin', - 'Printer', 'Blender', 'Giraffe', 'Billiard table', 'Kettle', - 'Dinosaur', 'Pineapple', 'Zucchini', 'Jug', 'Barge', 'Teapot', - 'Golf ball', 'Binoculars', 'Scissors', 'Hot dog', 'Door handle', - 'Seahorse', 'Bathtub', 'Leopard', 'Centipede', 'Grapefruit', 'Snowman', - 'Cheetah', 'Alarm clock', 'Grape', 'Wrench', 'Wok', 'Bell pepper', - 'Cake stand', 'Barrel', 'Woodpecker', 'Flute', 'Corded phone', - 'Willow', 'Punching bag', 'Pomegranate', 'Telephone', 'Pear', - 'Common fig', 'Bench', 'Wood-burning stove', 'Burrito', 'Nail', - 'Turtle', 'Submarine sandwich', 'Drinking straw', 'Peach', 'Popcorn', - 'Frying pan', 'Picnic basket', 'Honeycomb', 'Envelope', 'Mango', - 'Cutting board', 'Pitcher', 'Stationary bicycle', 'Dumbbell', - 'Personal care', 'Dog bed', 'Snowmobile', 'Oboe', 'Briefcase', - 'Squash', 'Tick', 'Slow cooker', 'Coffeemaker', 'Measuring cup', - 'Crutch', 'Stretcher', 'Screwdriver', 'Flashlight', 'Spatula', - 'Pressure cooker', 'Ring binder', 'Beaker', 'Torch', 'Winter melon' - 
] - -def lvis_classes(): - return [ - 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol', - 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna', - 'apple', 'applesauce', 'apricot', 'apron', 'aquarium', - 'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor', - 'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer', - 'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy', - 'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel', - 'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon', - 'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo', - 'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow', - 'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap', - 'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)', - 'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)', - 'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie', - 'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper', - 'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt', - 'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor', - 'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath', - 'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card', - 'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket', - 'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry', - 'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg', - 'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase', - 'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle', - 'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)', - 'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'box', - 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere', - 'bread-bin', 'bread', 'breechcloth', 'bridal_gown', 'briefcase', - 'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts', - 'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer', - 'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn', - 'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card', - 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', - 'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf', - 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)', - 'can', 'can_opener', 'candle', 'candle_holder', 'candy_bar', - 'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup', - 'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino', - 'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car', - 'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship', - 'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton', - 'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower', - 'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone', - 'chain_mail', 'chair', 'chaise_longue', 'chalice', 'chandelier', - 'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard', - 'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime', - 'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar', - 'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker', - 'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider', - 'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet', - 'clasp', 'cleansing_agent', 'cleat_(for_securing_rope)', 'clementine', - 'clip', 'clipboard', 'clippers_(for_plants)', 
'cloak', 'clock', - 'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', - 'coat', 'coat_hanger', 'coatrack', 'cock', 'cockroach', - 'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table', - 'coffeepot', 'coil', 'coin', 'colander', 'coleslaw', - 'coloring_material', 'combination_lock', 'pacifier', 'comic_book', - 'compass', 'computer_keyboard', 'condiment', 'cone', 'control', - 'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie', - 'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)', - 'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet', - 'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall', - 'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker', - 'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib', - 'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown', - 'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch', - 'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup', - 'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain', - 'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard', - 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk', - 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux', - 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher', - 'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup', - 'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin', - 'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly', - 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit', - 'dresser', 'drill', 'drone', 'dropper', 'drum_(musical_instrument)', - 'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell', - 'dumpster', 'dustpan', 'eagle', 'earphone', 'earplug', 'earring', - 'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater', - 'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk', - 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan', - 'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)', - 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm', - 'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace', - 'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl', - 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap', - 'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)', - 'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal', - 'folding_chair', 'food_processor', 'football_(American)', - 'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car', - 'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice', - 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage', - 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic', - 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator', - 'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture', - 'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles', - 'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose', - 'gorilla', 'gourd', 'grape', 'grater', 'gravestone', 'gravy_boat', - 'green_bean', 'green_onion', 'griddle', 'grill', 'grits', 'grizzly', - 'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet', - 'hairpin', 'halter_top', 'ham', 'hamburger', 'hammer', 'hammock', - 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel', - 'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw', - 'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband', - 
'headboard', 'headlight', 'headscarf', 'headset', - 'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet', - 'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog', - 'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah', - 'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce', - 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear', - 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate', - 'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board', - 'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey', - 'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak', - 'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono', - 'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit', - 'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)', - 'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)', - 'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard', - 'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather', - 'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade', 'lettuce', - 'license_plate', 'life_buoy', 'life_jacket', 'lightbulb', - 'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor', - 'lizard', 'log', 'lollipop', 'speaker_(stereo_equipment)', 'loveseat', - 'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)', - 'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange', 'manger', - 'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato', - 'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox', - 'mattress', 'measuring_cup', 'measuring_stick', 'meatball', 'medicine', - 'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone', - 'milk', 'milk_can', 'milkshake', 'minivan', 'mint_candy', 'mirror', - 'mitten', 'mixer_(kitchen_tool)', 'money', - 'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor', - 'motor_scooter', 'motor_vehicle', 'motorcycle', 'mound_(baseball)', - 'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom', - 'music_stool', 'musical_instrument', 'nailfile', 'napkin', - 'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newspaper', - 'newsstand', 'nightshirt', 'nosebag_(for_animals)', - 'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker', - 'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil', - 'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'ostrich', - 'ottoman', 'oven', 'overalls_(clothing)', 'owl', 'packet', 'inkpad', - 'pad', 'paddle', 'padlock', 'paintbrush', 'painting', 'pajamas', - 'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', - 'pantyhose', 'papaya', 'paper_plate', 'paper_towel', 'paperback_book', - 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parasol', - 'parchment', 'parka', 'parking_meter', 'parrot', - 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport', - 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter', - 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg', - 'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box', - 'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)', - 'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet', - 'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano', - 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow', - 'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball', - 'pinwheel', 'tobacco_pipe', 'pipe', 
'pistol', 'pita_(bread)', - 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat', - 'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)', - 'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)', - 'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)', - 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato', - 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'pretzel', - 'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune', - 'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', - 'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', - 'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish', - 'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat', - 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt', - 'recliner', 'record_player', 'reflector', 'remote_control', - 'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map', - 'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade', - 'rolling_pin', 'root_beer', 'router_(computer_equipment)', - 'rubber_band', 'runner_(carpet)', 'plastic_bag', - 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin', - 'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)', - 'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)', - 'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse', - 'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf', - 'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver', - 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane', - 'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark', - 'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl', - 'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt', - 'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass', - 'shoulder_bag', 'shovel', 'shower_head', 'shower_cap', - 'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink', - 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole', - 'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)', - 'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman', - 'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball', - 'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon', - 'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)', - 'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish', - 'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)', - 'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish', - 'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel', - 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer', - 'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer', - 'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign', - 'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl', - 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses', - 'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband', - 'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword', - 'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table', - 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight', - 'tambourine', 'army_tank', 'tank_(storage_vessel)', - 'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure', - 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup', - 
'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth', - 'telephone_pole', 'telephoto_lens', 'television_camera', - 'television_set', 'tennis_ball', 'tennis_racket', 'tequila', - 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread', - 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil', - 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven', - 'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush', - 'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel', - 'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light', - 'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', - 'tray', 'trench_coat', 'triangle_(musical_instrument)', 'tricycle', - 'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat', - 'turban', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)', - 'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn', - 'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest', - 'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture', - 'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick', - 'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe', - 'washbasin', 'automatic_washer', 'watch', 'water_bottle', - 'water_cooler', 'water_faucet', 'water_heater', 'water_jug', - 'water_gun', 'water_scooter', 'water_ski', 'water_tower', - 'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake', - 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream', - 'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)', - 'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket', - 'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon', - 'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt', - 'yoke_(animal_equipment)', 'zebra', 'zucchini' - ] -def oid_v6_classes(): - return [ - 'Tortoise', 'Container', 'Magpie', 'Sea turtle', 'Football', - 'Ambulance', 'Ladder', 'Toothbrush', 'Syringe', 'Sink', 'Toy', - 'Organ (Musical Instrument)', 'Cassette deck', 'Apple', 'Human eye', - 'Cosmetics', 'Paddle', 'Snowman', 'Beer', 'Chopsticks', 'Human beard', - 'Bird', 'Parking meter', 'Traffic light', 'Croissant', 'Cucumber', - 'Radish', 'Towel', 'Doll', 'Skull', 'Washing machine', 'Glove', 'Tick', - 'Belt', 'Sunglasses', 'Banjo', 'Cart', 'Ball', 'Backpack', 'Bicycle', - 'Home appliance', 'Centipede', 'Boat', 'Surfboard', 'Boot', - 'Headphones', 'Hot dog', 'Shorts', 'Fast food', 'Bus', 'Boy', - 'Screwdriver', 'Bicycle wheel', 'Barge', 'Laptop', 'Miniskirt', - 'Drill (Tool)', 'Dress', 'Bear', 'Waffle', 'Pancake', 'Brown bear', - 'Woodpecker', 'Blue jay', 'Pretzel', 'Bagel', 'Tower', 'Teapot', - 'Person', 'Bow and arrow', 'Swimwear', 'Beehive', 'Brassiere', 'Bee', - 'Bat (Animal)', 'Starfish', 'Popcorn', 'Burrito', 'Chainsaw', - 'Balloon', 'Wrench', 'Tent', 'Vehicle registration plate', 'Lantern', - 'Toaster', 'Flashlight', 'Billboard', 'Tiara', 'Limousine', 'Necklace', - 'Carnivore', 'Scissors', 'Stairs', 'Computer keyboard', 'Printer', - 'Traffic sign', 'Chair', 'Shirt', 'Poster', 'Cheese', 'Sock', - 'Fire hydrant', 'Land vehicle', 'Earrings', 'Tie', 'Watercraft', - 'Cabinetry', 'Suitcase', 'Muffin', 'Bidet', 'Snack', 'Snowmobile', - 'Clock', 'Medical equipment', 'Cattle', 'Cello', 'Jet ski', 'Camel', - 'Coat', 'Suit', 'Desk', 'Cat', 'Bronze sculpture', 'Juice', 'Gondola', - 'Beetle', 'Cannon', 'Computer mouse', 'Cookie', 'Office building', - 'Fountain', 'Coin', 'Calculator', 
'Cocktail', 'Computer monitor', - 'Box', 'Stapler', 'Christmas tree', 'Cowboy hat', 'Hiking equipment', - 'Studio couch', 'Drum', 'Dessert', 'Wine rack', 'Drink', 'Zucchini', - 'Ladle', 'Human mouth', 'Dairy Product', 'Dice', 'Oven', 'Dinosaur', - 'Ratchet (Device)', 'Couch', 'Cricket ball', 'Winter melon', 'Spatula', - 'Whiteboard', 'Pencil sharpener', 'Door', 'Hat', 'Shower', 'Eraser', - 'Fedora', 'Guacamole', 'Dagger', 'Scarf', 'Dolphin', 'Sombrero', - 'Tin can', 'Mug', 'Tap', 'Harbor seal', 'Stretcher', 'Can opener', - 'Goggles', 'Human body', 'Roller skates', 'Coffee cup', - 'Cutting board', 'Blender', 'Plumbing fixture', 'Stop sign', - 'Office supplies', 'Volleyball (Ball)', 'Vase', 'Slow cooker', - 'Wardrobe', 'Coffee', 'Whisk', 'Paper towel', 'Personal care', 'Food', - 'Sun hat', 'Tree house', 'Flying disc', 'Skirt', 'Gas stove', - 'Salt and pepper shakers', 'Mechanical fan', 'Face powder', 'Fax', - 'Fruit', 'French fries', 'Nightstand', 'Barrel', 'Kite', 'Tart', - 'Treadmill', 'Fox', 'Flag', 'French horn', 'Window blind', - 'Human foot', 'Golf cart', 'Jacket', 'Egg (Food)', 'Street light', - 'Guitar', 'Pillow', 'Human leg', 'Isopod', 'Grape', 'Human ear', - 'Power plugs and sockets', 'Panda', 'Giraffe', 'Woman', 'Door handle', - 'Rhinoceros', 'Bathtub', 'Goldfish', 'Houseplant', 'Goat', - 'Baseball bat', 'Baseball glove', 'Mixing bowl', - 'Marine invertebrates', 'Kitchen utensil', 'Light switch', 'House', - 'Horse', 'Stationary bicycle', 'Hammer', 'Ceiling fan', 'Sofa bed', - 'Adhesive tape', 'Harp', 'Sandal', 'Bicycle helmet', 'Saucer', - 'Harpsichord', 'Human hair', 'Heater', 'Harmonica', 'Hamster', - 'Curtain', 'Bed', 'Kettle', 'Fireplace', 'Scale', 'Drinking straw', - 'Insect', 'Hair dryer', 'Kitchenware', 'Indoor rower', 'Invertebrate', - 'Food processor', 'Bookcase', 'Refrigerator', 'Wood-burning stove', - 'Punching bag', 'Common fig', 'Cocktail shaker', 'Jaguar (Animal)', - 'Golf ball', 'Fashion accessory', 'Alarm clock', 'Filing cabinet', - 'Artichoke', 'Table', 'Tableware', 'Kangaroo', 'Koala', 'Knife', - 'Bottle', 'Bottle opener', 'Lynx', 'Lavender (Plant)', 'Lighthouse', - 'Dumbbell', 'Human head', 'Bowl', 'Humidifier', 'Porch', 'Lizard', - 'Billiard table', 'Mammal', 'Mouse', 'Motorcycle', - 'Musical instrument', 'Swim cap', 'Frying pan', 'Snowplow', - 'Bathroom cabinet', 'Missile', 'Bust', 'Man', 'Waffle iron', 'Milk', - 'Ring binder', 'Plate', 'Mobile phone', 'Baked goods', 'Mushroom', - 'Crutch', 'Pitcher (Container)', 'Mirror', 'Personal flotation device', - 'Table tennis racket', 'Pencil case', 'Musical keyboard', 'Scoreboard', - 'Briefcase', 'Kitchen knife', 'Nail (Construction)', 'Tennis ball', - 'Plastic bag', 'Oboe', 'Chest of drawers', 'Ostrich', 'Piano', 'Girl', - 'Plant', 'Potato', 'Hair spray', 'Sports equipment', 'Pasta', - 'Penguin', 'Pumpkin', 'Pear', 'Infant bed', 'Polar bear', 'Mixer', - 'Cupboard', 'Jacuzzi', 'Pizza', 'Digital clock', 'Pig', 'Reptile', - 'Rifle', 'Lipstick', 'Skateboard', 'Raven', 'High heels', 'Red panda', - 'Rose', 'Rabbit', 'Sculpture', 'Saxophone', 'Shotgun', 'Seafood', - 'Submarine sandwich', 'Snowboard', 'Sword', 'Picture frame', 'Sushi', - 'Loveseat', 'Ski', 'Squirrel', 'Tripod', 'Stethoscope', 'Submarine', - 'Scorpion', 'Segway', 'Training bench', 'Snake', 'Coffee table', - 'Skyscraper', 'Sheep', 'Television', 'Trombone', 'Tea', 'Tank', 'Taco', - 'Telephone', 'Torch', 'Tiger', 'Strawberry', 'Trumpet', 'Tree', - 'Tomato', 'Train', 'Tool', 'Picnic basket', 'Cooking spray', - 'Trousers', 'Bowling equipment', 'Football helmet', 
'Truck', - 'Measuring cup', 'Coffeemaker', 'Violin', 'Vehicle', 'Handbag', - 'Paper cutter', 'Wine', 'Weapon', 'Wheel', 'Worm', 'Wok', 'Whale', - 'Zebra', 'Auto part', 'Jug', 'Pizza cutter', 'Cream', 'Monkey', 'Lion', - 'Bread', 'Platter', 'Chicken', 'Eagle', 'Helicopter', 'Owl', 'Duck', - 'Turtle', 'Hippopotamus', 'Crocodile', 'Toilet', 'Toilet paper', - 'Squid', 'Clothing', 'Footwear', 'Lemon', 'Spider', 'Deer', 'Frog', - 'Banana', 'Rocket', 'Wine glass', 'Countertop', 'Tablet computer', - 'Waste container', 'Swimming pool', 'Dog', 'Book', 'Elephant', 'Shark', - 'Candle', 'Leopard', 'Axe', 'Hand dryer', 'Soap dispenser', - 'Porcupine', 'Flower', 'Canary', 'Cheetah', 'Palm tree', 'Hamburger', - 'Maple', 'Building', 'Fish', 'Lobster', 'Garden Asparagus', - 'Furniture', 'Hedgehog', 'Airplane', 'Spoon', 'Otter', 'Bull', - 'Oyster', 'Horizontal bar', 'Convenience store', 'Bomb', 'Bench', - 'Ice cream', 'Caterpillar', 'Butterfly', 'Parachute', 'Orange', - 'Antelope', 'Beaker', 'Moths and butterflies', 'Window', 'Closet', - 'Castle', 'Jellyfish', 'Goose', 'Mule', 'Swan', 'Peach', 'Coconut', - 'Seat belt', 'Raccoon', 'Chisel', 'Fork', 'Lamp', 'Camera', - 'Squash (Plant)', 'Racket', 'Human face', 'Human arm', 'Vegetable', - 'Diaper', 'Unicycle', 'Falcon', 'Chime', 'Snail', 'Shellfish', - 'Cabbage', 'Carrot', 'Mango', 'Jeans', 'Flowerpot', 'Pineapple', - 'Drawer', 'Stool', 'Envelope', 'Cake', 'Dragonfly', 'Common sunflower', - 'Microwave oven', 'Honeycomb', 'Marine mammal', 'Sea lion', 'Ladybug', - 'Shelf', 'Watch', 'Candy', 'Salad', 'Parrot', 'Handgun', 'Sparrow', - 'Van', 'Grinder', 'Spice rack', 'Light bulb', 'Corded phone', - 'Sports uniform', 'Tennis racket', 'Wall clock', 'Serving tray', - 'Kitchen & dining room table', 'Dog bed', 'Cake stand', - 'Cat furniture', 'Bathroom accessory', 'Facial tissue holder', - 'Pressure cooker', 'Kitchen appliance', 'Tire', 'Ruler', - 'Luggage and bags', 'Microphone', 'Broccoli', 'Umbrella', 'Pastry', - 'Grapefruit', 'Band-aid', 'Animal', 'Bell pepper', 'Turkey', 'Lily', - 'Pomegranate', 'Doughnut', 'Glasses', 'Human nose', 'Pen', 'Ant', - 'Car', 'Aircraft', 'Human hand', 'Skunk', 'Teddy bear', 'Watermelon', - 'Cantaloupe', 'Dishwasher', 'Flute', 'Balance beam', 'Sandwich', - 'Shrimp', 'Sewing machine', 'Binoculars', 'Rays and skates', 'Ipod', - 'Accordion', 'Willow', 'Crab', 'Crown', 'Seahorse', 'Perfume', - 'Alpaca', 'Taxi', 'Canoe', 'Remote control', 'Wheelchair', - 'Rugby ball', 'Armadillo', 'Maracas', 'Helmet' - ] - - -dataset_aliases = { - DatasetEnum.VOC: ['voc', 'pascal_voc', 'voc07', 'voc12'], - DatasetEnum.IMAGENET_DET: ['det', 'imagenet_det', 'ilsvrc_det'], - DatasetEnum.IMAGENET_VID: ['vid', 'imagenet_vid', 'ilsvrc_vid'], - DatasetEnum.COCO: ['coco', 'mscoco', 'ms_coco'], - DatasetEnum.LVIS: ['lvis'], - DatasetEnum.WIDER_FACE: ['WIDERFaceDataset', 'wider_face', 'WIDERFace'], - DatasetEnum.CITYSCAPES: ['cityscapes'], - DatasetEnum.OID_CHALLENGE: ['oid_challenge', 'openimages_challenge'], - DatasetEnum.OID_V6: ['oid_v6', 'openimages_v6'] -} - -# def get_classes(dataset): -# """Get class names of a dataset.""" -# alias2name = {} -# for name, aliases in dataset_aliases.items(): -# for alias in aliases: -# alias2name[alias] = name - -# if mmcv.is_str(dataset): -# if dataset in alias2name: -# labels = eval(alias2name[dataset] + '_classes()') -# else: -# raise ValueError(f'Unrecognized dataset: {dataset}') -# else: -# raise TypeError(f'dataset must a str, but got {type(dataset)}') -# return labels - -def get_classes(dataset): - """Get class names of a 
dataset.""" - if isinstance(dataset, DatasetEnum): - if dataset in dataset_aliases: - labels = eval(dataset_aliases[dataset][0] + '_classes()') - else: - raise ValueError(f'Unrecognized dataset: {dataset}') - else: - raise TypeError(f'dataset must be a DatasetEnum member, but got {type(dataset)}') - return labels \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/mmdet/core/evaluation/eval_hooks.py b/cv/detection/co-detr/pytorch/mmdet/core/evaluation/eval_hooks.py deleted file mode 100644 index 98856c18ce65625fa1ac68beee3a1ea584ffec9d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/evaluation/eval_hooks.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import bisect -import os.path as osp - -import mmcv -import torch.distributed as dist -from mmcv.runner import DistEvalHook as BaseDistEvalHook -from mmcv.runner import EvalHook as BaseEvalHook -from torch.nn.modules.batchnorm import _BatchNorm - - -def _calc_dynamic_intervals(start_interval, dynamic_interval_list): - assert mmcv.is_list_of(dynamic_interval_list, tuple) - - dynamic_milestones = [0] - dynamic_milestones.extend( - [dynamic_interval[0] for dynamic_interval in dynamic_interval_list]) - dynamic_intervals = [start_interval] - dynamic_intervals.extend( - [dynamic_interval[1] for dynamic_interval in dynamic_interval_list]) - return dynamic_milestones, dynamic_intervals - - -class EvalHook(BaseEvalHook): - - def __init__(self, *args, dynamic_intervals=None, **kwargs): - super(EvalHook, self).__init__(*args, **kwargs) - self.latest_results = None - - self.use_dynamic_intervals = dynamic_intervals is not None - if self.use_dynamic_intervals: - self.dynamic_milestones, self.dynamic_intervals = \ - _calc_dynamic_intervals(self.interval, dynamic_intervals) - - def _decide_interval(self, runner): - if self.use_dynamic_intervals: - progress = runner.epoch if self.by_epoch else runner.iter - step = bisect.bisect(self.dynamic_milestones, (progress + 1)) - # Dynamically modify the evaluation interval - self.interval = self.dynamic_intervals[step - 1] - - def before_train_epoch(self, runner): - """Evaluate the model only at the start of training by epoch.""" - self._decide_interval(runner) - super().before_train_epoch(runner) - - def before_train_iter(self, runner): - self._decide_interval(runner) - super().before_train_iter(runner) - - def _do_evaluate(self, runner): - """perform evaluation and save ckpt.""" - if not self._should_evaluate(runner): - return - - from mmdet.apis import single_gpu_test - - # Changed results to self.results so that MMDetWandbHook can access - # the evaluation results and log them to wandb. - results = single_gpu_test(runner.model, self.dataloader, show=False) - self.latest_results = results - runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) - key_score = self.evaluate(runner, results) - # the key_score may be `None` so it needs to skip the action to save - # the best checkpoint - if self.save_best and key_score: - self._save_ckpt(runner, key_score) - - -# Note: Considering that MMCV's EvalHook updated its interface in V1.3.16, -# in order to avoid strong version dependency, we did not directly -# inherit EvalHook but BaseDistEvalHook. 
-class DistEvalHook(BaseDistEvalHook): - - def __init__(self, *args, dynamic_intervals=None, **kwargs): - super(DistEvalHook, self).__init__(*args, **kwargs) - self.latest_results = None - - self.use_dynamic_intervals = dynamic_intervals is not None - if self.use_dynamic_intervals: - self.dynamic_milestones, self.dynamic_intervals = \ - _calc_dynamic_intervals(self.interval, dynamic_intervals) - - def _decide_interval(self, runner): - if self.use_dynamic_intervals: - progress = runner.epoch if self.by_epoch else runner.iter - step = bisect.bisect(self.dynamic_milestones, (progress + 1)) - # Dynamically modify the evaluation interval - self.interval = self.dynamic_intervals[step - 1] - - def before_train_epoch(self, runner): - """Evaluate the model only at the start of training by epoch.""" - self._decide_interval(runner) - super().before_train_epoch(runner) - - def before_train_iter(self, runner): - self._decide_interval(runner) - super().before_train_iter(runner) - - def _do_evaluate(self, runner): - """perform evaluation and save ckpt.""" - # Synchronization of BatchNorm's buffer (running_mean - # and running_var) is not supported in the DDP of pytorch, - # which may cause the inconsistent performance of models in - # different ranks, so we broadcast BatchNorm's buffers - # of rank 0 to other ranks to avoid this. - if self.broadcast_bn_buffer: - model = runner.model - for name, module in model.named_modules(): - if isinstance(module, - _BatchNorm) and module.track_running_stats: - dist.broadcast(module.running_var, 0) - dist.broadcast(module.running_mean, 0) - - if not self._should_evaluate(runner): - return - - tmpdir = self.tmpdir - if tmpdir is None: - tmpdir = osp.join(runner.work_dir, '.eval_hook') - - from mmdet.apis import multi_gpu_test - - # Changed results to self.results so that MMDetWandbHook can access - # the evaluation results and log them to wandb. - results = multi_gpu_test( - runner.model, - self.dataloader, - tmpdir=tmpdir, - gpu_collect=self.gpu_collect) - self.latest_results = results - if runner.rank == 0: - print('\n') - runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) - key_score = self.evaluate(runner, results) - - # the key_score may be `None` so it needs to skip - # the action to save the best checkpoint - if self.save_best and key_score: - self._save_ckpt(runner, key_score) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/evaluation/mean_ap.py b/cv/detection/co-detr/pytorch/mmdet/core/evaluation/mean_ap.py deleted file mode 100644 index a293b80f0fadd33ae6ba703b0aee759f569b31ca..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/evaluation/mean_ap.py +++ /dev/null @@ -1,782 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from multiprocessing import Pool - -import mmcv -import numpy as np -from mmcv.utils import print_log -from terminaltables import AsciiTable - -from .bbox_overlaps import bbox_overlaps -from .class_names import get_classes - - -def average_precision(recalls, precisions, mode='area'): - """Calculate average precision (for single or multiple scales). 
- - Args: - recalls (ndarray): shape (num_scales, num_dets) or (num_dets, ) - precisions (ndarray): shape (num_scales, num_dets) or (num_dets, ) - mode (str): 'area' or '11points', 'area' means calculating the area - under precision-recall curve, '11points' means calculating - the average precision of recalls at [0, 0.1, ..., 1] - - Returns: - float or ndarray: calculated average precision - """ - no_scale = False - if recalls.ndim == 1: - no_scale = True - recalls = recalls[np.newaxis, :] - precisions = precisions[np.newaxis, :] - assert recalls.shape == precisions.shape and recalls.ndim == 2 - num_scales = recalls.shape[0] - ap = np.zeros(num_scales, dtype=np.float32) - if mode == 'area': - zeros = np.zeros((num_scales, 1), dtype=recalls.dtype) - ones = np.ones((num_scales, 1), dtype=recalls.dtype) - mrec = np.hstack((zeros, recalls, ones)) - mpre = np.hstack((zeros, precisions, zeros)) - for i in range(mpre.shape[1] - 1, 0, -1): - mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i]) - for i in range(num_scales): - ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0] - ap[i] = np.sum( - (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1]) - elif mode == '11points': - for i in range(num_scales): - for thr in np.arange(0, 1 + 1e-3, 0.1): - precs = precisions[i, recalls[i, :] >= thr] - prec = precs.max() if precs.size > 0 else 0 - ap[i] += prec - ap /= 11 - else: - raise ValueError( - 'Unrecognized mode, only "area" and "11points" are supported') - if no_scale: - ap = ap[0] - return ap - - -def tpfp_imagenet(det_bboxes, - gt_bboxes, - gt_bboxes_ignore=None, - default_iou_thr=0.5, - area_ranges=None, - use_legacy_coordinate=False, - **kwargs): - """Check if detected bboxes are true positive or false positive. - - Args: - det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). - gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). - gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, - of shape (k, 4). Default: None - default_iou_thr (float): IoU threshold to be considered as matched for - medium and large bboxes (small ones have special rules). - Default: 0.5. - area_ranges (list[tuple] | None): Range of bbox areas to be evaluated, - in the format [(min1, max1), (min2, max2), ...]. Default: None. - use_legacy_coordinate (bool): Whether to use coordinate system in - mmdet v1.x. which means width, height should be - calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. - Default: False. - - Returns: - tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of - each array is (num_scales, m). - """ - - if not use_legacy_coordinate: - extra_length = 0. - else: - extra_length = 1. - - # an indicator of ignored gts - gt_ignore_inds = np.concatenate( - (np.zeros(gt_bboxes.shape[0], dtype=np.bool), - np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) - # stack gt_bboxes and gt_bboxes_ignore for convenience - gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) - - num_dets = det_bboxes.shape[0] - num_gts = gt_bboxes.shape[0] - if area_ranges is None: - area_ranges = [(None, None)] - num_scales = len(area_ranges) - # tp and fp are of shape (num_scales, num_gts), each row is tp or fp - # of a certain scale. - tp = np.zeros((num_scales, num_dets), dtype=np.float32) - fp = np.zeros((num_scales, num_dets), dtype=np.float32) - if gt_bboxes.shape[0] == 0: - if area_ranges == [(None, None)]: - fp[...] 
= 1 - else: - det_areas = ( - det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( - det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) - for i, (min_area, max_area) in enumerate(area_ranges): - fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 - return tp, fp - ious = bbox_overlaps( - det_bboxes, gt_bboxes - 1, use_legacy_coordinate=use_legacy_coordinate) - gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length - gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length - iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)), - default_iou_thr) - # sort all detections by scores in descending order - sort_inds = np.argsort(-det_bboxes[:, -1]) - for k, (min_area, max_area) in enumerate(area_ranges): - gt_covered = np.zeros(num_gts, dtype=bool) - # if no area range is specified, gt_area_ignore is all False - if min_area is None: - gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) - else: - gt_areas = gt_w * gt_h - gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) - for i in sort_inds: - max_iou = -1 - matched_gt = -1 - # find best overlapped available gt - for j in range(num_gts): - # different from PASCAL VOC: allow finding other gts if the - # best overlapped ones are already matched by other det bboxes - if gt_covered[j]: - continue - elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou: - max_iou = ious[i, j] - matched_gt = j - # there are 4 cases for a det bbox: - # 1. it matches a gt, tp = 1, fp = 0 - # 2. it matches an ignored gt, tp = 0, fp = 0 - # 3. it matches no gt and within area range, tp = 0, fp = 1 - # 4. it matches no gt but is beyond area range, tp = 0, fp = 0 - if matched_gt >= 0: - gt_covered[matched_gt] = 1 - if not (gt_ignore_inds[matched_gt] - or gt_area_ignore[matched_gt]): - tp[k, i] = 1 - elif min_area is None: - fp[k, i] = 1 - else: - bbox = det_bboxes[i, :4] - area = (bbox[2] - bbox[0] + extra_length) * ( - bbox[3] - bbox[1] + extra_length) - if area >= min_area and area < max_area: - fp[k, i] = 1 - return tp, fp - - -def tpfp_default(det_bboxes, - gt_bboxes, - gt_bboxes_ignore=None, - iou_thr=0.5, - area_ranges=None, - use_legacy_coordinate=False, - **kwargs): - """Check if detected bboxes are true positive or false positive. - - Args: - det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). - gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). - gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, - of shape (k, 4). Default: None - iou_thr (float): IoU threshold to be considered as matched. - Default: 0.5. - area_ranges (list[tuple] | None): Range of bbox areas to be - evaluated, in the format [(min1, max1), (min2, max2), ...]. - Default: None. - use_legacy_coordinate (bool): Whether to use coordinate system in - mmdet v1.x. which means width, height should be - calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. - Default: False. - - Returns: - tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of - each array is (num_scales, m). - """ - - if not use_legacy_coordinate: - extra_length = 0. - else: - extra_length = 1. 
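A quick numeric check of the removed `average_precision` function above, which the `__init__.py` removed earlier in this diff re-exports from `mmdet.core.evaluation`. The recall/precision values are made up for illustration, assuming that package is importable.

```python
# Sketch: area-mode AP for a tiny made-up precision/recall curve.
import numpy as np
from mmdet.core.evaluation import average_precision

recalls = np.array([0.0, 0.5, 1.0])
precisions = np.array([1.0, 0.8, 0.6])

# Area under the monotonised precision-recall curve:
# 0.5 * 0.8 + 0.5 * 0.6 = 0.7
print(average_precision(recalls, precisions, mode='area'))  # approx 0.7
```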
- - # an indicator of ignored gts - gt_ignore_inds = np.concatenate( - (np.zeros(gt_bboxes.shape[0], dtype=np.bool), - np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) - # stack gt_bboxes and gt_bboxes_ignore for convenience - gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) - - num_dets = det_bboxes.shape[0] - num_gts = gt_bboxes.shape[0] - if area_ranges is None: - area_ranges = [(None, None)] - num_scales = len(area_ranges) - # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of - # a certain scale - tp = np.zeros((num_scales, num_dets), dtype=np.float32) - fp = np.zeros((num_scales, num_dets), dtype=np.float32) - - # if there is no gt bboxes in this image, then all det bboxes - # within area range are false positives - if gt_bboxes.shape[0] == 0: - if area_ranges == [(None, None)]: - fp[...] = 1 - else: - det_areas = ( - det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( - det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) - for i, (min_area, max_area) in enumerate(area_ranges): - fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 - return tp, fp - - ious = bbox_overlaps( - det_bboxes, gt_bboxes, use_legacy_coordinate=use_legacy_coordinate) - # for each det, the max iou with all gts - ious_max = ious.max(axis=1) - # for each det, which gt overlaps most with it - ious_argmax = ious.argmax(axis=1) - # sort all dets in descending order by scores - sort_inds = np.argsort(-det_bboxes[:, -1]) - for k, (min_area, max_area) in enumerate(area_ranges): - gt_covered = np.zeros(num_gts, dtype=bool) - # if no area range is specified, gt_area_ignore is all False - if min_area is None: - gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) - else: - gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * ( - gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length) - gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) - for i in sort_inds: - if ious_max[i] >= iou_thr: - matched_gt = ious_argmax[i] - if not (gt_ignore_inds[matched_gt] - or gt_area_ignore[matched_gt]): - if not gt_covered[matched_gt]: - gt_covered[matched_gt] = True - tp[k, i] = 1 - else: - fp[k, i] = 1 - # otherwise ignore this detected bbox, tp = 0, fp = 0 - elif min_area is None: - fp[k, i] = 1 - else: - bbox = det_bboxes[i, :4] - area = (bbox[2] - bbox[0] + extra_length) * ( - bbox[3] - bbox[1] + extra_length) - if area >= min_area and area < max_area: - fp[k, i] = 1 - return tp, fp - - -def tpfp_openimages(det_bboxes, - gt_bboxes, - gt_bboxes_ignore=None, - iou_thr=0.5, - area_ranges=None, - use_legacy_coordinate=False, - gt_bboxes_group_of=None, - use_group_of=True, - ioa_thr=0.5, - **kwargs): - """Check if detected bboxes are true positive or false positive. - - Args: - det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). - gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). - gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, - of shape (k, 4). Default: None - iou_thr (float): IoU threshold to be considered as matched. - Default: 0.5. - area_ranges (list[tuple] | None): Range of bbox areas to be - evaluated, in the format [(min1, max1), (min2, max2), ...]. - Default: None. - use_legacy_coordinate (bool): Whether to use coordinate system in - mmdet v1.x. which means width, height should be - calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. - Default: False. - gt_bboxes_group_of (ndarray): GT group_of of this image, of shape - (k, 1). 
Default: None - use_group_of (bool): Whether to use group of when calculate TP and FP, - which only used in OpenImages evaluation. Default: True. - ioa_thr (float | None): IoA threshold to be considered as matched, - which only used in OpenImages evaluation. Default: 0.5. - - Returns: - tuple[np.ndarray]: Returns a tuple (tp, fp, det_bboxes), where - (tp, fp) whose elements are 0 and 1. The shape of each array is - (num_scales, m). (det_bboxes) whose will filter those are not - matched by group of gts when processing Open Images evaluation. - The shape is (num_scales, m). - """ - - if not use_legacy_coordinate: - extra_length = 0. - else: - extra_length = 1. - - # an indicator of ignored gts - gt_ignore_inds = np.concatenate( - (np.zeros(gt_bboxes.shape[0], dtype=np.bool), - np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) - # stack gt_bboxes and gt_bboxes_ignore for convenience - gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) - - num_dets = det_bboxes.shape[0] - num_gts = gt_bboxes.shape[0] - if area_ranges is None: - area_ranges = [(None, None)] - num_scales = len(area_ranges) - # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of - # a certain scale - tp = np.zeros((num_scales, num_dets), dtype=np.float32) - fp = np.zeros((num_scales, num_dets), dtype=np.float32) - - # if there is no gt bboxes in this image, then all det bboxes - # within area range are false positives - if gt_bboxes.shape[0] == 0: - if area_ranges == [(None, None)]: - fp[...] = 1 - else: - det_areas = ( - det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( - det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) - for i, (min_area, max_area) in enumerate(area_ranges): - fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 - return tp, fp, det_bboxes - - if gt_bboxes_group_of is not None and use_group_of: - # if handle group-of boxes, divided gt boxes into two parts: - # non-group-of and group-of.Then calculate ious and ioas through - # non-group-of group-of gts respectively. This only used in - # OpenImages evaluation. 
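A rough, hand-made illustration of why this branch computes IoA ('iof') against group-of boxes instead of plain IoU; it assumes, as with `bbox_overlaps(det, group_gt, mode='iof')`, that the overlap is normalised by the detection-box area, and the coordinates below are invented:

```python
# Illustrative only: a small detection fully inside a large group-of box has
# a tiny IoU but a perfect intersection-over-foreground (IoA) score.
import numpy as np

def _inter(a, b):
    w = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    h = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    return w * h

def _area(b):
    return (b[2] - b[0]) * (b[3] - b[1])

det = np.array([0.0, 0.0, 10.0, 10.0])         # small detection
group_gt = np.array([0.0, 0.0, 100.0, 100.0])  # large group-of GT box
i = _inter(det, group_gt)
iou = i / (_area(det) + _area(group_gt) - i)   # 0.01 -> fails iou_thr=0.5
ioa = i / _area(det)                           # 1.0  -> passes ioa_thr=0.5
```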
- assert gt_bboxes_group_of.shape[0] == gt_bboxes.shape[0] - non_group_gt_bboxes = gt_bboxes[~gt_bboxes_group_of] - group_gt_bboxes = gt_bboxes[gt_bboxes_group_of] - num_gts_group = group_gt_bboxes.shape[0] - ious = bbox_overlaps(det_bboxes, non_group_gt_bboxes) - ioas = bbox_overlaps(det_bboxes, group_gt_bboxes, mode='iof') - else: - # if not consider group-of boxes, only calculate ious through gt boxes - ious = bbox_overlaps( - det_bboxes, gt_bboxes, use_legacy_coordinate=use_legacy_coordinate) - ioas = None - - if ious.shape[1] > 0: - # for each det, the max iou with all gts - ious_max = ious.max(axis=1) - # for each det, which gt overlaps most with it - ious_argmax = ious.argmax(axis=1) - # sort all dets in descending order by scores - sort_inds = np.argsort(-det_bboxes[:, -1]) - for k, (min_area, max_area) in enumerate(area_ranges): - gt_covered = np.zeros(num_gts, dtype=bool) - # if no area range is specified, gt_area_ignore is all False - if min_area is None: - gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) - else: - gt_areas = ( - gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * ( - gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length) - gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) - for i in sort_inds: - if ious_max[i] >= iou_thr: - matched_gt = ious_argmax[i] - if not (gt_ignore_inds[matched_gt] - or gt_area_ignore[matched_gt]): - if not gt_covered[matched_gt]: - gt_covered[matched_gt] = True - tp[k, i] = 1 - else: - fp[k, i] = 1 - # otherwise ignore this detected bbox, tp = 0, fp = 0 - elif min_area is None: - fp[k, i] = 1 - else: - bbox = det_bboxes[i, :4] - area = (bbox[2] - bbox[0] + extra_length) * ( - bbox[3] - bbox[1] + extra_length) - if area >= min_area and area < max_area: - fp[k, i] = 1 - else: - # if there is no no-group-of gt bboxes in this image, - # then all det bboxes within area range are false positives. - # Only used in OpenImages evaluation. - if area_ranges == [(None, None)]: - fp[...] = 1 - else: - det_areas = ( - det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * ( - det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length) - for i, (min_area, max_area) in enumerate(area_ranges): - fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 - - if ioas is None or ioas.shape[1] <= 0: - return tp, fp, det_bboxes - else: - # The evaluation of group-of TP and FP are done in two stages: - # 1. All detections are first matched to non group-of boxes; true - # positives are determined. - # 2. Detections that are determined as false positives are matched - # against group-of boxes and calculated group-of TP and FP. - # Only used in OpenImages evaluation. 
- det_bboxes_group = np.zeros( - (num_scales, ioas.shape[1], det_bboxes.shape[1]), dtype=float) - match_group_of = np.zeros((num_scales, num_dets), dtype=bool) - tp_group = np.zeros((num_scales, num_gts_group), dtype=np.float32) - ioas_max = ioas.max(axis=1) - # for each det, which gt overlaps most with it - ioas_argmax = ioas.argmax(axis=1) - # sort all dets in descending order by scores - sort_inds = np.argsort(-det_bboxes[:, -1]) - for k, (min_area, max_area) in enumerate(area_ranges): - box_is_covered = tp[k] - # if no area range is specified, gt_area_ignore is all False - if min_area is None: - gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) - else: - gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( - gt_bboxes[:, 3] - gt_bboxes[:, 1]) - gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) - for i in sort_inds: - matched_gt = ioas_argmax[i] - if not box_is_covered[i]: - if ioas_max[i] >= ioa_thr: - if not (gt_ignore_inds[matched_gt] - or gt_area_ignore[matched_gt]): - if not tp_group[k, matched_gt]: - tp_group[k, matched_gt] = 1 - match_group_of[k, i] = True - else: - match_group_of[k, i] = True - - if det_bboxes_group[k, matched_gt, -1] < \ - det_bboxes[i, -1]: - det_bboxes_group[k, matched_gt] = \ - det_bboxes[i] - - fp_group = (tp_group <= 0).astype(float) - tps = [] - fps = [] - # concatenate tp, fp, and det-boxes which not matched group of - # gt boxes and tp_group, fp_group, and det_bboxes_group which - # matched group of boxes respectively. - for i in range(num_scales): - tps.append( - np.concatenate((tp[i][~match_group_of[i]], tp_group[i]))) - fps.append( - np.concatenate((fp[i][~match_group_of[i]], fp_group[i]))) - det_bboxes = np.concatenate( - (det_bboxes[~match_group_of[i]], det_bboxes_group[i])) - - tp = np.vstack(tps) - fp = np.vstack(fps) - return tp, fp, det_bboxes - - -def get_cls_results(det_results, annotations, class_id): - """Get det results and gt information of a certain class. - - Args: - det_results (list[list]): Same as `eval_map()`. - annotations (list[dict]): Same as `eval_map()`. - class_id (int): ID of a specific class. - - Returns: - tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes - """ - cls_dets = [img_res[class_id] for img_res in det_results] - cls_gts = [] - cls_gts_ignore = [] - for ann in annotations: - gt_inds = ann['labels'] == class_id - cls_gts.append(ann['bboxes'][gt_inds, :]) - - if ann.get('labels_ignore', None) is not None: - ignore_inds = ann['labels_ignore'] == class_id - cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :]) - else: - cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32)) - - return cls_dets, cls_gts, cls_gts_ignore - - -def get_cls_group_ofs(annotations, class_id): - """Get `gt_group_of` of a certain class, which is used in Open Images. - - Args: - annotations (list[dict]): Same as `eval_map()`. - class_id (int): ID of a specific class. - - Returns: - list[np.ndarray]: `gt_group_of` of a certain class. - """ - gt_group_ofs = [] - for ann in annotations: - gt_inds = ann['labels'] == class_id - if ann.get('gt_is_group_ofs', None) is not None: - gt_group_ofs.append(ann['gt_is_group_ofs'][gt_inds]) - else: - gt_group_ofs.append(np.empty((0, 1), dtype=np.bool)) - - return gt_group_ofs - - -def eval_map(det_results, - annotations, - scale_ranges=None, - iou_thr=0.5, - ioa_thr=None, - dataset=None, - logger=None, - tpfp_fn=None, - nproc=4, - use_legacy_coordinate=False, - use_group_of=False): - """Evaluate mAP of a dataset. 
- - Args: - det_results (list[list]): [[cls1_det, cls2_det, ...], ...]. - The outer list indicates images, and the inner list indicates - per-class detected bboxes. - annotations (list[dict]): Ground truth annotations where each item of - the list indicates an image. Keys of annotations are: - - - `bboxes`: numpy array of shape (n, 4) - - `labels`: numpy array of shape (n, ) - - `bboxes_ignore` (optional): numpy array of shape (k, 4) - - `labels_ignore` (optional): numpy array of shape (k, ) - scale_ranges (list[tuple] | None): Range of scales to be evaluated, - in the format [(min1, max1), (min2, max2), ...]. A range of - (32, 64) means the area range between (32**2, 64**2). - Default: None. - iou_thr (float): IoU threshold to be considered as matched. - Default: 0.5. - ioa_thr (float | None): IoA threshold to be considered as matched, - which only used in OpenImages evaluation. Default: None. - dataset (list[str] | str | None): Dataset name or dataset classes, - there are minor differences in metrics for different datasets, e.g. - "voc07", "imagenet_det", etc. Default: None. - logger (logging.Logger | str | None): The way to print the mAP - summary. See `mmcv.utils.print_log()` for details. Default: None. - tpfp_fn (callable | None): The function used to determine true/ - false positives. If None, :func:`tpfp_default` is used as default - unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this - case). If it is given as a function, then this function is used - to evaluate tp & fp. Default None. - nproc (int): Processes used for computing TP and FP. - Default: 4. - use_legacy_coordinate (bool): Whether to use coordinate system in - mmdet v1.x. which means width, height should be - calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. - Default: False. - use_group_of (bool): Whether to use group of when calculate TP and FP, - which only used in OpenImages evaluation. Default: False. - - Returns: - tuple: (mAP, [dict, dict, ...]) - """ - assert len(det_results) == len(annotations) - if not use_legacy_coordinate: - extra_length = 0. - else: - extra_length = 1. - - num_imgs = len(det_results) - num_scales = len(scale_ranges) if scale_ranges is not None else 1 - num_classes = len(det_results[0]) # positive class num - area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges] - if scale_ranges is not None else None) - - # There is no need to use multi processes to process - # when num_imgs = 1 . - if num_imgs > 1: - assert nproc > 0, 'nproc must be at least one.' 
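To make the expected call concrete, a minimal hand-written example of `det_results` and `annotations` for `eval_map`, following the docstring above; all boxes, labels, and scores are made up:

```python
# Illustrative only: input layout for eval_map(), one image, two classes.
import numpy as np

det_results = [[                                               # one inner list per image
    np.array([[10., 10., 50., 50., 0.9]], dtype=np.float32),   # class 0: (k, 5) with scores
    np.zeros((0, 5), dtype=np.float32),                        # class 1: no detections
]]
annotations = [dict(
    bboxes=np.array([[12., 11., 48., 52.]], dtype=np.float32),  # (n, 4)
    labels=np.array([0], dtype=np.int64),                       # (n,)
    bboxes_ignore=np.zeros((0, 4), dtype=np.float32),
    labels_ignore=np.array([], dtype=np.int64),
)]
# With mmdet installed this could then be evaluated as:
# mean_ap, per_class = eval_map(det_results, annotations, iou_thr=0.5)
```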
- nproc = min(nproc, num_imgs) - pool = Pool(nproc) - - eval_results = [] - for i in range(num_classes): - # get gt and det bboxes of this class - cls_dets, cls_gts, cls_gts_ignore = get_cls_results( - det_results, annotations, i) - # choose proper function according to datasets to compute tp and fp - if tpfp_fn is None: - if dataset in ['det', 'vid']: - tpfp_fn = tpfp_imagenet - elif dataset in ['oid_challenge', 'oid_v6'] \ - or use_group_of is True: - tpfp_fn = tpfp_openimages - else: - tpfp_fn = tpfp_default - if not callable(tpfp_fn): - raise ValueError( - f'tpfp_fn has to be a function or None, but got {tpfp_fn}') - - if num_imgs > 1: - # compute tp and fp for each image with multiple processes - args = [] - if use_group_of: - # used in Open Images Dataset evaluation - gt_group_ofs = get_cls_group_ofs(annotations, i) - args.append(gt_group_ofs) - args.append([use_group_of for _ in range(num_imgs)]) - if ioa_thr is not None: - args.append([ioa_thr for _ in range(num_imgs)]) - - tpfp = pool.starmap( - tpfp_fn, - zip(cls_dets, cls_gts, cls_gts_ignore, - [iou_thr for _ in range(num_imgs)], - [area_ranges for _ in range(num_imgs)], - [use_legacy_coordinate for _ in range(num_imgs)], *args)) - else: - tpfp = tpfp_fn( - cls_dets[0], - cls_gts[0], - cls_gts_ignore[0], - iou_thr, - area_ranges, - use_legacy_coordinate, - gt_bboxes_group_of=(get_cls_group_ofs(annotations, i)[0] - if use_group_of else None), - use_group_of=use_group_of, - ioa_thr=ioa_thr) - tpfp = [tpfp] - - if use_group_of: - tp, fp, cls_dets = tuple(zip(*tpfp)) - else: - tp, fp = tuple(zip(*tpfp)) - # calculate gt number of each scale - # ignored gts or gts beyond the specific scale are not counted - num_gts = np.zeros(num_scales, dtype=int) - for j, bbox in enumerate(cls_gts): - if area_ranges is None: - num_gts[0] += bbox.shape[0] - else: - gt_areas = (bbox[:, 2] - bbox[:, 0] + extra_length) * ( - bbox[:, 3] - bbox[:, 1] + extra_length) - for k, (min_area, max_area) in enumerate(area_ranges): - num_gts[k] += np.sum((gt_areas >= min_area) - & (gt_areas < max_area)) - # sort all det bboxes by score, also sort tp and fp - cls_dets = np.vstack(cls_dets) - num_dets = cls_dets.shape[0] - sort_inds = np.argsort(-cls_dets[:, -1]) - tp = np.hstack(tp)[:, sort_inds] - fp = np.hstack(fp)[:, sort_inds] - # calculate recall and precision with tp and fp - tp = np.cumsum(tp, axis=1) - fp = np.cumsum(fp, axis=1) - eps = np.finfo(np.float32).eps - recalls = tp / np.maximum(num_gts[:, np.newaxis], eps) - precisions = tp / np.maximum((tp + fp), eps) - # calculate AP - if scale_ranges is None: - recalls = recalls[0, :] - precisions = precisions[0, :] - num_gts = num_gts.item() - mode = 'area' if dataset != 'voc07' else '11points' - ap = average_precision(recalls, precisions, mode) - eval_results.append({ - 'num_gts': num_gts, - 'num_dets': num_dets, - 'recall': recalls, - 'precision': precisions, - 'ap': ap - }) - - if num_imgs > 1: - pool.close() - - if scale_ranges is not None: - # shape (num_classes, num_scales) - all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results]) - all_num_gts = np.vstack( - [cls_result['num_gts'] for cls_result in eval_results]) - mean_ap = [] - for i in range(num_scales): - if np.any(all_num_gts[:, i] > 0): - mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean()) - else: - mean_ap.append(0.0) - else: - aps = [] - for cls_result in eval_results: - if cls_result['num_gts'] > 0: - aps.append(cls_result['ap']) - mean_ap = np.array(aps).mean().item() if aps else 0.0 - - print_map_summary( - mean_ap, 
eval_results, dataset, area_ranges, logger=logger) - - return mean_ap, eval_results - - -def print_map_summary(mean_ap, - results, - dataset=None, - scale_ranges=None, - logger=None): - """Print mAP and results of each class. - - A table will be printed to show the gts/dets/recall/AP of each class and - the mAP. - - Args: - mean_ap (float): Calculated from `eval_map()`. - results (list[dict]): Calculated from `eval_map()`. - dataset (list[str] | str | None): Dataset name or dataset classes. - scale_ranges (list[tuple] | None): Range of scales to be evaluated. - logger (logging.Logger | str | None): The way to print the mAP - summary. See `mmcv.utils.print_log()` for details. Default: None. - """ - - if logger == 'silent': - return - - if isinstance(results[0]['ap'], np.ndarray): - num_scales = len(results[0]['ap']) - else: - num_scales = 1 - - if scale_ranges is not None: - assert len(scale_ranges) == num_scales - - num_classes = len(results) - - recalls = np.zeros((num_scales, num_classes), dtype=np.float32) - aps = np.zeros((num_scales, num_classes), dtype=np.float32) - num_gts = np.zeros((num_scales, num_classes), dtype=int) - for i, cls_result in enumerate(results): - if cls_result['recall'].size > 0: - recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1] - aps[:, i] = cls_result['ap'] - num_gts[:, i] = cls_result['num_gts'] - - if dataset is None: - label_names = [str(i) for i in range(num_classes)] - elif mmcv.is_str(dataset): - label_names = get_classes(dataset) - else: - label_names = dataset - - if not isinstance(mean_ap, list): - mean_ap = [mean_ap] - - header = ['class', 'gts', 'dets', 'recall', 'ap'] - for i in range(num_scales): - if scale_ranges is not None: - print_log(f'Scale range {scale_ranges[i]}', logger=logger) - table_data = [header] - for j in range(num_classes): - row_data = [ - label_names[j], num_gts[i, j], results[j]['num_dets'], - f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}' - ] - table_data.append(row_data) - table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}']) - table = AsciiTable(table_data) - table.inner_footing_row_border = True - print_log('\n' + table.table, logger=logger) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/evaluation/panoptic_utils.py b/cv/detection/co-detr/pytorch/mmdet/core/evaluation/panoptic_utils.py deleted file mode 100644 index 10c9ad934e0c9047ccdcfbf0d429ab13b8527d88..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/evaluation/panoptic_utils.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# A custom value to distinguish instance ID and category ID; need to -# be greater than the number of categories. -# For a pixel in the panoptic result map: -# pan_id = ins_id * INSTANCE_OFFSET + cat_id -INSTANCE_OFFSET = 1000 diff --git a/cv/detection/co-detr/pytorch/mmdet/core/evaluation/recall.py b/cv/detection/co-detr/pytorch/mmdet/core/evaluation/recall.py deleted file mode 100644 index 82b3c909b82fad29d6d5147c562a674e5db7c14c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/evaluation/recall.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
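As an aside on the one-constant `panoptic_utils.py` module removed just above: per its comment, a panoptic id is packed as `pan_id = ins_id * INSTANCE_OFFSET + cat_id`. A tiny sketch of the encoding and its inverse (the helper names here are invented):

```python
# Illustrative only: packing/unpacking a panoptic id with INSTANCE_OFFSET.
# The scheme assumes cat_id < INSTANCE_OFFSET (i.e. fewer than 1000 classes).
INSTANCE_OFFSET = 1000

def encode_pan_id(ins_id, cat_id):
    return ins_id * INSTANCE_OFFSET + cat_id

def decode_pan_id(pan_id):
    return pan_id // INSTANCE_OFFSET, pan_id % INSTANCE_OFFSET

assert decode_pan_id(encode_pan_id(3, 17)) == (3, 17)
```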
-from collections.abc import Sequence - -import numpy as np -from mmcv.utils import print_log -from terminaltables import AsciiTable - -from .bbox_overlaps import bbox_overlaps - - -def _recalls(all_ious, proposal_nums, thrs): - - img_num = all_ious.shape[0] - total_gt_num = sum([ious.shape[0] for ious in all_ious]) - - _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32) - for k, proposal_num in enumerate(proposal_nums): - tmp_ious = np.zeros(0) - for i in range(img_num): - ious = all_ious[i][:, :proposal_num].copy() - gt_ious = np.zeros((ious.shape[0])) - if ious.size == 0: - tmp_ious = np.hstack((tmp_ious, gt_ious)) - continue - for j in range(ious.shape[0]): - gt_max_overlaps = ious.argmax(axis=1) - max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps] - gt_idx = max_ious.argmax() - gt_ious[j] = max_ious[gt_idx] - box_idx = gt_max_overlaps[gt_idx] - ious[gt_idx, :] = -1 - ious[:, box_idx] = -1 - tmp_ious = np.hstack((tmp_ious, gt_ious)) - _ious[k, :] = tmp_ious - - _ious = np.fliplr(np.sort(_ious, axis=1)) - recalls = np.zeros((proposal_nums.size, thrs.size)) - for i, thr in enumerate(thrs): - recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num) - - return recalls - - -def set_recall_param(proposal_nums, iou_thrs): - """Check proposal_nums and iou_thrs and set correct format.""" - if isinstance(proposal_nums, Sequence): - _proposal_nums = np.array(proposal_nums) - elif isinstance(proposal_nums, int): - _proposal_nums = np.array([proposal_nums]) - else: - _proposal_nums = proposal_nums - - if iou_thrs is None: - _iou_thrs = np.array([0.5]) - elif isinstance(iou_thrs, Sequence): - _iou_thrs = np.array(iou_thrs) - elif isinstance(iou_thrs, float): - _iou_thrs = np.array([iou_thrs]) - else: - _iou_thrs = iou_thrs - - return _proposal_nums, _iou_thrs - - -def eval_recalls(gts, - proposals, - proposal_nums=None, - iou_thrs=0.5, - logger=None, - use_legacy_coordinate=False): - """Calculate recalls. - - Args: - gts (list[ndarray]): a list of arrays of shape (n, 4) - proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5) - proposal_nums (int | Sequence[int]): Top N proposals to be evaluated. - iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5. - logger (logging.Logger | str | None): The way to print the recall - summary. See `mmcv.utils.print_log()` for details. Default: None. - use_legacy_coordinate (bool): Whether use coordinate system - in mmdet v1.x. "1" was added to both height and width - which means w, h should be - computed as 'x2 - x1 + 1` and 'y2 - y1 + 1'. Default: False. 
- - - Returns: - ndarray: recalls of different ious and proposal nums - """ - - img_num = len(gts) - assert img_num == len(proposals) - proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs) - all_ious = [] - for i in range(img_num): - if proposals[i].ndim == 2 and proposals[i].shape[1] == 5: - scores = proposals[i][:, 4] - sort_idx = np.argsort(scores)[::-1] - img_proposal = proposals[i][sort_idx, :] - else: - img_proposal = proposals[i] - prop_num = min(img_proposal.shape[0], proposal_nums[-1]) - if gts[i] is None or gts[i].shape[0] == 0: - ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32) - else: - ious = bbox_overlaps( - gts[i], - img_proposal[:prop_num, :4], - use_legacy_coordinate=use_legacy_coordinate) - all_ious.append(ious) - all_ious = np.array(all_ious) - recalls = _recalls(all_ious, proposal_nums, iou_thrs) - - print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger) - return recalls - - -def print_recall_summary(recalls, - proposal_nums, - iou_thrs, - row_idxs=None, - col_idxs=None, - logger=None): - """Print recalls in a table. - - Args: - recalls (ndarray): calculated from `bbox_recalls` - proposal_nums (ndarray or list): top N proposals - iou_thrs (ndarray or list): iou thresholds - row_idxs (ndarray): which rows(proposal nums) to print - col_idxs (ndarray): which cols(iou thresholds) to print - logger (logging.Logger | str | None): The way to print the recall - summary. See `mmcv.utils.print_log()` for details. Default: None. - """ - proposal_nums = np.array(proposal_nums, dtype=np.int32) - iou_thrs = np.array(iou_thrs) - if row_idxs is None: - row_idxs = np.arange(proposal_nums.size) - if col_idxs is None: - col_idxs = np.arange(iou_thrs.size) - row_header = [''] + iou_thrs[col_idxs].tolist() - table_data = [row_header] - for i, num in enumerate(proposal_nums[row_idxs]): - row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()] - row.insert(0, num) - table_data.append(row) - table = AsciiTable(table_data) - print_log('\n' + table.table, logger=logger) - - -def plot_num_recall(recalls, proposal_nums): - """Plot Proposal_num-Recalls curve. - - Args: - recalls(ndarray or list): shape (k,) - proposal_nums(ndarray or list): same shape as `recalls` - """ - if isinstance(proposal_nums, np.ndarray): - _proposal_nums = proposal_nums.tolist() - else: - _proposal_nums = proposal_nums - if isinstance(recalls, np.ndarray): - _recalls = recalls.tolist() - else: - _recalls = recalls - - import matplotlib.pyplot as plt - f = plt.figure() - plt.plot([0] + _proposal_nums, [0] + _recalls) - plt.xlabel('Proposal num') - plt.ylabel('Recall') - plt.axis([0, proposal_nums.max(), 0, 1]) - f.show() - - -def plot_iou_recall(recalls, iou_thrs): - """Plot IoU-Recalls curve. 
- - Args: - recalls(ndarray or list): shape (k,) - iou_thrs(ndarray or list): same shape as `recalls` - """ - if isinstance(iou_thrs, np.ndarray): - _iou_thrs = iou_thrs.tolist() - else: - _iou_thrs = iou_thrs - if isinstance(recalls, np.ndarray): - _recalls = recalls.tolist() - else: - _recalls = recalls - - import matplotlib.pyplot as plt - f = plt.figure() - plt.plot(_iou_thrs + [1.0], _recalls + [0.]) - plt.xlabel('IoU') - plt.ylabel('Recall') - plt.axis([iou_thrs.min(), 1, 0, 1]) - f.show() diff --git a/cv/detection/co-detr/pytorch/mmdet/core/export/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/export/__init__.py deleted file mode 100644 index a8179c93642dcfaa780c5beccd3f1f104f32d4ae..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/export/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .onnx_helper import (add_dummy_nms_for_onnx, dynamic_clip_for_onnx, - get_k_for_topk) -from .pytorch2onnx import (build_model_from_cfg, - generate_inputs_and_wrap_model, - preprocess_example_input) - -__all__ = [ - 'build_model_from_cfg', 'generate_inputs_and_wrap_model', - 'preprocess_example_input', 'get_k_for_topk', 'add_dummy_nms_for_onnx', - 'dynamic_clip_for_onnx' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/export/model_wrappers.py b/cv/detection/co-detr/pytorch/mmdet/core/export/model_wrappers.py deleted file mode 100644 index 2f62bb03150bab12ad89b48856ba24b98bdf8b26..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/export/model_wrappers.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -import warnings - -import numpy as np -import torch - -from mmdet.core import bbox2result -from mmdet.models import BaseDetector - - -class DeployBaseDetector(BaseDetector): - """DeployBaseDetector.""" - - def __init__(self, class_names, device_id): - super(DeployBaseDetector, self).__init__() - self.CLASSES = class_names - self.device_id = device_id - - def simple_test(self, img, img_metas, **kwargs): - raise NotImplementedError('This method is not implemented.') - - def aug_test(self, imgs, img_metas, **kwargs): - raise NotImplementedError('This method is not implemented.') - - def extract_feat(self, imgs): - raise NotImplementedError('This method is not implemented.') - - def forward_train(self, imgs, img_metas, **kwargs): - raise NotImplementedError('This method is not implemented.') - - def val_step(self, data, optimizer): - raise NotImplementedError('This method is not implemented.') - - def train_step(self, data, optimizer): - raise NotImplementedError('This method is not implemented.') - - def forward_test(self, *, img, img_metas, **kwargs): - raise NotImplementedError('This method is not implemented.') - - def async_simple_test(self, img, img_metas, **kwargs): - raise NotImplementedError('This method is not implemented.') - - def forward(self, img, img_metas, return_loss=True, **kwargs): - outputs = self.forward_test(img, img_metas, **kwargs) - batch_dets, batch_labels = outputs[:2] - batch_masks = outputs[2] if len(outputs) == 3 else None - batch_size = img[0].shape[0] - img_metas = img_metas[0] - results = [] - rescale = kwargs.get('rescale', True) - for i in range(batch_size): - dets, labels = batch_dets[i], batch_labels[i] - if rescale: - scale_factor = img_metas[i]['scale_factor'] - - if isinstance(scale_factor, (list, tuple, np.ndarray)): - assert len(scale_factor) == 4 - scale_factor = 
np.array(scale_factor)[None, :] # [1,4] - dets[:, :4] /= scale_factor - - if 'border' in img_metas[i]: - # offset pixel of the top-left corners between original image - # and padded/enlarged image, 'border' is used when exporting - # CornerNet and CentripetalNet to onnx - x_off = img_metas[i]['border'][2] - y_off = img_metas[i]['border'][0] - dets[:, [0, 2]] -= x_off - dets[:, [1, 3]] -= y_off - dets[:, :4] *= (dets[:, :4] > 0).astype(dets.dtype) - - dets_results = bbox2result(dets, labels, len(self.CLASSES)) - - if batch_masks is not None: - masks = batch_masks[i] - img_h, img_w = img_metas[i]['img_shape'][:2] - ori_h, ori_w = img_metas[i]['ori_shape'][:2] - masks = masks[:, :img_h, :img_w] - if rescale: - masks = masks.astype(np.float32) - masks = torch.from_numpy(masks) - masks = torch.nn.functional.interpolate( - masks.unsqueeze(0), size=(ori_h, ori_w)) - masks = masks.squeeze(0).detach().numpy() - if masks.dtype != np.bool: - masks = masks >= 0.5 - segms_results = [[] for _ in range(len(self.CLASSES))] - for j in range(len(dets)): - segms_results[labels[j]].append(masks[j]) - results.append((dets_results, segms_results)) - else: - results.append(dets_results) - return results - - -class ONNXRuntimeDetector(DeployBaseDetector): - """Wrapper for detector's inference with ONNXRuntime.""" - - def __init__(self, onnx_file, class_names, device_id): - super(ONNXRuntimeDetector, self).__init__(class_names, device_id) - import onnxruntime as ort - - # get the custom op path - ort_custom_op_path = '' - try: - from mmcv.ops import get_onnxruntime_op_path - ort_custom_op_path = get_onnxruntime_op_path() - except (ImportError, ModuleNotFoundError): - warnings.warn('If input model has custom op from mmcv, \ - you may have to build mmcv with ONNXRuntime from source.') - session_options = ort.SessionOptions() - # register custom op for onnxruntime - if osp.exists(ort_custom_op_path): - session_options.register_custom_ops_library(ort_custom_op_path) - sess = ort.InferenceSession(onnx_file, session_options) - providers = ['CPUExecutionProvider'] - options = [{}] - is_cuda_available = ort.get_device() == 'GPU' - if is_cuda_available: - providers.insert(0, 'CUDAExecutionProvider') - options.insert(0, {'device_id': device_id}) - - sess.set_providers(providers, options) - - self.sess = sess - self.io_binding = sess.io_binding() - self.output_names = [_.name for _ in sess.get_outputs()] - self.is_cuda_available = is_cuda_available - - def forward_test(self, imgs, img_metas, **kwargs): - input_data = imgs[0] - # set io binding for inputs/outputs - device_type = 'cuda' if self.is_cuda_available else 'cpu' - if not self.is_cuda_available: - input_data = input_data.cpu() - self.io_binding.bind_input( - name='input', - device_type=device_type, - device_id=self.device_id, - element_type=np.float32, - shape=input_data.shape, - buffer_ptr=input_data.data_ptr()) - - for name in self.output_names: - self.io_binding.bind_output(name) - # run session to get outputs - self.sess.run_with_iobinding(self.io_binding) - ort_outputs = self.io_binding.copy_outputs_to_cpu() - return ort_outputs - - -class TensorRTDetector(DeployBaseDetector): - """Wrapper for detector's inference with TensorRT.""" - - def __init__(self, engine_file, class_names, device_id, output_names=None): - super(TensorRTDetector, self).__init__(class_names, device_id) - warnings.warn('`output_names` is deprecated and will be removed in ' - 'future releases.') - from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin - try: - load_tensorrt_plugin() - 
except (ImportError, ModuleNotFoundError): - warnings.warn('If input model has custom op from mmcv, \ - you may have to build mmcv with TensorRT from source.') - - output_names = ['dets', 'labels'] - model = TRTWraper(engine_file, ['input'], output_names) - with_masks = False - # if TensorRT has totally 4 inputs/outputs, then - # the detector should have `mask` output. - if len(model.engine) == 4: - model.output_names = output_names + ['masks'] - with_masks = True - self.model = model - self.with_masks = with_masks - - def forward_test(self, imgs, img_metas, **kwargs): - input_data = imgs[0].contiguous() - with torch.cuda.device(self.device_id), torch.no_grad(): - outputs = self.model({'input': input_data}) - outputs = [outputs[name] for name in self.model.output_names] - outputs = [out.detach().cpu().numpy() for out in outputs] - return outputs diff --git a/cv/detection/co-detr/pytorch/mmdet/core/export/onnx_helper.py b/cv/detection/co-detr/pytorch/mmdet/core/export/onnx_helper.py deleted file mode 100644 index 9f6b9a012a621be616fe9c086740fb9367ec2311..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/export/onnx_helper.py +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os - -import torch - - -def dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape): - """Clip boxes dynamically for onnx. - - Since torch.clamp cannot have dynamic `min` and `max`, we scale the - boxes by 1/max_shape and clamp in the range [0, 1]. - - Args: - x1 (Tensor): The x1 for bounding boxes. - y1 (Tensor): The y1 for bounding boxes. - x2 (Tensor): The x2 for bounding boxes. - y2 (Tensor): The y2 for bounding boxes. - max_shape (Tensor or torch.Size): The (H,W) of original image. - Returns: - tuple(Tensor): The clipped x1, y1, x2, y2. - """ - assert isinstance( - max_shape, - torch.Tensor), '`max_shape` should be tensor of (h,w) for onnx' - - # scale by 1/max_shape - x1 = x1 / max_shape[1] - y1 = y1 / max_shape[0] - x2 = x2 / max_shape[1] - y2 = y2 / max_shape[0] - - # clamp [0, 1] - x1 = torch.clamp(x1, 0, 1) - y1 = torch.clamp(y1, 0, 1) - x2 = torch.clamp(x2, 0, 1) - y2 = torch.clamp(y2, 0, 1) - - # scale back - x1 = x1 * max_shape[1] - y1 = y1 * max_shape[0] - x2 = x2 * max_shape[1] - y2 = y2 * max_shape[0] - return x1, y1, x2, y2 - - -def get_k_for_topk(k, size): - """Get k of TopK for onnx exporting. - - The K of TopK in TensorRT should not be a Tensor, while in ONNX Runtime - it could be a Tensor.Due to dynamic shape feature, we have to decide - whether to do TopK and what K it should be while exporting to ONNX. - If returned K is less than zero, it means we do not have to do - TopK operation. - - Args: - k (int or Tensor): The set k value for nms from config file. - size (Tensor or torch.Size): The number of elements of \ - TopK's input tensor - Returns: - tuple: (int or Tensor): The final K for TopK. 
- """ - ret_k = -1 - if k <= 0 or size <= 0: - return ret_k - if torch.onnx.is_in_onnx_export(): - is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT' - if is_trt_backend: - # TensorRT does not support dynamic K with TopK op - if 0 < k < size: - ret_k = k - else: - # Always keep topk op for dynamic input in onnx for ONNX Runtime - ret_k = torch.where(k < size, k, size) - elif k < size: - ret_k = k - else: - # ret_k is -1 - pass - return ret_k - - -def add_dummy_nms_for_onnx(boxes, - scores, - max_output_boxes_per_class=1000, - iou_threshold=0.5, - score_threshold=0.05, - pre_top_k=-1, - after_top_k=-1, - labels=None): - """Create a dummy onnx::NonMaxSuppression op while exporting to ONNX. - - This function helps exporting to onnx with batch and multiclass NMS op. - It only supports class-agnostic detection results. That is, the scores - is of shape (N, num_bboxes, num_classes) and the boxes is of shape - (N, num_boxes, 4). - - Args: - boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4] - scores (Tensor): The detection scores of shape - [N, num_boxes, num_classes] - max_output_boxes_per_class (int): Maximum number of output - boxes per class of nms. Defaults to 1000. - iou_threshold (float): IOU threshold of nms. Defaults to 0.5 - score_threshold (float): score threshold of nms. - Defaults to 0.05. - pre_top_k (bool): Number of top K boxes to keep before nms. - Defaults to -1. - after_top_k (int): Number of top K boxes to keep after nms. - Defaults to -1. - labels (Tensor, optional): It not None, explicit labels would be used. - Otherwise, labels would be automatically generated using - num_classed. Defaults to None. - - Returns: - tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] - and class labels of shape [N, num_det]. - """ - max_output_boxes_per_class = torch.LongTensor([max_output_boxes_per_class]) - iou_threshold = torch.tensor([iou_threshold], dtype=torch.float32) - score_threshold = torch.tensor([score_threshold], dtype=torch.float32) - batch_size = scores.shape[0] - num_class = scores.shape[2] - - nms_pre = torch.tensor(pre_top_k, device=scores.device, dtype=torch.long) - nms_pre = get_k_for_topk(nms_pre, boxes.shape[1]) - - if nms_pre > 0: - max_scores, _ = scores.max(-1) - _, topk_inds = max_scores.topk(nms_pre) - batch_inds = torch.arange(batch_size).view( - -1, 1).expand_as(topk_inds).long() - # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 - transformed_inds = boxes.shape[1] * batch_inds + topk_inds - boxes = boxes.reshape(-1, 4)[transformed_inds, :].reshape( - batch_size, -1, 4) - scores = scores.reshape(-1, num_class)[transformed_inds, :].reshape( - batch_size, -1, num_class) - if labels is not None: - labels = labels.reshape(-1, 1)[transformed_inds].reshape( - batch_size, -1) - - scores = scores.permute(0, 2, 1) - num_box = boxes.shape[1] - # turn off tracing to create a dummy output of nms - state = torch._C._get_tracing_state() - # dummy indices of nms's output - num_fake_det = 2 - batch_inds = torch.randint(batch_size, (num_fake_det, 1)) - cls_inds = torch.randint(num_class, (num_fake_det, 1)) - box_inds = torch.randint(num_box, (num_fake_det, 1)) - indices = torch.cat([batch_inds, cls_inds, box_inds], dim=1) - output = indices - setattr(DummyONNXNMSop, 'output', output) - - # open tracing - torch._C._set_tracing_state(state) - selected_indices = DummyONNXNMSop.apply(boxes, scores, - max_output_boxes_per_class, - iou_threshold, score_threshold) - - batch_inds, cls_inds = selected_indices[:, 0], 
selected_indices[:, 1] - box_inds = selected_indices[:, 2] - if labels is None: - labels = torch.arange(num_class, dtype=torch.long).to(scores.device) - labels = labels.view(1, num_class, 1).expand_as(scores) - scores = scores.reshape(-1, 1) - boxes = boxes.reshape(batch_size, -1).repeat(1, num_class).reshape(-1, 4) - pos_inds = (num_class * batch_inds + cls_inds) * num_box + box_inds - mask = scores.new_zeros(scores.shape) - # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 - # PyTorch style code: mask[batch_inds, box_inds] += 1 - mask[pos_inds, :] += 1 - scores = scores * mask - boxes = boxes * mask - - scores = scores.reshape(batch_size, -1) - boxes = boxes.reshape(batch_size, -1, 4) - labels = labels.reshape(batch_size, -1) - - nms_after = torch.tensor( - after_top_k, device=scores.device, dtype=torch.long) - nms_after = get_k_for_topk(nms_after, num_box * num_class) - - if nms_after > 0: - _, topk_inds = scores.topk(nms_after) - batch_inds = torch.arange(batch_size).view(-1, 1).expand_as(topk_inds) - # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 - transformed_inds = scores.shape[1] * batch_inds + topk_inds - scores = scores.reshape(-1, 1)[transformed_inds, :].reshape( - batch_size, -1) - boxes = boxes.reshape(-1, 4)[transformed_inds, :].reshape( - batch_size, -1, 4) - labels = labels.reshape(-1, 1)[transformed_inds, :].reshape( - batch_size, -1) - - scores = scores.unsqueeze(2) - dets = torch.cat([boxes, scores], dim=2) - return dets, labels - - -class DummyONNXNMSop(torch.autograd.Function): - """DummyONNXNMSop. - - This class is only for creating onnx::NonMaxSuppression. - """ - - @staticmethod - def forward(ctx, boxes, scores, max_output_boxes_per_class, iou_threshold, - score_threshold): - - return DummyONNXNMSop.output - - @staticmethod - def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold, - score_threshold): - return g.op( - 'NonMaxSuppression', - boxes, - scores, - max_output_boxes_per_class, - iou_threshold, - score_threshold, - outputs=1) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/export/pytorch2onnx.py b/cv/detection/co-detr/pytorch/mmdet/core/export/pytorch2onnx.py deleted file mode 100644 index b8261eed9e81c99db7f49ea929fce3d3ac1c0ca0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/export/pytorch2onnx.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from functools import partial - -import mmcv -import numpy as np -import torch -from mmcv.runner import load_checkpoint - - -def generate_inputs_and_wrap_model(config_path, - checkpoint_path, - input_config, - cfg_options=None): - """Prepare sample input and wrap model for ONNX export. - - The ONNX export API only accept args, and all inputs should be - torch.Tensor or corresponding types (such as tuple of tensor). - So we should call this function before exporting. This function will: - - 1. generate corresponding inputs which are used to execute the model. - 2. Wrap the model's forward function. - - For example, the MMDet models' forward function has a parameter - ``return_loss:bool``. As we want to set it as False while export API - supports neither bool type or kwargs. So we have to replace the forward - method like ``model.forward = partial(model.forward, return_loss=False)``. 
- - Args: - config_path (str): the OpenMMLab config for the model we want to - export to ONNX - checkpoint_path (str): Path to the corresponding checkpoint - input_config (dict): the exactly data in this dict depends on the - framework. For MMSeg, we can just declare the input shape, - and generate the dummy data accordingly. However, for MMDet, - we may pass the real img path, or the NMS will return None - as there is no legal bbox. - - Returns: - tuple: (model, tensor_data) wrapped model which can be called by - ``model(*tensor_data)`` and a list of inputs which are used to - execute the model while exporting. - """ - - model = build_model_from_cfg( - config_path, checkpoint_path, cfg_options=cfg_options) - one_img, one_meta = preprocess_example_input(input_config) - tensor_data = [one_img] - model.forward = partial( - model.forward, img_metas=[[one_meta]], return_loss=False) - - # pytorch has some bug in pytorch1.3, we have to fix it - # by replacing these existing op - opset_version = 11 - # put the import within the function thus it will not cause import error - # when not using this function - try: - from mmcv.onnx.symbolic import register_extra_symbolics - except ModuleNotFoundError: - raise NotImplementedError('please update mmcv to version>=v1.0.4') - register_extra_symbolics(opset_version) - - return model, tensor_data - - -def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None): - """Build a model from config and load the given checkpoint. - - Args: - config_path (str): the OpenMMLab config for the model we want to - export to ONNX - checkpoint_path (str): Path to the corresponding checkpoint - - Returns: - torch.nn.Module: the built model - """ - from mmdet.models import build_detector - - cfg = mmcv.Config.fromfile(config_path) - if cfg_options is not None: - cfg.merge_from_dict(cfg_options) - # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): - torch.backends.cudnn.benchmark = True - cfg.model.pretrained = None - cfg.data.test.test_mode = True - - # build the model - cfg.model.train_cfg = None - model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) - checkpoint = load_checkpoint(model, checkpoint_path, map_location='cpu') - if 'CLASSES' in checkpoint.get('meta', {}): - model.CLASSES = checkpoint['meta']['CLASSES'] - else: - from mmdet.datasets import DATASETS - dataset = DATASETS.get(cfg.data.test['type']) - assert (dataset is not None) - model.CLASSES = dataset.CLASSES - model.cpu().eval() - return model - - -def preprocess_example_input(input_config): - """Prepare an example input image for ``generate_inputs_and_wrap_model``. - - Args: - input_config (dict): customized config describing the example input. - - Returns: - tuple: (one_img, one_meta), tensor of the example input image and \ - meta information for the example input image. 
- - Examples: - >>> from mmdet.core.export import preprocess_example_input - >>> input_config = { - >>> 'input_shape': (1,3,224,224), - >>> 'input_path': 'demo/demo.jpg', - >>> 'normalize_cfg': { - >>> 'mean': (123.675, 116.28, 103.53), - >>> 'std': (58.395, 57.12, 57.375) - >>> } - >>> } - >>> one_img, one_meta = preprocess_example_input(input_config) - >>> print(one_img.shape) - torch.Size([1, 3, 224, 224]) - >>> print(one_meta) - {'img_shape': (224, 224, 3), - 'ori_shape': (224, 224, 3), - 'pad_shape': (224, 224, 3), - 'filename': '.png', - 'scale_factor': 1.0, - 'flip': False} - """ - input_path = input_config['input_path'] - input_shape = input_config['input_shape'] - one_img = mmcv.imread(input_path) - one_img = mmcv.imresize(one_img, input_shape[2:][::-1]) - show_img = one_img.copy() - if 'normalize_cfg' in input_config.keys(): - normalize_cfg = input_config['normalize_cfg'] - mean = np.array(normalize_cfg['mean'], dtype=np.float32) - std = np.array(normalize_cfg['std'], dtype=np.float32) - to_rgb = normalize_cfg.get('to_rgb', True) - one_img = mmcv.imnormalize(one_img, mean, std, to_rgb=to_rgb) - one_img = one_img.transpose(2, 0, 1) - one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_( - True) - (_, C, H, W) = input_shape - one_meta = { - 'img_shape': (H, W, C), - 'ori_shape': (H, W, C), - 'pad_shape': (H, W, C), - 'filename': '.png', - 'scale_factor': np.ones(4, dtype=np.float32), - 'flip': False, - 'show_img': show_img, - 'flip_direction': None - } - - return one_img, one_meta diff --git a/cv/detection/co-detr/pytorch/mmdet/core/hook/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/hook/__init__.py deleted file mode 100644 index 7b9ac9ff3efcff73c44d34dd9ce699da5c009534..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/hook/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .checkloss_hook import CheckInvalidLossHook -from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook -from .memory_profiler_hook import MemoryProfilerHook -from .set_epoch_info_hook import SetEpochInfoHook -from .sync_norm_hook import SyncNormHook -from .sync_random_size_hook import SyncRandomSizeHook -from .wandblogger_hook import MMDetWandbHook -from .yolox_lrupdater_hook import YOLOXLrUpdaterHook -from .yolox_mode_switch_hook import YOLOXModeSwitchHook - -__all__ = [ - 'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook', - 'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook', - 'CheckInvalidLossHook', 'SetEpochInfoHook', 'MemoryProfilerHook', - 'MMDetWandbHook' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/hook/checkloss_hook.py b/cv/detection/co-detr/pytorch/mmdet/core/hook/checkloss_hook.py deleted file mode 100644 index 754e61bef87dd074f4b7a06943b7db7060d5f1e6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/hook/checkloss_hook.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from mmcv.runner.hooks import HOOKS, Hook - - -@HOOKS.register_module() -class CheckInvalidLossHook(Hook): - """Check invalid loss hook. - - This hook will regularly check whether the loss is valid - during training. - - Args: - interval (int): Checking interval (every k iterations). - Default: 50. 
- """ - - def __init__(self, interval=50): - self.interval = interval - - def after_train_iter(self, runner): - if self.every_n_iters(runner, self.interval): - assert torch.isfinite(runner.outputs['loss']), \ - runner.logger.info('loss become infinite or NaN!') diff --git a/cv/detection/co-detr/pytorch/mmdet/core/hook/ema.py b/cv/detection/co-detr/pytorch/mmdet/core/hook/ema.py deleted file mode 100644 index ff7bfbabe0284db6f7396dbaa66656f3b7bfc9ba..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/hook/ema.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -from mmcv.parallel import is_module_wrapper -from mmcv.runner.hooks import HOOKS, Hook - - -class BaseEMAHook(Hook): - """Exponential Moving Average Hook. - - Use Exponential Moving Average on all parameters of model in training - process. All parameters have a ema backup, which update by the formula - as below. EMAHook takes priority over EvalHook and CheckpointHook. Note, - the original model parameters are actually saved in ema field after train. - - Args: - momentum (float): The momentum used for updating ema parameter. - Ema's parameter are updated with the formula: - `ema_param = (1-momentum) * ema_param + momentum * cur_param`. - Defaults to 0.0002. - skip_buffers (bool): Whether to skip the model buffers, such as - batchnorm running stats (running_mean, running_var), it does not - perform the ema operation. Default to False. - interval (int): Update ema parameter every interval iteration. - Defaults to 1. - resume_from (str, optional): The checkpoint path. Defaults to None. - momentum_fun (func, optional): The function to change momentum - during early iteration (also warmup) to help early training. - It uses `momentum` as a constant. Defaults to None. - """ - - def __init__(self, - momentum=0.0002, - interval=1, - skip_buffers=False, - resume_from=None, - momentum_fun=None): - assert 0 < momentum < 1 - self.momentum = momentum - self.skip_buffers = skip_buffers - self.interval = interval - self.checkpoint = resume_from - self.momentum_fun = momentum_fun - - def before_run(self, runner): - """To resume model with it's ema parameters more friendly. - - Register ema parameter as ``named_buffer`` to model. - """ - model = runner.model - if is_module_wrapper(model): - model = model.module - self.param_ema_buffer = {} - if self.skip_buffers: - self.model_parameters = dict(model.named_parameters()) - else: - self.model_parameters = model.state_dict() - for name, value in self.model_parameters.items(): - # "." 
is not allowed in module's buffer name - buffer_name = f"ema_{name.replace('.', '_')}" - self.param_ema_buffer[name] = buffer_name - model.register_buffer(buffer_name, value.data.clone()) - self.model_buffers = dict(model.named_buffers()) - if self.checkpoint is not None: - runner.resume(self.checkpoint) - - def get_momentum(self, runner): - return self.momentum_fun(runner.iter) if self.momentum_fun else \ - self.momentum - - def after_train_iter(self, runner): - """Update ema parameter every self.interval iterations.""" - if (runner.iter + 1) % self.interval != 0: - return - momentum = self.get_momentum(runner) - for name, parameter in self.model_parameters.items(): - # exclude num_tracking - if parameter.dtype.is_floating_point: - buffer_name = self.param_ema_buffer[name] - buffer_parameter = self.model_buffers[buffer_name] - buffer_parameter.mul_(1 - momentum).add_( - parameter.data, alpha=momentum) - - def after_train_epoch(self, runner): - """We load parameter values from ema backup to model before the - EvalHook.""" - self._swap_ema_parameters() - - def before_train_epoch(self, runner): - """We recover model's parameter from ema backup after last epoch's - EvalHook.""" - self._swap_ema_parameters() - - def _swap_ema_parameters(self): - """Swap the parameter of model with parameter in ema_buffer.""" - for name, value in self.model_parameters.items(): - temp = value.data.clone() - ema_buffer = self.model_buffers[self.param_ema_buffer[name]] - value.data.copy_(ema_buffer.data) - ema_buffer.data.copy_(temp) - - -@HOOKS.register_module() -class ExpMomentumEMAHook(BaseEMAHook): - """EMAHook using exponential momentum strategy. - - Args: - total_iter (int): The total number of iterations of EMA momentum. - Defaults to 2000. - """ - - def __init__(self, total_iter=2000, **kwargs): - super(ExpMomentumEMAHook, self).__init__(**kwargs) - self.momentum_fun = lambda x: (1 - self.momentum) * math.exp(-( - 1 + x) / total_iter) + self.momentum - - -@HOOKS.register_module() -class LinearMomentumEMAHook(BaseEMAHook): - """EMAHook using linear momentum strategy. - - Args: - warm_up (int): During first warm_up steps, we may use smaller decay - to update ema parameters more slowly. Defaults to 100. - """ - - def __init__(self, warm_up=100, **kwargs): - super(LinearMomentumEMAHook, self).__init__(**kwargs) - self.momentum_fun = lambda x: min(self.momentum**self.interval, - (1 + x) / (warm_up + x)) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/hook/memory_profiler_hook.py b/cv/detection/co-detr/pytorch/mmdet/core/hook/memory_profiler_hook.py deleted file mode 100644 index a473061b566f92f4bee6280ec33875e2c50a51dd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/hook/memory_profiler_hook.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmcv.runner.hooks import HOOKS, Hook - - -@HOOKS.register_module() -class MemoryProfilerHook(Hook): - """Memory profiler hook recording memory information including virtual - memory, swap memory, and the memory of the current process. - - Args: - interval (int): Checking interval (every k iterations). - Default: 50. 
- """ - - def __init__(self, interval=50): - try: - from psutil import swap_memory, virtual_memory - self._swap_memory = swap_memory - self._virtual_memory = virtual_memory - except ImportError: - raise ImportError('psutil is not installed, please install it by: ' - 'pip install psutil') - - try: - from memory_profiler import memory_usage - self._memory_usage = memory_usage - except ImportError: - raise ImportError( - 'memory_profiler is not installed, please install it by: ' - 'pip install memory_profiler') - - self.interval = interval - - def after_iter(self, runner): - if self.every_n_iters(runner, self.interval): - # in Byte - virtual_memory = self._virtual_memory() - swap_memory = self._swap_memory() - # in MB - process_memory = self._memory_usage()[0] - factor = 1024 * 1024 - runner.logger.info( - 'Memory information ' - 'available_memory: ' - f'{round(virtual_memory.available / factor)} MB, ' - 'used_memory: ' - f'{round(virtual_memory.used / factor)} MB, ' - f'memory_utilization: {virtual_memory.percent} %, ' - 'available_swap_memory: ' - f'{round((swap_memory.total - swap_memory.used) / factor)}' - ' MB, ' - f'used_swap_memory: {round(swap_memory.used / factor)} MB, ' - f'swap_memory_utilization: {swap_memory.percent} %, ' - 'current_process_memory: ' - f'{round(process_memory)} MB') diff --git a/cv/detection/co-detr/pytorch/mmdet/core/hook/set_epoch_info_hook.py b/cv/detection/co-detr/pytorch/mmdet/core/hook/set_epoch_info_hook.py deleted file mode 100644 index c2b134ceb69856338097cf283f67d7e2c580739f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/hook/set_epoch_info_hook.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmcv.parallel import is_module_wrapper -from mmcv.runner import HOOKS, Hook - - -@HOOKS.register_module() -class SetEpochInfoHook(Hook): - """Set runner's epoch information to the model.""" - - def before_train_epoch(self, runner): - epoch = runner.epoch - model = runner.model - if is_module_wrapper(model): - model = model.module - model.set_epoch(epoch) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/hook/sync_norm_hook.py b/cv/detection/co-detr/pytorch/mmdet/core/hook/sync_norm_hook.py deleted file mode 100644 index 82931cef3bcaba0521a0d9c56cff1e5f50fe8db7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/hook/sync_norm_hook.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from collections import OrderedDict - -from mmcv.runner import get_dist_info -from mmcv.runner.hooks import HOOKS, Hook -from torch import nn - -from ..utils.dist_utils import all_reduce_dict - - -def get_norm_states(module): - async_norm_states = OrderedDict() - for name, child in module.named_modules(): - if isinstance(child, nn.modules.batchnorm._NormBase): - for k, v in child.state_dict().items(): - async_norm_states['.'.join([name, k])] = v - return async_norm_states - - -@HOOKS.register_module() -class SyncNormHook(Hook): - """Synchronize Norm states after training epoch, currently used in YOLOX. - - Args: - num_last_epochs (int): The number of latter epochs in the end of the - training to switch to synchronizing norm interval. Default: 15. - interval (int): Synchronizing norm interval. Default: 1. 
- """ - - def __init__(self, num_last_epochs=15, interval=1): - self.interval = interval - self.num_last_epochs = num_last_epochs - - def before_train_epoch(self, runner): - epoch = runner.epoch - if (epoch + 1) == runner.max_epochs - self.num_last_epochs: - # Synchronize norm every epoch. - self.interval = 1 - - def after_train_epoch(self, runner): - """Synchronizing norm.""" - epoch = runner.epoch - module = runner.model - if (epoch + 1) % self.interval == 0: - _, world_size = get_dist_info() - if world_size == 1: - return - norm_states = get_norm_states(module) - if len(norm_states) == 0: - return - norm_states = all_reduce_dict(norm_states, op='mean') - module.load_state_dict(norm_states, strict=False) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/hook/sync_random_size_hook.py b/cv/detection/co-detr/pytorch/mmdet/core/hook/sync_random_size_hook.py deleted file mode 100644 index 6d7e96c6aaf5207faef9bd835806bdded475bd72..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/hook/sync_random_size_hook.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import random -import warnings - -import torch -from mmcv.runner import get_dist_info -from mmcv.runner.hooks import HOOKS, Hook -from torch import distributed as dist - - -@HOOKS.register_module() -class SyncRandomSizeHook(Hook): - """Change and synchronize the random image size across ranks. - SyncRandomSizeHook is deprecated, please use Resize pipeline to achieve - similar functions. Such as `dict(type='Resize', img_scale=[(448, 448), - (832, 832)], multiscale_mode='range', keep_ratio=True)`. - - Note: Due to the multi-process dataloader, its behavior is different - from YOLOX's official implementation, the official is to change the - size every fixed iteration interval and what we achieved is a fixed - epoch interval. - - Args: - ratio_range (tuple[int]): Random ratio range. It will be multiplied - by 32, and then change the dataset output image size. - Default: (14, 26). - img_scale (tuple[int]): Size of input image. Default: (640, 640). - interval (int): The epoch interval of change image size. Default: 1. - device (torch.device | str): device for returned tensors. - Default: 'cuda'. - """ - - def __init__(self, - ratio_range=(14, 26), - img_scale=(640, 640), - interval=1, - device='cuda'): - warnings.warn('DeprecationWarning: SyncRandomSizeHook is deprecated. ' - 'Please use Resize pipeline to achieve similar ' - 'functions. Due to the multi-process dataloader, ' - 'its behavior is different from YOLOX\'s official ' - 'implementation, the official is to change the size ' - 'every fixed iteration interval and what we achieved ' - 'is a fixed epoch interval.') - self.rank, world_size = get_dist_info() - self.is_distributed = world_size > 1 - self.ratio_range = ratio_range - self.img_scale = img_scale - self.interval = interval - self.device = device - - def after_train_epoch(self, runner): - """Change the dataset output image size.""" - if self.ratio_range is not None and (runner.epoch + - 1) % self.interval == 0: - # Due to DDP and DP get the device behavior inconsistent, - # so we did not get the device from runner.model. - tensor = torch.LongTensor(2).to(self.device) - - if self.rank == 0: - size_factor = self.img_scale[1] * 1. 
/ self.img_scale[0] - size = random.randint(*self.ratio_range) - size = (int(32 * size), 32 * int(size * size_factor)) - tensor[0] = size[0] - tensor[1] = size[1] - - if self.is_distributed: - dist.barrier() - dist.broadcast(tensor, 0) - - runner.data_loader.dataset.update_dynamic_scale( - (tensor[0].item(), tensor[1].item())) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/hook/wandblogger_hook.py b/cv/detection/co-detr/pytorch/mmdet/core/hook/wandblogger_hook.py deleted file mode 100644 index 234506ce313119c3c52d047c7baca30b698eebca..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/hook/wandblogger_hook.py +++ /dev/null @@ -1,589 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import importlib -import os.path as osp -import sys -import warnings - -import mmcv -import numpy as np -import pycocotools.mask as mask_util -from mmcv.runner import HOOKS -from mmcv.runner.dist_utils import master_only -from mmcv.runner.hooks.checkpoint import CheckpointHook -from mmcv.runner.hooks.logger.wandb import WandbLoggerHook -from mmcv.utils import digit_version - -from mmdet.core import DistEvalHook, EvalHook -from mmdet.core.mask.structures import polygon_to_bitmap - - -@HOOKS.register_module() -class MMDetWandbHook(WandbLoggerHook): - """Enhanced Wandb logger hook for MMDetection. - - Comparing with the :cls:`mmcv.runner.WandbLoggerHook`, this hook can not - only automatically log all the metrics but also log the following extra - information - saves model checkpoints as W&B Artifact, and - logs model prediction as interactive W&B Tables. - - - Metrics: The MMDetWandbHook will automatically log training - and validation metrics along with system metrics (CPU/GPU). - - - Checkpointing: If `log_checkpoint` is True, the checkpoint saved at - every checkpoint interval will be saved as W&B Artifacts. - This depends on the : class:`mmcv.runner.CheckpointHook` whose priority - is higher than this hook. Please refer to - https://docs.wandb.ai/guides/artifacts/model-versioning - to learn more about model versioning with W&B Artifacts. - - - Checkpoint Metadata: If evaluation results are available for a given - checkpoint artifact, it will have a metadata associated with it. - The metadata contains the evaluation metrics computed on validation - data with that checkpoint along with the current epoch. It depends - on `EvalHook` whose priority is more than MMDetWandbHook. - - - Evaluation: At every evaluation interval, the `MMDetWandbHook` logs the - model prediction as interactive W&B Tables. The number of samples - logged is given by `num_eval_images`. Currently, the `MMDetWandbHook` - logs the predicted bounding boxes along with the ground truth at every - evaluation interval. This depends on the `EvalHook` whose priority is - more than `MMDetWandbHook`. Also note that the data is just logged once - and subsequent evaluation tables uses reference to the logged data - to save memory usage. Please refer to - https://docs.wandb.ai/guides/data-vis to learn more about W&B Tables. - - For more details check out W&B's MMDetection docs: - https://docs.wandb.ai/guides/integrations/mmdetection - - ``` - Example: - log_config = dict( - ... 
- hooks=[ - ..., - dict(type='MMDetWandbHook', - init_kwargs={ - 'entity': "YOUR_ENTITY", - 'project': "YOUR_PROJECT_NAME" - }, - interval=50, - log_checkpoint=True, - log_checkpoint_metadata=True, - num_eval_images=100, - bbox_score_thr=0.3) - ]) - ``` - - Args: - init_kwargs (dict): A dict passed to wandb.init to initialize - a W&B run. Please refer to https://docs.wandb.ai/ref/python/init - for possible key-value pairs. - interval (int): Logging interval (every k iterations). Defaults to 50. - log_checkpoint (bool): Save the checkpoint at every checkpoint interval - as W&B Artifacts. Use this for model versioning where each version - is a checkpoint. Defaults to False. - log_checkpoint_metadata (bool): Log the evaluation metrics computed - on the validation data with the checkpoint, along with current - epoch as a metadata to that checkpoint. - Defaults to True. - num_eval_images (int): The number of validation images to be logged. - If zero, the evaluation won't be logged. Defaults to 100. - bbox_score_thr (float): Threshold for bounding box scores. - Defaults to 0.3. - """ - - def __init__(self, - init_kwargs=None, - interval=50, - log_checkpoint=False, - log_checkpoint_metadata=False, - num_eval_images=100, - bbox_score_thr=0.3, - **kwargs): - super(MMDetWandbHook, self).__init__(init_kwargs, interval, **kwargs) - - self.log_checkpoint = log_checkpoint - self.log_checkpoint_metadata = ( - log_checkpoint and log_checkpoint_metadata) - self.num_eval_images = num_eval_images - self.bbox_score_thr = bbox_score_thr - self.log_evaluation = (num_eval_images > 0) - self.ckpt_hook: CheckpointHook = None - self.eval_hook: EvalHook = None - - def import_wandb(self): - try: - import wandb - from wandb import init # noqa - - # Fix ResourceWarning when calling wandb.log in wandb v0.12.10. - # https://github.com/wandb/client/issues/2837 - if digit_version(wandb.__version__) < digit_version('0.12.10'): - warnings.warn( - f'The current wandb {wandb.__version__} is ' - f'lower than v0.12.10 will cause ResourceWarning ' - f'when calling wandb.log, Please run ' - f'"pip install --upgrade wandb"') - - except ImportError: - raise ImportError( - 'Please run "pip install "wandb>=0.12.10"" to install wandb') - self.wandb = wandb - - @master_only - def before_run(self, runner): - super(MMDetWandbHook, self).before_run(runner) - - # Save and Log config. - if runner.meta is not None and runner.meta.get('exp_name', - None) is not None: - src_cfg_path = osp.join(runner.work_dir, - runner.meta.get('exp_name', None)) - if osp.exists(src_cfg_path): - self.wandb.save(src_cfg_path, base_path=runner.work_dir) - self._update_wandb_config(runner) - else: - runner.logger.warning('No meta information found in the runner. 
') - - # Inspect CheckpointHook and EvalHook - for hook in runner.hooks: - if isinstance(hook, CheckpointHook): - self.ckpt_hook = hook - if isinstance(hook, (EvalHook, DistEvalHook)): - self.eval_hook = hook - - # Check conditions to log checkpoint - if self.log_checkpoint: - if self.ckpt_hook is None: - self.log_checkpoint = False - self.log_checkpoint_metadata = False - runner.logger.warning( - 'To log checkpoint in MMDetWandbHook, `CheckpointHook` is' - 'required, please check hooks in the runner.') - else: - self.ckpt_interval = self.ckpt_hook.interval - - # Check conditions to log evaluation - if self.log_evaluation or self.log_checkpoint_metadata: - if self.eval_hook is None: - self.log_evaluation = False - self.log_checkpoint_metadata = False - runner.logger.warning( - 'To log evaluation or checkpoint metadata in ' - 'MMDetWandbHook, `EvalHook` or `DistEvalHook` in mmdet ' - 'is required, please check whether the validation ' - 'is enabled.') - else: - self.eval_interval = self.eval_hook.interval - self.val_dataset = self.eval_hook.dataloader.dataset - # Determine the number of samples to be logged. - if self.num_eval_images > len(self.val_dataset): - self.num_eval_images = len(self.val_dataset) - runner.logger.warning( - f'The num_eval_images ({self.num_eval_images}) is ' - 'greater than the total number of validation samples ' - f'({len(self.val_dataset)}). The complete validation ' - 'dataset will be logged.') - - # Check conditions to log checkpoint metadata - if self.log_checkpoint_metadata: - assert self.ckpt_interval % self.eval_interval == 0, \ - 'To log checkpoint metadata in MMDetWandbHook, the interval ' \ - f'of checkpoint saving ({self.ckpt_interval}) should be ' \ - 'divisible by the interval of evaluation ' \ - f'({self.eval_interval}).' - - # Initialize evaluation table - if self.log_evaluation: - # Initialize data table - self._init_data_table() - # Add data to the data table - self._add_ground_truth(runner) - # Log ground truth data - self._log_data_table() - - @master_only - def after_train_epoch(self, runner): - super(MMDetWandbHook, self).after_train_epoch(runner) - - if not self.by_epoch: - return - - # Log checkpoint and metadata. - if (self.log_checkpoint - and self.every_n_epochs(runner, self.ckpt_interval) - or (self.ckpt_hook.save_last and self.is_last_epoch(runner))): - if self.log_checkpoint_metadata and self.eval_hook: - metadata = { - 'epoch': runner.epoch + 1, - **self._get_eval_results() - } - else: - metadata = None - aliases = [f'epoch_{runner.epoch + 1}', 'latest'] - model_path = osp.join(self.ckpt_hook.out_dir, - f'epoch_{runner.epoch + 1}.pth') - self._log_ckpt_as_artifact(model_path, aliases, metadata) - - # Save prediction table - if self.log_evaluation and self.eval_hook._should_evaluate(runner): - results = self.eval_hook.latest_results - # Initialize evaluation table - self._init_pred_table() - # Log predictions - self._log_predictions(results) - # Log the table - self._log_eval_table(runner.epoch + 1) - - @master_only - def after_train_iter(self, runner): - if self.get_mode(runner) == 'train': - # An ugly patch. The iter-based eval hook will call the - # `after_train_iter` method of all logger hooks before evaluation. - # Use this trick to skip that call. 
- # Don't call super method at first, it will clear the log_buffer - return super(MMDetWandbHook, self).after_train_iter(runner) - else: - super(MMDetWandbHook, self).after_train_iter(runner) - - if self.by_epoch: - return - - # Save checkpoint and metadata - if (self.log_checkpoint - and self.every_n_iters(runner, self.ckpt_interval) - or (self.ckpt_hook.save_last and self.is_last_iter(runner))): - if self.log_checkpoint_metadata and self.eval_hook: - metadata = { - 'iter': runner.iter + 1, - **self._get_eval_results() - } - else: - metadata = None - aliases = [f'iter_{runner.iter + 1}', 'latest'] - model_path = osp.join(self.ckpt_hook.out_dir, - f'iter_{runner.iter + 1}.pth') - self._log_ckpt_as_artifact(model_path, aliases, metadata) - - # Save prediction table - if self.log_evaluation and self.eval_hook._should_evaluate(runner): - results = self.eval_hook.latest_results - # Initialize evaluation table - self._init_pred_table() - # Log predictions - self._log_predictions(results) - # Log the table - self._log_eval_table(runner.iter + 1) - - @master_only - def after_run(self, runner): - self.wandb.finish() - - def _update_wandb_config(self, runner): - """Update wandb config.""" - # Import the config file. - sys.path.append(runner.work_dir) - config_filename = runner.meta['exp_name'][:-3] - configs = importlib.import_module(config_filename) - # Prepare a nested dict of config variables. - config_keys = [key for key in dir(configs) if not key.startswith('__')] - config_dict = {key: getattr(configs, key) for key in config_keys} - # Update the W&B config. - self.wandb.config.update(config_dict) - - def _log_ckpt_as_artifact(self, model_path, aliases, metadata=None): - """Log model checkpoint as W&B Artifact. - - Args: - model_path (str): Path of the checkpoint to log. - aliases (list): List of the aliases associated with this artifact. - metadata (dict, optional): Metadata associated with this artifact. - """ - model_artifact = self.wandb.Artifact( - f'run_{self.wandb.run.id}_model', type='model', metadata=metadata) - model_artifact.add_file(model_path) - self.wandb.log_artifact(model_artifact, aliases=aliases) - - def _get_eval_results(self): - """Get model evaluation results.""" - results = self.eval_hook.latest_results - eval_results = self.val_dataset.evaluate( - results, logger='silent', **self.eval_hook.eval_kwargs) - return eval_results - - def _init_data_table(self): - """Initialize the W&B Tables for validation data.""" - columns = ['image_name', 'image'] - self.data_table = self.wandb.Table(columns=columns) - - def _init_pred_table(self): - """Initialize the W&B Tables for model evaluation.""" - columns = ['image_name', 'ground_truth', 'prediction'] - self.eval_table = self.wandb.Table(columns=columns) - - def _add_ground_truth(self, runner): - # Get image loading pipeline - from mmdet.datasets.pipelines import LoadImageFromFile - img_loader = None - for t in self.val_dataset.pipeline.transforms: - if isinstance(t, LoadImageFromFile): - img_loader = t - - if img_loader is None: - self.log_evaluation = False - runner.logger.warning( - 'LoadImageFromFile is required to add images ' - 'to W&B Tables.') - return - - # Select the images to be logged. - self.eval_image_indexs = np.arange(len(self.val_dataset)) - # Set seed so that same validation set is logged each time. 
- np.random.seed(42) - np.random.shuffle(self.eval_image_indexs) - self.eval_image_indexs = self.eval_image_indexs[:self.num_eval_images] - - CLASSES = self.val_dataset.CLASSES - self.class_id_to_label = { - id + 1: name - for id, name in enumerate(CLASSES) - } - self.class_set = self.wandb.Classes([{ - 'id': id, - 'name': name - } for id, name in self.class_id_to_label.items()]) - - img_prefix = self.val_dataset.img_prefix - - for idx in self.eval_image_indexs: - img_info = self.val_dataset.data_infos[idx] - image_name = img_info.get('filename', f'img_{idx}') - img_height, img_width = img_info['height'], img_info['width'] - - img_meta = img_loader( - dict(img_info=img_info, img_prefix=img_prefix)) - - # Get image and convert from BGR to RGB - image = mmcv.bgr2rgb(img_meta['img']) - - data_ann = self.val_dataset.get_ann_info(idx) - bboxes = data_ann['bboxes'] - labels = data_ann['labels'] - masks = data_ann.get('masks', None) - - # Get dict of bounding boxes to be logged. - assert len(bboxes) == len(labels) - wandb_boxes = self._get_wandb_bboxes(bboxes, labels) - - # Get dict of masks to be logged. - if masks is not None: - wandb_masks = self._get_wandb_masks( - masks, - labels, - is_poly_mask=True, - height=img_height, - width=img_width) - else: - wandb_masks = None - # TODO: Panoramic segmentation visualization. - - # Log a row to the data table. - self.data_table.add_data( - image_name, - self.wandb.Image( - image, - boxes=wandb_boxes, - masks=wandb_masks, - classes=self.class_set)) - - def _log_predictions(self, results): - table_idxs = self.data_table_ref.get_index() - assert len(table_idxs) == len(self.eval_image_indexs) - - for ndx, eval_image_index in enumerate(self.eval_image_indexs): - # Get the result - result = results[eval_image_index] - if isinstance(result, tuple): - bbox_result, segm_result = result - if isinstance(segm_result, tuple): - segm_result = segm_result[0] # ms rcnn - else: - bbox_result, segm_result = result, None - assert len(bbox_result) == len(self.class_id_to_label) - - # Get labels - bboxes = np.vstack(bbox_result) - labels = [ - np.full(bbox.shape[0], i, dtype=np.int32) - for i, bbox in enumerate(bbox_result) - ] - labels = np.concatenate(labels) - - # Get segmentation mask if available. - segms = None - if segm_result is not None and len(labels) > 0: - segms = mmcv.concat_list(segm_result) - segms = mask_util.decode(segms) - segms = segms.transpose(2, 0, 1) - assert len(segms) == len(labels) - # TODO: Panoramic segmentation visualization. - - # Remove bounding boxes and masks with score lower than threshold. - if self.bbox_score_thr > 0: - assert bboxes is not None and bboxes.shape[1] == 5 - scores = bboxes[:, -1] - inds = scores > self.bbox_score_thr - bboxes = bboxes[inds, :] - labels = labels[inds] - if segms is not None: - segms = segms[inds, ...] - - # Get dict of bounding boxes to be logged. - wandb_boxes = self._get_wandb_bboxes(bboxes, labels, log_gt=False) - # Get dict of masks to be logged. - if segms is not None: - wandb_masks = self._get_wandb_masks(segms, labels) - else: - wandb_masks = None - - # Log a row to the eval table. - self.eval_table.add_data( - self.data_table_ref.data[ndx][0], - self.data_table_ref.data[ndx][1], - self.wandb.Image( - self.data_table_ref.data[ndx][1], - boxes=wandb_boxes, - masks=wandb_masks, - classes=self.class_set)) - - def _get_wandb_bboxes(self, bboxes, labels, log_gt=True): - """Get list of structured dict for logging bounding boxes to W&B. 
- - Args: - bboxes (list): List of bounding box coordinates in - (minX, minY, maxX, maxY) format. - labels (int): List of label ids. - log_gt (bool): Whether to log ground truth or prediction boxes. - - Returns: - Dictionary of bounding boxes to be logged. - """ - wandb_boxes = {} - - box_data = [] - for bbox, label in zip(bboxes, labels): - if not isinstance(label, int): - label = int(label) - label = label + 1 - - if len(bbox) == 5: - confidence = float(bbox[4]) - class_name = self.class_id_to_label[label] - box_caption = f'{class_name} {confidence:.2f}' - else: - box_caption = str(self.class_id_to_label[label]) - - position = dict( - minX=int(bbox[0]), - minY=int(bbox[1]), - maxX=int(bbox[2]), - maxY=int(bbox[3])) - - box_data.append({ - 'position': position, - 'class_id': label, - 'box_caption': box_caption, - 'domain': 'pixel' - }) - - wandb_bbox_dict = { - 'box_data': box_data, - 'class_labels': self.class_id_to_label - } - - if log_gt: - wandb_boxes['ground_truth'] = wandb_bbox_dict - else: - wandb_boxes['predictions'] = wandb_bbox_dict - - return wandb_boxes - - def _get_wandb_masks(self, - masks, - labels, - is_poly_mask=False, - height=None, - width=None): - """Get list of structured dict for logging masks to W&B. - - Args: - masks (list): List of masks. - labels (int): List of label ids. - is_poly_mask (bool): Whether the mask is polygonal or not. - This is true for CocoDataset. - height (int): Height of the image. - width (int): Width of the image. - - Returns: - Dictionary of masks to be logged. - """ - mask_label_dict = dict() - for mask, label in zip(masks, labels): - label = label + 1 - # Get bitmap mask from polygon. - if is_poly_mask: - if height is not None and width is not None: - mask = polygon_to_bitmap(mask, height, width) - # Create composite masks for each class. - if label not in mask_label_dict.keys(): - mask_label_dict[label] = mask - else: - mask_label_dict[label] = np.logical_or(mask_label_dict[label], - mask) - - wandb_masks = dict() - for key, value in mask_label_dict.items(): - # Create mask for that class. - value = value.astype(np.uint8) - value[value > 0] = key - - # Create dict of masks for logging. - class_name = self.class_id_to_label[key] - wandb_masks[class_name] = { - 'mask_data': value, - 'class_labels': self.class_id_to_label - } - - return wandb_masks - - def _log_data_table(self): - """Log the W&B Tables for validation data as artifact and calls - `use_artifact` on it so that the evaluation table can use the reference - of already uploaded images. - - This allows the data to be uploaded just once. - """ - data_artifact = self.wandb.Artifact('val', type='dataset') - data_artifact.add(self.data_table, 'val_data') - - if not self.wandb.run.offline: - self.wandb.run.use_artifact(data_artifact) - data_artifact.wait() - self.data_table_ref = data_artifact.get('val_data') - else: - self.data_table_ref = self.data_table - - def _log_eval_table(self, idx): - """Log the W&B Tables for model evaluation. - - The table will be logged multiple times creating new version. Use this - to compare models at different intervals interactively. 
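To make the logged structure concrete, this is roughly what `_get_wandb_bboxes` above assembles for a single predicted box (all values invented for illustration); `wandb.Image(..., boxes=wandb_boxes, classes=...)` then renders it as an interactive overlay:

```python
# Illustrative output of _get_wandb_bboxes for one prediction (made-up values).
wandb_boxes = {
    'predictions': {                       # key is 'ground_truth' when log_gt=True
        'box_data': [{
            'position': dict(minX=12, minY=34, maxX=120, maxY=220),
            'class_id': 1,                 # dataset label index + 1
            'box_caption': 'person 0.92',  # class name plus score for predictions
            'domain': 'pixel',             # coordinates are absolute pixels
        }],
        'class_labels': {1: 'person'},
    },
}
```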
- """ - pred_artifact = self.wandb.Artifact( - f'run_{self.wandb.run.id}_pred', type='evaluation') - pred_artifact.add(self.eval_table, 'eval_data') - if self.by_epoch: - aliases = ['latest', f'epoch_{idx}'] - else: - aliases = ['latest', f'iter_{idx}'] - self.wandb.run.log_artifact(pred_artifact, aliases=aliases) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/hook/yolox_lrupdater_hook.py b/cv/detection/co-detr/pytorch/mmdet/core/hook/yolox_lrupdater_hook.py deleted file mode 100644 index ecb028ed252047dd07086eef18d3b0e5abc778c0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/hook/yolox_lrupdater_hook.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmcv.runner.hooks import HOOKS -from mmcv.runner.hooks.lr_updater import (CosineAnnealingLrUpdaterHook, - annealing_cos) - - -@HOOKS.register_module() -class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook): - """YOLOX learning rate scheme. - - There are two main differences between YOLOXLrUpdaterHook - and CosineAnnealingLrUpdaterHook. - - 1. When the current running epoch is greater than - `max_epoch-last_epoch`, a fixed learning rate will be used - 2. The exp warmup scheme is different with LrUpdaterHook in MMCV - - Args: - num_last_epochs (int): The number of epochs with a fixed learning rate - before the end of the training. - """ - - def __init__(self, num_last_epochs, **kwargs): - self.num_last_epochs = num_last_epochs - super(YOLOXLrUpdaterHook, self).__init__(**kwargs) - - def get_warmup_lr(self, cur_iters): - - def _get_warmup_lr(cur_iters, regular_lr): - # exp warmup scheme - k = self.warmup_ratio * pow( - (cur_iters + 1) / float(self.warmup_iters), 2) - warmup_lr = [_lr * k for _lr in regular_lr] - return warmup_lr - - if isinstance(self.base_lr, dict): - lr_groups = {} - for key, base_lr in self.base_lr.items(): - lr_groups[key] = _get_warmup_lr(cur_iters, base_lr) - return lr_groups - else: - return _get_warmup_lr(cur_iters, self.base_lr) - - def get_lr(self, runner, base_lr): - last_iter = len(runner.data_loader) * self.num_last_epochs - - if self.by_epoch: - progress = runner.epoch - max_progress = runner.max_epochs - else: - progress = runner.iter - max_progress = runner.max_iters - - progress += 1 - - if self.min_lr_ratio is not None: - target_lr = base_lr * self.min_lr_ratio - else: - target_lr = self.min_lr - - if progress >= max_progress - last_iter: - # fixed learning rate - return target_lr - else: - return annealing_cos( - base_lr, target_lr, (progress - self.warmup_iters) / - (max_progress - self.warmup_iters - last_iter)) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/hook/yolox_mode_switch_hook.py b/cv/detection/co-detr/pytorch/mmdet/core/hook/yolox_mode_switch_hook.py deleted file mode 100644 index 10834e686af5c7f70c1f01ce1bef0c707740aea5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/hook/yolox_mode_switch_hook.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmcv.parallel import is_module_wrapper -from mmcv.runner.hooks import HOOKS, Hook - - -@HOOKS.register_module() -class YOLOXModeSwitchHook(Hook): - """Switch the mode of YOLOX during training. - - This hook turns off the mosaic and mixup data augmentation and switches - to use L1 loss in bbox_head. - - Args: - num_last_epochs (int): The number of latter epochs in the end of the - training to close the data augmentation and switch to L1 loss. - Default: 15. 
- skip_type_keys (list[str], optional): Sequence of type string to be - skip pipeline. Default: ('Mosaic', 'RandomAffine', 'MixUp') - """ - - def __init__(self, - num_last_epochs=15, - skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')): - self.num_last_epochs = num_last_epochs - self.skip_type_keys = skip_type_keys - self._restart_dataloader = False - - def before_train_epoch(self, runner): - """Close mosaic and mixup augmentation and switches to use L1 loss.""" - epoch = runner.epoch - train_loader = runner.data_loader - model = runner.model - if is_module_wrapper(model): - model = model.module - if (epoch + 1) == runner.max_epochs - self.num_last_epochs: - runner.logger.info('No mosaic and mixup aug now!') - # The dataset pipeline cannot be updated when persistent_workers - # is True, so we need to force the dataloader's multi-process - # restart. This is a very hacky approach. - train_loader.dataset.update_skip_type_keys(self.skip_type_keys) - if hasattr(train_loader, 'persistent_workers' - ) and train_loader.persistent_workers is True: - train_loader._DataLoader__initialized = False - train_loader._iterator = None - self._restart_dataloader = True - runner.logger.info('Add additional L1 loss now!') - model.bbox_head.use_l1 = True - else: - # Once the restart is complete, we need to restore - # the initialization flag. - if self._restart_dataloader: - train_loader._DataLoader__initialized = True diff --git a/cv/detection/co-detr/pytorch/mmdet/core/mask/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/mask/__init__.py deleted file mode 100644 index 644a9b1d9b4c2a557561da6c048f9056a1090526..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/mask/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .mask_target import mask_target -from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks -from .utils import encode_mask_results, mask2bbox, split_combined_polys - -__all__ = [ - 'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks', - 'PolygonMasks', 'encode_mask_results', 'mask2bbox' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/mask/mask_target.py b/cv/detection/co-detr/pytorch/mmdet/core/mask/mask_target.py deleted file mode 100644 index 273e7678fc14cec9f34a88edf6d6cac6c04e30fb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/mask/mask_target.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch -from torch.nn.modules.utils import _pair - - -def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, - cfg): - """Compute mask target for positive proposals in multiple images. - - Args: - pos_proposals_list (list[Tensor]): Positive proposals in multiple - images. - pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each - positive proposals. - gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of - each image. - cfg (dict): Config dict that specifies the mask size. - - Returns: - list[Tensor]: Mask target of each image. 
- - Example: - >>> import mmcv - >>> import mmdet - >>> from mmdet.core.mask import BitmapMasks - >>> from mmdet.core.mask.mask_target import * - >>> H, W = 17, 18 - >>> cfg = mmcv.Config({'mask_size': (13, 14)}) - >>> rng = np.random.RandomState(0) - >>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image - >>> pos_proposals_list = [ - >>> torch.Tensor([ - >>> [ 7.2425, 5.5929, 13.9414, 14.9541], - >>> [ 7.3241, 3.6170, 16.3850, 15.3102], - >>> ]), - >>> torch.Tensor([ - >>> [ 4.8448, 6.4010, 7.0314, 9.7681], - >>> [ 5.9790, 2.6989, 7.4416, 4.8580], - >>> [ 0.0000, 0.0000, 0.1398, 9.8232], - >>> ]), - >>> ] - >>> # Corresponding class index for each proposal for each image - >>> pos_assigned_gt_inds_list = [ - >>> torch.LongTensor([7, 0]), - >>> torch.LongTensor([5, 4, 1]), - >>> ] - >>> # Ground truth mask for each true object for each image - >>> gt_masks_list = [ - >>> BitmapMasks(rng.rand(8, H, W), height=H, width=W), - >>> BitmapMasks(rng.rand(6, H, W), height=H, width=W), - >>> ] - >>> mask_targets = mask_target( - >>> pos_proposals_list, pos_assigned_gt_inds_list, - >>> gt_masks_list, cfg) - >>> assert mask_targets.shape == (5,) + cfg['mask_size'] - """ - cfg_list = [cfg for _ in range(len(pos_proposals_list))] - mask_targets = map(mask_target_single, pos_proposals_list, - pos_assigned_gt_inds_list, gt_masks_list, cfg_list) - mask_targets = list(mask_targets) - if len(mask_targets) > 0: - mask_targets = torch.cat(mask_targets) - return mask_targets - - -def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg): - """Compute mask target for each positive proposal in the image. - - Args: - pos_proposals (Tensor): Positive proposals. - pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals. - gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap - or Polygon. - cfg (dict): Config dict that indicate the mask size. - - Returns: - Tensor: Mask target of each positive proposals in the image. 
- - Example: - >>> import mmcv - >>> import mmdet - >>> from mmdet.core.mask import BitmapMasks - >>> from mmdet.core.mask.mask_target import * # NOQA - >>> H, W = 32, 32 - >>> cfg = mmcv.Config({'mask_size': (7, 11)}) - >>> rng = np.random.RandomState(0) - >>> # Masks for each ground truth box (relative to the image) - >>> gt_masks_data = rng.rand(3, H, W) - >>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W) - >>> # Predicted positive boxes in one image - >>> pos_proposals = torch.FloatTensor([ - >>> [ 16.2, 5.5, 19.9, 20.9], - >>> [ 17.3, 13.6, 19.3, 19.3], - >>> [ 14.8, 16.4, 17.0, 23.7], - >>> [ 0.0, 0.0, 16.0, 16.0], - >>> [ 4.0, 0.0, 20.0, 16.0], - >>> ]) - >>> # For each predicted proposal, its assignment to a gt mask - >>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1]) - >>> mask_targets = mask_target_single( - >>> pos_proposals, pos_assigned_gt_inds, gt_masks, cfg) - >>> assert mask_targets.shape == (5,) + cfg['mask_size'] - """ - device = pos_proposals.device - mask_size = _pair(cfg.mask_size) - binarize = not cfg.get('soft_mask_target', False) - num_pos = pos_proposals.size(0) - if num_pos > 0: - proposals_np = pos_proposals.cpu().numpy() - maxh, maxw = gt_masks.height, gt_masks.width - proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw) - proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh) - pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() - - mask_targets = gt_masks.crop_and_resize( - proposals_np, - mask_size, - device=device, - inds=pos_assigned_gt_inds, - binarize=binarize).to_ndarray() - - mask_targets = torch.from_numpy(mask_targets).float().to(device) - else: - mask_targets = pos_proposals.new_zeros((0, ) + mask_size) - - return mask_targets diff --git a/cv/detection/co-detr/pytorch/mmdet/core/mask/structures.py b/cv/detection/co-detr/pytorch/mmdet/core/mask/structures.py deleted file mode 100644 index a9d0ebb4ba4ab97f8ab4f684795c7e4bb253557d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/mask/structures.py +++ /dev/null @@ -1,1102 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta, abstractmethod - -import cv2 -import mmcv -import numpy as np -import pycocotools.mask as maskUtils -import torch -from mmcv.ops.roi_align import roi_align - - -class BaseInstanceMasks(metaclass=ABCMeta): - """Base class for instance masks.""" - - @abstractmethod - def rescale(self, scale, interpolation='nearest'): - """Rescale masks as large as possible while keeping the aspect ratio. - For details can refer to `mmcv.imrescale`. - - Args: - scale (tuple[int]): The maximum size (h, w) of rescaled mask. - interpolation (str): Same as :func:`mmcv.imrescale`. - - Returns: - BaseInstanceMasks: The rescaled masks. - """ - - @abstractmethod - def resize(self, out_shape, interpolation='nearest'): - """Resize masks to the given out_shape. - - Args: - out_shape: Target (h, w) of resized mask. - interpolation (str): See :func:`mmcv.imresize`. - - Returns: - BaseInstanceMasks: The resized masks. - """ - - @abstractmethod - def flip(self, flip_direction='horizontal'): - """Flip masks alone the given direction. - - Args: - flip_direction (str): Either 'horizontal' or 'vertical'. - - Returns: - BaseInstanceMasks: The flipped masks. - """ - - @abstractmethod - def pad(self, out_shape, pad_val): - """Pad masks to the given size of (h, w). - - Args: - out_shape (tuple[int]): Target (h, w) of padded mask. - pad_val (int): The padded value. 
- - Returns: - BaseInstanceMasks: The padded masks. - """ - - @abstractmethod - def crop(self, bbox): - """Crop each mask by the given bbox. - - Args: - bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ). - - Return: - BaseInstanceMasks: The cropped masks. - """ - - @abstractmethod - def crop_and_resize(self, - bboxes, - out_shape, - inds, - device, - interpolation='bilinear', - binarize=True): - """Crop and resize masks by the given bboxes. - - This function is mainly used in mask targets computation. - It firstly align mask to bboxes by assigned_inds, then crop mask by the - assigned bbox and resize to the size of (mask_h, mask_w) - - Args: - bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4) - out_shape (tuple[int]): Target (h, w) of resized mask - inds (ndarray): Indexes to assign masks to each bbox, - shape (N,) and values should be between [0, num_masks - 1]. - device (str): Device of bboxes - interpolation (str): See `mmcv.imresize` - binarize (bool): if True fractional values are rounded to 0 or 1 - after the resize operation. if False and unsupported an error - will be raised. Defaults to True. - - Return: - BaseInstanceMasks: the cropped and resized masks. - """ - - @abstractmethod - def expand(self, expanded_h, expanded_w, top, left): - """see :class:`Expand`.""" - - @property - @abstractmethod - def areas(self): - """ndarray: areas of each instance.""" - - @abstractmethod - def to_ndarray(self): - """Convert masks to the format of ndarray. - - Return: - ndarray: Converted masks in the format of ndarray. - """ - - @abstractmethod - def to_tensor(self, dtype, device): - """Convert masks to the format of Tensor. - - Args: - dtype (str): Dtype of converted mask. - device (torch.device): Device of converted masks. - - Returns: - Tensor: Converted masks in the format of Tensor. - """ - - @abstractmethod - def translate(self, - out_shape, - offset, - direction='horizontal', - fill_val=0, - interpolation='bilinear'): - """Translate the masks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - offset (int | float): The offset for translate. - direction (str): The translate direction, either "horizontal" - or "vertical". - fill_val (int | float): Border value. Default 0. - interpolation (str): Same as :func:`mmcv.imtranslate`. - - Returns: - Translated masks. - """ - - def shear(self, - out_shape, - magnitude, - direction='horizontal', - border_value=0, - interpolation='bilinear'): - """Shear the masks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - magnitude (int | float): The magnitude used for shear. - direction (str): The shear direction, either "horizontal" - or "vertical". - border_value (int | tuple[int]): Value used in case of a - constant border. Default 0. - interpolation (str): Same as in :func:`mmcv.imshear`. - - Returns: - ndarray: Sheared masks. - """ - - @abstractmethod - def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): - """Rotate the masks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - angle (int | float): Rotation angle in degrees. Positive values - mean counter-clockwise rotation. - center (tuple[float], optional): Center point (w, h) of the - rotation in source image. If not specified, the center of - the image will be used. - scale (int | float): Isotropic scale factor. - fill_val (int | float): Border value. Default 0 for masks. - - Returns: - Rotated masks. 
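Before the two concrete classes below, a tiny usage sketch showing that they can be driven through this shared interface (toy sizes, and assuming the classes defined in this file are importable as `mmdet.core.mask.structures`):

```python
import numpy as np
from mmdet.core.mask.structures import BitmapMasks, PolygonMasks

# Two masks as bitmaps and one mask as a polygon, both on an 8x8 canvas.
bitmaps = BitmapMasks(np.zeros((2, 8, 8), dtype=np.uint8), height=8, width=8)
polygons = PolygonMasks([[np.array([0, 0, 4, 0, 4, 4, 0, 4], dtype=np.float32)]],
                        height=8, width=8)

for masks in (bitmaps, polygons):
    resized = masks.resize((16, 16))  # both subclasses honour the same signature
    print(type(resized).__name__, resized.height, resized.width)
```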
- """ - - -class BitmapMasks(BaseInstanceMasks): - """This class represents masks in the form of bitmaps. - - Args: - masks (ndarray): ndarray of masks in shape (N, H, W), where N is - the number of objects. - height (int): height of masks - width (int): width of masks - - Example: - >>> from mmdet.core.mask.structures import * # NOQA - >>> num_masks, H, W = 3, 32, 32 - >>> rng = np.random.RandomState(0) - >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int) - >>> self = BitmapMasks(masks, height=H, width=W) - - >>> # demo crop_and_resize - >>> num_boxes = 5 - >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) - >>> out_shape = (14, 14) - >>> inds = torch.randint(0, len(self), size=(num_boxes,)) - >>> device = 'cpu' - >>> interpolation = 'bilinear' - >>> new = self.crop_and_resize( - ... bboxes, out_shape, inds, device, interpolation) - >>> assert len(new) == num_boxes - >>> assert new.height, new.width == out_shape - """ - - def __init__(self, masks, height, width): - self.height = height - self.width = width - if len(masks) == 0: - self.masks = np.empty((0, self.height, self.width), dtype=np.uint8) - else: - assert isinstance(masks, (list, np.ndarray)) - if isinstance(masks, list): - assert isinstance(masks[0], np.ndarray) - assert masks[0].ndim == 2 # (H, W) - else: - assert masks.ndim == 3 # (N, H, W) - - self.masks = np.stack(masks).reshape(-1, height, width) - assert self.masks.shape[1] == self.height - assert self.masks.shape[2] == self.width - - def __getitem__(self, index): - """Index the BitmapMask. - - Args: - index (int | ndarray): Indices in the format of integer or ndarray. - - Returns: - :obj:`BitmapMasks`: Indexed bitmap masks. - """ - masks = self.masks[index].reshape(-1, self.height, self.width) - return BitmapMasks(masks, self.height, self.width) - - def __iter__(self): - return iter(self.masks) - - def __repr__(self): - s = self.__class__.__name__ + '(' - s += f'num_masks={len(self.masks)}, ' - s += f'height={self.height}, ' - s += f'width={self.width})' - return s - - def __len__(self): - """Number of masks.""" - return len(self.masks) - - def rescale(self, scale, interpolation='nearest'): - """See :func:`BaseInstanceMasks.rescale`.""" - if len(self.masks) == 0: - new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) - rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8) - else: - rescaled_masks = np.stack([ - mmcv.imrescale(mask, scale, interpolation=interpolation) - for mask in self.masks - ]) - height, width = rescaled_masks.shape[1:] - return BitmapMasks(rescaled_masks, height, width) - - def resize(self, out_shape, interpolation='nearest'): - """See :func:`BaseInstanceMasks.resize`.""" - if len(self.masks) == 0: - resized_masks = np.empty((0, *out_shape), dtype=np.uint8) - else: - resized_masks = np.stack([ - mmcv.imresize( - mask, out_shape[::-1], interpolation=interpolation) - for mask in self.masks - ]) - return BitmapMasks(resized_masks, *out_shape) - - def flip(self, flip_direction='horizontal'): - """See :func:`BaseInstanceMasks.flip`.""" - assert flip_direction in ('horizontal', 'vertical', 'diagonal') - - if len(self.masks) == 0: - flipped_masks = self.masks - else: - flipped_masks = np.stack([ - mmcv.imflip(mask, direction=flip_direction) - for mask in self.masks - ]) - return BitmapMasks(flipped_masks, self.height, self.width) - - def pad(self, out_shape, pad_val=0): - """See :func:`BaseInstanceMasks.pad`.""" - if len(self.masks) == 0: - padded_masks = np.empty((0, *out_shape), dtype=np.uint8) - else: - padded_masks = 
np.stack([ - mmcv.impad(mask, shape=out_shape, pad_val=pad_val) - for mask in self.masks - ]) - return BitmapMasks(padded_masks, *out_shape) - - def crop(self, bbox): - """See :func:`BaseInstanceMasks.crop`.""" - assert isinstance(bbox, np.ndarray) - assert bbox.ndim == 1 - - # clip the boundary - bbox = bbox.copy() - bbox[0::2] = np.clip(bbox[0::2], 0, self.width) - bbox[1::2] = np.clip(bbox[1::2], 0, self.height) - x1, y1, x2, y2 = bbox - w = np.maximum(x2 - x1, 1) - h = np.maximum(y2 - y1, 1) - - if len(self.masks) == 0: - cropped_masks = np.empty((0, h, w), dtype=np.uint8) - else: - cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w] - return BitmapMasks(cropped_masks, h, w) - - def crop_and_resize(self, - bboxes, - out_shape, - inds, - device='cpu', - interpolation='bilinear', - binarize=True): - """See :func:`BaseInstanceMasks.crop_and_resize`.""" - if len(self.masks) == 0: - empty_masks = np.empty((0, *out_shape), dtype=np.uint8) - return BitmapMasks(empty_masks, *out_shape) - - # convert bboxes to tensor - if isinstance(bboxes, np.ndarray): - bboxes = torch.from_numpy(bboxes).to(device=device) - if isinstance(inds, np.ndarray): - inds = torch.from_numpy(inds).to(device=device) - - num_bbox = bboxes.shape[0] - fake_inds = torch.arange( - num_bbox, device=device).to(dtype=bboxes.dtype)[:, None] - rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5 - rois = rois.to(device=device) - if num_bbox > 0: - gt_masks_th = torch.from_numpy(self.masks).to(device).index_select( - 0, inds).to(dtype=rois.dtype) - targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape, - 1.0, 0, 'avg', True).squeeze(1) - if binarize: - resized_masks = (targets >= 0.5).cpu().numpy() - else: - resized_masks = targets.cpu().numpy() - else: - resized_masks = [] - return BitmapMasks(resized_masks, *out_shape) - - def expand(self, expanded_h, expanded_w, top, left): - """See :func:`BaseInstanceMasks.expand`.""" - if len(self.masks) == 0: - expanded_mask = np.empty((0, expanded_h, expanded_w), - dtype=np.uint8) - else: - expanded_mask = np.zeros((len(self), expanded_h, expanded_w), - dtype=np.uint8) - expanded_mask[:, top:top + self.height, - left:left + self.width] = self.masks - return BitmapMasks(expanded_mask, expanded_h, expanded_w) - - def translate(self, - out_shape, - offset, - direction='horizontal', - fill_val=0, - interpolation='bilinear'): - """Translate the BitmapMasks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - offset (int | float): The offset for translate. - direction (str): The translate direction, either "horizontal" - or "vertical". - fill_val (int | float): Border value. Default 0 for masks. - interpolation (str): Same as :func:`mmcv.imtranslate`. - - Returns: - BitmapMasks: Translated BitmapMasks. 
- - Example: - >>> from mmdet.core.mask.structures import BitmapMasks - >>> self = BitmapMasks.random(dtype=np.uint8) - >>> out_shape = (32, 32) - >>> offset = 4 - >>> direction = 'horizontal' - >>> fill_val = 0 - >>> interpolation = 'bilinear' - >>> # Note, There seem to be issues when: - >>> # * out_shape is different than self's shape - >>> # * the mask dtype is not supported by cv2.AffineWarp - >>> new = self.translate(out_shape, offset, direction, fill_val, - >>> interpolation) - >>> assert len(new) == len(self) - >>> assert new.height, new.width == out_shape - """ - if len(self.masks) == 0: - translated_masks = np.empty((0, *out_shape), dtype=np.uint8) - else: - translated_masks = mmcv.imtranslate( - self.masks.transpose((1, 2, 0)), - offset, - direction, - border_value=fill_val, - interpolation=interpolation) - if translated_masks.ndim == 2: - translated_masks = translated_masks[:, :, None] - translated_masks = translated_masks.transpose( - (2, 0, 1)).astype(self.masks.dtype) - return BitmapMasks(translated_masks, *out_shape) - - def shear(self, - out_shape, - magnitude, - direction='horizontal', - border_value=0, - interpolation='bilinear'): - """Shear the BitmapMasks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - magnitude (int | float): The magnitude used for shear. - direction (str): The shear direction, either "horizontal" - or "vertical". - border_value (int | tuple[int]): Value used in case of a - constant border. - interpolation (str): Same as in :func:`mmcv.imshear`. - - Returns: - BitmapMasks: The sheared masks. - """ - if len(self.masks) == 0: - sheared_masks = np.empty((0, *out_shape), dtype=np.uint8) - else: - sheared_masks = mmcv.imshear( - self.masks.transpose((1, 2, 0)), - magnitude, - direction, - border_value=border_value, - interpolation=interpolation) - if sheared_masks.ndim == 2: - sheared_masks = sheared_masks[:, :, None] - sheared_masks = sheared_masks.transpose( - (2, 0, 1)).astype(self.masks.dtype) - return BitmapMasks(sheared_masks, *out_shape) - - def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): - """Rotate the BitmapMasks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - angle (int | float): Rotation angle in degrees. Positive values - mean counter-clockwise rotation. - center (tuple[float], optional): Center point (w, h) of the - rotation in source image. If not specified, the center of - the image will be used. - scale (int | float): Isotropic scale factor. - fill_val (int | float): Border value. Default 0 for masks. - - Returns: - BitmapMasks: Rotated BitmapMasks. 
- """ - if len(self.masks) == 0: - rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype) - else: - rotated_masks = mmcv.imrotate( - self.masks.transpose((1, 2, 0)), - angle, - center=center, - scale=scale, - border_value=fill_val) - if rotated_masks.ndim == 2: - # case when only one mask, (h, w) - rotated_masks = rotated_masks[:, :, None] # (h, w, 1) - rotated_masks = rotated_masks.transpose( - (2, 0, 1)).astype(self.masks.dtype) - return BitmapMasks(rotated_masks, *out_shape) - - @property - def areas(self): - """See :py:attr:`BaseInstanceMasks.areas`.""" - return self.masks.sum((1, 2)) - - def to_ndarray(self): - """See :func:`BaseInstanceMasks.to_ndarray`.""" - return self.masks - - def to_tensor(self, dtype, device): - """See :func:`BaseInstanceMasks.to_tensor`.""" - return torch.tensor(self.masks, dtype=dtype, device=device) - - @classmethod - def random(cls, - num_masks=3, - height=32, - width=32, - dtype=np.uint8, - rng=None): - """Generate random bitmap masks for demo / testing purposes. - - Example: - >>> from mmdet.core.mask.structures import BitmapMasks - >>> self = BitmapMasks.random() - >>> print('self = {}'.format(self)) - self = BitmapMasks(num_masks=3, height=32, width=32) - """ - from mmdet.utils.util_random import ensure_rng - rng = ensure_rng(rng) - masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype) - self = cls(masks, height=height, width=width) - return self - - def get_bboxes(self): - num_masks = len(self) - boxes = np.zeros((num_masks, 4), dtype=np.float32) - x_any = self.masks.any(axis=1) - y_any = self.masks.any(axis=2) - for idx in range(num_masks): - x = np.where(x_any[idx, :])[0] - y = np.where(y_any[idx, :])[0] - if len(x) > 0 and len(y) > 0: - # use +1 for x_max and y_max so that the right and bottom - # boundary of instance masks are fully included by the box - boxes[idx, :] = np.array([x[0], y[0], x[-1] + 1, y[-1] + 1], - dtype=np.float32) - return boxes - - -class PolygonMasks(BaseInstanceMasks): - """This class represents masks in the form of polygons. - - Polygons is a list of three levels. The first level of the list - corresponds to objects, the second level to the polys that compose the - object, the third level to the poly coordinates - - Args: - masks (list[list[ndarray]]): The first level of the list - corresponds to objects, the second level to the polys that - compose the object, the third level to the poly coordinates - height (int): height of masks - width (int): width of masks - - Example: - >>> from mmdet.core.mask.structures import * # NOQA - >>> masks = [ - >>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ] - >>> ] - >>> height, width = 16, 16 - >>> self = PolygonMasks(masks, height, width) - - >>> # demo translate - >>> new = self.translate((16, 16), 4., direction='horizontal') - >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2]) - >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4) - - >>> # demo crop_and_resize - >>> num_boxes = 3 - >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) - >>> out_shape = (16, 16) - >>> inds = torch.randint(0, len(self), size=(num_boxes,)) - >>> device = 'cpu' - >>> interpolation = 'bilinear' - >>> new = self.crop_and_resize( - ... 
bboxes, out_shape, inds, device, interpolation) - >>> assert len(new) == num_boxes - >>> assert new.height, new.width == out_shape - """ - - def __init__(self, masks, height, width): - assert isinstance(masks, list) - if len(masks) > 0: - assert isinstance(masks[0], list) - assert isinstance(masks[0][0], np.ndarray) - - self.height = height - self.width = width - self.masks = masks - - def __getitem__(self, index): - """Index the polygon masks. - - Args: - index (ndarray | List): The indices. - - Returns: - :obj:`PolygonMasks`: The indexed polygon masks. - """ - if isinstance(index, np.ndarray): - index = index.tolist() - if isinstance(index, list): - masks = [self.masks[i] for i in index] - else: - try: - masks = self.masks[index] - except Exception: - raise ValueError( - f'Unsupported input of type {type(index)} for indexing!') - if len(masks) and isinstance(masks[0], np.ndarray): - masks = [masks] # ensure a list of three levels - return PolygonMasks(masks, self.height, self.width) - - def __iter__(self): - return iter(self.masks) - - def __repr__(self): - s = self.__class__.__name__ + '(' - s += f'num_masks={len(self.masks)}, ' - s += f'height={self.height}, ' - s += f'width={self.width})' - return s - - def __len__(self): - """Number of masks.""" - return len(self.masks) - - def rescale(self, scale, interpolation=None): - """see :func:`BaseInstanceMasks.rescale`""" - new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) - if len(self.masks) == 0: - rescaled_masks = PolygonMasks([], new_h, new_w) - else: - rescaled_masks = self.resize((new_h, new_w)) - return rescaled_masks - - def resize(self, out_shape, interpolation=None): - """see :func:`BaseInstanceMasks.resize`""" - if len(self.masks) == 0: - resized_masks = PolygonMasks([], *out_shape) - else: - h_scale = out_shape[0] / self.height - w_scale = out_shape[1] / self.width - resized_masks = [] - for poly_per_obj in self.masks: - resized_poly = [] - for p in poly_per_obj: - p = p.copy() - p[0::2] = p[0::2] * w_scale - p[1::2] = p[1::2] * h_scale - resized_poly.append(p) - resized_masks.append(resized_poly) - resized_masks = PolygonMasks(resized_masks, *out_shape) - return resized_masks - - def flip(self, flip_direction='horizontal'): - """see :func:`BaseInstanceMasks.flip`""" - assert flip_direction in ('horizontal', 'vertical', 'diagonal') - if len(self.masks) == 0: - flipped_masks = PolygonMasks([], self.height, self.width) - else: - flipped_masks = [] - for poly_per_obj in self.masks: - flipped_poly_per_obj = [] - for p in poly_per_obj: - p = p.copy() - if flip_direction == 'horizontal': - p[0::2] = self.width - p[0::2] - elif flip_direction == 'vertical': - p[1::2] = self.height - p[1::2] - else: - p[0::2] = self.width - p[0::2] - p[1::2] = self.height - p[1::2] - flipped_poly_per_obj.append(p) - flipped_masks.append(flipped_poly_per_obj) - flipped_masks = PolygonMasks(flipped_masks, self.height, - self.width) - return flipped_masks - - def crop(self, bbox): - """see :func:`BaseInstanceMasks.crop`""" - assert isinstance(bbox, np.ndarray) - assert bbox.ndim == 1 - - # clip the boundary - bbox = bbox.copy() - bbox[0::2] = np.clip(bbox[0::2], 0, self.width) - bbox[1::2] = np.clip(bbox[1::2], 0, self.height) - x1, y1, x2, y2 = bbox - w = np.maximum(x2 - x1, 1) - h = np.maximum(y2 - y1, 1) - - if len(self.masks) == 0: - cropped_masks = PolygonMasks([], h, w) - else: - cropped_masks = [] - for poly_per_obj in self.masks: - cropped_poly_per_obj = [] - for p in poly_per_obj: - # pycocotools will clip the boundary - p = 
p.copy() - p[0::2] = p[0::2] - bbox[0] - p[1::2] = p[1::2] - bbox[1] - cropped_poly_per_obj.append(p) - cropped_masks.append(cropped_poly_per_obj) - cropped_masks = PolygonMasks(cropped_masks, h, w) - return cropped_masks - - def pad(self, out_shape, pad_val=0): - """padding has no effect on polygons`""" - return PolygonMasks(self.masks, *out_shape) - - def expand(self, *args, **kwargs): - """TODO: Add expand for polygon""" - raise NotImplementedError - - def crop_and_resize(self, - bboxes, - out_shape, - inds, - device='cpu', - interpolation='bilinear', - binarize=True): - """see :func:`BaseInstanceMasks.crop_and_resize`""" - out_h, out_w = out_shape - if len(self.masks) == 0: - return PolygonMasks([], out_h, out_w) - - if not binarize: - raise ValueError('Polygons are always binary, ' - 'setting binarize=False is unsupported') - - resized_masks = [] - for i in range(len(bboxes)): - mask = self.masks[inds[i]] - bbox = bboxes[i, :] - x1, y1, x2, y2 = bbox - w = np.maximum(x2 - x1, 1) - h = np.maximum(y2 - y1, 1) - h_scale = out_h / max(h, 0.1) # avoid too large scale - w_scale = out_w / max(w, 0.1) - - resized_mask = [] - for p in mask: - p = p.copy() - # crop - # pycocotools will clip the boundary - p[0::2] = p[0::2] - bbox[0] - p[1::2] = p[1::2] - bbox[1] - - # resize - p[0::2] = p[0::2] * w_scale - p[1::2] = p[1::2] * h_scale - resized_mask.append(p) - resized_masks.append(resized_mask) - return PolygonMasks(resized_masks, *out_shape) - - def translate(self, - out_shape, - offset, - direction='horizontal', - fill_val=None, - interpolation=None): - """Translate the PolygonMasks. - - Example: - >>> self = PolygonMasks.random(dtype=np.int) - >>> out_shape = (self.height, self.width) - >>> new = self.translate(out_shape, 4., direction='horizontal') - >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2]) - >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501 - """ - assert fill_val is None or fill_val == 0, 'Here fill_val is not '\ - f'used, and defaultly should be None or 0. got {fill_val}.' 
- if len(self.masks) == 0: - translated_masks = PolygonMasks([], *out_shape) - else: - translated_masks = [] - for poly_per_obj in self.masks: - translated_poly_per_obj = [] - for p in poly_per_obj: - p = p.copy() - if direction == 'horizontal': - p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1]) - elif direction == 'vertical': - p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0]) - translated_poly_per_obj.append(p) - translated_masks.append(translated_poly_per_obj) - translated_masks = PolygonMasks(translated_masks, *out_shape) - return translated_masks - - def shear(self, - out_shape, - magnitude, - direction='horizontal', - border_value=0, - interpolation='bilinear'): - """See :func:`BaseInstanceMasks.shear`.""" - if len(self.masks) == 0: - sheared_masks = PolygonMasks([], *out_shape) - else: - sheared_masks = [] - if direction == 'horizontal': - shear_matrix = np.stack([[1, magnitude], - [0, 1]]).astype(np.float32) - elif direction == 'vertical': - shear_matrix = np.stack([[1, 0], [magnitude, - 1]]).astype(np.float32) - for poly_per_obj in self.masks: - sheared_poly = [] - for p in poly_per_obj: - p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n] - new_coords = np.matmul(shear_matrix, p) # [2, n] - new_coords[0, :] = np.clip(new_coords[0, :], 0, - out_shape[1]) - new_coords[1, :] = np.clip(new_coords[1, :], 0, - out_shape[0]) - sheared_poly.append( - new_coords.transpose((1, 0)).reshape(-1)) - sheared_masks.append(sheared_poly) - sheared_masks = PolygonMasks(sheared_masks, *out_shape) - return sheared_masks - - def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): - """See :func:`BaseInstanceMasks.rotate`.""" - if len(self.masks) == 0: - rotated_masks = PolygonMasks([], *out_shape) - else: - rotated_masks = [] - rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale) - for poly_per_obj in self.masks: - rotated_poly = [] - for p in poly_per_obj: - p = p.copy() - coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2] - # pad 1 to convert from format [x, y] to homogeneous - # coordinates format [x, y, 1] - coords = np.concatenate( - (coords, np.ones((coords.shape[0], 1), coords.dtype)), - axis=1) # [n, 3] - rotated_coords = np.matmul( - rotate_matrix[None, :, :], - coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2] - rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0, - out_shape[1]) - rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0, - out_shape[0]) - rotated_poly.append(rotated_coords.reshape(-1)) - rotated_masks.append(rotated_poly) - rotated_masks = PolygonMasks(rotated_masks, *out_shape) - return rotated_masks - - def to_bitmap(self): - """convert polygon masks to bitmap masks.""" - bitmap_masks = self.to_ndarray() - return BitmapMasks(bitmap_masks, self.height, self.width) - - @property - def areas(self): - """Compute areas of masks. - - This func is modified from `detectron2 - `_. - The function only works with Polygons using the shoelace formula. - - Return: - ndarray: areas of each instance - """ # noqa: W501 - area = [] - for polygons_per_obj in self.masks: - area_per_obj = 0 - for p in polygons_per_obj: - area_per_obj += self._polygon_area(p[0::2], p[1::2]) - area.append(area_per_obj) - return np.asarray(area) - - def _polygon_area(self, x, y): - """Compute the area of a component of a polygon. 
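For reference, the return statement below evaluates the shoelace identity (np.roll(..., 1) supplies the cyclic index shift, so indices are taken modulo n):

$$
A = \frac{1}{2}\,\Bigl|\sum_{i=1}^{n} \bigl(x_i\,y_{i-1} - y_i\,x_{i-1}\bigr)\Bigr|
$$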
- - Using the shoelace formula: - https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates - - Args: - x (ndarray): x coordinates of the component - y (ndarray): y coordinates of the component - - Return: - float: the are of the component - """ # noqa: 501 - return 0.5 * np.abs( - np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) - - def to_ndarray(self): - """Convert masks to the format of ndarray.""" - if len(self.masks) == 0: - return np.empty((0, self.height, self.width), dtype=np.uint8) - bitmap_masks = [] - for poly_per_obj in self.masks: - bitmap_masks.append( - polygon_to_bitmap(poly_per_obj, self.height, self.width)) - return np.stack(bitmap_masks) - - def to_tensor(self, dtype, device): - """See :func:`BaseInstanceMasks.to_tensor`.""" - if len(self.masks) == 0: - return torch.empty((0, self.height, self.width), - dtype=dtype, - device=device) - ndarray_masks = self.to_ndarray() - return torch.tensor(ndarray_masks, dtype=dtype, device=device) - - @classmethod - def random(cls, - num_masks=3, - height=32, - width=32, - n_verts=5, - dtype=np.float32, - rng=None): - """Generate random polygon masks for demo / testing purposes. - - Adapted from [1]_ - - References: - .. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379 # noqa: E501 - - Example: - >>> from mmdet.core.mask.structures import PolygonMasks - >>> self = PolygonMasks.random() - >>> print('self = {}'.format(self)) - """ - from mmdet.utils.util_random import ensure_rng - rng = ensure_rng(rng) - - def _gen_polygon(n, irregularity, spikeyness): - """Creates the polygon by sampling points on a circle around the - centre. Random noise is added by varying the angular spacing - between sequential points, and by varying the radial distance of - each point from the centre. - - Based on original code by Mike Ounsworth - - Args: - n (int): number of vertices - irregularity (float): [0,1] indicating how much variance there - is in the angular spacing of vertices. [0,1] will map to - [0, 2pi/numberOfVerts] - spikeyness (float): [0,1] indicating how much variance there is - in each vertex from the circle of radius aveRadius. [0,1] - will map to [0, aveRadius] - - Returns: - a list of vertices, in CCW order. 
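The `_polygon_area` helper above is the shoelace formula; as a quick sanity check, a unit square traversed counter-clockwise should give an area of 1.0. A small standalone NumPy sketch:

```python
import numpy as np

def shoelace_area(x, y):
    # 0.5 * |sum(x_i * y_{i-1} - y_i * x_{i-1})| over the closed polygon
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

# Unit square, counter-clockwise vertex order -> area 1.0
x = np.array([0.0, 1.0, 1.0, 0.0])
y = np.array([0.0, 0.0, 1.0, 1.0])
assert np.isclose(shoelace_area(x, y), 1.0)
```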
- """ - from scipy.stats import truncnorm - - # Generate around the unit circle - cx, cy = (0.0, 0.0) - radius = 1 - - tau = np.pi * 2 - - irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n - spikeyness = np.clip(spikeyness, 1e-9, 1) - - # generate n angle steps - lower = (tau / n) - irregularity - upper = (tau / n) + irregularity - angle_steps = rng.uniform(lower, upper, n) - - # normalize the steps so that point 0 and point n+1 are the same - k = angle_steps.sum() / (2 * np.pi) - angles = (angle_steps / k).cumsum() + rng.uniform(0, tau) - - # Convert high and low values to be wrt the standard normal range - # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html - low = 0 - high = 2 * radius - mean = radius - std = spikeyness - a = (low - mean) / std - b = (high - mean) / std - tnorm = truncnorm(a=a, b=b, loc=mean, scale=std) - - # now generate the points - radii = tnorm.rvs(n, random_state=rng) - x_pts = cx + radii * np.cos(angles) - y_pts = cy + radii * np.sin(angles) - - points = np.hstack([x_pts[:, None], y_pts[:, None]]) - - # Scale to 0-1 space - points = points - points.min(axis=0) - points = points / points.max(axis=0) - - # Randomly place within 0-1 space - points = points * (rng.rand() * .8 + .2) - min_pt = points.min(axis=0) - max_pt = points.max(axis=0) - - high = (1 - max_pt) - low = (0 - min_pt) - offset = (rng.rand(2) * (high - low)) + low - points = points + offset - return points - - def _order_vertices(verts): - """ - References: - https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise - """ - mlat = verts.T[0].sum() / len(verts) - mlng = verts.T[1].sum() / len(verts) - - tau = np.pi * 2 - angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) + - tau) % tau - sortx = angle.argsort() - verts = verts.take(sortx, axis=0) - return verts - - # Generate a random exterior for each requested mask - masks = [] - for _ in range(num_masks): - exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9)) - exterior = (exterior * [(width, height)]).astype(dtype) - masks.append([exterior.ravel()]) - - self = cls(masks, height, width) - return self - - def get_bboxes(self): - num_masks = len(self) - boxes = np.zeros((num_masks, 4), dtype=np.float32) - for idx, poly_per_obj in enumerate(self.masks): - # simply use a number that is big enough for comparison with - # coordinates - xy_min = np.array([self.width * 2, self.height * 2], - dtype=np.float32) - xy_max = np.zeros(2, dtype=np.float32) - for p in poly_per_obj: - xy = np.array(p).reshape(-1, 2).astype(np.float32) - xy_min = np.minimum(xy_min, np.min(xy, axis=0)) - xy_max = np.maximum(xy_max, np.max(xy, axis=0)) - boxes[idx, :2] = xy_min - boxes[idx, 2:] = xy_max - - return boxes - - -def polygon_to_bitmap(polygons, height, width): - """Convert masks from the form of polygons to bitmaps. - - Args: - polygons (list[ndarray]): masks in polygon representation - height (int): mask height - width (int): mask width - - Return: - ndarray: the converted masks in bitmap representation - """ - rles = maskUtils.frPyObjects(polygons, height, width) - rle = maskUtils.merge(rles) - bitmap_mask = maskUtils.decode(rle).astype(np.bool) - return bitmap_mask - - -def bitmap_to_polygon(bitmap): - """Convert masks from the form of bitmaps to polygons. - - Args: - bitmap (ndarray): masks in bitmap representation. - - Return: - list[ndarray]: the converted mask in polygon representation. - bool: whether the mask has holes. 
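The `polygon_to_bitmap` conversion above leans on pycocotools; a minimal sketch of the same frPyObjects/merge/decode sequence for a single triangle (image size and coordinates are arbitrary, and pycocotools must be installed):

```python
import numpy as np
import pycocotools.mask as maskUtils

# A single triangle in flattened [x0, y0, x1, y1, ...] form, as PolygonMasks stores it.
triangle = [np.array([1.0, 1.0, 8.0, 1.0, 1.0, 8.0])]

rles = maskUtils.frPyObjects(triangle, 10, 10)   # height=10, width=10
rle = maskUtils.merge(rles)
bitmap = maskUtils.decode(rle).astype(bool)
print(bitmap.shape, bitmap.sum())  # (10, 10) and the number of foreground pixels
```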
- """ - bitmap = np.ascontiguousarray(bitmap).astype(np.uint8) - # cv2.RETR_CCOMP: retrieves all of the contours and organizes them - # into a two-level hierarchy. At the top level, there are external - # boundaries of the components. At the second level, there are - # boundaries of the holes. If there is another contour inside a hole - # of a connected component, it is still put at the top level. - # cv2.CHAIN_APPROX_NONE: stores absolutely all the contour points. - outs = cv2.findContours(bitmap, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) - contours = outs[-2] - hierarchy = outs[-1] - if hierarchy is None: - return [], False - # hierarchy[i]: 4 elements, for the indexes of next, previous, - # parent, or nested contours. If there is no corresponding contour, - # it will be -1. - with_hole = (hierarchy.reshape(-1, 4)[:, 3] >= 0).any() - contours = [c.reshape(-1, 2) for c in contours] - return contours, with_hole diff --git a/cv/detection/co-detr/pytorch/mmdet/core/mask/utils.py b/cv/detection/co-detr/pytorch/mmdet/core/mask/utils.py deleted file mode 100644 index 90544b34f49aa60ac2a1abae10f1a89cc9fe43f0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/mask/utils.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import numpy as np -import pycocotools.mask as mask_util -import torch - - -def split_combined_polys(polys, poly_lens, polys_per_mask): - """Split the combined 1-D polys into masks. - - A mask is represented as a list of polys, and a poly is represented as - a 1-D array. In dataset, all masks are concatenated into a single 1-D - tensor. Here we need to split the tensor into original representations. - - Args: - polys (list): a list (length = image num) of 1-D tensors - poly_lens (list): a list (length = image num) of poly length - polys_per_mask (list): a list (length = image num) of poly number - of each mask - - Returns: - list: a list (length = image num) of list (length = mask num) of \ - list (length = poly num) of numpy array. - """ - mask_polys_list = [] - for img_id in range(len(polys)): - polys_single = polys[img_id] - polys_lens_single = poly_lens[img_id].tolist() - polys_per_mask_single = polys_per_mask[img_id].tolist() - - split_polys = mmcv.slice_list(polys_single, polys_lens_single) - mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single) - mask_polys_list.append(mask_polys) - return mask_polys_list - - -# TODO: move this function to more proper place -def encode_mask_results(mask_results): - """Encode bitmap mask to RLE code. - - Args: - mask_results (list | tuple[list]): bitmap mask results. - In mask scoring rcnn, mask_results is a tuple of (segm_results, - segm_cls_score). - - Returns: - list | tuple: RLE encoded mask. - """ - if isinstance(mask_results, tuple): # mask scoring - cls_segms, cls_mask_scores = mask_results - else: - cls_segms = mask_results - num_classes = len(cls_segms) - encoded_mask_results = [[] for _ in range(num_classes)] - for i in range(len(cls_segms)): - for cls_segm in cls_segms[i]: - encoded_mask_results[i].append( - mask_util.encode( - np.array( - cls_segm[:, :, np.newaxis], order='F', - dtype='uint8'))[0]) # encoded with RLE - if isinstance(mask_results, tuple): - return encoded_mask_results, cls_mask_scores - else: - return encoded_mask_results - - -def mask2bbox(masks): - """Obtain tight bounding boxes of binary masks. - - Args: - masks (Tensor): Binary mask of shape (n, h, w). 
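`encode_mask_results` above compresses each predicted binary mask with pycocotools RLE; the core call looks roughly like this on a toy mask (the Fortran order and uint8 dtype are what pycocotools expects):

```python
import numpy as np
import pycocotools.mask as mask_util

mask = np.zeros((32, 32), dtype=np.uint8)
mask[8:16, 8:16] = 1   # an 8x8 foreground square

# pycocotools expects Fortran-ordered uint8 data of shape (h, w, n)
rle = mask_util.encode(
    np.array(mask[:, :, np.newaxis], order='F', dtype='uint8'))[0]
print(rle['size'])          # [32, 32]
print(mask_util.area(rle))  # 64
```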
- - Returns: - Tensor: Bboxe with shape (n, 4) of \ - positive region in binary mask. - """ - N = masks.shape[0] - bboxes = masks.new_zeros((N, 4), dtype=torch.float32) - x_any = torch.any(masks, dim=1) - y_any = torch.any(masks, dim=2) - for i in range(N): - x = torch.where(x_any[i, :])[0] - y = torch.where(y_any[i, :])[0] - if len(x) > 0 and len(y) > 0: - bboxes[i, :] = bboxes.new_tensor( - [x[0], y[0], x[-1] + 1, y[-1] + 1]) - - return bboxes diff --git a/cv/detection/co-detr/pytorch/mmdet/core/optimizers/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/optimizers/__init__.py deleted file mode 100644 index e867d0761cb54a6f228a0fb3e0560dea67b67881..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/optimizers/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .builder import OPTIMIZER_BUILDERS, build_optimizer -from .layer_decay_optimizer_constructor import \ - LearningRateDecayOptimizerConstructor - -__all__ = [ - 'LearningRateDecayOptimizerConstructor', 'OPTIMIZER_BUILDERS', - 'build_optimizer' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/optimizers/builder.py b/cv/detection/co-detr/pytorch/mmdet/core/optimizers/builder.py deleted file mode 100644 index 406dd9b4b7027e9c2254b0d18cf0c80a7161912b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/optimizers/builder.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -from mmcv.runner.optimizer import OPTIMIZER_BUILDERS as MMCV_OPTIMIZER_BUILDERS -from mmcv.utils import Registry, build_from_cfg - -OPTIMIZER_BUILDERS = Registry( - 'optimizer builder', parent=MMCV_OPTIMIZER_BUILDERS) - - -def build_optimizer_constructor(cfg): - constructor_type = cfg.get('type') - if constructor_type in OPTIMIZER_BUILDERS: - return build_from_cfg(cfg, OPTIMIZER_BUILDERS) - elif constructor_type in MMCV_OPTIMIZER_BUILDERS: - return build_from_cfg(cfg, MMCV_OPTIMIZER_BUILDERS) - else: - raise KeyError(f'{constructor_type} is not registered ' - 'in the optimizer builder registry.') - - -def build_optimizer(model, cfg): - optimizer_cfg = copy.deepcopy(cfg) - constructor_type = optimizer_cfg.pop('constructor', - 'DefaultOptimizerConstructor') - paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None) - optim_constructor = build_optimizer_constructor( - dict( - type=constructor_type, - optimizer_cfg=optimizer_cfg, - paramwise_cfg=paramwise_cfg)) - optimizer = optim_constructor(model) - return optimizer diff --git a/cv/detection/co-detr/pytorch/mmdet/core/optimizers/layer_decay_optimizer_constructor.py b/cv/detection/co-detr/pytorch/mmdet/core/optimizers/layer_decay_optimizer_constructor.py deleted file mode 100644 index 1bc3469e8884a7a1f0a154ab859b8079575b56ff..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/optimizers/layer_decay_optimizer_constructor.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import json - -from mmcv.runner import DefaultOptimizerConstructor, get_dist_info - -from mmdet.utils import get_root_logger -from .builder import OPTIMIZER_BUILDERS - - -def get_layer_id_for_convnext(var_name, max_layer_id): - """Get the layer id to set the different learning rates in ``layer_wise`` - decay_type. - - Args: - var_name (str): The key of the model. - max_layer_id (int): Maximum layer id. 
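`mask2bbox` above derives a tight box from the rows and columns that contain any foreground pixel; the same reduction on a single toy mask, as a sketch:

```python
import torch

masks = torch.zeros((1, 16, 16), dtype=torch.bool)
masks[0, 4:9, 3:12] = True  # a 5x9 rectangle

x_any = torch.any(masks, dim=1)  # which columns contain foreground
y_any = torch.any(masks, dim=2)  # which rows contain foreground
x = torch.where(x_any[0])[0]
y = torch.where(y_any[0])[0]
bbox = torch.stack([x[0], y[0], x[-1] + 1, y[-1] + 1]).float()
print(bbox)  # tensor([ 3.,  4., 12.,  9.])
```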
- - Returns: - int: The id number corresponding to different learning rate in - ``LearningRateDecayOptimizerConstructor``. - """ - - if var_name in ('backbone.cls_token', 'backbone.mask_token', - 'backbone.pos_embed'): - return 0 - elif var_name.startswith('backbone.downsample_layers'): - stage_id = int(var_name.split('.')[2]) - if stage_id == 0: - layer_id = 0 - elif stage_id == 1: - layer_id = 2 - elif stage_id == 2: - layer_id = 3 - elif stage_id == 3: - layer_id = max_layer_id - return layer_id - elif var_name.startswith('backbone.stages'): - stage_id = int(var_name.split('.')[2]) - block_id = int(var_name.split('.')[3]) - if stage_id == 0: - layer_id = 1 - elif stage_id == 1: - layer_id = 2 - elif stage_id == 2: - layer_id = 3 + block_id // 3 - elif stage_id == 3: - layer_id = max_layer_id - return layer_id - else: - return max_layer_id + 1 - - -def get_stage_id_for_convnext(var_name, max_stage_id): - """Get the stage id to set the different learning rates in ``stage_wise`` - decay_type. - - Args: - var_name (str): The key of the model. - max_stage_id (int): Maximum stage id. - - Returns: - int: The id number corresponding to different learning rate in - ``LearningRateDecayOptimizerConstructor``. - """ - - if var_name in ('backbone.cls_token', 'backbone.mask_token', - 'backbone.pos_embed'): - return 0 - elif var_name.startswith('backbone.downsample_layers'): - return 0 - elif var_name.startswith('backbone.stages'): - stage_id = int(var_name.split('.')[2]) - return stage_id + 1 - else: - return max_stage_id - 1 - - -@OPTIMIZER_BUILDERS.register_module() -class LearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor): - # Different learning rates are set for different layers of backbone. - # Note: Currently, this optimizer constructor is built for ConvNeXt. - - def add_params(self, params, module, **kwargs): - """Add all parameters of module to the params list. - - The parameters of the given module will be added to the list of param - groups, with specific rules defined by paramwise_cfg. - - Args: - params (list[dict]): A list of param groups, it will be modified - in place. - module (nn.Module): The module to be added. - """ - logger = get_root_logger() - - parameter_groups = {} - logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}') - num_layers = self.paramwise_cfg.get('num_layers') + 2 - decay_rate = self.paramwise_cfg.get('decay_rate') - decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise') - logger.info('Build LearningRateDecayOptimizerConstructor ' - f'{decay_type} {decay_rate} - {num_layers}') - weight_decay = self.base_wd - for name, param in module.named_parameters(): - if not param.requires_grad: - continue # frozen weights - if len(param.shape) == 1 or name.endswith('.bias') or name in ( - 'pos_embed', 'cls_token'): - group_name = 'no_decay' - this_weight_decay = 0. 
- else: - group_name = 'decay' - this_weight_decay = weight_decay - if 'layer_wise' in decay_type: - if 'ConvNeXt' in module.backbone.__class__.__name__: - layer_id = get_layer_id_for_convnext( - name, self.paramwise_cfg.get('num_layers')) - logger.info(f'set param {name} as id {layer_id}') - else: - raise NotImplementedError() - elif decay_type == 'stage_wise': - if 'ConvNeXt' in module.backbone.__class__.__name__: - layer_id = get_stage_id_for_convnext(name, num_layers) - logger.info(f'set param {name} as id {layer_id}') - else: - raise NotImplementedError() - group_name = f'layer_{layer_id}_{group_name}' - - if group_name not in parameter_groups: - scale = decay_rate**(num_layers - layer_id - 1) - - parameter_groups[group_name] = { - 'weight_decay': this_weight_decay, - 'params': [], - 'param_names': [], - 'lr_scale': scale, - 'group_name': group_name, - 'lr': scale * self.base_lr, - } - - parameter_groups[group_name]['params'].append(param) - parameter_groups[group_name]['param_names'].append(name) - rank, _ = get_dist_info() - if rank == 0: - to_display = {} - for key in parameter_groups: - to_display[key] = { - 'param_names': parameter_groups[key]['param_names'], - 'lr_scale': parameter_groups[key]['lr_scale'], - 'lr': parameter_groups[key]['lr'], - 'weight_decay': parameter_groups[key]['weight_decay'], - } - logger.info(f'Param groups = {json.dumps(to_display, indent=2)}') - params.extend(parameter_groups.values()) diff --git a/cv/detection/co-detr/pytorch/mmdet/core/post_processing/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/post_processing/__init__.py deleted file mode 100644 index 00376bd49ebf75d53c10a26ff810362917bae81c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/post_processing/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .bbox_nms import fast_nms, multiclass_nms -from .matrix_nms import mask_matrix_nms -from .merge_augs import (merge_aug_bboxes, merge_aug_masks, - merge_aug_proposals, merge_aug_scores) - -__all__ = [ - 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes', - 'merge_aug_scores', 'merge_aug_masks', 'mask_matrix_nms', 'fast_nms' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/post_processing/bbox_nms.py b/cv/detection/co-detr/pytorch/mmdet/core/post_processing/bbox_nms.py deleted file mode 100644 index 4fcf57bb501de25adbba08d3fb5fe2cc8d00cd1c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/post_processing/bbox_nms.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from mmcv.ops.nms import batched_nms - -from mmdet.core.bbox.iou_calculators import bbox_overlaps - - -def multiclass_nms(multi_bboxes, - multi_scores, - score_thr, - nms_cfg, - max_num=-1, - score_factors=None, - return_inds=False): - """NMS for multi-class bboxes. - - Args: - multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) - multi_scores (Tensor): shape (n, #class), where the last column - contains scores of the background class, but this will be ignored. - score_thr (float): bbox threshold, bboxes with scores lower than it - will not be considered. - nms_cfg (dict): a dict that contains the arguments of nms operations - max_num (int, optional): if there are more than max_num bboxes after - NMS, only top max_num will be kept. Default to -1. - score_factors (Tensor, optional): The factors multiplied to scores - before applying NMS. Default to None. 
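In `add_params` above, every parameter group receives `lr_scale = decay_rate ** (num_layers - layer_id - 1)`, so the learning rate shrinks geometrically toward the shallowest layers. A quick numeric illustration with made-up settings (the decay_rate and num_layers below are placeholders, not values from any config):

```python
decay_rate = 0.9
num_layers = 12   # paramwise num_layers + 2, as computed in add_params

for layer_id in range(num_layers):
    scale = decay_rate ** (num_layers - layer_id - 1)
    print(f'layer_id={layer_id:2d}  lr_scale={scale:.4f}')
# layer_id=0 (embeddings) gets the smallest scale (~0.31 here);
# the deepest group gets scale 1.0, i.e. the full base learning rate.
```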
- return_inds (bool, optional): Whether return the indices of kept - bboxes. Default to False. - - Returns: - tuple: (dets, labels, indices (optional)), tensors of shape (k, 5), - (k), and (k). Dets are boxes with scores. Labels are 0-based. - """ - num_classes = multi_scores.size(1) - 1 - # exclude background category - if multi_bboxes.shape[1] > 4: - bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4) - else: - bboxes = multi_bboxes[:, None].expand( - multi_scores.size(0), num_classes, 4) - - scores = multi_scores[:, :-1] - - labels = torch.arange(num_classes, dtype=torch.long, device=scores.device) - labels = labels.view(1, -1).expand_as(scores) - - bboxes = bboxes.reshape(-1, 4) - scores = scores.reshape(-1) - labels = labels.reshape(-1) - - if not torch.onnx.is_in_onnx_export(): - # NonZero not supported in TensorRT - # remove low scoring boxes - valid_mask = scores > score_thr - # multiply score_factor after threshold to preserve more bboxes, improve - # mAP by 1% for YOLOv3 - if score_factors is not None: - # expand the shape to match original shape of score - score_factors = score_factors.view(-1, 1).expand( - multi_scores.size(0), num_classes) - score_factors = score_factors.reshape(-1) - scores = scores * score_factors - - if not torch.onnx.is_in_onnx_export(): - # NonZero not supported in TensorRT - inds = valid_mask.nonzero(as_tuple=False).squeeze(1) - bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds] - else: - # TensorRT NMS plugin has invalid output filled with -1 - # add dummy data to make detection output correct. - bboxes = torch.cat([bboxes, bboxes.new_zeros(1, 4)], dim=0) - scores = torch.cat([scores, scores.new_zeros(1)], dim=0) - labels = torch.cat([labels, labels.new_zeros(1)], dim=0) - - if bboxes.numel() == 0: - if torch.onnx.is_in_onnx_export(): - raise RuntimeError('[ONNX Error] Can not record NMS ' - 'as it has not been executed this time') - dets = torch.cat([bboxes, scores[:, None]], -1) - if return_inds: - return dets, labels, inds - else: - return dets, labels - - dets, keep = batched_nms(bboxes, scores, labels, nms_cfg) - - if max_num > 0: - dets = dets[:max_num] - keep = keep[:max_num] - - if return_inds: - return dets, labels[keep], inds[keep] - else: - return dets, labels[keep] - - -def fast_nms(multi_bboxes, - multi_scores, - multi_coeffs, - score_thr, - iou_thr, - top_k, - max_num=-1): - """Fast NMS in `YOLACT `_. - - Fast NMS allows already-removed detections to suppress other detections so - that every instance can be decided to be kept or discarded in parallel, - which is not possible in traditional NMS. This relaxation allows us to - implement Fast NMS entirely in standard GPU-accelerated matrix operations. - - Args: - multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) - multi_scores (Tensor): shape (n, #class+1), where the last column - contains scores of the background class, but this will be ignored. - multi_coeffs (Tensor): shape (n, #class*coeffs_dim). - score_thr (float): bbox threshold, bboxes with scores lower than it - will not be considered. - iou_thr (float): IoU threshold to be considered as conflicted. - top_k (int): if there are more than top_k bboxes before NMS, - only top top_k will be kept. - max_num (int): if there are more than max_num bboxes after NMS, - only top max_num will be kept. If -1, keep all the bboxes. - Default: -1. - - Returns: - tuple: (dets, labels, coefficients), tensors of shape (k, 5), (k, 1), - and (k, coeffs_dim). Dets are boxes with scores. - Labels are 0-based. 
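A typical call to `multiclass_nms` above, assuming mmcv's NMS extension is available; the boxes, scores and thresholds below are invented for illustration:

```python
import torch
from mmdet.core.post_processing import multiclass_nms

# 3 boxes, 2 foreground classes plus a background column (ignored by the function).
multi_bboxes = torch.tensor([[10., 10., 50., 50.],
                             [12., 12., 52., 52.],
                             [100., 100., 150., 150.]])
multi_scores = torch.tensor([[0.9, 0.1, 0.0],
                             [0.8, 0.1, 0.1],
                             [0.1, 0.7, 0.2]])

dets, labels = multiclass_nms(
    multi_bboxes, multi_scores,
    score_thr=0.05,
    nms_cfg=dict(type='nms', iou_threshold=0.5),
    max_num=100)
print(dets.shape, labels)  # the two heavily overlapping class-0 boxes collapse to one
```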
- """ - - scores = multi_scores[:, :-1].t() # [#class, n] - scores, idx = scores.sort(1, descending=True) - - idx = idx[:, :top_k].contiguous() - scores = scores[:, :top_k] # [#class, topk] - num_classes, num_dets = idx.size() - boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4) - coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1) - - iou = bbox_overlaps(boxes, boxes) # [#class, topk, topk] - iou.triu_(diagonal=1) - iou_max, _ = iou.max(dim=1) - - # Now just filter out the ones higher than the threshold - keep = iou_max <= iou_thr - - # Second thresholding introduces 0.2 mAP gain at negligible time cost - keep *= scores > score_thr - - # Assign each kept detection to its corresponding class - classes = torch.arange( - num_classes, device=boxes.device)[:, None].expand_as(keep) - classes = classes[keep] - - boxes = boxes[keep] - coeffs = coeffs[keep] - scores = scores[keep] - - # Only keep the top max_num highest scores across all classes - scores, idx = scores.sort(0, descending=True) - if max_num > 0: - idx = idx[:max_num] - scores = scores[:max_num] - - classes = classes[idx] - boxes = boxes[idx] - coeffs = coeffs[idx] - - cls_dets = torch.cat([boxes, scores[:, None]], dim=1) - return cls_dets, classes, coeffs diff --git a/cv/detection/co-detr/pytorch/mmdet/core/post_processing/matrix_nms.py b/cv/detection/co-detr/pytorch/mmdet/core/post_processing/matrix_nms.py deleted file mode 100644 index 9dc8c4f74e28127fb69ccc684f0bdb2bd3943b20..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/post_processing/matrix_nms.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - - -def mask_matrix_nms(masks, - labels, - scores, - filter_thr=-1, - nms_pre=-1, - max_num=-1, - kernel='gaussian', - sigma=2.0, - mask_area=None): - """Matrix NMS for multi-class masks. - - Args: - masks (Tensor): Has shape (num_instances, h, w) - labels (Tensor): Labels of corresponding masks, - has shape (num_instances,). - scores (Tensor): Mask scores of corresponding masks, - has shape (num_instances). - filter_thr (float): Score threshold to filter the masks - after matrix nms. Default: -1, which means do not - use filter_thr. - nms_pre (int): The max number of instances to do the matrix nms. - Default: -1, which means do not use nms_pre. - max_num (int, optional): If there are more than max_num masks after - matrix, only top max_num will be kept. Default: -1, which means - do not use max_num. - kernel (str): 'linear' or 'gaussian'. - sigma (float): std in gaussian method. - mask_area (Tensor): The sum of seg_masks. - - Returns: - tuple(Tensor): Processed mask results. - - - scores (Tensor): Updated scores, has shape (n,). - - labels (Tensor): Remained labels, has shape (n,). - - masks (Tensor): Remained masks, has shape (n, w, h). - - keep_inds (Tensor): The indices number of - the remaining mask in the input mask, has shape (n,). 
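The key step of Fast NMS above is taking the upper triangle of the per-class IoU matrix so that only higher-scoring boxes can suppress lower-scoring ones, then reading off each box's worst overlap with any better-scoring box. The same idea on a 3-box toy matrix (2-D here, so that column-wise maximum is `max(dim=0)`):

```python
import torch

# Pairwise IoU of 3 boxes of one class, already sorted by score (toy values).
iou = torch.tensor([[1.0, 0.8, 0.1],
                    [0.8, 1.0, 0.2],
                    [0.1, 0.2, 1.0]])
iou = iou.triu(diagonal=1)      # only higher-scoring boxes may suppress lower ones
iou_max, _ = iou.max(dim=0)     # worst overlap with any better-scoring box
keep = iou_max <= 0.5
print(keep)  # tensor([ True, False,  True]): box 1 is suppressed by box 0
```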
- """ - assert len(labels) == len(masks) == len(scores) - if len(labels) == 0: - return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros( - 0, *masks.shape[-2:]), labels.new_zeros(0) - if mask_area is None: - mask_area = masks.sum((1, 2)).float() - else: - assert len(masks) == len(mask_area) - - # sort and keep top nms_pre - scores, sort_inds = torch.sort(scores, descending=True) - - keep_inds = sort_inds - if nms_pre > 0 and len(sort_inds) > nms_pre: - sort_inds = sort_inds[:nms_pre] - keep_inds = keep_inds[:nms_pre] - scores = scores[:nms_pre] - masks = masks[sort_inds] - mask_area = mask_area[sort_inds] - labels = labels[sort_inds] - - num_masks = len(labels) - flatten_masks = masks.reshape(num_masks, -1).float() - # inter. - inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0)) - expanded_mask_area = mask_area.expand(num_masks, num_masks) - # Upper triangle iou matrix. - iou_matrix = (inter_matrix / - (expanded_mask_area + expanded_mask_area.transpose(1, 0) - - inter_matrix)).triu(diagonal=1) - # label_specific matrix. - expanded_labels = labels.expand(num_masks, num_masks) - # Upper triangle label matrix. - label_matrix = (expanded_labels == expanded_labels.transpose( - 1, 0)).triu(diagonal=1) - - # IoU compensation - compensate_iou, _ = (iou_matrix * label_matrix).max(0) - compensate_iou = compensate_iou.expand(num_masks, - num_masks).transpose(1, 0) - - # IoU decay - decay_iou = iou_matrix * label_matrix - - # Calculate the decay_coefficient - if kernel == 'gaussian': - decay_matrix = torch.exp(-1 * sigma * (decay_iou**2)) - compensate_matrix = torch.exp(-1 * sigma * (compensate_iou**2)) - decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0) - elif kernel == 'linear': - decay_matrix = (1 - decay_iou) / (1 - compensate_iou) - decay_coefficient, _ = decay_matrix.min(0) - else: - raise NotImplementedError( - f'{kernel} kernel is not supported in matrix nms!') - # update the score. - scores = scores * decay_coefficient - - if filter_thr > 0: - keep = scores >= filter_thr - keep_inds = keep_inds[keep] - if not keep.any(): - return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros( - 0, *masks.shape[-2:]), labels.new_zeros(0) - masks = masks[keep] - scores = scores[keep] - labels = labels[keep] - - # sort and keep top max_num - scores, sort_inds = torch.sort(scores, descending=True) - keep_inds = keep_inds[sort_inds] - if max_num > 0 and len(sort_inds) > max_num: - sort_inds = sort_inds[:max_num] - keep_inds = keep_inds[:max_num] - scores = scores[:max_num] - masks = masks[sort_inds] - labels = labels[sort_inds] - - return scores, labels, masks, keep_inds diff --git a/cv/detection/co-detr/pytorch/mmdet/core/post_processing/merge_augs.py b/cv/detection/co-detr/pytorch/mmdet/core/post_processing/merge_augs.py deleted file mode 100644 index 2ac4603a1aea9e463e35d7041a0bf00bd3b13529..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/post_processing/merge_augs.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import warnings - -import numpy as np -import torch -from mmcv import ConfigDict -from mmcv.ops import nms - -from ..bbox import bbox_mapping_back - - -def merge_aug_proposals(aug_proposals, img_metas, cfg): - """Merge augmented proposals (multiscale, flip, etc.) - - Args: - aug_proposals (list[Tensor]): proposals from different testing - schemes, shape (n, 5). Note that they are not rescaled to the - original image size. 
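The Gaussian kernel used by `mask_matrix_nms` above rescales a mask's score by `exp(-sigma * iou^2)`, normalized by the compensation term of its best-overlapping higher-scoring mask, so overlaps decay scores softly instead of removing masks outright. One pair of toy IoU values:

```python
import torch

sigma = 2.0
decay_iou = torch.tensor(0.7)        # IoU with a higher-scoring mask of the same class
compensate_iou = torch.tensor(0.1)   # that mask's own best IoU with anything above it

decay = torch.exp(-sigma * decay_iou ** 2) / torch.exp(-sigma * compensate_iou ** 2)
print(float(decay))  # ~0.38: the candidate's score is decayed, not hard-suppressed
```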
- - img_metas (list[dict]): list of image info dict where each dict has: - 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - - cfg (dict): rpn test config. - - Returns: - Tensor: shape (n, 4), proposals corresponding to original image scale. - """ - - cfg = copy.deepcopy(cfg) - - # deprecate arguments warning - if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: - warnings.warn( - 'In rpn_proposal or test_cfg, ' - 'nms_thr has been moved to a dict named nms as ' - 'iou_threshold, max_num has been renamed as max_per_img, ' - 'name of original arguments and the way to specify ' - 'iou_threshold of NMS will be deprecated.') - if 'nms' not in cfg: - cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) - if 'max_num' in cfg: - if 'max_per_img' in cfg: - assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \ - f'max_per_img at the same time, but get {cfg.max_num} ' \ - f'and {cfg.max_per_img} respectively' \ - f'Please delete max_num which will be deprecated.' - else: - cfg.max_per_img = cfg.max_num - if 'nms_thr' in cfg: - assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \ - f'iou_threshold in nms and ' \ - f'nms_thr at the same time, but get ' \ - f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \ - f' respectively. Please delete the nms_thr ' \ - f'which will be deprecated.' - - recovered_proposals = [] - for proposals, img_info in zip(aug_proposals, img_metas): - img_shape = img_info['img_shape'] - scale_factor = img_info['scale_factor'] - flip = img_info['flip'] - flip_direction = img_info['flip_direction'] - _proposals = proposals.clone() - _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape, - scale_factor, flip, - flip_direction) - recovered_proposals.append(_proposals) - aug_proposals = torch.cat(recovered_proposals, dim=0) - merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(), - aug_proposals[:, -1].contiguous(), - cfg.nms.iou_threshold) - scores = merged_proposals[:, 4] - _, order = scores.sort(0, descending=True) - num = min(cfg.max_per_img, merged_proposals.shape[0]) - order = order[:num] - merged_proposals = merged_proposals[order, :] - return merged_proposals - - -def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg): - """Merge augmented detection bboxes and scores. - - Args: - aug_bboxes (list[Tensor]): shape (n, 4*#class) - aug_scores (list[Tensor] or None): shape (n, #class) - img_shapes (list[Tensor]): shape (3, ). - rcnn_test_cfg (dict): rcnn test config. 
- - Returns: - tuple: (bboxes, scores) - """ - recovered_bboxes = [] - for bboxes, img_info in zip(aug_bboxes, img_metas): - img_shape = img_info[0]['img_shape'] - scale_factor = img_info[0]['scale_factor'] - flip = img_info[0]['flip'] - flip_direction = img_info[0]['flip_direction'] - bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, - flip_direction) - recovered_bboxes.append(bboxes) - bboxes = torch.stack(recovered_bboxes).mean(dim=0) - if aug_scores is None: - return bboxes - else: - scores = torch.stack(aug_scores).mean(dim=0) - return bboxes, scores - - -def merge_aug_scores(aug_scores): - """Merge augmented bbox scores.""" - if isinstance(aug_scores[0], torch.Tensor): - return torch.mean(torch.stack(aug_scores), dim=0) - else: - return np.mean(aug_scores, axis=0) - - -def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None): - """Merge augmented mask prediction. - - Args: - aug_masks (list[ndarray]): shape (n, #class, h, w) - img_shapes (list[ndarray]): shape (3, ). - rcnn_test_cfg (dict): rcnn test config. - - Returns: - tuple: (bboxes, scores) - """ - recovered_masks = [] - for mask, img_info in zip(aug_masks, img_metas): - flip = img_info[0]['flip'] - if flip: - flip_direction = img_info[0]['flip_direction'] - if flip_direction == 'horizontal': - mask = mask[:, :, :, ::-1] - elif flip_direction == 'vertical': - mask = mask[:, :, ::-1, :] - elif flip_direction == 'diagonal': - mask = mask[:, :, :, ::-1] - mask = mask[:, :, ::-1, :] - else: - raise ValueError( - f"Invalid flipping direction '{flip_direction}'") - recovered_masks.append(mask) - - if weights is None: - merged_masks = np.mean(recovered_masks, axis=0) - else: - merged_masks = np.average( - np.array(recovered_masks), axis=0, weights=np.array(weights)) - return merged_masks diff --git a/cv/detection/co-detr/pytorch/mmdet/core/utils/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/utils/__init__.py deleted file mode 100644 index 3f0d07081a265d249d0ddb3a80ce39bf29e668e9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/utils/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, - reduce_mean, sync_random_seed) -from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor, - generate_coordinate, mask2ndarray, multi_apply, - select_single_mlvl, unmap) - -__all__ = [ - 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply', - 'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict', - 'center_of_mass', 'generate_coordinate', 'select_single_mlvl', - 'filter_scores_and_topk', 'sync_random_seed' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/utils/dist_utils.py b/cv/detection/co-detr/pytorch/mmdet/core/utils/dist_utils.py deleted file mode 100644 index 8760774fd90e666c03ca4d553111363065a08426..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/utils/dist_utils.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import functools -import pickle -import warnings -from collections import OrderedDict - -import numpy as np -import torch -import torch.distributed as dist -from mmcv.runner import OptimizerHook, get_dist_info -from torch._utils import (_flatten_dense_tensors, _take_tensors, - _unflatten_dense_tensors) - - -def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): - if bucket_size_mb > 0: - bucket_size_bytes = bucket_size_mb * 1024 * 1024 - buckets = _take_tensors(tensors, bucket_size_bytes) - else: - buckets = OrderedDict() - for tensor in tensors: - tp = tensor.type() - if tp not in buckets: - buckets[tp] = [] - buckets[tp].append(tensor) - buckets = buckets.values() - - for bucket in buckets: - flat_tensors = _flatten_dense_tensors(bucket) - dist.all_reduce(flat_tensors) - flat_tensors.div_(world_size) - for tensor, synced in zip( - bucket, _unflatten_dense_tensors(flat_tensors, bucket)): - tensor.copy_(synced) - - -def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): - """Allreduce gradients. - - Args: - params (list[torch.Parameters]): List of parameters of a model - coalesce (bool, optional): Whether allreduce parameters as a whole. - Defaults to True. - bucket_size_mb (int, optional): Size of bucket, the unit is MB. - Defaults to -1. - """ - grads = [ - param.grad.data for param in params - if param.requires_grad and param.grad is not None - ] - world_size = dist.get_world_size() - if coalesce: - _allreduce_coalesced(grads, world_size, bucket_size_mb) - else: - for tensor in grads: - dist.all_reduce(tensor.div_(world_size)) - - -class DistOptimizerHook(OptimizerHook): - """Deprecated optimizer hook for distributed training.""" - - def __init__(self, *args, **kwargs): - warnings.warn('"DistOptimizerHook" is deprecated, please switch to' - '"mmcv.runner.OptimizerHook".') - super().__init__(*args, **kwargs) - - -def reduce_mean(tensor): - """"Obtain the mean of tensor on different GPUs.""" - if not (dist.is_available() and dist.is_initialized()): - return tensor - tensor = tensor.clone() - dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM) - return tensor - - -def obj2tensor(pyobj, device='cuda'): - """Serialize picklable python object to tensor.""" - storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj)) - return torch.ByteTensor(storage).to(device=device) - - -def tensor2obj(tensor): - """Deserialize tensor to picklable python object.""" - return pickle.loads(tensor.cpu().numpy().tobytes()) - - -@functools.lru_cache() -def _get_global_gloo_group(): - """Return a process group based on gloo backend, containing all the ranks - The result is cached.""" - if dist.get_backend() == 'nccl': - return dist.new_group(backend='gloo') - else: - return dist.group.WORLD - - -def all_reduce_dict(py_dict, op='sum', group=None, to_float=True): - """Apply all reduce function for python dict object. - - The code is modified from https://github.com/Megvii- - BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py. - - NOTE: make sure that py_dict in different ranks has the same keys and - the values should be in the same shape. Currently only supports - nccl backend. - - Args: - py_dict (dict): Dict to be applied all reduce op. - op (str): Operator, could be 'sum' or 'mean'. Default: 'sum' - group (:obj:`torch.distributed.group`, optional): Distributed group, - Default: None. - to_float (bool): Whether to convert all values of dict to float. - Default: True. - - Returns: - OrderedDict: reduced python dict object. 
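A common way `reduce_mean` above gets used is to average the per-rank count of positive samples so that every GPU normalizes its loss by the same factor; a sketch with placeholder numbers (when no process group is initialized the call is a no-op):

```python
import torch
from mmdet.core.utils import reduce_mean

# Positive-sample count on this rank; in real training it differs per GPU.
num_pos = torch.tensor(12.0)

# Every rank ends up with the same averaged factor, so per-rank losses stay
# comparable before DDP averages the gradients.
avg_factor = max(reduce_mean(num_pos).item(), 1.0)
loss_sum = torch.tensor(3.5)   # stand-in for a summed, unnormalized loss
loss = loss_sum / avg_factor
```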
- """ - warnings.warn( - 'group` is deprecated. Currently only supports NCCL backend.') - _, world_size = get_dist_info() - if world_size == 1: - return py_dict - - # all reduce logic across different devices. - py_key = list(py_dict.keys()) - if not isinstance(py_dict, OrderedDict): - py_key_tensor = obj2tensor(py_key) - dist.broadcast(py_key_tensor, src=0) - py_key = tensor2obj(py_key_tensor) - - tensor_shapes = [py_dict[k].shape for k in py_key] - tensor_numels = [py_dict[k].numel() for k in py_key] - - if to_float: - warnings.warn('Note: the "to_float" is True, you need to ' - 'ensure that the behavior is reasonable.') - flatten_tensor = torch.cat( - [py_dict[k].flatten().float() for k in py_key]) - else: - flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key]) - - dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM) - if op == 'mean': - flatten_tensor /= world_size - - split_tensors = [ - x.reshape(shape) for x, shape in zip( - torch.split(flatten_tensor, tensor_numels), tensor_shapes) - ] - out_dict = {k: v for k, v in zip(py_key, split_tensors)} - if isinstance(py_dict, OrderedDict): - out_dict = OrderedDict(out_dict) - return out_dict - - -def sync_random_seed(seed=None, device='cuda'): - """Make sure different ranks share the same seed. - - All workers must call this function, otherwise it will deadlock. - This method is generally used in `DistributedSampler`, - because the seed should be identical across all processes - in the distributed group. - - In distributed sampling, different ranks should sample non-overlapped - data in the dataset. Therefore, this function is used to make sure that - each rank shuffles the data indices in the same order based - on the same seed. Then different ranks could use different indices - to select non-overlapped data from the same data list. - - Args: - seed (int, Optional): The seed. Default to None. - device (str): The device where the seed will be put on. - Default to 'cuda'. - - Returns: - int: Seed to be used. - """ - if seed is None: - seed = np.random.randint(2**31) - assert isinstance(seed, int) - - rank, world_size = get_dist_info() - - if world_size == 1: - return seed - - if rank == 0: - random_num = torch.tensor(seed, dtype=torch.int32, device=device) - else: - random_num = torch.tensor(0, dtype=torch.int32, device=device) - dist.broadcast(random_num, src=0) - return random_num.item() diff --git a/cv/detection/co-detr/pytorch/mmdet/core/utils/misc.py b/cv/detection/co-detr/pytorch/mmdet/core/utils/misc.py deleted file mode 100644 index 14cb745e38e7f2a9c0fea43be926eb2f0dddd734..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/utils/misc.py +++ /dev/null @@ -1,208 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from functools import partial - -import numpy as np -import torch -from six.moves import map, zip - -from ..mask.structures import BitmapMasks, PolygonMasks - - -def multi_apply(func, *args, **kwargs): - """Apply function to a list of arguments. - - Note: - This function applies the ``func`` to multiple inputs and - map the multiple outputs of the ``func`` into different - list. Each list contains the same type of outputs corresponding - to different inputs. 
- - Args: - func (Function): A function that will be applied to a list of - arguments - - Returns: - tuple(list): A tuple containing multiple list, each list contains \ - a kind of returned results by the function - """ - pfunc = partial(func, **kwargs) if kwargs else func - map_results = map(pfunc, *args) - return tuple(map(list, zip(*map_results))) - - -def unmap(data, count, inds, fill=0): - """Unmap a subset of item (data) back to the original set of items (of size - count)""" - if data.dim() == 1: - ret = data.new_full((count, ), fill) - ret[inds.type(torch.bool)] = data - else: - new_size = (count, ) + data.size()[1:] - ret = data.new_full(new_size, fill) - ret[inds.type(torch.bool), :] = data - return ret - - -def mask2ndarray(mask): - """Convert Mask to ndarray.. - - Args: - mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or - torch.Tensor or np.ndarray): The mask to be converted. - - Returns: - np.ndarray: Ndarray mask of shape (n, h, w) that has been converted - """ - if isinstance(mask, (BitmapMasks, PolygonMasks)): - mask = mask.to_ndarray() - elif isinstance(mask, torch.Tensor): - mask = mask.detach().cpu().numpy() - elif not isinstance(mask, np.ndarray): - raise TypeError(f'Unsupported {type(mask)} data type') - return mask - - -def flip_tensor(src_tensor, flip_direction): - """flip tensor base on flip_direction. - - Args: - src_tensor (Tensor): input feature map, shape (B, C, H, W). - flip_direction (str): The flipping direction. Options are - 'horizontal', 'vertical', 'diagonal'. - - Returns: - out_tensor (Tensor): Flipped tensor. - """ - assert src_tensor.ndim == 4 - valid_directions = ['horizontal', 'vertical', 'diagonal'] - assert flip_direction in valid_directions - if flip_direction == 'horizontal': - out_tensor = torch.flip(src_tensor, [3]) - elif flip_direction == 'vertical': - out_tensor = torch.flip(src_tensor, [2]) - else: - out_tensor = torch.flip(src_tensor, [2, 3]) - return out_tensor - - -def select_single_mlvl(mlvl_tensors, batch_id, detach=True): - """Extract a multi-scale single image tensor from a multi-scale batch - tensor based on batch index. - - Note: The default value of detach is True, because the proposal gradient - needs to be detached during the training of the two-stage model. E.g - Cascade Mask R-CNN. - - Args: - mlvl_tensors (list[Tensor]): Batch tensor for all scale levels, - each is a 4D-tensor. - batch_id (int): Batch index. - detach (bool): Whether detach gradient. Default True. - - Returns: - list[Tensor]: Multi-scale single image tensor. - """ - assert isinstance(mlvl_tensors, (list, tuple)) - num_levels = len(mlvl_tensors) - - if detach: - mlvl_tensor_list = [ - mlvl_tensors[i][batch_id].detach() for i in range(num_levels) - ] - else: - mlvl_tensor_list = [ - mlvl_tensors[i][batch_id] for i in range(num_levels) - ] - return mlvl_tensor_list - - -def filter_scores_and_topk(scores, score_thr, topk, results=None): - """Filter results using score threshold and topk candidates. - - Args: - scores (Tensor): The scores, shape (num_bboxes, K). - score_thr (float): The score filter threshold. - topk (int): The number of topk candidates. - results (dict or list or Tensor, Optional): The results to - which the filtering rule is to be applied. The shape - of each item is (num_bboxes, N). - - Returns: - tuple: Filtered results - - - scores (Tensor): The scores after being filtered, \ - shape (num_bboxes_filtered, ). - - labels (Tensor): The class labels, shape \ - (num_bboxes_filtered, ). 
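`multi_apply` above maps a function over several parallel lists and transposes the per-call result tuples into per-output lists; a small self-contained example with an invented helper:

```python
import torch
from mmdet.core.utils import multi_apply

def scale_and_shift(x, shift, scale=1.0):
    return x * scale, x + shift

xs = [torch.tensor(1.0), torch.tensor(2.0)]
shifts = [torch.tensor(10.0), torch.tensor(20.0)]

scaled, shifted = multi_apply(scale_and_shift, xs, shifts, scale=2.0)
print(scaled)   # [tensor(2.), tensor(4.)]
print(shifted)  # [tensor(11.), tensor(22.)]
```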
- - anchor_idxs (Tensor): The anchor indexes, shape \ - (num_bboxes_filtered, ). - - filtered_results (dict or list or Tensor, Optional): \ - The filtered results. The shape of each item is \ - (num_bboxes_filtered, N). - """ - valid_mask = scores > score_thr - scores = scores[valid_mask] - valid_idxs = torch.nonzero(valid_mask) - - num_topk = min(topk, valid_idxs.size(0)) - # torch.sort is actually faster than .topk (at least on GPUs) - scores, idxs = scores.sort(descending=True) - scores = scores[:num_topk] - topk_idxs = valid_idxs[idxs[:num_topk]] - keep_idxs, labels = topk_idxs.unbind(dim=1) - - filtered_results = None - if results is not None: - if isinstance(results, dict): - filtered_results = {k: v[keep_idxs] for k, v in results.items()} - elif isinstance(results, list): - filtered_results = [result[keep_idxs] for result in results] - elif isinstance(results, torch.Tensor): - filtered_results = results[keep_idxs] - else: - raise NotImplementedError(f'Only supports dict or list or Tensor, ' - f'but get {type(results)}.') - return scores, labels, keep_idxs, filtered_results - - -def center_of_mass(mask, esp=1e-6): - """Calculate the centroid coordinates of the mask. - - Args: - mask (Tensor): The mask to be calculated, shape (h, w). - esp (float): Avoid dividing by zero. Default: 1e-6. - - Returns: - tuple[Tensor]: the coordinates of the center point of the mask. - - - center_h (Tensor): the center point of the height. - - center_w (Tensor): the center point of the width. - """ - h, w = mask.shape - grid_h = torch.arange(h, device=mask.device)[:, None] - grid_w = torch.arange(w, device=mask.device) - normalizer = mask.sum().float().clamp(min=esp) - center_h = (mask * grid_h).sum() / normalizer - center_w = (mask * grid_w).sum() / normalizer - return center_h, center_w - - -def generate_coordinate(featmap_sizes, device='cuda'): - """Generate the coordinate. - - Args: - featmap_sizes (tuple): The feature to be calculated, - of shape (N, C, W, H). - device (str): The device where the feature will be put on. - Returns: - coord_feat (Tensor): The coordinate feature, of shape (N, 2, W, H). - """ - - x_range = torch.linspace(-1, 1, featmap_sizes[-1], device=device) - y_range = torch.linspace(-1, 1, featmap_sizes[-2], device=device) - y, x = torch.meshgrid(y_range, x_range) - y = y.expand([featmap_sizes[0], 1, -1, -1]) - x = x.expand([featmap_sizes[0], 1, -1, -1]) - coord_feat = torch.cat([x, y], 1) - - return coord_feat diff --git a/cv/detection/co-detr/pytorch/mmdet/core/visualization/__init__.py b/cv/detection/co-detr/pytorch/mmdet/core/visualization/__init__.py deleted file mode 100644 index 2eb17c4b32bc0c5c76db31e22e995716ba718222..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/visualization/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .image import (color_val_matplotlib, imshow_det_bboxes, - imshow_gt_det_bboxes) -from .palette import get_palette, palette_val - -__all__ = [ - 'imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib', - 'palette_val', 'get_palette' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/core/visualization/image.py b/cv/detection/co-detr/pytorch/mmdet/core/visualization/image.py deleted file mode 100644 index 63eae8a2846b78394f0ed554d182e04a0da36021..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/visualization/image.py +++ /dev/null @@ -1,563 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
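`center_of_mass` above is just an intensity-weighted average of the row and column indices; the same computation spelled out on a toy rectangle mask:

```python
import torch

mask = torch.zeros(8, 8)
mask[2:6, 1:7] = 1.0   # a filled rectangle

h, w = mask.shape
grid_h = torch.arange(h, dtype=torch.float32)[:, None]
grid_w = torch.arange(w, dtype=torch.float32)
normalizer = mask.sum().clamp(min=1e-6)

center_h = (mask * grid_h).sum() / normalizer
center_w = (mask * grid_w).sum() / normalizer
print(center_h.item(), center_w.item())  # 3.5 3.5, the rectangle's centroid
```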
-import sys - -import cv2 -import matplotlib.pyplot as plt -import mmcv -import numpy as np -import pycocotools.mask as mask_util -from matplotlib.collections import PatchCollection -from matplotlib.patches import Polygon - -from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET -from ..mask.structures import bitmap_to_polygon -from ..utils import mask2ndarray -from .palette import get_palette, palette_val - -__all__ = [ - 'color_val_matplotlib', 'draw_masks', 'draw_bboxes', 'draw_labels', - 'imshow_det_bboxes', 'imshow_gt_det_bboxes' -] - -EPS = 1e-2 - - -def color_val_matplotlib(color): - """Convert various input in BGR order to normalized RGB matplotlib color - tuples. - - Args: - color (:obj`Color` | str | tuple | int | ndarray): Color inputs. - - Returns: - tuple[float]: A tuple of 3 normalized floats indicating RGB channels. - """ - color = mmcv.color_val(color) - color = [color / 255 for color in color[::-1]] - return tuple(color) - - -def _get_adaptive_scales(areas, min_area=800, max_area=30000): - """Get adaptive scales according to areas. - - The scale range is [0.5, 1.0]. When the area is less than - ``'min_area'``, the scale is 0.5 while the area is larger than - ``'max_area'``, the scale is 1.0. - - Args: - areas (ndarray): The areas of bboxes or masks with the - shape of (n, ). - min_area (int): Lower bound areas for adaptive scales. - Default: 800. - max_area (int): Upper bound areas for adaptive scales. - Default: 30000. - - Returns: - ndarray: The adaotive scales with the shape of (n, ). - """ - scales = 0.5 + (areas - min_area) / (max_area - min_area) - scales = np.clip(scales, 0.5, 1.0) - return scales - - -def _get_bias_color(base, max_dist=30): - """Get different colors for each masks. - - Get different colors for each masks by adding a bias - color to the base category color. - Args: - base (ndarray): The base category color with the shape - of (3, ). - max_dist (int): The max distance of bias. Default: 30. - - Returns: - ndarray: The new color for a mask with the shape of (3, ). - """ - new_color = base + np.random.randint( - low=-max_dist, high=max_dist + 1, size=3) - return np.clip(new_color, 0, 255, new_color) - - -def draw_bboxes(ax, bboxes, color='g', alpha=0.8, thickness=2): - """Draw bounding boxes on the axes. - - Args: - ax (matplotlib.Axes): The input axes. - bboxes (ndarray): The input bounding boxes with the shape - of (n, 4). - color (list[tuple] | matplotlib.color): the colors for each - bounding boxes. - alpha (float): Transparency of bounding boxes. Default: 0.8. - thickness (int): Thickness of lines. Default: 2. - - Returns: - matplotlib.Axes: The result axes. - """ - polygons = [] - for i, bbox in enumerate(bboxes): - bbox_int = bbox.astype(np.int32) - poly = [[bbox_int[0], bbox_int[1]], [bbox_int[0], bbox_int[3]], - [bbox_int[2], bbox_int[3]], [bbox_int[2], bbox_int[1]]] - np_poly = np.array(poly).reshape((4, 2)) - polygons.append(Polygon(np_poly)) - p = PatchCollection( - polygons, - facecolor='none', - edgecolors=color, - linewidths=thickness, - alpha=alpha) - ax.add_collection(p) - - return ax - - -def draw_labels(ax, - labels, - positions, - scores=None, - class_names=None, - color='w', - font_size=8, - scales=None, - horizontal_alignment='left'): - """Draw labels on the axes. - - Args: - ax (matplotlib.Axes): The input axes. - labels (ndarray): The labels with the shape of (n, ). - positions (ndarray): The positions to draw each labels. - scores (ndarray): The scores for each labels. - class_names (list[str]): The class names. 
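`_get_adaptive_scales` above maps box or mask areas linearly into a [0.5, 1.0] text-scale range between `min_area` and `max_area`; a quick numeric check with arbitrary areas:

```python
import numpy as np

areas = np.array([500.0, 5000.0, 40000.0])
min_area, max_area = 800, 30000
scales = np.clip(0.5 + (areas - min_area) / (max_area - min_area), 0.5, 1.0)
print(scales)  # approximately [0.5, 0.64, 1.0]: small objects get smaller label text
```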
- color (list[tuple] | matplotlib.color): The colors for labels. - font_size (int): Font size of texts. Default: 8. - scales (list[float]): Scales of texts. Default: None. - horizontal_alignment (str): The horizontal alignment method of - texts. Default: 'left'. - - Returns: - matplotlib.Axes: The result axes. - """ - for i, (pos, label) in enumerate(zip(positions, labels)): - label_text = class_names[ - label] if class_names is not None else f'class {label}' - if scores is not None: - label_text += f'|{scores[i]:.02f}' - text_color = color[i] if isinstance(color, list) else color - - font_size_mask = font_size if scales is None else font_size * scales[i] - ax.text( - pos[0], - pos[1], - f'{label_text}', - bbox={ - 'facecolor': 'black', - 'alpha': 0.8, - 'pad': 0.7, - 'edgecolor': 'none' - }, - color=text_color, - fontsize=font_size_mask, - verticalalignment='top', - horizontalalignment=horizontal_alignment) - - return ax - - -def draw_masks(ax, img, masks, color=None, with_edge=True, alpha=0.8): - """Draw masks on the image and their edges on the axes. - - Args: - ax (matplotlib.Axes): The input axes. - img (ndarray): The image with the shape of (3, h, w). - masks (ndarray): The masks with the shape of (n, h, w). - color (ndarray): The colors for each masks with the shape - of (n, 3). - with_edge (bool): Whether to draw edges. Default: True. - alpha (float): Transparency of bounding boxes. Default: 0.8. - - Returns: - matplotlib.Axes: The result axes. - ndarray: The result image. - """ - taken_colors = set([0, 0, 0]) - if color is None: - random_colors = np.random.randint(0, 255, (masks.size(0), 3)) - color = [tuple(c) for c in random_colors] - color = np.array(color, dtype=np.uint8) - polygons = [] - for i, mask in enumerate(masks): - if with_edge: - contours, _ = bitmap_to_polygon(mask) - polygons += [Polygon(c) for c in contours] - - color_mask = color[i] - while tuple(color_mask) in taken_colors: - color_mask = _get_bias_color(color_mask) - taken_colors.add(tuple(color_mask)) - - mask = mask.astype(bool) - img[mask] = img[mask] * (1 - alpha) + color_mask * alpha - - p = PatchCollection( - polygons, facecolor='none', edgecolors='w', linewidths=1, alpha=0.8) - ax.add_collection(p) - - return ax, img - - -def imshow_det_bboxes(img, - bboxes=None, - labels=None, - segms=None, - class_names=None, - score_thr=0, - bbox_color='green', - text_color='green', - mask_color=None, - thickness=2, - font_size=8, - win_name='', - show=True, - wait_time=0, - out_file=None): - """Draw bboxes and class labels (with scores) on an image. - - Args: - img (str | ndarray): The image to be displayed. - bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or - (n, 5). - labels (ndarray): Labels of bboxes. - segms (ndarray | None): Masks, shaped (n,h,w) or None. - class_names (list[str]): Names of each classes. - score_thr (float): Minimum score of bboxes to be shown. Default: 0. - bbox_color (list[tuple] | tuple | str | None): Colors of bbox lines. - If a single color is given, it will be applied to all classes. - The tuple of color should be in RGB order. Default: 'green'. - text_color (list[tuple] | tuple | str | None): Colors of texts. - If a single color is given, it will be applied to all classes. - The tuple of color should be in RGB order. Default: 'green'. - mask_color (list[tuple] | tuple | str | None, optional): Colors of - masks. If a single color is given, it will be applied to all - classes. The tuple of color should be in RGB order. - Default: None. - thickness (int): Thickness of lines. 
Default: 2. - font_size (int): Font size of texts. Default: 13. - show (bool): Whether to show the image. Default: True. - win_name (str): The window name. Default: ''. - wait_time (float): Value of waitKey param. Default: 0. - out_file (str, optional): The filename to write the image. - Default: None. - - Returns: - ndarray: The image with bboxes drawn on it. - """ - assert bboxes is None or bboxes.ndim == 2, \ - f' bboxes ndim should be 2, but its ndim is {bboxes.ndim}.' - assert labels.ndim == 1, \ - f' labels ndim should be 1, but its ndim is {labels.ndim}.' - assert bboxes is None or bboxes.shape[1] == 4 or bboxes.shape[1] == 5, \ - f' bboxes.shape[1] should be 4 or 5, but its {bboxes.shape[1]}.' - assert bboxes is None or bboxes.shape[0] <= labels.shape[0], \ - 'labels.shape[0] should not be less than bboxes.shape[0].' - assert segms is None or segms.shape[0] == labels.shape[0], \ - 'segms.shape[0] and labels.shape[0] should have the same length.' - assert segms is not None or bboxes is not None, \ - 'segms and bboxes should not be None at the same time.' - - img = mmcv.imread(img).astype(np.uint8) - - if score_thr > 0: - assert bboxes is not None and bboxes.shape[1] == 5 - scores = bboxes[:, -1] - inds = scores > score_thr - bboxes = bboxes[inds, :] - labels = labels[inds] - if segms is not None: - segms = segms[inds, ...] - - img = mmcv.bgr2rgb(img) - width, height = img.shape[1], img.shape[0] - img = np.ascontiguousarray(img) - - fig = plt.figure(win_name, frameon=False) - plt.title(win_name) - canvas = fig.canvas - dpi = fig.get_dpi() - # add a small EPS to avoid precision lost due to matplotlib's truncation - # (https://github.com/matplotlib/matplotlib/issues/15363) - fig.set_size_inches((width + EPS) / dpi, (height + EPS) / dpi) - - # remove white edges by set subplot margin - plt.subplots_adjust(left=0, right=1, bottom=0, top=1) - ax = plt.gca() - ax.axis('off') - - max_label = int(max(labels) if len(labels) > 0 else 0) - text_palette = palette_val(get_palette(text_color, max_label + 1)) - text_colors = [text_palette[label] for label in labels] - - num_bboxes = 0 - if bboxes is not None: - num_bboxes = bboxes.shape[0] - bbox_palette = palette_val(get_palette(bbox_color, max_label + 1)) - colors = [bbox_palette[label] for label in labels[:num_bboxes]] - draw_bboxes(ax, bboxes, colors, alpha=0.8, thickness=thickness) - - horizontal_alignment = 'left' - positions = bboxes[:, :2].astype(np.int32) + thickness - areas = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0]) - scales = _get_adaptive_scales(areas) - scores = bboxes[:, 4] if bboxes.shape[1] == 5 else None - draw_labels( - ax, - labels[:num_bboxes], - positions, - scores=scores, - class_names=class_names, - color=text_colors, - font_size=font_size, - scales=scales, - horizontal_alignment=horizontal_alignment) - - if segms is not None: - mask_palette = get_palette(mask_color, max_label + 1) - colors = [mask_palette[label] for label in labels] - colors = np.array(colors, dtype=np.uint8) - draw_masks(ax, img, segms, colors, with_edge=True) - - if num_bboxes < segms.shape[0]: - segms = segms[num_bboxes:] - horizontal_alignment = 'center' - areas = [] - positions = [] - for mask in segms: - _, _, stats, centroids = cv2.connectedComponentsWithStats( - mask.astype(np.uint8), connectivity=8) - largest_id = np.argmax(stats[1:, -1]) + 1 - positions.append(centroids[largest_id]) - areas.append(stats[largest_id, -1]) - areas = np.stack(areas, axis=0) - scales = _get_adaptive_scales(areas) - draw_labels( - ax, - 
labels[num_bboxes:], - positions, - class_names=class_names, - color=text_colors, - font_size=font_size, - scales=scales, - horizontal_alignment=horizontal_alignment) - - plt.imshow(img) - - stream, _ = canvas.print_to_buffer() - buffer = np.frombuffer(stream, dtype='uint8') - if sys.platform == 'darwin': - width, height = canvas.get_width_height(physical=True) - img_rgba = buffer.reshape(height, width, 4) - rgb, alpha = np.split(img_rgba, [3], axis=2) - img = rgb.astype('uint8') - img = mmcv.rgb2bgr(img) - - if show: - # We do not use cv2 for display because in some cases, opencv will - # conflict with Qt, it will output a warning: Current thread - # is not the object's thread. You can refer to - # https://github.com/opencv/opencv-python/issues/46 for details - if wait_time == 0: - plt.show() - else: - plt.show(block=False) - plt.pause(wait_time) - if out_file is not None: - mmcv.imwrite(img, out_file) - - plt.close() - - return img - - -def imshow_gt_det_bboxes(img, - annotation, - result, - class_names=None, - score_thr=0, - gt_bbox_color=(61, 102, 255), - gt_text_color=(200, 200, 200), - gt_mask_color=(61, 102, 255), - det_bbox_color=(241, 101, 72), - det_text_color=(200, 200, 200), - det_mask_color=(241, 101, 72), - thickness=2, - font_size=13, - win_name='', - show=True, - wait_time=0, - out_file=None, - overlay_gt_pred=True): - """General visualization GT and result function. - - Args: - img (str | ndarray): The image to be displayed. - annotation (dict): Ground truth annotations where contain keys of - 'gt_bboxes' and 'gt_labels' or 'gt_masks'. - result (tuple[list] | list): The detection result, can be either - (bbox, segm) or just bbox. - class_names (list[str]): Names of each classes. - score_thr (float): Minimum score of bboxes to be shown. Default: 0. - gt_bbox_color (list[tuple] | tuple | str | None): Colors of bbox lines. - If a single color is given, it will be applied to all classes. - The tuple of color should be in RGB order. Default: (61, 102, 255). - gt_text_color (list[tuple] | tuple | str | None): Colors of texts. - If a single color is given, it will be applied to all classes. - The tuple of color should be in RGB order. Default: (200, 200, 200). - gt_mask_color (list[tuple] | tuple | str | None, optional): Colors of - masks. If a single color is given, it will be applied to all classes. - The tuple of color should be in RGB order. Default: (61, 102, 255). - det_bbox_color (list[tuple] | tuple | str | None):Colors of bbox lines. - If a single color is given, it will be applied to all classes. - The tuple of color should be in RGB order. Default: (241, 101, 72). - det_text_color (list[tuple] | tuple | str | None):Colors of texts. - If a single color is given, it will be applied to all classes. - The tuple of color should be in RGB order. Default: (200, 200, 200). - det_mask_color (list[tuple] | tuple | str | None, optional): Color of - masks. If a single color is given, it will be applied to all classes. - The tuple of color should be in RGB order. Default: (241, 101, 72). - thickness (int): Thickness of lines. Default: 2. - font_size (int): Font size of texts. Default: 13. - win_name (str): The window name. Default: ''. - show (bool): Whether to show the image. Default: True. - wait_time (float): Value of waitKey param. Default: 0. - out_file (str, optional): The filename to write the image. - Default: None. - overlay_gt_pred (bool): Whether to plot gts and predictions on the - same image. 
If False, predictions and gts will be plotted on two same - image which will be concatenated in vertical direction. The image - above is drawn with gt, and the image below is drawn with the - prediction result. Default: True. - - Returns: - ndarray: The image with bboxes or masks drawn on it. - """ - assert 'gt_bboxes' in annotation - assert 'gt_labels' in annotation - assert isinstance(result, (tuple, list, dict)), 'Expected ' \ - f'tuple or list or dict, but get {type(result)}' - - gt_bboxes = annotation['gt_bboxes'] - gt_labels = annotation['gt_labels'] - gt_masks = annotation.get('gt_masks', None) - if gt_masks is not None: - gt_masks = mask2ndarray(gt_masks) - - gt_seg = annotation.get('gt_semantic_seg', None) - if gt_seg is not None: - pad_value = 255 # the padding value of gt_seg - sem_labels = np.unique(gt_seg) - all_labels = np.concatenate((gt_labels, sem_labels), axis=0) - all_labels, counts = np.unique(all_labels, return_counts=True) - stuff_labels = all_labels[np.logical_and(counts < 2, - all_labels != pad_value)] - stuff_masks = gt_seg[None] == stuff_labels[:, None, None] - gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0) - gt_masks = np.concatenate((gt_masks, stuff_masks.astype(np.uint8)), - axis=0) - # If you need to show the bounding boxes, - # please comment the following line - # gt_bboxes = None - - img = mmcv.imread(img) - - img_with_gt = imshow_det_bboxes( - img, - gt_bboxes, - gt_labels, - gt_masks, - class_names=class_names, - bbox_color=gt_bbox_color, - text_color=gt_text_color, - mask_color=gt_mask_color, - thickness=thickness, - font_size=font_size, - win_name=win_name, - show=False) - - if not isinstance(result, dict): - if isinstance(result, tuple): - bbox_result, segm_result = result - if isinstance(segm_result, tuple): - segm_result = segm_result[0] # ms rcnn - else: - bbox_result, segm_result = result, None - - bboxes = np.vstack(bbox_result) - labels = [ - np.full(bbox.shape[0], i, dtype=np.int32) - for i, bbox in enumerate(bbox_result) - ] - labels = np.concatenate(labels) - - segms = None - if segm_result is not None and len(labels) > 0: # non empty - segms = mmcv.concat_list(segm_result) - segms = mask_util.decode(segms) - segms = segms.transpose(2, 0, 1) - else: - assert class_names is not None, 'We need to know the number ' \ - 'of classes.' 
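For orientation, here is a minimal usage sketch of the `imshow_det_bboxes` helper defined above, before the panoptic branch of `imshow_gt_det_bboxes` continues below. The boxes, scores, labels and class names are invented, and the import path assumes the visualization module shown here is exposed as `mmdet.core.visualization`, as in upstream mmdet.

```python
import numpy as np

from mmdet.core.visualization import imshow_det_bboxes  # assumed export path

# Invented detections: one (x1, y1, x2, y2, score) row per box, plus one label per box.
img = np.full((240, 320, 3), 255, dtype=np.uint8)  # blank BGR image
bboxes = np.array([[30, 40, 120, 160, 0.92],
                   [150, 60, 280, 200, 0.75],
                   [10, 10, 60, 70, 0.30]], dtype=np.float32)
labels = np.array([0, 1, 0], dtype=np.int64)

# Boxes below score_thr are dropped; the rendered image is written to disk
# instead of being shown in a window.
vis = imshow_det_bboxes(
    img,
    bboxes,
    labels,
    class_names=['cat', 'dog'],  # made-up class names
    score_thr=0.5,
    show=False,
    out_file='det_vis.jpg')
```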
- VOID = len(class_names) - bboxes = None - pan_results = result['pan_results'] - # keep objects ahead - ids = np.unique(pan_results)[::-1] - legal_indices = ids != VOID - ids = ids[legal_indices] - labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64) - segms = (pan_results[None] == ids[:, None, None]) - - if overlay_gt_pred: - img = imshow_det_bboxes( - img_with_gt, - bboxes, - labels, - segms=segms, - class_names=class_names, - score_thr=score_thr, - bbox_color=det_bbox_color, - text_color=det_text_color, - mask_color=det_mask_color, - thickness=thickness, - font_size=font_size, - win_name=win_name, - show=show, - wait_time=wait_time, - out_file=out_file) - else: - img_with_det = imshow_det_bboxes( - img, - bboxes, - labels, - segms=segms, - class_names=class_names, - score_thr=score_thr, - bbox_color=det_bbox_color, - text_color=det_text_color, - mask_color=det_mask_color, - thickness=thickness, - font_size=font_size, - win_name=win_name, - show=False) - img = np.concatenate([img_with_gt, img_with_det], axis=0) - - plt.imshow(img) - if show: - if wait_time == 0: - plt.show() - else: - plt.show(block=False) - plt.pause(wait_time) - if out_file is not None: - mmcv.imwrite(img, out_file) - plt.close() - - return img diff --git a/cv/detection/co-detr/pytorch/mmdet/core/visualization/palette.py b/cv/detection/co-detr/pytorch/mmdet/core/visualization/palette.py deleted file mode 100644 index 11692cdd086301d9d3be4a4702dc12881b8e8d6e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/core/visualization/palette.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import numpy as np - - -def palette_val(palette): - """Convert palette to matplotlib palette. - - Args: - palette List[tuple]: A list of color tuples. - - Returns: - List[tuple[float]]: A list of RGB matplotlib color tuples. - """ - new_palette = [] - for color in palette: - color = [c / 255 for c in color] - new_palette.append(tuple(color)) - return new_palette - - -def get_palette(palette, num_classes): - """Get palette from various inputs. - - Args: - palette (list[tuple] | str | tuple | :obj:`Color`): palette inputs. - num_classes (int): the number of classes. - - Returns: - list[tuple[int]]: A list of color tuples. - """ - assert isinstance(num_classes, int) - - if isinstance(palette, list): - dataset_palette = palette - elif isinstance(palette, tuple): - dataset_palette = [palette] * num_classes - elif palette == 'random' or palette is None: - state = np.random.get_state() - # random color - np.random.seed(42) - palette = np.random.randint(0, 256, size=(num_classes, 3)) - np.random.set_state(state) - dataset_palette = [tuple(c) for c in palette] - elif palette == 'coco': - from mmdet.datasets import CocoDataset, CocoPanopticDataset - dataset_palette = CocoDataset.PALETTE - if len(dataset_palette) < num_classes: - dataset_palette = CocoPanopticDataset.PALETTE - elif palette == 'citys': - from mmdet.datasets import CityscapesDataset - dataset_palette = CityscapesDataset.PALETTE - elif palette == 'voc': - from mmdet.datasets import VOCDataset - dataset_palette = VOCDataset.PALETTE - elif mmcv.is_str(palette): - dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes - else: - raise TypeError(f'Invalid type for palette: {type(palette)}') - - assert len(dataset_palette) >= num_classes, \ - 'The length of palette should not be less than `num_classes`.' 
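Just before `get_palette` returns below, a small usage sketch of the two palette helpers; the three-class count is arbitrary and the import path follows the file location shown in this diff.

```python
from mmdet.core.visualization.palette import get_palette, palette_val

num_classes = 3  # arbitrary class count for illustration
# 'random' palettes are seeded internally, so repeated calls give the same colors.
int_palette = get_palette('random', num_classes)  # list of (R, G, B) ints in [0, 255]
mpl_palette = palette_val(int_palette)            # list of (r, g, b) floats in [0.0, 1.0]
print(int_palette[0], mpl_palette[0])
```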
- return dataset_palette diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/__init__.py b/cv/detection/co-detr/pytorch/mmdet/datasets/__init__.py deleted file mode 100644 index f251d07e174b21e27c03f95d52b2bd80bbc1d81b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset -from .cityscapes import CityscapesDataset -from .coco import CocoDataset -from .coco_panoptic import CocoPanopticDataset -from .custom import CustomDataset -from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset, - MultiImageMixDataset, RepeatDataset) -from .deepfashion import DeepFashionDataset -from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset -from .openimages import OpenImagesChallengeDataset, OpenImagesDataset -from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler -from .utils import (NumClassCheckHook, get_loading_pipeline, - replace_ImageToTensor) -from .voc import VOCDataset -from .wider_face import WIDERFaceDataset -from .xml_style import XMLDataset - -__all__ = [ - 'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset', - 'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', - 'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler', - 'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', - 'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES', - 'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline', - 'NumClassCheckHook', 'CocoPanopticDataset', 'MultiImageMixDataset', - 'OpenImagesDataset', 'OpenImagesChallengeDataset' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/api_wrappers/__init__.py b/cv/detection/co-detr/pytorch/mmdet/datasets/api_wrappers/__init__.py deleted file mode 100644 index af8557593b6a50541bba1198dc9361ab5382547f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/api_wrappers/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .coco_api import COCO, COCOeval -from .panoptic_evaluation import pq_compute_multi_core, pq_compute_single_core - -__all__ = [ - 'COCO', 'COCOeval', 'pq_compute_multi_core', 'pq_compute_single_core' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/api_wrappers/coco_api.py b/cv/detection/co-detr/pytorch/mmdet/datasets/api_wrappers/coco_api.py deleted file mode 100644 index eef6341ebbd33c222b5cda9c43c21bac1a9575da..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/api_wrappers/coco_api.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# This file add snake case alias for coco api - -import warnings - -import pycocotools -from pycocotools.coco import COCO as _COCO -from pycocotools.cocoeval import COCOeval as _COCOeval - - -class COCO(_COCO): - """This class is almost the same as official pycocotools package. - - It implements some snake case function aliases. So that the COCO class has - the same interface as LVIS class. - """ - - def __init__(self, annotation_file=None): - if getattr(pycocotools, '__version__', '0') >= '12.0.2': - warnings.warn( - 'mmpycocotools is deprecated. 
Please install official pycocotools by "pip install pycocotools"', # noqa: E501 - UserWarning) - super().__init__(annotation_file=annotation_file) - self.img_ann_map = self.imgToAnns - self.cat_img_map = self.catToImgs - - def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None): - return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd) - - def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]): - return self.getCatIds(cat_names, sup_names, cat_ids) - - def get_img_ids(self, img_ids=[], cat_ids=[]): - return self.getImgIds(img_ids, cat_ids) - - def load_anns(self, ids): - return self.loadAnns(ids) - - def load_cats(self, ids): - return self.loadCats(ids) - - def load_imgs(self, ids): - return self.loadImgs(ids) - - -# just for the ease of import -COCOeval = _COCOeval diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/api_wrappers/panoptic_evaluation.py b/cv/detection/co-detr/pytorch/mmdet/datasets/api_wrappers/panoptic_evaluation.py deleted file mode 100644 index 55f57bf4a4ca3554ab90ac768dc9ec06e9c878d2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/api_wrappers/panoptic_evaluation.py +++ /dev/null @@ -1,228 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -# Copyright (c) 2018, Alexander Kirillov -# This file supports `file_client` for `panopticapi`, -# the source code is copied from `panopticapi`, -# only the way to load the gt images is modified. -import multiprocessing -import os - -import mmcv -import numpy as np - -try: - from panopticapi.evaluation import OFFSET, VOID, PQStat - from panopticapi.utils import rgb2id -except ImportError: - PQStat = None - rgb2id = None - VOID = 0 - OFFSET = 256 * 256 * 256 - - -def pq_compute_single_core(proc_id, - annotation_set, - gt_folder, - pred_folder, - categories, - file_client=None, - print_log=False): - """The single core function to evaluate the metric of Panoptic - Segmentation. - - Same as the function with the same name in `panopticapi`. Only the function - to load the images is changed to use the file client. - - Args: - proc_id (int): The id of the mini process. - gt_folder (str): The path of the ground truth images. - pred_folder (str): The path of the prediction images. - categories (str): The categories of the dataset. - file_client (object): The file client of the dataset. If None, - the backend will be set to `disk`. - print_log (bool): Whether to print the log. Defaults to False. - """ - if PQStat is None: - raise RuntimeError( - 'panopticapi is not installed, please install it by: ' - 'pip install git+https://github.com/cocodataset/' - 'panopticapi.git.') - - if file_client is None: - file_client_args = dict(backend='disk') - file_client = mmcv.FileClient(**file_client_args) - - pq_stat = PQStat() - - idx = 0 - for gt_ann, pred_ann in annotation_set: - if print_log and idx % 100 == 0: - print('Core: {}, {} from {} images processed'.format( - proc_id, idx, len(annotation_set))) - idx += 1 - # The gt images can be on the local disk or `ceph`, so we use - # file_client here. - img_bytes = file_client.get( - os.path.join(gt_folder, gt_ann['file_name'])) - pan_gt = mmcv.imfrombytes(img_bytes, flag='color', channel_order='rgb') - pan_gt = rgb2id(pan_gt) - - # The predictions can only be on the local dist now. 
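The segment-matching loop that follows accumulates per-category `PQStat` counters (tp, fp, fn and a running IoU sum), counting a ground-truth/prediction pair as matched only when its IoU exceeds 0.5. As a reminder of the metric those counters feed, here is a small standalone sketch of the panoptic-quality formula; the counter values are invented.

```python
def panoptic_quality(iou_sum, tp, fp, fn):
    """PQ for one category: sum of matched IoUs over (TP + 0.5*FP + 0.5*FN)."""
    if tp + fp + fn == 0:
        return 0.0
    return iou_sum / (tp + 0.5 * fp + 0.5 * fn)


# Invented counters: 9 matches with IoUs summing to 7.3,
# 2 unmatched predictions and 1 unmatched ground-truth segment.
print(panoptic_quality(iou_sum=7.3, tp=9, fp=2, fn=1))  # ~0.695
```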
- pan_pred = mmcv.imread( - os.path.join(pred_folder, pred_ann['file_name']), - flag='color', - channel_order='rgb') - pan_pred = rgb2id(pan_pred) - - gt_segms = {el['id']: el for el in gt_ann['segments_info']} - pred_segms = {el['id']: el for el in pred_ann['segments_info']} - - # predicted segments area calculation + prediction sanity checks - pred_labels_set = set(el['id'] for el in pred_ann['segments_info']) - labels, labels_cnt = np.unique(pan_pred, return_counts=True) - for label, label_cnt in zip(labels, labels_cnt): - if label not in pred_segms: - if label == VOID: - continue - raise KeyError( - 'In the image with ID {} segment with ID {} is ' - 'presented in PNG and not presented in JSON.'.format( - gt_ann['image_id'], label)) - pred_segms[label]['area'] = label_cnt - pred_labels_set.remove(label) - if pred_segms[label]['category_id'] not in categories: - raise KeyError( - 'In the image with ID {} segment with ID {} has ' - 'unknown category_id {}.'.format( - gt_ann['image_id'], label, - pred_segms[label]['category_id'])) - if len(pred_labels_set) != 0: - raise KeyError( - 'In the image with ID {} the following segment IDs {} ' - 'are presented in JSON and not presented in PNG.'.format( - gt_ann['image_id'], list(pred_labels_set))) - - # confusion matrix calculation - pan_gt_pred = pan_gt.astype(np.uint64) * OFFSET + pan_pred.astype( - np.uint64) - gt_pred_map = {} - labels, labels_cnt = np.unique(pan_gt_pred, return_counts=True) - for label, intersection in zip(labels, labels_cnt): - gt_id = label // OFFSET - pred_id = label % OFFSET - gt_pred_map[(gt_id, pred_id)] = intersection - - # count all matched pairs - gt_matched = set() - pred_matched = set() - for label_tuple, intersection in gt_pred_map.items(): - gt_label, pred_label = label_tuple - if gt_label not in gt_segms: - continue - if pred_label not in pred_segms: - continue - if gt_segms[gt_label]['iscrowd'] == 1: - continue - if gt_segms[gt_label]['category_id'] != pred_segms[pred_label][ - 'category_id']: - continue - - union = pred_segms[pred_label]['area'] + gt_segms[gt_label][ - 'area'] - intersection - gt_pred_map.get((VOID, pred_label), 0) - iou = intersection / union - if iou > 0.5: - pq_stat[gt_segms[gt_label]['category_id']].tp += 1 - pq_stat[gt_segms[gt_label]['category_id']].iou += iou - gt_matched.add(gt_label) - pred_matched.add(pred_label) - - # count false positives - crowd_labels_dict = {} - for gt_label, gt_info in gt_segms.items(): - if gt_label in gt_matched: - continue - # crowd segments are ignored - if gt_info['iscrowd'] == 1: - crowd_labels_dict[gt_info['category_id']] = gt_label - continue - pq_stat[gt_info['category_id']].fn += 1 - - # count false positives - for pred_label, pred_info in pred_segms.items(): - if pred_label in pred_matched: - continue - # intersection of the segment with VOID - intersection = gt_pred_map.get((VOID, pred_label), 0) - # plus intersection with corresponding CROWD region if it exists - if pred_info['category_id'] in crowd_labels_dict: - intersection += gt_pred_map.get( - (crowd_labels_dict[pred_info['category_id']], pred_label), - 0) - # predicted segment is ignored if more than half of - # the segment correspond to VOID and CROWD regions - if intersection / pred_info['area'] > 0.5: - continue - pq_stat[pred_info['category_id']].fp += 1 - - if print_log: - print('Core: {}, all {} images processed'.format( - proc_id, len(annotation_set))) - return pq_stat - - -def pq_compute_multi_core(matched_annotations_list, - gt_folder, - pred_folder, - categories, - 
file_client=None, - nproc=32): - """Evaluate the metrics of Panoptic Segmentation with multithreading. - - Same as the function with the same name in `panopticapi`. - - Args: - matched_annotations_list (list): The matched annotation list. Each - element is a tuple of annotations of the same image with the - format (gt_anns, pred_anns). - gt_folder (str): The path of the ground truth images. - pred_folder (str): The path of the prediction images. - categories (str): The categories of the dataset. - file_client (object): The file client of the dataset. If None, - the backend will be set to `disk`. - nproc (int): Number of processes for panoptic quality computing. - Defaults to 32. When `nproc` exceeds the number of cpu cores, - the number of cpu cores is used. - """ - if PQStat is None: - raise RuntimeError( - 'panopticapi is not installed, please install it by: ' - 'pip install git+https://github.com/cocodataset/' - 'panopticapi.git.') - - if file_client is None: - file_client_args = dict(backend='disk') - file_client = mmcv.FileClient(**file_client_args) - - cpu_num = min(nproc, multiprocessing.cpu_count()) - - annotations_split = np.array_split(matched_annotations_list, cpu_num) - print('Number of cores: {}, images per core: {}'.format( - cpu_num, len(annotations_split[0]))) - workers = multiprocessing.Pool(processes=cpu_num) - processes = [] - for proc_id, annotation_set in enumerate(annotations_split): - p = workers.apply_async(pq_compute_single_core, - (proc_id, annotation_set, gt_folder, - pred_folder, categories, file_client)) - processes.append(p) - - # Close the process pool, otherwise it will lead to memory - # leaking problems. - workers.close() - workers.join() - - pq_stat = PQStat() - for p in processes: - pq_stat += p.get() - - return pq_stat diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/builder.py b/cv/detection/co-detr/pytorch/mmdet/datasets/builder.py deleted file mode 100644 index 852d166058fb3e43fdad02e0159354f8cf30d36d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/builder.py +++ /dev/null @@ -1,219 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import platform -import random -import warnings -from functools import partial - -import numpy as np -import torch -from mmcv.parallel import collate -from mmcv.runner import get_dist_info -from mmcv.utils import TORCH_VERSION, Registry, build_from_cfg, digit_version -from torch.utils.data import DataLoader - -from .samplers import (ClassAwareSampler, DistributedGroupSampler, - DistributedSampler, GroupSampler, InfiniteBatchSampler, - InfiniteGroupBatchSampler) - -if platform.system() != 'Windows': - # https://github.com/pytorch/pytorch/issues/973 - import resource - rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) - base_soft_limit = rlimit[0] - hard_limit = rlimit[1] - soft_limit = min(max(4096, base_soft_limit), hard_limit) - resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) - -DATASETS = Registry('dataset') -PIPELINES = Registry('pipeline') - - -def _concat_dataset(cfg, default_args=None): - from .dataset_wrappers import ConcatDataset - ann_files = cfg['ann_file'] - img_prefixes = cfg.get('img_prefix', None) - seg_prefixes = cfg.get('seg_prefix', None) - proposal_files = cfg.get('proposal_file', None) - separate_eval = cfg.get('separate_eval', True) - - datasets = [] - num_dset = len(ann_files) - for i in range(num_dset): - data_cfg = copy.deepcopy(cfg) - # pop 'separate_eval' since it is not a valid key for common datasets. 
- if 'separate_eval' in data_cfg: - data_cfg.pop('separate_eval') - data_cfg['ann_file'] = ann_files[i] - if isinstance(img_prefixes, (list, tuple)): - data_cfg['img_prefix'] = img_prefixes[i] - if isinstance(seg_prefixes, (list, tuple)): - data_cfg['seg_prefix'] = seg_prefixes[i] - if isinstance(proposal_files, (list, tuple)): - data_cfg['proposal_file'] = proposal_files[i] - datasets.append(build_dataset(data_cfg, default_args)) - - return ConcatDataset(datasets, separate_eval) - - -def build_dataset(cfg, default_args=None): - from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset, - MultiImageMixDataset, RepeatDataset) - if isinstance(cfg, (list, tuple)): - dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) - elif cfg['type'] == 'ConcatDataset': - dataset = ConcatDataset( - [build_dataset(c, default_args) for c in cfg['datasets']], - cfg.get('separate_eval', True)) - elif cfg['type'] == 'RepeatDataset': - dataset = RepeatDataset( - build_dataset(cfg['dataset'], default_args), cfg['times']) - elif cfg['type'] == 'ClassBalancedDataset': - dataset = ClassBalancedDataset( - build_dataset(cfg['dataset'], default_args), cfg['oversample_thr']) - elif cfg['type'] == 'MultiImageMixDataset': - cp_cfg = copy.deepcopy(cfg) - cp_cfg['dataset'] = build_dataset(cp_cfg['dataset']) - cp_cfg.pop('type') - cp_cfg.pop('ann_file') - cp_cfg.pop('img_prefix') - if 'filter_empty_gt' in cp_cfg.keys(): - cp_cfg.pop('filter_empty_gt') - dataset = MultiImageMixDataset(**cp_cfg) - elif isinstance(cfg.get('ann_file'), (list, tuple)): - dataset = _concat_dataset(cfg, default_args) - else: - dataset = build_from_cfg(cfg, DATASETS, default_args) - - return dataset - - -def build_dataloader(dataset, - samples_per_gpu, - workers_per_gpu, - num_gpus=1, - dist=True, - shuffle=True, - seed=None, - runner_type='EpochBasedRunner', - persistent_workers=False, - class_aware_sampler=None, - **kwargs): - """Build PyTorch DataLoader. - - In distributed training, each GPU/process has a dataloader. - In non-distributed training, there is only one dataloader for all GPUs. - - Args: - dataset (Dataset): A PyTorch dataset. - samples_per_gpu (int): Number of training samples on each GPU, i.e., - batch size of each GPU. - workers_per_gpu (int): How many subprocesses to use for data loading - for each GPU. - num_gpus (int): Number of GPUs. Only used in non-distributed training. - dist (bool): Distributed training/test or not. Default: True. - shuffle (bool): Whether to shuffle the data at every epoch. - Default: True. - seed (int, Optional): Seed to be used. Default: None. - runner_type (str): Type of runner. Default: `EpochBasedRunner` - persistent_workers (bool): If True, the data loader will not shutdown - the worker processes after a dataset has been consumed once. - This allows to maintain the workers `Dataset` instances alive. - This argument is only valid when PyTorch>=1.7.0. Default: False. - class_aware_sampler (dict): Whether to use `ClassAwareSampler` - during training. Default: None. - kwargs: any keyword argument to be used to initialize DataLoader - - Returns: - DataLoader: A PyTorch dataloader. - """ - rank, world_size = get_dist_info() - - if dist: - # When model is :obj:`DistributedDataParallel`, - # `batch_size` of :obj:`dataloader` is the - # number of training samples on each GPU. 
- batch_size = samples_per_gpu - num_workers = workers_per_gpu - else: - # When model is obj:`DataParallel` - # the batch size is samples on all the GPUS - batch_size = num_gpus * samples_per_gpu - num_workers = num_gpus * workers_per_gpu - - if runner_type == 'IterBasedRunner': - # this is a batch sampler, which can yield - # a mini-batch indices each time. - # it can be used in both `DataParallel` and - # `DistributedDataParallel` - if shuffle: - batch_sampler = InfiniteGroupBatchSampler( - dataset, batch_size, world_size, rank, seed=seed) - else: - batch_sampler = InfiniteBatchSampler( - dataset, - batch_size, - world_size, - rank, - seed=seed, - shuffle=False) - batch_size = 1 - sampler = None - else: - if class_aware_sampler is not None: - # ClassAwareSampler can be used in both distributed and - # non-distributed training. - num_sample_class = class_aware_sampler.get('num_sample_class', 1) - sampler = ClassAwareSampler( - dataset, - samples_per_gpu, - world_size, - rank, - seed=seed, - num_sample_class=num_sample_class) - elif dist: - # DistributedGroupSampler will definitely shuffle the data to - # satisfy that images on each GPU are in the same group - if shuffle: - sampler = DistributedGroupSampler( - dataset, samples_per_gpu, world_size, rank, seed=seed) - else: - sampler = DistributedSampler( - dataset, world_size, rank, shuffle=False, seed=seed) - else: - sampler = GroupSampler(dataset, - samples_per_gpu) if shuffle else None - batch_sampler = None - - init_fn = partial( - worker_init_fn, num_workers=num_workers, rank=rank, - seed=seed) if seed is not None else None - - if (TORCH_VERSION != 'parrots' - and digit_version(TORCH_VERSION) >= digit_version('1.7.0')): - kwargs['persistent_workers'] = persistent_workers - elif persistent_workers is True: - warnings.warn('persistent_workers is invalid because your pytorch ' - 'version is lower than 1.7.0') - - data_loader = DataLoader( - dataset, - batch_size=batch_size, - sampler=sampler, - num_workers=num_workers, - batch_sampler=batch_sampler, - collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), - pin_memory=kwargs.pop('pin_memory', False), - worker_init_fn=init_fn, - **kwargs) - - return data_loader - - -def worker_init_fn(worker_id, num_workers, rank, seed): - # The seed of each worker equals to - # num_worker * rank + worker_id + user_seed - worker_seed = num_workers * rank + worker_id + seed - np.random.seed(worker_seed) - random.seed(worker_seed) - torch.manual_seed(worker_seed) diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/cityscapes.py b/cv/detection/co-detr/pytorch/mmdet/datasets/cityscapes.py deleted file mode 100644 index da6a2adc1194f0cb49c8b0aba0ec39c3cec10565..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/cityscapes.py +++ /dev/null @@ -1,338 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
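Rounding off `builder.py` above, a hedged usage sketch of `build_dataset` and `build_dataloader` in a single-GPU, non-distributed setting; the `CocoDataset` config and paths are placeholders and assume a local COCO-style layout.

```python
from mmdet.datasets import build_dataset, build_dataloader

# Placeholder config; ann_file/img_prefix must point at a real COCO-style dataset.
dataset = build_dataset(dict(
    type='CocoDataset',
    ann_file='data/coco/annotations/instances_train2017.json',
    img_prefix='data/coco/train2017/',
    pipeline=[]))

# One DataLoader per process; here a single GPU with 2 images and 2 workers.
data_loader = build_dataloader(
    dataset,
    samples_per_gpu=2,
    workers_per_gpu=2,
    num_gpus=1,
    dist=False,
    shuffle=True,
    seed=42)
```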
-# Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa -# and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa - -import glob -import os -import os.path as osp -import tempfile -from collections import OrderedDict - -import mmcv -import numpy as np -import pycocotools.mask as maskUtils -from mmcv.utils import print_log - -from .builder import DATASETS -from .coco import CocoDataset - - -@DATASETS.register_module() -class CityscapesDataset(CocoDataset): - - CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', - 'bicycle') - - PALETTE = [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70), - (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32)] - - def _filter_imgs(self, min_size=32): - """Filter images too small or without ground truths.""" - valid_inds = [] - # obtain images that contain annotation - ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) - # obtain images that contain annotations of the required categories - ids_in_cat = set() - for i, class_id in enumerate(self.cat_ids): - ids_in_cat |= set(self.coco.cat_img_map[class_id]) - # merge the image id sets of the two conditions and use the merged set - # to filter out images if self.filter_empty_gt=True - ids_in_cat &= ids_with_ann - - valid_img_ids = [] - for i, img_info in enumerate(self.data_infos): - img_id = img_info['id'] - ann_ids = self.coco.getAnnIds(imgIds=[img_id]) - ann_info = self.coco.loadAnns(ann_ids) - all_iscrowd = all([_['iscrowd'] for _ in ann_info]) - if self.filter_empty_gt and (self.img_ids[i] not in ids_in_cat - or all_iscrowd): - continue - if min(img_info['width'], img_info['height']) >= min_size: - valid_inds.append(i) - valid_img_ids.append(img_id) - self.img_ids = valid_img_ids - return valid_inds - - def _parse_ann_info(self, img_info, ann_info): - """Parse bbox and mask annotation. - - Args: - img_info (dict): Image info of an image. - ann_info (list[dict]): Annotation info of an image. - - Returns: - dict: A dict containing the following keys: bboxes, \ - bboxes_ignore, labels, masks, seg_map. \ - "masks" are already decoded into binary masks. - """ - gt_bboxes = [] - gt_labels = [] - gt_bboxes_ignore = [] - gt_masks_ann = [] - - for i, ann in enumerate(ann_info): - if ann.get('ignore', False): - continue - x1, y1, w, h = ann['bbox'] - if ann['area'] <= 0 or w < 1 or h < 1: - continue - if ann['category_id'] not in self.cat_ids: - continue - bbox = [x1, y1, x1 + w, y1 + h] - if ann.get('iscrowd', False): - gt_bboxes_ignore.append(bbox) - else: - gt_bboxes.append(bbox) - gt_labels.append(self.cat2label[ann['category_id']]) - gt_masks_ann.append(ann['segmentation']) - - if gt_bboxes: - gt_bboxes = np.array(gt_bboxes, dtype=np.float32) - gt_labels = np.array(gt_labels, dtype=np.int64) - else: - gt_bboxes = np.zeros((0, 4), dtype=np.float32) - gt_labels = np.array([], dtype=np.int64) - - if gt_bboxes_ignore: - gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) - else: - gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) - - ann = dict( - bboxes=gt_bboxes, - labels=gt_labels, - bboxes_ignore=gt_bboxes_ignore, - masks=gt_masks_ann, - seg_map=img_info['segm_file']) - - return ann - - def results2txt(self, results, outfile_prefix): - """Dump the detection results to a txt file. - - Args: - results (list[list | tuple]): Testing results of the - dataset. 
- outfile_prefix (str): The filename prefix of the json files. - If the prefix is "somepath/xxx", - the txt files will be named "somepath/xxx.txt". - - Returns: - list[str]: Result txt files which contains corresponding \ - instance segmentation images. - """ - try: - import cityscapesscripts.helpers.labels as CSLabels - except ImportError: - raise ImportError('Please run "pip install citscapesscripts" to ' - 'install cityscapesscripts first.') - result_files = [] - os.makedirs(outfile_prefix, exist_ok=True) - prog_bar = mmcv.ProgressBar(len(self)) - for idx in range(len(self)): - result = results[idx] - filename = self.data_infos[idx]['filename'] - basename = osp.splitext(osp.basename(filename))[0] - pred_txt = osp.join(outfile_prefix, basename + '_pred.txt') - - bbox_result, segm_result = result - bboxes = np.vstack(bbox_result) - # segm results - if isinstance(segm_result, tuple): - # Some detectors use different scores for bbox and mask, - # like Mask Scoring R-CNN. Score of segm will be used instead - # of bbox score. - segms = mmcv.concat_list(segm_result[0]) - mask_score = segm_result[1] - else: - # use bbox score for mask score - segms = mmcv.concat_list(segm_result) - mask_score = [bbox[-1] for bbox in bboxes] - labels = [ - np.full(bbox.shape[0], i, dtype=np.int32) - for i, bbox in enumerate(bbox_result) - ] - labels = np.concatenate(labels) - - assert len(bboxes) == len(segms) == len(labels) - num_instances = len(bboxes) - prog_bar.update() - with open(pred_txt, 'w') as fout: - for i in range(num_instances): - pred_class = labels[i] - classes = self.CLASSES[pred_class] - class_id = CSLabels.name2label[classes].id - score = mask_score[i] - mask = maskUtils.decode(segms[i]).astype(np.uint8) - png_filename = osp.join(outfile_prefix, - basename + f'_{i}_{classes}.png') - mmcv.imwrite(mask, png_filename) - fout.write(f'{osp.basename(png_filename)} {class_id} ' - f'{score}\n') - result_files.append(pred_txt) - - return result_files - - def format_results(self, results, txtfile_prefix=None): - """Format the results to txt (standard format for Cityscapes - evaluation). - - Args: - results (list): Testing results of the dataset. - txtfile_prefix (str | None): The prefix of txt files. It includes - the file path and the prefix of filename, e.g., "a/b/prefix". - If not specified, a temp file will be created. Default: None. - - Returns: - tuple: (result_files, tmp_dir), result_files is a dict containing \ - the json filepaths, tmp_dir is the temporal directory created \ - for saving txt/png files when txtfile_prefix is not specified. - """ - assert isinstance(results, list), 'results must be a list' - assert len(results) == len(self), ( - 'The length of results is not equal to the dataset len: {} != {}'. - format(len(results), len(self))) - - assert isinstance(results, list), 'results must be a list' - assert len(results) == len(self), ( - 'The length of results is not equal to the dataset len: {} != {}'. - format(len(results), len(self))) - - if txtfile_prefix is None: - tmp_dir = tempfile.TemporaryDirectory() - txtfile_prefix = osp.join(tmp_dir.name, 'results') - else: - tmp_dir = None - result_files = self.results2txt(results, txtfile_prefix) - - return result_files, tmp_dir - - def evaluate(self, - results, - metric='bbox', - logger=None, - outfile_prefix=None, - classwise=False, - proposal_nums=(100, 300, 1000), - iou_thrs=np.arange(0.5, 0.96, 0.05)): - """Evaluation in Cityscapes/COCO protocol. - - Args: - results (list[list | tuple]): Testing results of the dataset. 
- metric (str | list[str]): Metrics to be evaluated. Options are - 'bbox', 'segm', 'proposal', 'proposal_fast'. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - outfile_prefix (str | None): The prefix of output file. It includes - the file path and the prefix of filename, e.g., "a/b/prefix". - If results are evaluated with COCO protocol, it would be the - prefix of output json file. For example, the metric is 'bbox' - and 'segm', then json files would be "a/b/prefix.bbox.json" and - "a/b/prefix.segm.json". - If results are evaluated with cityscapes protocol, it would be - the prefix of output txt/png files. The output files would be - png images under folder "a/b/prefix/xxx/" and the file name of - images would be written into a txt file - "a/b/prefix/xxx_pred.txt", where "xxx" is the video name of - cityscapes. If not specified, a temp file will be created. - Default: None. - classwise (bool): Whether to evaluating the AP for each class. - proposal_nums (Sequence[int]): Proposal number used for evaluating - recalls, such as recall@100, recall@1000. - Default: (100, 300, 1000). - iou_thrs (Sequence[float]): IoU threshold used for evaluating - recalls. If set to a list, the average recall of all IoUs will - also be computed. Default: 0.5. - - Returns: - dict[str, float]: COCO style evaluation metric or cityscapes mAP \ - and AP@50. - """ - eval_results = dict() - - metrics = metric.copy() if isinstance(metric, list) else [metric] - - if 'cityscapes' in metrics: - eval_results.update( - self._evaluate_cityscapes(results, outfile_prefix, logger)) - metrics.remove('cityscapes') - - # left metrics are all coco metric - if len(metrics) > 0: - # create CocoDataset with CityscapesDataset annotation - self_coco = CocoDataset(self.ann_file, self.pipeline.transforms, - None, self.data_root, self.img_prefix, - self.seg_prefix, self.proposal_file, - self.test_mode, self.filter_empty_gt) - # TODO: remove this in the future - # reload annotations of correct class - self_coco.CLASSES = self.CLASSES - self_coco.data_infos = self_coco.load_annotations(self.ann_file) - eval_results.update( - self_coco.evaluate(results, metrics, logger, outfile_prefix, - classwise, proposal_nums, iou_thrs)) - - return eval_results - - def _evaluate_cityscapes(self, results, txtfile_prefix, logger): - """Evaluation in Cityscapes protocol. - - Args: - results (list): Testing results of the dataset. - txtfile_prefix (str | None): The prefix of output txt file - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - - Returns: - dict[str: float]: Cityscapes evaluation results, contains 'mAP' \ - and 'AP@50'. 
- """ - - try: - import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval # noqa - except ImportError: - raise ImportError('Please run "pip install citscapesscripts" to ' - 'install cityscapesscripts first.') - msg = 'Evaluating in Cityscapes style' - if logger is None: - msg = '\n' + msg - print_log(msg, logger=logger) - - result_files, tmp_dir = self.format_results(results, txtfile_prefix) - - if tmp_dir is None: - result_dir = osp.join(txtfile_prefix, 'results') - else: - result_dir = osp.join(tmp_dir.name, 'results') - - eval_results = OrderedDict() - print_log(f'Evaluating results under {result_dir} ...', logger=logger) - - # set global states in cityscapes evaluation API - CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..') - CSEval.args.predictionPath = os.path.abspath(result_dir) - CSEval.args.predictionWalk = None - CSEval.args.JSONOutput = False - CSEval.args.colorized = False - CSEval.args.gtInstancesFile = os.path.join(result_dir, - 'gtInstances.json') - CSEval.args.groundTruthSearch = os.path.join( - self.img_prefix.replace('leftImg8bit', 'gtFine'), - '*/*_gtFine_instanceIds.png') - - groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch) - assert len(groundTruthImgList), 'Cannot find ground truth images' \ - f' in {CSEval.args.groundTruthSearch}.' - predictionImgList = [] - for gt in groundTruthImgList: - predictionImgList.append(CSEval.getPrediction(gt, CSEval.args)) - CSEval_results = CSEval.evaluateImgLists(predictionImgList, - groundTruthImgList, - CSEval.args)['averages'] - - eval_results['mAP'] = CSEval_results['allAp'] - eval_results['AP@50'] = CSEval_results['allAp50%'] - if tmp_dir is not None: - tmp_dir.cleanup() - return eval_results diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/coco.py b/cv/detection/co-detr/pytorch/mmdet/datasets/coco.py deleted file mode 100644 index 4738da213da15c31a14f4277eaaca0343c7af115..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/coco.py +++ /dev/null @@ -1,649 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
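To illustrate how the Cityscapes dataset class above is typically driven, a hedged sketch that wraps its `evaluate` call; `results` is expected to be the per-image output of a normal mmdet test run, and the config paths are placeholders.

```python
from mmdet.datasets import build_dataset


def evaluate_cityscapes_results(results):
    """Run COCO-protocol bbox evaluation plus the official cityscapesscripts
    instance-level evaluation (mAP / AP@50) on `results`."""
    dataset = build_dataset(dict(
        type='CityscapesDataset',
        ann_file='data/cityscapes/annotations/'
                 'instancesonly_filtered_gtFine_val.json',  # placeholder path
        img_prefix='data/cityscapes/leftImg8bit/val/',      # placeholder path
        pipeline=[]))
    return dataset.evaluate(results, metric=['bbox', 'cityscapes'])
```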
-import contextlib -import io -import itertools -import logging -import os.path as osp -import tempfile -import warnings -from collections import OrderedDict - -import mmcv -import numpy as np -from mmcv.utils import print_log -from terminaltables import AsciiTable - -from mmdet.core import eval_recalls -from .api_wrappers import COCO, COCOeval -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class CocoDataset(CustomDataset): - - CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', - 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', - 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', - 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', - 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', - 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', - 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', - 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', - 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', - 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', - 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', - 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush') - - PALETTE = [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), - (106, 0, 228), (0, 60, 100), (0, 80, 100), (0, 0, 70), - (0, 0, 192), (250, 170, 30), (100, 170, 30), (220, 220, 0), - (175, 116, 175), (250, 0, 30), (165, 42, 42), (255, 77, 255), - (0, 226, 252), (182, 182, 255), (0, 82, 0), (120, 166, 157), - (110, 76, 0), (174, 57, 255), (199, 100, 0), (72, 0, 118), - (255, 179, 240), (0, 125, 92), (209, 0, 151), (188, 208, 182), - (0, 220, 176), (255, 99, 164), (92, 0, 73), (133, 129, 255), - (78, 180, 255), (0, 228, 0), (174, 255, 243), (45, 89, 255), - (134, 134, 103), (145, 148, 174), (255, 208, 186), - (197, 226, 255), (171, 134, 1), (109, 63, 54), (207, 138, 255), - (151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105), - (166, 196, 102), (208, 195, 210), (255, 109, 65), (0, 143, 149), - (179, 0, 194), (209, 99, 106), (5, 121, 0), (227, 255, 205), - (147, 186, 208), (153, 69, 1), (3, 95, 161), (163, 255, 0), - (119, 0, 170), (0, 182, 199), (0, 165, 120), (183, 130, 88), - (95, 32, 0), (130, 114, 135), (110, 129, 133), (166, 74, 118), - (219, 142, 185), (79, 210, 114), (178, 90, 62), (65, 70, 15), - (127, 167, 115), (59, 105, 106), (142, 108, 45), (196, 172, 0), - (95, 54, 80), (128, 76, 255), (201, 57, 1), (246, 0, 122), - (191, 162, 208)] - - def load_annotations(self, ann_file): - """Load annotation from COCO style annotation file. - - Args: - ann_file (str): Path of annotation file. - - Returns: - list[dict]: Annotation info from COCO api. - """ - - self.coco = COCO(ann_file) - # The order of returned `cat_ids` will not - # change with the order of the CLASSES - self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES) - - self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} - self.img_ids = self.coco.get_img_ids() - data_infos = [] - total_ann_ids = [] - for i in self.img_ids: - info = self.coco.load_imgs([i])[0] - info['filename'] = info['file_name'] - data_infos.append(info) - ann_ids = self.coco.get_ann_ids(img_ids=[i]) - total_ann_ids.extend(ann_ids) - assert len(set(total_ann_ids)) == len( - total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!" 
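The bookkeeping in `load_annotations` above relies on the snake-case `COCO` wrapper from `api_wrappers`. Below is a self-contained sketch of that flow using a made-up two-category annotation file written to a temporary location purely for illustration.

```python
import json
import tempfile

from mmdet.datasets.api_wrappers import COCO

# Made-up COCO-style annotation file: one image, one box, two categories.
ann = {
    'images': [{'id': 1, 'file_name': 'demo.jpg', 'width': 100, 'height': 100}],
    'annotations': [{'id': 1, 'image_id': 1, 'category_id': 2,
                     'bbox': [10, 10, 30, 40], 'area': 1200, 'iscrowd': 0}],
    'categories': [{'id': 1, 'name': 'person'}, {'id': 2, 'name': 'car'}],
}
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump(ann, f)
    ann_file = f.name

coco = COCO(ann_file)
cat_ids = coco.get_cat_ids(cat_names=['person', 'car'])
cat2label = {cat_id: i for i, cat_id in enumerate(cat_ids)}  # contiguous labels
img_ids = coco.get_img_ids()
ann_ids = coco.get_ann_ids(img_ids=[img_ids[0]])
print(cat_ids, cat2label, ann_ids)  # [1, 2] {1: 0, 2: 1} [1]
```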
- return data_infos - - def get_ann_info(self, idx): - """Get COCO annotation by index. - - Args: - idx (int): Index of data. - - Returns: - dict: Annotation info of specified index. - """ - - img_id = self.data_infos[idx]['id'] - ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) - ann_info = self.coco.load_anns(ann_ids) - return self._parse_ann_info(self.data_infos[idx], ann_info) - - def get_cat_ids(self, idx): - """Get COCO category ids by index. - - Args: - idx (int): Index of data. - - Returns: - list[int]: All categories in the image of specified index. - """ - - img_id = self.data_infos[idx]['id'] - ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) - ann_info = self.coco.load_anns(ann_ids) - return [ann['category_id'] for ann in ann_info] - - def _filter_imgs(self, min_size=32): - """Filter images too small or without ground truths.""" - valid_inds = [] - # obtain images that contain annotation - ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) - # obtain images that contain annotations of the required categories - ids_in_cat = set() - for i, class_id in enumerate(self.cat_ids): - ids_in_cat |= set(self.coco.cat_img_map[class_id]) - # merge the image id sets of the two conditions and use the merged set - # to filter out images if self.filter_empty_gt=True - ids_in_cat &= ids_with_ann - - valid_img_ids = [] - for i, img_info in enumerate(self.data_infos): - img_id = self.img_ids[i] - if self.filter_empty_gt and img_id not in ids_in_cat: - continue - if min(img_info['width'], img_info['height']) >= min_size: - valid_inds.append(i) - valid_img_ids.append(img_id) - self.img_ids = valid_img_ids - return valid_inds - - def _parse_ann_info(self, img_info, ann_info): - """Parse bbox and mask annotation. - - Args: - ann_info (list[dict]): Annotation info of an image. - with_mask (bool): Whether to parse mask annotations. - - Returns: - dict: A dict containing the following keys: bboxes, bboxes_ignore,\ - labels, masks, seg_map. "masks" are raw annotations and not \ - decoded into binary masks. - """ - gt_bboxes = [] - gt_labels = [] - gt_bboxes_ignore = [] - gt_masks_ann = [] - for i, ann in enumerate(ann_info): - if ann.get('ignore', False): - continue - x1, y1, w, h = ann['bbox'] - inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0)) - inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0)) - if inter_w * inter_h == 0: - continue - if ann['area'] <= 0 or w < 1 or h < 1: - continue - if ann['category_id'] not in self.cat_ids: - continue - bbox = [x1, y1, x1 + w, y1 + h] - if ann.get('iscrowd', False): - gt_bboxes_ignore.append(bbox) - else: - gt_bboxes.append(bbox) - gt_labels.append(self.cat2label[ann['category_id']]) - gt_masks_ann.append(ann.get('segmentation', None)) - - if gt_bboxes: - gt_bboxes = np.array(gt_bboxes, dtype=np.float32) - gt_labels = np.array(gt_labels, dtype=np.int64) - else: - gt_bboxes = np.zeros((0, 4), dtype=np.float32) - gt_labels = np.array([], dtype=np.int64) - - if gt_bboxes_ignore: - gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) - else: - gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) - - seg_map = img_info['filename'].rsplit('.', 1)[0] + self.seg_suffix - - ann = dict( - bboxes=gt_bboxes, - labels=gt_labels, - bboxes_ignore=gt_bboxes_ignore, - masks=gt_masks_ann, - seg_map=seg_map) - - return ann - - def xyxy2xywh(self, bbox): - """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO - evaluation. - - Args: - bbox (numpy.ndarray): The bounding boxes, shape (4, ), in - ``xyxy`` order. 
- - Returns: - list[float]: The converted bounding boxes, in ``xywh`` order. - """ - - _bbox = bbox.tolist() - return [ - _bbox[0], - _bbox[1], - _bbox[2] - _bbox[0], - _bbox[3] - _bbox[1], - ] - - def _proposal2json(self, results): - """Convert proposal results to COCO json style.""" - json_results = [] - for idx in range(len(self)): - img_id = self.img_ids[idx] - bboxes = results[idx] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(bboxes[i][4]) - data['category_id'] = 1 - json_results.append(data) - return json_results - - def _det2json(self, results): - """Convert detection results to COCO json style.""" - json_results = [] - for idx in range(len(self)): - img_id = self.img_ids[idx] - result = results[idx] - for label in range(len(result)): - bboxes = result[label] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(bboxes[i][4]) - data['category_id'] = self.cat_ids[label] - json_results.append(data) - return json_results - - def _segm2json(self, results): - """Convert instance segmentation results to COCO json style.""" - bbox_json_results = [] - segm_json_results = [] - for idx in range(len(self)): - img_id = self.img_ids[idx] - det, seg = results[idx] - for label in range(len(det)): - # bbox results - bboxes = det[label] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(bboxes[i][4]) - data['category_id'] = self.cat_ids[label] - bbox_json_results.append(data) - - # segm results - # some detectors use different scores for bbox and mask - if isinstance(seg, tuple): - segms = seg[0][label] - mask_score = seg[1][label] - else: - segms = seg[label] - mask_score = [bbox[4] for bbox in bboxes] - for i in range(bboxes.shape[0]): - data = dict() - data['image_id'] = img_id - data['bbox'] = self.xyxy2xywh(bboxes[i]) - data['score'] = float(mask_score[i]) - data['category_id'] = self.cat_ids[label] - if isinstance(segms[i]['counts'], bytes): - segms[i]['counts'] = segms[i]['counts'].decode() - data['segmentation'] = segms[i] - segm_json_results.append(data) - return bbox_json_results, segm_json_results - - def results2json(self, results, outfile_prefix): - """Dump the detection results to a COCO style json file. - - There are 3 types of results: proposals, bbox predictions, mask - predictions, and they have different data types. This method will - automatically recognize the type, and dump them to json files. - - Args: - results (list[list | tuple | ndarray]): Testing results of the - dataset. - outfile_prefix (str): The filename prefix of the json files. If the - prefix is "somepath/xxx", the json files will be named - "somepath/xxx.bbox.json", "somepath/xxx.segm.json", - "somepath/xxx.proposal.json". - - Returns: - dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \ - values are corresponding filenames. 
- """ - result_files = dict() - if isinstance(results[0], list): - json_results = self._det2json(results) - result_files['bbox'] = f'{outfile_prefix}.bbox.json' - result_files['proposal'] = f'{outfile_prefix}.bbox.json' - mmcv.dump(json_results, result_files['bbox']) - elif isinstance(results[0], tuple): - json_results = self._segm2json(results) - result_files['bbox'] = f'{outfile_prefix}.bbox.json' - result_files['proposal'] = f'{outfile_prefix}.bbox.json' - result_files['segm'] = f'{outfile_prefix}.segm.json' - mmcv.dump(json_results[0], result_files['bbox']) - mmcv.dump(json_results[1], result_files['segm']) - elif isinstance(results[0], np.ndarray): - json_results = self._proposal2json(results) - result_files['proposal'] = f'{outfile_prefix}.proposal.json' - mmcv.dump(json_results, result_files['proposal']) - else: - raise TypeError('invalid type of results') - return result_files - - def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None): - gt_bboxes = [] - for i in range(len(self.img_ids)): - ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i]) - ann_info = self.coco.load_anns(ann_ids) - if len(ann_info) == 0: - gt_bboxes.append(np.zeros((0, 4))) - continue - bboxes = [] - for ann in ann_info: - if ann.get('ignore', False) or ann['iscrowd']: - continue - x1, y1, w, h = ann['bbox'] - bboxes.append([x1, y1, x1 + w, y1 + h]) - bboxes = np.array(bboxes, dtype=np.float32) - if bboxes.shape[0] == 0: - bboxes = np.zeros((0, 4)) - gt_bboxes.append(bboxes) - - recalls = eval_recalls( - gt_bboxes, results, proposal_nums, iou_thrs, logger=logger) - ar = recalls.mean(axis=1) - return ar - - def format_results(self, results, jsonfile_prefix=None, **kwargs): - """Format the results to json (standard format for COCO evaluation). - - Args: - results (list[tuple | numpy.ndarray]): Testing results of the - dataset. - jsonfile_prefix (str | None): The prefix of json files. It includes - the file path and the prefix of filename, e.g., "a/b/prefix". - If not specified, a temp file will be created. Default: None. - - Returns: - tuple: (result_files, tmp_dir), result_files is a dict containing \ - the json filepaths, tmp_dir is the temporal directory created \ - for saving json files when jsonfile_prefix is not specified. - """ - assert isinstance(results, list), 'results must be a list' - assert len(results) == len(self), ( - 'The length of results is not equal to the dataset len: {} != {}'. - format(len(results), len(self))) - - if jsonfile_prefix is None: - tmp_dir = tempfile.TemporaryDirectory() - jsonfile_prefix = osp.join(tmp_dir.name, 'results') - else: - tmp_dir = None - result_files = self.results2json(results, jsonfile_prefix) - return result_files, tmp_dir - - def evaluate_det_segm(self, - results, - result_files, - coco_gt, - metrics, - logger=None, - classwise=False, - proposal_nums=(100, 300, 1000), - iou_thrs=None, - metric_items=None): - """Instance segmentation and object detection evaluation in COCO - protocol. - - Args: - results (list[list | tuple | dict]): Testing results of the - dataset. - result_files (dict[str, str]): a dict contains json file path. - coco_gt (COCO): COCO API object with ground truth annotation. - metric (str | list[str]): Metrics to be evaluated. Options are - 'bbox', 'segm', 'proposal', 'proposal_fast'. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - classwise (bool): Whether to evaluating the AP for each class. 
- proposal_nums (Sequence[int]): Proposal number used for evaluating - recalls, such as recall@100, recall@1000. - Default: (100, 300, 1000). - iou_thrs (Sequence[float], optional): IoU threshold used for - evaluating recalls/mAPs. If set to a list, the average of all - IoUs will also be computed. If not specified, [0.50, 0.55, - 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. - Default: None. - metric_items (list[str] | str, optional): Metric items that will - be returned. If not specified, ``['AR@100', 'AR@300', - 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be - used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', - 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when - ``metric=='bbox' or metric=='segm'``. - - Returns: - dict[str, float]: COCO style evaluation metric. - """ - if iou_thrs is None: - iou_thrs = np.linspace( - .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) - if metric_items is not None: - if not isinstance(metric_items, list): - metric_items = [metric_items] - - eval_results = OrderedDict() - for metric in metrics: - msg = f'Evaluating {metric}...' - if logger is None: - msg = '\n' + msg - print_log(msg, logger=logger) - - if metric == 'proposal_fast': - if isinstance(results[0], tuple): - raise KeyError('proposal_fast is not supported for ' - 'instance segmentation result.') - ar = self.fast_eval_recall( - results, proposal_nums, iou_thrs, logger='silent') - log_msg = [] - for i, num in enumerate(proposal_nums): - eval_results[f'AR@{num}'] = ar[i] - log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}') - log_msg = ''.join(log_msg) - print_log(log_msg, logger=logger) - continue - - iou_type = 'bbox' if metric == 'proposal' else metric - if metric not in result_files: - raise KeyError(f'{metric} is not in results') - try: - predictions = mmcv.load(result_files[metric]) - if iou_type == 'segm': - # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa - # When evaluating mask AP, if the results contain bbox, - # cocoapi will use the box area instead of the mask area - # for calculating the instance area. Though the overall AP - # is not affected, this leads to different - # small/medium/large mask AP results. - for x in predictions: - x.pop('bbox') - warnings.simplefilter('once') - warnings.warn( - 'The key "bbox" is deleted for more accurate mask AP ' - 'of small/medium/large instances since v2.12.0. 
This ' - 'does not change the overall mAP calculation.', - UserWarning) - coco_det = coco_gt.loadRes(predictions) - except IndexError: - print_log( - 'The testing results of the whole dataset is empty.', - logger=logger, - level=logging.ERROR) - break - - cocoEval = COCOeval(coco_gt, coco_det, iou_type) - cocoEval.params.catIds = self.cat_ids - cocoEval.params.imgIds = self.img_ids - cocoEval.params.maxDets = list(proposal_nums) - cocoEval.params.iouThrs = iou_thrs - # mapping of cocoEval.stats - coco_metric_names = { - 'mAP': 0, - 'mAP_50': 1, - 'mAP_75': 2, - 'mAP_s': 3, - 'mAP_m': 4, - 'mAP_l': 5, - 'AR@100': 6, - 'AR@300': 7, - 'AR@1000': 8, - 'AR_s@1000': 9, - 'AR_m@1000': 10, - 'AR_l@1000': 11 - } - if metric_items is not None: - for metric_item in metric_items: - if metric_item not in coco_metric_names: - raise KeyError( - f'metric item {metric_item} is not supported') - - if metric == 'proposal': - cocoEval.params.useCats = 0 - cocoEval.evaluate() - cocoEval.accumulate() - - # Save coco summarize print information to logger - redirect_string = io.StringIO() - with contextlib.redirect_stdout(redirect_string): - cocoEval.summarize() - print_log('\n' + redirect_string.getvalue(), logger=logger) - - if metric_items is None: - metric_items = [ - 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', - 'AR_m@1000', 'AR_l@1000' - ] - - for item in metric_items: - val = float( - f'{cocoEval.stats[coco_metric_names[item]]:.3f}') - eval_results[item] = val - else: - cocoEval.evaluate() - cocoEval.accumulate() - - # Save coco summarize print information to logger - redirect_string = io.StringIO() - with contextlib.redirect_stdout(redirect_string): - cocoEval.summarize() - print_log('\n' + redirect_string.getvalue(), logger=logger) - - if classwise: # Compute per-category AP - # Compute per-category AP - # from https://github.com/facebookresearch/detectron2/ - precisions = cocoEval.eval['precision'] - # precision: (iou, recall, cls, area range, max dets) - assert len(self.cat_ids) == precisions.shape[2] - - results_per_category = [] - for idx, catId in enumerate(self.cat_ids): - # area range index 0: all area ranges - # max dets index -1: typically 100 per image - nm = self.coco.loadCats(catId)[0] - precision = precisions[:, :, idx, 0, -1] - precision = precision[precision > -1] - if precision.size: - ap = np.mean(precision) - else: - ap = float('nan') - results_per_category.append( - (f'{nm["name"]}', f'{float(ap):0.3f}')) - - num_columns = min(6, len(results_per_category) * 2) - results_flatten = list( - itertools.chain(*results_per_category)) - headers = ['category', 'AP'] * (num_columns // 2) - results_2d = itertools.zip_longest(*[ - results_flatten[i::num_columns] - for i in range(num_columns) - ]) - table_data = [headers] - table_data += [result for result in results_2d] - table = AsciiTable(table_data) - print_log('\n' + table.table, logger=logger) - - if metric_items is None: - metric_items = [ - 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l' - ] - - for metric_item in metric_items: - key = f'{metric}_{metric_item}' - val = float( - f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}' - ) - eval_results[key] = val - ap = cocoEval.stats[:6] - eval_results[f'{metric}_mAP_copypaste'] = ( - f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} ' - f'{ap[4]:.3f} {ap[5]:.3f}') - - return eval_results - - def evaluate(self, - results, - metric='bbox', - logger=None, - jsonfile_prefix=None, - classwise=False, - proposal_nums=(100, 300, 1000), - iou_thrs=None, - metric_items=None): - """Evaluation in 
COCO protocol. - - Args: - results (list[list | tuple]): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. Options are - 'bbox', 'segm', 'proposal', 'proposal_fast'. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - jsonfile_prefix (str | None): The prefix of json files. It includes - the file path and the prefix of filename, e.g., "a/b/prefix". - If not specified, a temp file will be created. Default: None. - classwise (bool): Whether to evaluating the AP for each class. - proposal_nums (Sequence[int]): Proposal number used for evaluating - recalls, such as recall@100, recall@1000. - Default: (100, 300, 1000). - iou_thrs (Sequence[float], optional): IoU threshold used for - evaluating recalls/mAPs. If set to a list, the average of all - IoUs will also be computed. If not specified, [0.50, 0.55, - 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. - Default: None. - metric_items (list[str] | str, optional): Metric items that will - be returned. If not specified, ``['AR@100', 'AR@300', - 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be - used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', - 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when - ``metric=='bbox' or metric=='segm'``. - - Returns: - dict[str, float]: COCO style evaluation metric. - """ - - metrics = metric if isinstance(metric, list) else [metric] - allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] - for metric in metrics: - if metric not in allowed_metrics: - raise KeyError(f'metric {metric} is not supported') - - coco_gt = self.coco - self.cat_ids = coco_gt.get_cat_ids(cat_names=self.CLASSES) - - result_files, tmp_dir = self.format_results(results, jsonfile_prefix) - eval_results = self.evaluate_det_segm(results, result_files, coco_gt, - metrics, logger, classwise, - proposal_nums, iou_thrs, - metric_items) - - if tmp_dir is not None: - tmp_dir.cleanup() - return eval_results diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/coco_panoptic.py b/cv/detection/co-detr/pytorch/mmdet/datasets/coco_panoptic.py deleted file mode 100644 index 53ef5947d1e723dbd19b4fd1fbdeba672414e378..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/coco_panoptic.py +++ /dev/null @@ -1,692 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import itertools -import os -from collections import defaultdict - -import mmcv -import numpy as np -from mmcv.utils import print_log -from terminaltables import AsciiTable - -from mmdet.core import INSTANCE_OFFSET -from .api_wrappers import COCO, pq_compute_multi_core -from .builder import DATASETS -from .coco import CocoDataset - -try: - import panopticapi - from panopticapi.evaluation import VOID - from panopticapi.utils import id2rgb -except ImportError: - panopticapi = None - id2rgb = None - VOID = None - -__all__ = ['CocoPanopticDataset'] - - -class COCOPanoptic(COCO): - """This wrapper is for loading the panoptic style annotation file. - - The format is shown in the CocoPanopticDataset class. - - Args: - annotation_file (str): Path of annotation file. 
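[Editor's note] The `CocoDataset.evaluate` entry point above drives the whole flow: predictions are first dumped to COCO-style json by `format_results`, then scored by `evaluate_det_segm`, and the temporary json directory is cleaned up afterwards. A minimal usage sketch follows; the config path and cached `results.pkl` file are placeholders, not files from this repository.

```python
# Sketch: scoring cached detector outputs with CocoDataset.evaluate.
# The config and results.pkl paths below are hypothetical.
import mmcv
from mmdet.datasets import build_dataset

cfg = mmcv.Config.fromfile('configs/some_detector_coco.py')  # placeholder config
dataset = build_dataset(cfg.data.test)
results = mmcv.load('work_dirs/results.pkl')  # list with one entry per image

metrics = dataset.evaluate(
    results,
    metric=['bbox'],   # 'segm', 'proposal' and 'proposal_fast' are also accepted
    classwise=True,    # additionally prints a per-category AP table
    iou_thrs=None)     # defaults to the 0.50:0.05:0.95 IoU sweep
print(metrics['bbox_mAP'], metrics['bbox_mAP_50'], metrics['bbox_mAP_copypaste'])
```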
- """ - - def __init__(self, annotation_file=None): - if panopticapi is None: - raise RuntimeError( - 'panopticapi is not installed, please install it by: ' - 'pip install git+https://github.com/cocodataset/' - 'panopticapi.git.') - - super(COCOPanoptic, self).__init__(annotation_file) - - def createIndex(self): - # create index - print('creating index...') - # anns stores 'segment_id -> annotation' - anns, cats, imgs = {}, {}, {} - img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list) - if 'annotations' in self.dataset: - for ann, img_info in zip(self.dataset['annotations'], - self.dataset['images']): - img_info['segm_file'] = ann['file_name'] - for seg_ann in ann['segments_info']: - # to match with instance.json - seg_ann['image_id'] = ann['image_id'] - seg_ann['height'] = img_info['height'] - seg_ann['width'] = img_info['width'] - img_to_anns[ann['image_id']].append(seg_ann) - # segment_id is not unique in coco dataset orz... - if seg_ann['id'] in anns.keys(): - anns[seg_ann['id']].append(seg_ann) - else: - anns[seg_ann['id']] = [seg_ann] - - if 'images' in self.dataset: - for img in self.dataset['images']: - imgs[img['id']] = img - - if 'categories' in self.dataset: - for cat in self.dataset['categories']: - cats[cat['id']] = cat - - if 'annotations' in self.dataset and 'categories' in self.dataset: - for ann in self.dataset['annotations']: - for seg_ann in ann['segments_info']: - cat_to_imgs[seg_ann['category_id']].append(ann['image_id']) - - print('index created!') - - self.anns = anns - self.imgToAnns = img_to_anns - self.catToImgs = cat_to_imgs - self.imgs = imgs - self.cats = cats - - def load_anns(self, ids=[]): - """Load anns with the specified ids. - - self.anns is a list of annotation lists instead of a - list of annotations. - - Args: - ids (int array): integer ids specifying anns - - Returns: - anns (object array): loaded ann objects - """ - anns = [] - - if hasattr(ids, '__iter__') and hasattr(ids, '__len__'): - # self.anns is a list of annotation lists instead of - # a list of annotations - for id in ids: - anns += self.anns[id] - return anns - elif type(ids) == int: - return self.anns[ids] - - -@DATASETS.register_module() -class CocoPanopticDataset(CocoDataset): - """Coco dataset for Panoptic segmentation. - - The annotation format is shown as follows. The `ann` field is optional - for testing. - - .. code-block:: none - - [ - { - 'filename': f'{image_id:012}.png', - 'image_id':9 - 'segments_info': { - [ - { - 'id': 8345037, (segment_id in panoptic png, - convert from rgb) - 'category_id': 51, - 'iscrowd': 0, - 'bbox': (x1, y1, w, h), - 'area': 24315, - 'segmentation': list,(coded mask) - }, - ... - } - } - }, - ... - ] - - Args: - ann_file (str): Panoptic segmentation annotation file path. - pipeline (list[dict]): Processing pipeline. - ins_ann_file (str): Instance segmentation annotation file path. - Defaults to None. - classes (str | Sequence[str], optional): Specify classes to load. - If is None, ``cls.CLASSES`` will be used. Defaults to None. - data_root (str, optional): Data root for ``ann_file``, - ``ins_ann_file`` ``img_prefix``, ``seg_prefix``, ``proposal_file`` - if specified. Defaults to None. - img_prefix (str, optional): Prefix of path to images. Defaults to ''. - seg_prefix (str, optional): Prefix of path to segmentation files. - Defaults to None. - proposal_file (str, optional): Path to proposal file. Defaults to None. - test_mode (bool, optional): If set True, annotation will not be loaded. - Defaults to False. 
- filter_empty_gt (bool, optional): If set true, images without bounding - boxes of the dataset's classes will be filtered out. This option - only works when `test_mode=False`, i.e., we never filter images - during tests. Defaults to True. - file_client_args (:obj:`mmcv.ConfigDict` | dict): file client args. - Defaults to dict(backend='disk'). - """ - CLASSES = [ - 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', - ' truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', - 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', - 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', - 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', - 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', - 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', - 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', - 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', - 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', - 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', - 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', - 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner', - 'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff', - 'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light', - 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield', - 'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow', - 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile', - 'wall-wood', 'water-other', 'window-blind', 'window-other', - 'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged', - 'cabinet-merged', 'table-merged', 'floor-other-merged', - 'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged', - 'paper-merged', 'food-other-merged', 'building-other-merged', - 'rock-merged', 'wall-other-merged', 'rug-merged' - ] - THING_CLASSES = [ - 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', - 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', - 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', - 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', - 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', - 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', - 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', - 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', - 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', - 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', - 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', - 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', - 'scissors', 'teddy bear', 'hair drier', 'toothbrush' - ] - STUFF_CLASSES = [ - 'banner', 'blanket', 'bridge', 'cardboard', 'counter', 'curtain', - 'door-stuff', 'floor-wood', 'flower', 'fruit', 'gravel', 'house', - 'light', 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield', - 'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow', - 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile', - 'wall-wood', 'water-other', 'window-blind', 'window-other', - 'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged', - 'cabinet-merged', 'table-merged', 'floor-other-merged', - 'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged', - 'paper-merged', 
'food-other-merged', 'building-other-merged', - 'rock-merged', 'wall-other-merged', 'rug-merged' - ] - - PALETTE = [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), - (106, 0, 228), (0, 60, 100), (0, 80, 100), (0, 0, 70), - (0, 0, 192), (250, 170, 30), (100, 170, 30), (220, 220, 0), - (175, 116, 175), (250, 0, 30), (165, 42, 42), (255, 77, 255), - (0, 226, 252), (182, 182, 255), (0, 82, 0), (120, 166, 157), - (110, 76, 0), (174, 57, 255), (199, 100, 0), (72, 0, 118), - (255, 179, 240), (0, 125, 92), (209, 0, 151), (188, 208, 182), - (0, 220, 176), (255, 99, 164), (92, 0, 73), (133, 129, 255), - (78, 180, 255), (0, 228, 0), (174, 255, 243), (45, 89, 255), - (134, 134, 103), (145, 148, 174), (255, 208, 186), - (197, 226, 255), (171, 134, 1), (109, 63, 54), (207, 138, 255), - (151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105), - (166, 196, 102), (208, 195, 210), (255, 109, 65), (0, 143, 149), - (179, 0, 194), (209, 99, 106), (5, 121, 0), (227, 255, 205), - (147, 186, 208), (153, 69, 1), (3, 95, 161), (163, 255, 0), - (119, 0, 170), (0, 182, 199), (0, 165, 120), (183, 130, 88), - (95, 32, 0), (130, 114, 135), (110, 129, 133), (166, 74, 118), - (219, 142, 185), (79, 210, 114), (178, 90, 62), (65, 70, 15), - (127, 167, 115), (59, 105, 106), (142, 108, 45), (196, 172, 0), - (95, 54, 80), (128, 76, 255), (201, 57, 1), (246, 0, 122), - (191, 162, 208), (255, 255, 128), (147, 211, 203), - (150, 100, 100), (168, 171, 172), (146, 112, 198), - (210, 170, 100), (92, 136, 89), (218, 88, 184), (241, 129, 0), - (217, 17, 255), (124, 74, 181), (70, 70, 70), (255, 228, 255), - (154, 208, 0), (193, 0, 92), (76, 91, 113), (255, 180, 195), - (106, 154, 176), - (230, 150, 140), (60, 143, 255), (128, 64, 128), (92, 82, 55), - (254, 212, 124), (73, 77, 174), (255, 160, 98), (255, 255, 255), - (104, 84, 109), (169, 164, 131), (225, 199, 255), (137, 54, 74), - (135, 158, 223), (7, 246, 231), (107, 255, 200), (58, 41, 149), - (183, 121, 142), (255, 73, 97), (107, 142, 35), (190, 153, 153), - (146, 139, 141), - (70, 130, 180), (134, 199, 156), (209, 226, 140), (96, 36, 108), - (96, 96, 96), (64, 170, 64), (152, 251, 152), (208, 229, 228), - (206, 186, 171), (152, 161, 64), (116, 112, 0), (0, 114, 143), - (102, 102, 156), (250, 141, 255)] - - def __init__(self, - ann_file, - pipeline, - ins_ann_file=None, - classes=None, - data_root=None, - img_prefix='', - seg_prefix=None, - proposal_file=None, - test_mode=False, - filter_empty_gt=True, - file_client_args=dict(backend='disk')): - super().__init__( - ann_file, - pipeline, - classes=classes, - data_root=data_root, - img_prefix=img_prefix, - seg_prefix=seg_prefix, - proposal_file=proposal_file, - test_mode=test_mode, - filter_empty_gt=filter_empty_gt, - file_client_args=file_client_args) - self.ins_ann_file = ins_ann_file - - def load_annotations(self, ann_file): - """Load annotation from COCO Panoptic style annotation file. - - Args: - ann_file (str): Path of annotation file. - - Returns: - list[dict]: Annotation info from COCO api. - """ - self.coco = COCOPanoptic(ann_file) - self.cat_ids = self.coco.get_cat_ids() - self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} - self.categories = self.coco.cats - self.img_ids = self.coco.get_img_ids() - data_infos = [] - for i in self.img_ids: - info = self.coco.load_imgs([i])[0] - info['filename'] = info['file_name'] - info['segm_file'] = info['filename'].replace('jpg', 'png') - data_infos.append(info) - return data_infos - - def get_ann_info(self, idx): - """Get COCO annotation by index. 
- - Args: - idx (int): Index of data. - - Returns: - dict: Annotation info of specified index. - """ - img_id = self.data_infos[idx]['id'] - ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) - ann_info = self.coco.load_anns(ann_ids) - # filter out unmatched images - ann_info = [i for i in ann_info if i['image_id'] == img_id] - return self._parse_ann_info(self.data_infos[idx], ann_info) - - def _parse_ann_info(self, img_info, ann_info): - """Parse annotations and load panoptic ground truths. - - Args: - img_info (int): Image info of an image. - ann_info (list[dict]): Annotation info of an image. - - Returns: - dict: A dict containing the following keys: bboxes, bboxes_ignore, - labels, masks, seg_map. - """ - gt_bboxes = [] - gt_labels = [] - gt_bboxes_ignore = [] - gt_mask_infos = [] - - for i, ann in enumerate(ann_info): - x1, y1, w, h = ann['bbox'] - if ann['area'] <= 0 or w < 1 or h < 1: - continue - bbox = [x1, y1, x1 + w, y1 + h] - - category_id = ann['category_id'] - contiguous_cat_id = self.cat2label[category_id] - - is_thing = self.coco.load_cats(ids=category_id)[0]['isthing'] - if is_thing: - is_crowd = ann.get('iscrowd', False) - if not is_crowd: - gt_bboxes.append(bbox) - gt_labels.append(contiguous_cat_id) - else: - gt_bboxes_ignore.append(bbox) - is_thing = False - - mask_info = { - 'id': ann['id'], - 'category': contiguous_cat_id, - 'is_thing': is_thing - } - gt_mask_infos.append(mask_info) - - if gt_bboxes: - gt_bboxes = np.array(gt_bboxes, dtype=np.float32) - gt_labels = np.array(gt_labels, dtype=np.int64) - else: - gt_bboxes = np.zeros((0, 4), dtype=np.float32) - gt_labels = np.array([], dtype=np.int64) - - if gt_bboxes_ignore: - gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) - else: - gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) - - ann = dict( - bboxes=gt_bboxes, - labels=gt_labels, - bboxes_ignore=gt_bboxes_ignore, - masks=gt_mask_infos, - seg_map=img_info['segm_file']) - - return ann - - def _filter_imgs(self, min_size=32): - """Filter images too small or without ground truths.""" - ids_with_ann = [] - # check whether images have legal thing annotations. 
- for lists in self.coco.anns.values(): - for item in lists: - category_id = item['category_id'] - is_thing = self.coco.load_cats(ids=category_id)[0]['isthing'] - if not is_thing: - continue - ids_with_ann.append(item['image_id']) - ids_with_ann = set(ids_with_ann) - - valid_inds = [] - valid_img_ids = [] - for i, img_info in enumerate(self.data_infos): - img_id = self.img_ids[i] - if self.filter_empty_gt and img_id not in ids_with_ann: - continue - if min(img_info['width'], img_info['height']) >= min_size: - valid_inds.append(i) - valid_img_ids.append(img_id) - self.img_ids = valid_img_ids - return valid_inds - - def _pan2json(self, results, outfile_prefix): - """Convert panoptic results to COCO panoptic json style.""" - label2cat = dict((v, k) for (k, v) in self.cat2label.items()) - pred_annotations = [] - outdir = os.path.join(os.path.dirname(outfile_prefix), 'panoptic') - - for idx in range(len(self)): - img_id = self.img_ids[idx] - segm_file = self.data_infos[idx]['segm_file'] - pan = results[idx] - - pan_labels = np.unique(pan) - segm_info = [] - for pan_label in pan_labels: - sem_label = pan_label % INSTANCE_OFFSET - # We reserve the length of self.CLASSES for VOID label - if sem_label == len(self.CLASSES): - continue - # convert sem_label to json label - cat_id = label2cat[sem_label] - is_thing = self.categories[cat_id]['isthing'] - mask = pan == pan_label - area = mask.sum() - segm_info.append({ - 'id': int(pan_label), - 'category_id': cat_id, - 'isthing': is_thing, - 'area': int(area) - }) - # evaluation script uses 0 for VOID label. - pan[pan % INSTANCE_OFFSET == len(self.CLASSES)] = VOID - pan = id2rgb(pan).astype(np.uint8) - mmcv.imwrite(pan[:, :, ::-1], os.path.join(outdir, segm_file)) - record = { - 'image_id': img_id, - 'segments_info': segm_info, - 'file_name': segm_file - } - pred_annotations.append(record) - pan_json_results = dict(annotations=pred_annotations) - return pan_json_results - - def results2json(self, results, outfile_prefix): - """Dump the results to a COCO style json file. - - There are 4 types of results: proposals, bbox predictions, mask - predictions, panoptic segmentation predictions, and they have - different data types. This method will automatically recognize - the type, and dump them to json files. - - .. code-block:: none - - [ - { - 'pan_results': np.array, # shape (h, w) - # ins_results which includes bboxes and RLE encoded masks - # is optional. - 'ins_results': (list[np.array], list[list[str]]) - }, - ... - ] - - Args: - results (list[dict]): Testing results of the dataset. - outfile_prefix (str): The filename prefix of the json files. If the - prefix is "somepath/xxx", the json files will be named - "somepath/xxx.panoptic.json", "somepath/xxx.bbox.json", - "somepath/xxx.segm.json" - - Returns: - dict[str: str]: Possible keys are "panoptic", "bbox", "segm", \ - "proposal", and values are corresponding filenames. 
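[Editor's note] The per-image results that `results2json` consumes encode the panoptic map as one integer array: each pixel stores the contiguous category index plus `instance_id * INSTANCE_OFFSET`, and `len(CLASSES)` marks void pixels. A small illustrative construction is sketched below; the image size, instance ids and class indices are invented.

```python
# Illustrative construction of one panoptic result in the format that
# CocoPanopticDataset.results2json expects; all values here are made up.
import numpy as np
from mmdet.core import INSTANCE_OFFSET

num_classes = 133                                         # len(CocoPanopticDataset.CLASSES)
pan = np.full((480, 640), num_classes, dtype=np.int64)    # start from the VOID label
pan[100:200, 100:300] = 0 + 1 * INSTANCE_OFFSET           # instance 1 of thing class 0
pan[300:400, 50:150] = 120                                 # a stuff class carries no instance id

results = [dict(pan_results=pan)]   # 'ins_results' may be added per image as well
```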
- """ - result_files = dict() - # panoptic segmentation results - if 'pan_results' in results[0]: - pan_results = [result['pan_results'] for result in results] - pan_json_results = self._pan2json(pan_results, outfile_prefix) - result_files['panoptic'] = f'{outfile_prefix}.panoptic.json' - mmcv.dump(pan_json_results, result_files['panoptic']) - - # instance segmentation results - if 'ins_results' in results[0]: - ins_results = [result['ins_results'] for result in results] - bbox_json_results, segm_json_results = self._segm2json(ins_results) - result_files['bbox'] = f'{outfile_prefix}.bbox.json' - result_files['proposal'] = f'{outfile_prefix}.bbox.json' - result_files['segm'] = f'{outfile_prefix}.segm.json' - mmcv.dump(bbox_json_results, result_files['bbox']) - mmcv.dump(segm_json_results, result_files['segm']) - - return result_files - - def evaluate_pan_json(self, - result_files, - outfile_prefix, - logger=None, - classwise=False, - nproc=32): - """Evaluate PQ according to the panoptic results json file.""" - imgs = self.coco.imgs - gt_json = self.coco.img_ann_map # image to annotations - gt_json = [{ - 'image_id': k, - 'segments_info': v, - 'file_name': imgs[k]['segm_file'] - } for k, v in gt_json.items()] - pred_json = mmcv.load(result_files['panoptic']) - pred_json = dict( - (el['image_id'], el) for el in pred_json['annotations']) - - # match the gt_anns and pred_anns in the same image - matched_annotations_list = [] - for gt_ann in gt_json: - img_id = gt_ann['image_id'] - if img_id not in pred_json.keys(): - raise Exception('no prediction for the image' - ' with id: {}'.format(img_id)) - matched_annotations_list.append((gt_ann, pred_json[img_id])) - - gt_folder = self.seg_prefix - pred_folder = os.path.join(os.path.dirname(outfile_prefix), 'panoptic') - - pq_stat = pq_compute_multi_core( - matched_annotations_list, - gt_folder, - pred_folder, - self.categories, - self.file_client, - nproc=nproc) - - metrics = [('All', None), ('Things', True), ('Stuff', False)] - pq_results = {} - - for name, isthing in metrics: - pq_results[name], classwise_results = pq_stat.pq_average( - self.categories, isthing=isthing) - if name == 'All': - pq_results['classwise'] = classwise_results - - classwise_results = None - if classwise: - classwise_results = { - k: v - for k, v in zip(self.CLASSES, pq_results['classwise'].values()) - } - print_panoptic_table(pq_results, classwise_results, logger=logger) - results = parse_pq_results(pq_results) - results['PQ_copypaste'] = ( - f'{results["PQ"]:.3f} {results["SQ"]:.3f} ' - f'{results["RQ"]:.3f} ' - f'{results["PQ_th"]:.3f} {results["SQ_th"]:.3f} ' - f'{results["RQ_th"]:.3f} ' - f'{results["PQ_st"]:.3f} {results["SQ_st"]:.3f} ' - f'{results["RQ_st"]:.3f}') - - return results - - def evaluate(self, - results, - metric='PQ', - logger=None, - jsonfile_prefix=None, - classwise=False, - nproc=32, - **kwargs): - """Evaluation in COCO Panoptic protocol. - - Args: - results (list[dict]): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. 'PQ', 'bbox', - 'segm', 'proposal' are supported. 'pq' will be regarded as 'PQ. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - jsonfile_prefix (str | None): The prefix of json files. It includes - the file path and the prefix of filename, e.g., "a/b/prefix". - If not specified, a temp file will be created. Default: None. - classwise (bool): Whether to print classwise evaluation results. - Default: False. 
- nproc (int): Number of processes for panoptic quality computing. - Defaults to 32. When `nproc` exceeds the number of cpu cores, - the number of cpu cores is used. - - Returns: - dict[str, float]: COCO Panoptic style evaluation metric. - """ - metrics = metric if isinstance(metric, list) else [metric] - # Compatible with lowercase 'pq' - metrics = ['PQ' if metric == 'pq' else metric for metric in metrics] - allowed_metrics = ['PQ', 'bbox', 'segm', 'proposal'] - for metric in metrics: - if metric not in allowed_metrics: - raise KeyError(f'metric {metric} is not supported') - - result_files, tmp_dir = self.format_results(results, jsonfile_prefix) - eval_results = {} - - outfile_prefix = os.path.join(tmp_dir.name, 'results') \ - if tmp_dir is not None else jsonfile_prefix - if 'PQ' in metrics: - eval_pan_results = self.evaluate_pan_json( - result_files, outfile_prefix, logger, classwise, nproc=nproc) - - eval_results.update(eval_pan_results) - metrics.remove('PQ') - - if (('bbox' in metrics) or ('segm' in metrics) - or ('proposal' in metrics)): - - assert 'ins_results' in results[0], 'instance segmentation' \ - 'results are absent from results' - - assert self.ins_ann_file is not None, 'Annotation '\ - 'file for instance segmentation or object detection ' \ - 'shuold not be None' - - coco_gt = COCO(self.ins_ann_file) - panoptic_cat_ids = self.cat_ids - self.cat_ids = coco_gt.get_cat_ids(cat_names=self.THING_CLASSES) - - eval_ins_results = self.evaluate_det_segm(results, result_files, - coco_gt, metrics, logger, - classwise, **kwargs) - self.cat_ids = panoptic_cat_ids - eval_results.update(eval_ins_results) - - if tmp_dir is not None: - tmp_dir.cleanup() - return eval_results - - -def parse_pq_results(pq_results): - """Parse the Panoptic Quality results.""" - result = dict() - result['PQ'] = 100 * pq_results['All']['pq'] - result['SQ'] = 100 * pq_results['All']['sq'] - result['RQ'] = 100 * pq_results['All']['rq'] - result['PQ_th'] = 100 * pq_results['Things']['pq'] - result['SQ_th'] = 100 * pq_results['Things']['sq'] - result['RQ_th'] = 100 * pq_results['Things']['rq'] - result['PQ_st'] = 100 * pq_results['Stuff']['pq'] - result['SQ_st'] = 100 * pq_results['Stuff']['sq'] - result['RQ_st'] = 100 * pq_results['Stuff']['rq'] - return result - - -def print_panoptic_table(pq_results, classwise_results=None, logger=None): - """Print the panoptic evaluation results table. - - Args: - pq_results(dict): The Panoptic Quality results. - classwise_results(dict | None): The classwise Panoptic Quality results. - The keys are class names and the values are metrics. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. 
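[Editor's note] A hedged sketch of calling the panoptic evaluation defined above; the `dataset` and `results` objects are assumed to come from a finished test run and are not defined here.

```python
# Hypothetical call into CocoPanopticDataset.evaluate after inference.
pq_metrics = dataset.evaluate(
    results,
    metric=['PQ'],     # lowercase 'pq' is normalized to 'PQ' internally
    classwise=True,    # also prints a per-class PQ/SQ/RQ table
    nproc=8)           # capped at the number of available CPU cores
print(pq_metrics['PQ'], pq_metrics['PQ_th'], pq_metrics['PQ_st'])
print(pq_metrics['PQ_copypaste'])
```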
- """ - - headers = ['', 'PQ', 'SQ', 'RQ', 'categories'] - data = [headers] - for name in ['All', 'Things', 'Stuff']: - numbers = [ - f'{(pq_results[name][k] * 100):0.3f}' for k in ['pq', 'sq', 'rq'] - ] - row = [name] + numbers + [pq_results[name]['n']] - data.append(row) - table = AsciiTable(data) - print_log('Panoptic Evaluation Results:\n' + table.table, logger=logger) - - if classwise_results is not None: - class_metrics = [(name, ) + tuple(f'{(metrics[k] * 100):0.3f}' - for k in ['pq', 'sq', 'rq']) - for name, metrics in classwise_results.items()] - num_columns = min(8, len(class_metrics) * 4) - results_flatten = list(itertools.chain(*class_metrics)) - headers = ['category', 'PQ', 'SQ', 'RQ'] * (num_columns // 4) - results_2d = itertools.zip_longest( - *[results_flatten[i::num_columns] for i in range(num_columns)]) - data = [headers] - data += [result for result in results_2d] - table = AsciiTable(data) - print_log( - 'Classwise Panoptic Evaluation Results:\n' + table.table, - logger=logger) diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/custom.py b/cv/detection/co-detr/pytorch/mmdet/datasets/custom.py deleted file mode 100644 index 3b97685bff49b5c4291a4ebe459e829cce2e54d0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/custom.py +++ /dev/null @@ -1,412 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -import warnings -from collections import OrderedDict - -import mmcv -import numpy as np -from mmcv.utils import print_log -from terminaltables import AsciiTable -from torch.utils.data import Dataset - -from mmdet.core import eval_map, eval_recalls -from .builder import DATASETS -from .pipelines import Compose - - -@DATASETS.register_module() -class CustomDataset(Dataset): - """Custom dataset for detection. - - The annotation format is shown as follows. The `ann` field is optional for - testing. - - .. code-block:: none - - [ - { - 'filename': 'a.jpg', - 'width': 1280, - 'height': 720, - 'ann': { - 'bboxes': (n, 4) in (x1, y1, x2, y2) order. - 'labels': (n, ), - 'bboxes_ignore': (k, 4), (optional field) - 'labels_ignore': (k, 4) (optional field) - } - }, - ... - ] - - Args: - ann_file (str): Annotation file path. - pipeline (list[dict]): Processing pipeline. - classes (str | Sequence[str], optional): Specify classes to load. - If is None, ``cls.CLASSES`` will be used. Default: None. - data_root (str, optional): Data root for ``ann_file``, - ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified. - test_mode (bool, optional): If set True, annotation will not be loaded. - filter_empty_gt (bool, optional): If set true, images without bounding - boxes of the dataset's classes will be filtered out. This option - only works when `test_mode=False`, i.e., we never filter images - during tests. 
- """ - - CLASSES = None - - PALETTE = None - - def __init__(self, - ann_file, - pipeline, - classes=None, - data_root=None, - img_prefix='', - seg_prefix=None, - seg_suffix='.png', - proposal_file=None, - test_mode=False, - filter_empty_gt=True, - file_client_args=dict(backend='disk')): - self.ann_file = ann_file - self.data_root = data_root - self.img_prefix = img_prefix - self.seg_prefix = seg_prefix - self.seg_suffix = seg_suffix - self.proposal_file = proposal_file - self.test_mode = test_mode - self.filter_empty_gt = filter_empty_gt - self.file_client = mmcv.FileClient(**file_client_args) - self.CLASSES = self.get_classes(classes) - - # join paths if data_root is specified - if self.data_root is not None: - if not osp.isabs(self.ann_file): - self.ann_file = osp.join(self.data_root, self.ann_file) - if not (self.img_prefix is None or osp.isabs(self.img_prefix)): - self.img_prefix = osp.join(self.data_root, self.img_prefix) - if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)): - self.seg_prefix = osp.join(self.data_root, self.seg_prefix) - if not (self.proposal_file is None - or osp.isabs(self.proposal_file)): - self.proposal_file = osp.join(self.data_root, - self.proposal_file) - # load annotations (and proposals) - if hasattr(self.file_client, 'get_local_path'): - with self.file_client.get_local_path(self.ann_file) as local_path: - self.data_infos = self.load_annotations(local_path) - else: - warnings.warn( - 'The used MMCV version does not have get_local_path. ' - f'We treat the {self.ann_file} as local paths and it ' - 'might cause errors if the path is not a local path. ' - 'Please use MMCV>= 1.3.16 if you meet errors.') - self.data_infos = self.load_annotations(self.ann_file) - - if self.proposal_file is not None: - if hasattr(self.file_client, 'get_local_path'): - with self.file_client.get_local_path( - self.proposal_file) as local_path: - self.proposals = self.load_proposals(local_path) - else: - warnings.warn( - 'The used MMCV version does not have get_local_path. ' - f'We treat the {self.ann_file} as local paths and it ' - 'might cause errors if the path is not a local path. ' - 'Please use MMCV>= 1.3.16 if you meet errors.') - self.proposals = self.load_proposals(self.proposal_file) - else: - self.proposals = None - - # filter images too small and containing no annotations - if not test_mode: - valid_inds = self._filter_imgs() - self.data_infos = [self.data_infos[i] for i in valid_inds] - if self.proposals is not None: - self.proposals = [self.proposals[i] for i in valid_inds] - # set group flag for the sampler - self._set_group_flag() - - # processing pipeline - self.pipeline = Compose(pipeline) - - def __len__(self): - """Total number of samples of data.""" - return len(self.data_infos) - - def load_annotations(self, ann_file): - """Load annotation from annotation file.""" - return mmcv.load(ann_file) - - def load_proposals(self, proposal_file): - """Load proposal from proposal file.""" - return mmcv.load(proposal_file) - - def get_ann_info(self, idx): - """Get annotation by index. - - Args: - idx (int): Index of data. - - Returns: - dict: Annotation info of specified index. - """ - - return self.data_infos[idx]['ann'] - - def get_cat_ids(self, idx): - """Get category ids by index. - - Args: - idx (int): Index of data. - - Returns: - list[int]: All categories in the image of specified index. 
- """ - - return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist() - - def pre_pipeline(self, results): - """Prepare results dict for pipeline.""" - results['img_prefix'] = self.img_prefix - results['seg_prefix'] = self.seg_prefix - results['proposal_file'] = self.proposal_file - results['bbox_fields'] = [] - results['mask_fields'] = [] - results['seg_fields'] = [] - - def _filter_imgs(self, min_size=32): - """Filter images too small.""" - if self.filter_empty_gt: - warnings.warn( - 'CustomDataset does not support filtering empty gt images.') - valid_inds = [] - for i, img_info in enumerate(self.data_infos): - if min(img_info['width'], img_info['height']) >= min_size: - valid_inds.append(i) - return valid_inds - - def _set_group_flag(self): - """Set flag according to image aspect ratio. - - Images with aspect ratio greater than 1 will be set as group 1, - otherwise group 0. - """ - self.flag = np.zeros(len(self), dtype=np.uint8) - for i in range(len(self)): - img_info = self.data_infos[i] - if img_info['width'] / img_info['height'] > 1: - self.flag[i] = 1 - - def _rand_another(self, idx): - """Get another random index from the same group as the given index.""" - pool = np.where(self.flag == self.flag[idx])[0] - return np.random.choice(pool) - - def __getitem__(self, idx): - """Get training/test data after pipeline. - - Args: - idx (int): Index of data. - - Returns: - dict: Training/test data (with annotation if `test_mode` is set \ - True). - """ - - if self.test_mode: - return self.prepare_test_img(idx) - while True: - data = self.prepare_train_img(idx) - if data is None: - idx = self._rand_another(idx) - continue - return data - - def prepare_train_img(self, idx): - """Get training data and annotations after pipeline. - - Args: - idx (int): Index of data. - - Returns: - dict: Training data and annotation after pipeline with new keys \ - introduced by pipeline. - """ - - img_info = self.data_infos[idx] - ann_info = self.get_ann_info(idx) - results = dict(img_info=img_info, ann_info=ann_info) - if self.proposals is not None: - results['proposals'] = self.proposals[idx] - self.pre_pipeline(results) - return self.pipeline(results) - - def prepare_test_img(self, idx): - """Get testing data after pipeline. - - Args: - idx (int): Index of data. - - Returns: - dict: Testing data after pipeline with new keys introduced by \ - pipeline. - """ - - img_info = self.data_infos[idx] - results = dict(img_info=img_info) - if self.proposals is not None: - results['proposals'] = self.proposals[idx] - self.pre_pipeline(results) - return self.pipeline(results) - - @classmethod - def get_classes(cls, classes=None): - """Get class names of current dataset. - - Args: - classes (Sequence[str] | str | None): If classes is None, use - default CLASSES defined by builtin dataset. If classes is a - string, take it as a file name. The file contains the name of - classes where each line contains one class name. If classes is - a tuple or list, override the CLASSES defined by the dataset. - - Returns: - tuple[str] or list[str]: Names of categories of the dataset. 
- """ - if classes is None: - return cls.CLASSES - - if isinstance(classes, str): - # take it as a file path - class_names = mmcv.list_from_file(classes) - elif isinstance(classes, (tuple, list)): - class_names = classes - else: - raise ValueError(f'Unsupported type {type(classes)} of classes.') - - return class_names - - def get_cat2imgs(self): - """Get a dict with class as key and img_ids as values, which will be - used in :class:`ClassAwareSampler`. - - Returns: - dict[list]: A dict of per-label image list, - the item of the dict indicates a label index, - corresponds to the image index that contains the label. - """ - if self.CLASSES is None: - raise ValueError('self.CLASSES can not be None') - # sort the label index - cat2imgs = {i: [] for i in range(len(self.CLASSES))} - for i in range(len(self)): - cat_ids = set(self.get_cat_ids(i)) - for cat in cat_ids: - cat2imgs[cat].append(i) - return cat2imgs - - def format_results(self, results, **kwargs): - """Place holder to format result to dataset specific output.""" - - def evaluate(self, - results, - metric='mAP', - logger=None, - proposal_nums=(100, 300, 1000), - iou_thr=0.5, - scale_ranges=None): - """Evaluate the dataset. - - Args: - results (list): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. - logger (logging.Logger | None | str): Logger used for printing - related information during evaluation. Default: None. - proposal_nums (Sequence[int]): Proposal number used for evaluating - recalls, such as recall@100, recall@1000. - Default: (100, 300, 1000). - iou_thr (float | list[float]): IoU threshold. Default: 0.5. - scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP. - Default: None. - """ - - if not isinstance(metric, str): - assert len(metric) == 1 - metric = metric[0] - allowed_metrics = ['mAP', 'recall'] - if metric not in allowed_metrics: - raise KeyError(f'metric {metric} is not supported') - annotations = [self.get_ann_info(i) for i in range(len(self))] - eval_results = OrderedDict() - iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr - if metric == 'mAP': - assert isinstance(iou_thrs, list) - mean_aps = [] - for iou_thr in iou_thrs: - print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}') - mean_ap, _ = eval_map( - results, - annotations, - scale_ranges=scale_ranges, - iou_thr=iou_thr, - dataset=self.CLASSES, - logger=logger) - mean_aps.append(mean_ap) - eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3) - eval_results['mAP'] = sum(mean_aps) / len(mean_aps) - elif metric == 'recall': - gt_bboxes = [ann['bboxes'] for ann in annotations] - recalls = eval_recalls( - gt_bboxes, results, proposal_nums, iou_thr, logger=logger) - for i, num in enumerate(proposal_nums): - for j, iou in enumerate(iou_thrs): - eval_results[f'recall@{num}@{iou}'] = recalls[i, j] - if recalls.shape[1] > 1: - ar = recalls.mean(axis=1) - for i, num in enumerate(proposal_nums): - eval_results[f'AR@{num}'] = ar[i] - return eval_results - - def __repr__(self): - """Print the number of instance number.""" - dataset_type = 'Test' if self.test_mode else 'Train' - result = (f'\n{self.__class__.__name__} {dataset_type} dataset ' - f'with number of images {len(self)}, ' - f'and instance counts: \n') - if self.CLASSES is None: - result += 'Category names are not provided. 
\n' - return result - instance_count = np.zeros(len(self.CLASSES) + 1).astype(int) - # count the instance number in each image - for idx in range(len(self)): - label = self.get_ann_info(idx)['labels'] - unique, counts = np.unique(label, return_counts=True) - if len(unique) > 0: - # add the occurrence number to each class - instance_count[unique] += counts - else: - # background is the last index - instance_count[-1] += 1 - # create a table with category count - table_data = [['category', 'count'] * 5] - row_data = [] - for cls, count in enumerate(instance_count): - if cls < len(self.CLASSES): - row_data += [f'{cls} [{self.CLASSES[cls]}]', f'{count}'] - else: - # add the background number - row_data += ['-1 background', f'{count}'] - if len(row_data) == 10: - table_data.append(row_data) - row_data = [] - if len(row_data) >= 2: - if row_data[-1] == '0': - row_data = row_data[:-2] - if len(row_data) >= 2: - table_data.append([]) - table_data.append(row_data) - - table = AsciiTable(table_data) - result += table.table - return result diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/dataset_wrappers.py b/cv/detection/co-detr/pytorch/mmdet/datasets/dataset_wrappers.py deleted file mode 100644 index d6ceffb8105dd05586a1a88c49de71777159f1f1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/dataset_wrappers.py +++ /dev/null @@ -1,456 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import bisect -import collections -import copy -import math -from collections import defaultdict - -import numpy as np -from mmcv.utils import build_from_cfg, print_log -from torch.utils.data.dataset import ConcatDataset as _ConcatDataset - -from .builder import DATASETS, PIPELINES -from .coco import CocoDataset - - -@DATASETS.register_module() -class ConcatDataset(_ConcatDataset): - """A wrapper of concatenated dataset. - - Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but - concat the group flag for image aspect ratio. - - Args: - datasets (list[:obj:`Dataset`]): A list of datasets. - separate_eval (bool): Whether to evaluate the results - separately if it is used as validation dataset. - Defaults to True. - """ - - def __init__(self, datasets, separate_eval=True): - super(ConcatDataset, self).__init__(datasets) - self.CLASSES = datasets[0].CLASSES - self.PALETTE = getattr(datasets[0], 'PALETTE', None) - self.separate_eval = separate_eval - if not separate_eval: - if any([isinstance(ds, CocoDataset) for ds in datasets]): - raise NotImplementedError( - 'Evaluating concatenated CocoDataset as a whole is not' - ' supported! Please set "separate_eval=True"') - elif len(set([type(ds) for ds in datasets])) != 1: - raise NotImplementedError( - 'All the datasets should have same types') - - if hasattr(datasets[0], 'flag'): - flags = [] - for i in range(0, len(datasets)): - flags.append(datasets[i].flag) - self.flag = np.concatenate(flags) - - def get_cat_ids(self, idx): - """Get category ids of concatenated dataset by index. - - Args: - idx (int): Index of data. - - Returns: - list[int]: All categories in the image of specified index. 
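[Editor's note] For orientation, a short sketch of composing two already-built datasets with this wrapper; `dataset_a` and `dataset_b` are assumed to exist, and the metric prefixes come from the wrapper's `evaluate` method further below.

```python
# Sketch: wrapping two pre-built datasets so they are sampled as one set.
from mmdet.datasets import ConcatDataset

combined = ConcatDataset([dataset_a, dataset_b], separate_eval=True)
len(combined)             # sum of the two dataset lengths
combined.get_ann_info(0)  # dispatched to the first underlying dataset
# With separate_eval=True, evaluate() reports per-dataset keys, e.g.
# '0_bbox_mAP' and '1_bbox_mAP'.
```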
- """ - - if idx < 0: - if -idx > len(self): - raise ValueError( - 'absolute value of index should not exceed dataset length') - idx = len(self) + idx - dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) - if dataset_idx == 0: - sample_idx = idx - else: - sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] - return self.datasets[dataset_idx].get_cat_ids(sample_idx) - - def get_ann_info(self, idx): - """Get annotation of concatenated dataset by index. - - Args: - idx (int): Index of data. - - Returns: - dict: Annotation info of specified index. - """ - - if idx < 0: - if -idx > len(self): - raise ValueError( - 'absolute value of index should not exceed dataset length') - idx = len(self) + idx - dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) - if dataset_idx == 0: - sample_idx = idx - else: - sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] - return self.datasets[dataset_idx].get_ann_info(sample_idx) - - def evaluate(self, results, logger=None, **kwargs): - """Evaluate the results. - - Args: - results (list[list | tuple]): Testing results of the dataset. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - - Returns: - dict[str: float]: AP results of the total dataset or each separate - dataset if `self.separate_eval=True`. - """ - assert len(results) == self.cumulative_sizes[-1], \ - ('Dataset and results have different sizes: ' - f'{self.cumulative_sizes[-1]} v.s. {len(results)}') - - # Check whether all the datasets support evaluation - for dataset in self.datasets: - assert hasattr(dataset, 'evaluate'), \ - f'{type(dataset)} does not implement evaluate function' - - if self.separate_eval: - dataset_idx = -1 - total_eval_results = dict() - for size, dataset in zip(self.cumulative_sizes, self.datasets): - start_idx = 0 if dataset_idx == -1 else \ - self.cumulative_sizes[dataset_idx] - end_idx = self.cumulative_sizes[dataset_idx + 1] - - results_per_dataset = results[start_idx:end_idx] - print_log( - f'\nEvaluating {dataset.ann_file} with ' - f'{len(results_per_dataset)} images now', - logger=logger) - - eval_results_per_dataset = dataset.evaluate( - results_per_dataset, logger=logger, **kwargs) - dataset_idx += 1 - for k, v in eval_results_per_dataset.items(): - total_eval_results.update({f'{dataset_idx}_{k}': v}) - - return total_eval_results - elif any([isinstance(ds, CocoDataset) for ds in self.datasets]): - raise NotImplementedError( - 'Evaluating concatenated CocoDataset as a whole is not' - ' supported! Please set "separate_eval=True"') - elif len(set([type(ds) for ds in self.datasets])) != 1: - raise NotImplementedError( - 'All the datasets should have same types') - else: - original_data_infos = self.datasets[0].data_infos - self.datasets[0].data_infos = sum( - [dataset.data_infos for dataset in self.datasets], []) - eval_results = self.datasets[0].evaluate( - results, logger=logger, **kwargs) - self.datasets[0].data_infos = original_data_infos - return eval_results - - -@DATASETS.register_module() -class RepeatDataset: - """A wrapper of repeated dataset. - - The length of repeated dataset will be `times` larger than the original - dataset. This is useful when the data loading time is long but the dataset - is small. Using RepeatDataset can reduce the data loading time between - epochs. - - Args: - dataset (:obj:`Dataset`): The dataset to be repeated. - times (int): Repeat times. 
- """ - - def __init__(self, dataset, times): - self.dataset = dataset - self.times = times - self.CLASSES = dataset.CLASSES - self.PALETTE = getattr(dataset, 'PALETTE', None) - if hasattr(self.dataset, 'flag'): - self.flag = np.tile(self.dataset.flag, times) - - self._ori_len = len(self.dataset) - - def __getitem__(self, idx): - return self.dataset[idx % self._ori_len] - - def get_cat_ids(self, idx): - """Get category ids of repeat dataset by index. - - Args: - idx (int): Index of data. - - Returns: - list[int]: All categories in the image of specified index. - """ - - return self.dataset.get_cat_ids(idx % self._ori_len) - - def get_ann_info(self, idx): - """Get annotation of repeat dataset by index. - - Args: - idx (int): Index of data. - - Returns: - dict: Annotation info of specified index. - """ - - return self.dataset.get_ann_info(idx % self._ori_len) - - def __len__(self): - """Length after repetition.""" - return self.times * self._ori_len - - -# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa -@DATASETS.register_module() -class ClassBalancedDataset: - """A wrapper of repeated dataset with repeat factor. - - Suitable for training on class imbalanced datasets like LVIS. Following - the sampling strategy in the `paper `_, - in each epoch, an image may appear multiple times based on its - "repeat factor". - The repeat factor for an image is a function of the frequency the rarest - category labeled in that image. The "frequency of category c" in [0, 1] - is defined by the fraction of images in the training set (without repeats) - in which category c appears. - The dataset needs to instantiate :func:`self.get_cat_ids` to support - ClassBalancedDataset. - - The repeat factor is computed as followed. - - 1. For each category c, compute the fraction # of images - that contain it: :math:`f(c)` - 2. For each category c, compute the category-level repeat factor: - :math:`r(c) = max(1, sqrt(t/f(c)))` - 3. For each image I, compute the image-level repeat factor: - :math:`r(I) = max_{c in I} r(c)` - - Args: - dataset (:obj:`CustomDataset`): The dataset to be repeated. - oversample_thr (float): frequency threshold below which data is - repeated. For categories with ``f_c >= oversample_thr``, there is - no oversampling. For categories with ``f_c < oversample_thr``, the - degree of oversampling following the square-root inverse frequency - heuristic above. - filter_empty_gt (bool, optional): If set true, images without bounding - boxes will not be oversampled. Otherwise, they will be categorized - as the pure background class and involved into the oversampling. - Default: True. 
- """ - - def __init__(self, dataset, oversample_thr, filter_empty_gt=True): - self.dataset = dataset - self.oversample_thr = oversample_thr - self.filter_empty_gt = filter_empty_gt - self.CLASSES = dataset.CLASSES - self.PALETTE = getattr(dataset, 'PALETTE', None) - - repeat_factors = self._get_repeat_factors(dataset, oversample_thr) - repeat_indices = [] - for dataset_idx, repeat_factor in enumerate(repeat_factors): - repeat_indices.extend([dataset_idx] * math.ceil(repeat_factor)) - self.repeat_indices = repeat_indices - - flags = [] - if hasattr(self.dataset, 'flag'): - for flag, repeat_factor in zip(self.dataset.flag, repeat_factors): - flags.extend([flag] * int(math.ceil(repeat_factor))) - assert len(flags) == len(repeat_indices) - self.flag = np.asarray(flags, dtype=np.uint8) - - def _get_repeat_factors(self, dataset, repeat_thr): - """Get repeat factor for each images in the dataset. - - Args: - dataset (:obj:`CustomDataset`): The dataset - repeat_thr (float): The threshold of frequency. If an image - contains the categories whose frequency below the threshold, - it would be repeated. - - Returns: - list[float]: The repeat factors for each images in the dataset. - """ - - # 1. For each category c, compute the fraction # of images - # that contain it: f(c) - category_freq = defaultdict(int) - num_images = len(dataset) - for idx in range(num_images): - cat_ids = set(self.dataset.get_cat_ids(idx)) - if len(cat_ids) == 0 and not self.filter_empty_gt: - cat_ids = set([len(self.CLASSES)]) - for cat_id in cat_ids: - category_freq[cat_id] += 1 - for k, v in category_freq.items(): - category_freq[k] = v / num_images - - # 2. For each category c, compute the category-level repeat factor: - # r(c) = max(1, sqrt(t/f(c))) - category_repeat = { - cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq)) - for cat_id, cat_freq in category_freq.items() - } - - # 3. For each image I, compute the image-level repeat factor: - # r(I) = max_{c in I} r(c) - repeat_factors = [] - for idx in range(num_images): - cat_ids = set(self.dataset.get_cat_ids(idx)) - if len(cat_ids) == 0 and not self.filter_empty_gt: - cat_ids = set([len(self.CLASSES)]) - repeat_factor = 1 - if len(cat_ids) > 0: - repeat_factor = max( - {category_repeat[cat_id] - for cat_id in cat_ids}) - repeat_factors.append(repeat_factor) - - return repeat_factors - - def __getitem__(self, idx): - ori_index = self.repeat_indices[idx] - return self.dataset[ori_index] - - def get_ann_info(self, idx): - """Get annotation of dataset by index. - - Args: - idx (int): Index of data. - - Returns: - dict: Annotation info of specified index. - """ - ori_index = self.repeat_indices[idx] - return self.dataset.get_ann_info(ori_index) - - def __len__(self): - """Length after repetition.""" - return len(self.repeat_indices) - - -@DATASETS.register_module() -class MultiImageMixDataset: - """A wrapper of multiple images mixed dataset. - - Suitable for training on multiple images mixed data augmentation like - mosaic and mixup. For the augmentation pipeline of mixed image data, - the `get_indexes` method needs to be provided to obtain the image - indexes, and you can set `skip_flags` to change the pipeline running - process. At the same time, we provide the `dynamic_scale` parameter - to dynamically change the output image size. - - Args: - dataset (:obj:`CustomDataset`): The dataset to be mixed. - pipeline (Sequence[dict]): Sequence of transform object or - config dict to be composed. - dynamic_scale (tuple[int], optional): The image scale can be changed - dynamically. 
Default to None. It is deprecated. - skip_type_keys (list[str], optional): Sequence of type string to - be skip pipeline. Default to None. - max_refetch (int): The maximum number of retry iterations for getting - valid results from the pipeline. If the number of iterations is - greater than `max_refetch`, but results is still None, then the - iteration is terminated and raise the error. Default: 15. - """ - - def __init__(self, - dataset, - pipeline, - dynamic_scale=None, - skip_type_keys=None, - max_refetch=15): - if dynamic_scale is not None: - raise RuntimeError( - 'dynamic_scale is deprecated. Please use Resize pipeline ' - 'to achieve similar functions') - assert isinstance(pipeline, collections.abc.Sequence) - if skip_type_keys is not None: - assert all([ - isinstance(skip_type_key, str) - for skip_type_key in skip_type_keys - ]) - self._skip_type_keys = skip_type_keys - - self.pipeline = [] - self.pipeline_types = [] - for transform in pipeline: - if isinstance(transform, dict): - self.pipeline_types.append(transform['type']) - transform = build_from_cfg(transform, PIPELINES) - self.pipeline.append(transform) - else: - raise TypeError('pipeline must be a dict') - - self.dataset = dataset - self.CLASSES = dataset.CLASSES - self.PALETTE = getattr(dataset, 'PALETTE', None) - if hasattr(self.dataset, 'flag'): - self.flag = dataset.flag - self.num_samples = len(dataset) - self.max_refetch = max_refetch - - def __len__(self): - return self.num_samples - - def __getitem__(self, idx): - results = copy.deepcopy(self.dataset[idx]) - for (transform, transform_type) in zip(self.pipeline, - self.pipeline_types): - if self._skip_type_keys is not None and \ - transform_type in self._skip_type_keys: - continue - - if hasattr(transform, 'get_indexes'): - for i in range(self.max_refetch): - # Make sure the results passed the loading pipeline - # of the original dataset is not None. - indexes = transform.get_indexes(self.dataset) - if not isinstance(indexes, collections.abc.Sequence): - indexes = [indexes] - mix_results = [ - copy.deepcopy(self.dataset[index]) for index in indexes - ] - if None not in mix_results: - results['mix_results'] = mix_results - break - else: - raise RuntimeError( - 'The loading pipeline of the original dataset' - ' always return None. Please check the correctness ' - 'of the dataset and its pipeline.') - - for i in range(self.max_refetch): - # To confirm the results passed the training pipeline - # of the wrapper is not None. - updated_results = transform(copy.deepcopy(results)) - if updated_results is not None: - results = updated_results - break - else: - raise RuntimeError( - 'The training pipeline of the dataset wrapper' - ' always return None.Please check the correctness ' - 'of the dataset and its pipeline.') - - if 'mix_results' in results: - results.pop('mix_results') - - return results - - def update_skip_type_keys(self, skip_type_keys): - """Update skip_type_keys. It is called by an external hook. - - Args: - skip_type_keys (list[str], optional): Sequence of type - string to be skip pipeline. 
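[Editor's note] A sketch of how this wrapper is typically paired with a multi-image transform such as `Mosaic`: the inner dataset only loads images and annotations, while the wrapper's pipeline applies the mixed-image transforms. Paths and transform parameters below are illustrative only.

```python
# Illustrative MultiImageMixDataset configuration (values are placeholders).
train_dataset = dict(
    type='MultiImageMixDataset',
    dataset=dict(
        type='CocoDataset',
        ann_file='data/coco/annotations/instances_train2017.json',
        img_prefix='data/coco/train2017/',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
        ]),
    pipeline=[
        dict(type='Mosaic', img_scale=(640, 640)),
        dict(type='RandomFlip', flip_ratio=0.5),
    ],
    skip_type_keys=None)  # a hook may later call update_skip_type_keys(['Mosaic'])
```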
- """ - assert all([ - isinstance(skip_type_key, str) for skip_type_key in skip_type_keys - ]) - self._skip_type_keys = skip_type_keys diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/deepfashion.py b/cv/detection/co-detr/pytorch/mmdet/datasets/deepfashion.py deleted file mode 100644 index 609f80913b4ac63a80359dc25fdd49293a29aa7e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/deepfashion.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .builder import DATASETS -from .coco import CocoDataset - - -@DATASETS.register_module() -class DeepFashionDataset(CocoDataset): - - CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', - 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair', - 'skin', 'face') - - PALETTE = [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64), - (0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96), - (128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192), - (128, 0, 96), (128, 0, 192), (0, 32, 192)] diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/lvis.py b/cv/detection/co-detr/pytorch/mmdet/datasets/lvis.py deleted file mode 100644 index a7325db32fac59b93bb633d5fe51f13db3c4566d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/lvis.py +++ /dev/null @@ -1,743 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import itertools -import logging -import os.path as osp -import tempfile -import warnings -from collections import OrderedDict - -import numpy as np -from mmcv.utils import print_log -from terminaltables import AsciiTable - -from .builder import DATASETS -from .coco import CocoDataset - - -@DATASETS.register_module() -class LVISV05Dataset(CocoDataset): - - CLASSES = ( - 'acorn', 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', - 'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', - 'antenna', 'apple', 'apple_juice', 'applesauce', 'apricot', 'apron', - 'aquarium', 'armband', 'armchair', 'armoire', 'armor', 'artichoke', - 'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado', 'award', - 'awning', 'ax', 'baby_buggy', 'basketball_backboard', 'backpack', - 'handbag', 'suitcase', 'bagel', 'bagpipe', 'baguet', 'bait', 'ball', - 'ballet_skirt', 'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage', - 'bandanna', 'banjo', 'banner', 'barbell', 'barge', 'barrel', - 'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat', - 'baseball_cap', 'baseball_glove', 'basket', 'basketball_hoop', - 'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel', - 'bathrobe', 'bathtub', 'batter_(food)', 'battery', 'beachball', 'bead', - 'beaker', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed', - 'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle', 'beer_can', - 'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle', 'bench', - 'beret', 'bib', 'Bible', 'bicycle', 'visor', 'binder', 'binoculars', - 'bird', 'birdfeeder', 'birdbath', 'birdcage', 'birdhouse', - 'birthday_cake', 'birthday_card', 'biscuit_(bread)', 'pirate_flag', - 'black_sheep', 'blackboard', 'blanket', 'blazer', 'blender', 'blimp', - 'blinker', 'blueberry', 'boar', 'gameboard', 'boat', 'bobbin', - 'bobby_pin', 'boiled_egg', 'bolo_tie', 'deadbolt', 'bolt', 'bonnet', - 'book', 'book_bag', 'bookcase', 'booklet', 'bookmark', - 'boom_microphone', 'boot', 'bottle', 'bottle_opener', 'bouquet', - 'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie', 'bowl', - 'pipe_bowl', 'bowler_hat', 
'bowling_ball', 'bowling_pin', - 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere', - 'bread-bin', 'breechcloth', 'bridal_gown', 'briefcase', - 'bristle_brush', 'broccoli', 'broach', 'broom', 'brownie', - 'brussels_sprouts', 'bubble_gum', 'bucket', 'horse_buggy', 'bull', - 'bulldog', 'bulldozer', 'bullet_train', 'bulletin_board', - 'bulletproof_vest', 'bullhorn', 'corned_beef', 'bun', 'bunk_bed', - 'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butcher_knife', - 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', - 'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf', - 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)', - 'can', 'can_opener', 'candelabrum', 'candle', 'candle_holder', - 'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'cannon', - 'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap', - 'cape', 'cappuccino', 'car_(automobile)', 'railcar_(part_of_a_train)', - 'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan', - 'cargo_ship', 'carnation', 'horse_carriage', 'carrot', 'tote_bag', - 'cart', 'carton', 'cash_register', 'casserole', 'cassette', 'cast', - 'cat', 'cauliflower', 'caviar', 'cayenne_(spice)', 'CD_player', - 'celery', 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue', - 'champagne', 'chandelier', 'chap', 'checkbook', 'checkerboard', - 'cherry', 'chessboard', 'chest_of_drawers_(furniture)', - 'chicken_(animal)', 'chicken_wire', 'chickpea', 'Chihuahua', - 'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)', - 'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk', - 'chocolate_mousse', 'choker', 'chopping_board', 'chopstick', - 'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette', - 'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent', - 'clementine', 'clip', 'clipboard', 'clock', 'clock_tower', - 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat', - 'coat_hanger', 'coatrack', 'cock', 'coconut', 'coffee_filter', - 'coffee_maker', 'coffee_table', 'coffeepot', 'coil', 'coin', - 'colander', 'coleslaw', 'coloring_material', 'combination_lock', - 'pacifier', 'comic_book', 'computer_keyboard', 'concrete_mixer', - 'cone', 'control', 'convertible_(automobile)', 'sofa_bed', 'cookie', - 'cookie_jar', 'cooking_utensil', 'cooler_(for_food)', - 'cork_(bottle_plug)', 'corkboard', 'corkscrew', 'edible_corn', - 'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset', - 'romaine_lettuce', 'costume', 'cougar', 'coverall', 'cowbell', - 'cowboy_hat', 'crab_(animal)', 'cracker', 'crape', 'crate', 'crayon', - 'cream_pitcher', 'credit_card', 'crescent_roll', 'crib', 'crock_pot', - 'crossbar', 'crouton', 'crow', 'crown', 'crucifix', 'cruise_ship', - 'police_cruiser', 'crumb', 'crutch', 'cub_(animal)', 'cube', - 'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupcake', 'hair_curler', - 'curling_iron', 'curtain', 'cushion', 'custard', 'cutting_tool', - 'cylinder', 'cymbal', 'dachshund', 'dagger', 'dartboard', - 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk', - 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux', - 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher', - 'dishwasher_detergent', 'diskette', 'dispenser', 'Dixie_cup', 'dog', - 'dog_collar', 'doll', 'dollar', 'dolphin', 'domestic_ass', 'eye_mask', - 'doorbell', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly', - 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit', - 'dresser', 'drill', 'drinking_fountain', 'drone', 
'dropper', - 'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling', - 'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan', - 'Dutch_oven', 'eagle', 'earphone', 'earplug', 'earring', 'easel', - 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater', - 'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk', - 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan', - 'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)', - 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm', - 'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace', - 'fireplug', 'fish', 'fish_(food)', 'fishbowl', 'fishing_boat', - 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flash', - 'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)', - 'flower_arrangement', 'flute_glass', 'foal', 'folding_chair', - 'food_processor', 'football_(American)', 'football_helmet', - 'footstool', 'fork', 'forklift', 'freight_car', 'French_toast', - 'freshener', 'frisbee', 'frog', 'fruit_juice', 'fruit_salad', - 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage', - 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic', - 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'giant_panda', - 'gift_wrap', 'ginger', 'giraffe', 'cincture', - 'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles', - 'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose', - 'gorilla', 'gourd', 'surgical_gown', 'grape', 'grasshopper', 'grater', - 'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle', - 'grillroom', 'grinder_(tool)', 'grits', 'grizzly', 'grocery_bag', - 'guacamole', 'guitar', 'gull', 'gun', 'hair_spray', 'hairbrush', - 'hairnet', 'hairpin', 'ham', 'hamburger', 'hammer', 'hammock', - 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel', - 'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw', - 'hardback_book', 'harmonium', 'hat', 'hatbox', 'hatch', 'veil', - 'headband', 'headboard', 'headlight', 'headscarf', 'headset', - 'headstall_(for_horses)', 'hearing_aid', 'heart', 'heater', - 'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus', - 'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 'fume_hood', - 'hook', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce', - 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear', - 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate', - 'ice_tea', 'igniter', 'incense', 'inhaler', 'iPod', - 'iron_(for_clothing)', 'ironing_board', 'jacket', 'jam', 'jean', - 'jeep', 'jelly_bean', 'jersey', 'jet_plane', 'jewelry', 'joystick', - 'jumpsuit', 'kayak', 'keg', 'kennel', 'kettle', 'key', 'keycard', - 'kilt', 'kimono', 'kitchen_sink', 'kitchen_table', 'kite', 'kitten', - 'kiwi_fruit', 'knee_pad', 'knife', 'knight_(chess_piece)', - 'knitting_needle', 'knob', 'knocker_(on_a_door)', 'koala', 'lab_coat', - 'ladder', 'ladle', 'ladybug', 'lamb_(animal)', 'lamb-chop', 'lamp', - 'lamppost', 'lampshade', 'lantern', 'lanyard', 'laptop_computer', - 'lasagna', 'latch', 'lawn_mower', 'leather', 'legging_(clothing)', - 'Lego', 'lemon', 'lemonade', 'lettuce', 'license_plate', 'life_buoy', - 'life_jacket', 'lightbulb', 'lightning_rod', 'lime', 'limousine', - 'linen_paper', 'lion', 'lip_balm', 'lipstick', 'liquor', 'lizard', - 'Loafer_(type_of_shoe)', 'log', 'lollipop', 'lotion', - 'speaker_(stereo_equipment)', 'loveseat', 'machine_gun', 'magazine', - 'magnet', 'mail_slot', 'mailbox_(at_home)', 'mallet', 'mammoth', - 'mandarin_orange', 
'manger', 'manhole', 'map', 'marker', 'martini', - 'mascot', 'mashed_potato', 'masher', 'mask', 'mast', - 'mat_(gym_equipment)', 'matchbox', 'mattress', 'measuring_cup', - 'measuring_stick', 'meatball', 'medicine', 'melon', 'microphone', - 'microscope', 'microwave_oven', 'milestone', 'milk', 'minivan', - 'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)', 'money', - 'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor', - 'motor_scooter', 'motor_vehicle', 'motorboat', 'motorcycle', - 'mound_(baseball)', 'mouse_(animal_rodent)', - 'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom', - 'music_stool', 'musical_instrument', 'nailfile', 'nameplate', 'napkin', - 'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newsstand', - 'nightshirt', 'nosebag_(for_animals)', 'noseband_(for_animals)', - 'notebook', 'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)', - 'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion', - 'orange_(fruit)', 'orange_juice', 'oregano', 'ostrich', 'ottoman', - 'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle', - 'padlock', 'paintbox', 'paintbrush', 'painting', 'pajamas', 'palette', - 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose', - 'papaya', 'paperclip', 'paper_plate', 'paper_towel', 'paperback_book', - 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', - 'parchment', 'parka', 'parking_meter', 'parrot', - 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport', - 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter', - 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'pegboard', - 'pelican', 'pen', 'pencil', 'pencil_box', 'pencil_sharpener', - 'pendulum', 'penguin', 'pennant', 'penny_(coin)', 'pepper', - 'pepper_mill', 'perfume', 'persimmon', 'baby', 'pet', 'petfood', - 'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano', - 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow', - 'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball', - 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)', - 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat', - 'plate', 'platter', 'playing_card', 'playpen', 'pliers', - 'plow_(farm_equipment)', 'pocket_watch', 'pocketknife', - 'poker_(fire_stirring_tool)', 'pole', 'police_van', 'polo_shirt', - 'poncho', 'pony', 'pool_table', 'pop_(soda)', 'portrait', - 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato', - 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'printer', - 'projectile_(weapon)', 'projector', 'propeller', 'prune', 'pudding', - 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', 'puppet', - 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', 'race_car', - 'racket', 'radar', 'radiator', 'radio_receiver', 'radish', 'raft', - 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat', - 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt', - 'recliner', 'record_player', 'red_cabbage', 'reflector', - 'remote_control', 'rhinoceros', 'rib_(food)', 'rifle', 'ring', - 'river_boat', 'road_map', 'robe', 'rocking_chair', 'roller_skate', - 'Rollerblade', 'rolling_pin', 'root_beer', - 'router_(computer_equipment)', 'rubber_band', 'runner_(carpet)', - 'plastic_bag', 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', - 'safety_pin', 'sail', 'salad', 'salad_plate', 'salami', - 'salmon_(fish)', 'salmon_(food)', 'salsa', 'saltshaker', - 'sandal_(type_of_shoe)', 'sandwich', 'satchel', 'saucepan', 'saucer', - 'sausage', 
'sawhorse', 'saxophone', 'scale_(measuring_instrument)', - 'scarecrow', 'scarf', 'school_bus', 'scissors', 'scoreboard', - 'scrambled_eggs', 'scraper', 'scratcher', 'screwdriver', - 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane', - 'seashell', 'seedling', 'serving_dish', 'sewing_machine', 'shaker', - 'shampoo', 'shark', 'sharpener', 'Sharpie', 'shaver_(electric)', - 'shaving_cream', 'shawl', 'shears', 'sheep', 'shepherd_dog', - 'sherbert', 'shield', 'shirt', 'shoe', 'shopping_bag', 'shopping_cart', - 'short_pants', 'shot_glass', 'shoulder_bag', 'shovel', 'shower_head', - 'shower_curtain', 'shredder_(for_paper)', 'sieve', 'signboard', 'silo', - 'sink', 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', - 'ski_pole', 'skirt', 'sled', 'sleeping_bag', 'sling_(bandage)', - 'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman', - 'snowmobile', 'soap', 'soccer_ball', 'sock', 'soda_fountain', - 'carbonated_water', 'sofa', 'softball', 'solar_array', 'sombrero', - 'soup', 'soup_bowl', 'soupspoon', 'sour_cream', 'soya_milk', - 'space_shuttle', 'sparkler_(fireworks)', 'spatula', 'spear', - 'spectacles', 'spice_rack', 'spider', 'sponge', 'spoon', 'sportswear', - 'spotlight', 'squirrel', 'stapler_(stapling_machine)', 'starfish', - 'statue_(sculpture)', 'steak_(food)', 'steak_knife', - 'steamer_(kitchen_appliance)', 'steering_wheel', 'stencil', - 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer', - 'stirrup', 'stockings_(leg_wear)', 'stool', 'stop_sign', 'brake_light', - 'stove', 'strainer', 'strap', 'straw_(for_drinking)', 'strawberry', - 'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer', - 'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', - 'sunglasses', 'sunhat', 'sunscreen', 'surfboard', 'sushi', 'mop', - 'sweat_pants', 'sweatband', 'sweater', 'sweatshirt', 'sweet_potato', - 'swimsuit', 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table', - 'table', 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', - 'taillight', 'tambourine', 'army_tank', 'tank_(storage_vessel)', - 'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure', - 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup', - 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth', - 'telephone_pole', 'telephoto_lens', 'television_camera', - 'television_set', 'tennis_ball', 'tennis_racket', 'tequila', - 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread', - 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil', - 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven', - 'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush', - 'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel', - 'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light', - 'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', - 'tray', 'tree_house', 'trench_coat', 'triangle_(musical_instrument)', - 'tricycle', 'tripod', 'trousers', 'truck', 'truffle_(chocolate)', - 'trunk', 'vat', 'turban', 'turkey_(bird)', 'turkey_(food)', 'turnip', - 'turtle', 'turtleneck_(clothing)', 'typewriter', 'umbrella', - 'underwear', 'unicycle', 'urinal', 'urn', 'vacuum_cleaner', 'valve', - 'vase', 'vending_machine', 'vent', 'videotape', 'vinegar', 'violin', - 'vodka', 'volleyball', 'vulture', 'waffle', 'waffle_iron', 'wagon', - 'wagon_wheel', 'walking_stick', 'wall_clock', 'wall_socket', 'wallet', - 'walrus', 'wardrobe', 'wasabi', 'automatic_washer', 'watch', - 
'water_bottle', 'water_cooler', 'water_faucet', 'water_filter', - 'water_heater', 'water_jug', 'water_gun', 'water_scooter', 'water_ski', - 'water_tower', 'watering_can', 'watermelon', 'weathervane', 'webcam', - 'wedding_cake', 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', - 'whipped_cream', 'whiskey', 'whistle', 'wick', 'wig', 'wind_chime', - 'windmill', 'window_box_(for_plants)', 'windshield_wiper', 'windsock', - 'wine_bottle', 'wine_bucket', 'wineglass', 'wing_chair', - 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon', 'wreath', - 'wrench', 'wristband', 'wristlet', 'yacht', 'yak', 'yogurt', - 'yoke_(animal_equipment)', 'zebra', 'zucchini') - - PALETTE = None - - def load_annotations(self, ann_file): - """Load annotation from lvis style annotation file. - - Args: - ann_file (str): Path of annotation file. - - Returns: - list[dict]: Annotation info from LVIS api. - """ - - try: - import lvis - if getattr(lvis, '__version__', '0') >= '10.5.3': - warnings.warn( - 'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501 - UserWarning) - from lvis import LVIS - except ImportError: - raise ImportError( - 'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' # noqa: E501 - ) - self.coco = LVIS(ann_file) - self.cat_ids = self.coco.get_cat_ids() - self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} - self.img_ids = self.coco.get_img_ids() - data_infos = [] - for i in self.img_ids: - info = self.coco.load_imgs([i])[0] - if info['file_name'].startswith('COCO'): - # Convert form the COCO 2014 file naming convention of - # COCO_[train/val/test]2014_000000000000.jpg to the 2017 - # naming convention of 000000000000.jpg - # (LVIS v1 will fix this naming issue) - info['filename'] = info['file_name'][-16:] - else: - info['filename'] = info['file_name'] - data_infos.append(info) - return data_infos - - def evaluate(self, - results, - metric='bbox', - logger=None, - jsonfile_prefix=None, - classwise=False, - proposal_nums=(100, 300, 1000), - iou_thrs=np.arange(0.5, 0.96, 0.05)): - """Evaluation in LVIS protocol. - - Args: - results (list[list | tuple]): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. Options are - 'bbox', 'segm', 'proposal', 'proposal_fast'. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - jsonfile_prefix (str | None): - classwise (bool): Whether to evaluating the AP for each class. - proposal_nums (Sequence[int]): Proposal number used for evaluating - recalls, such as recall@100, recall@1000. - Default: (100, 300, 1000). - iou_thrs (Sequence[float]): IoU threshold used for evaluating - recalls. If set to a list, the average recall of all IoUs will - also be computed. Default: 0.5. - - Returns: - dict[str, float]: LVIS style metrics. - """ - - try: - import lvis - if getattr(lvis, '__version__', '0') >= '10.5.3': - warnings.warn( - 'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501 - UserWarning) - from lvis import LVISEval, LVISResults - except ImportError: - raise ImportError( - 'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' 
# noqa: E501 - ) - assert isinstance(results, list), 'results must be a list' - assert len(results) == len(self), ( - 'The length of results is not equal to the dataset len: {} != {}'. - format(len(results), len(self))) - - metrics = metric if isinstance(metric, list) else [metric] - allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] - for metric in metrics: - if metric not in allowed_metrics: - raise KeyError('metric {} is not supported'.format(metric)) - - if jsonfile_prefix is None: - tmp_dir = tempfile.TemporaryDirectory() - jsonfile_prefix = osp.join(tmp_dir.name, 'results') - else: - tmp_dir = None - result_files = self.results2json(results, jsonfile_prefix) - - eval_results = OrderedDict() - # get original api - lvis_gt = self.coco - for metric in metrics: - msg = 'Evaluating {}...'.format(metric) - if logger is None: - msg = '\n' + msg - print_log(msg, logger=logger) - - if metric == 'proposal_fast': - ar = self.fast_eval_recall( - results, proposal_nums, iou_thrs, logger='silent') - log_msg = [] - for i, num in enumerate(proposal_nums): - eval_results['AR@{}'.format(num)] = ar[i] - log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i])) - log_msg = ''.join(log_msg) - print_log(log_msg, logger=logger) - continue - - if metric not in result_files: - raise KeyError('{} is not in results'.format(metric)) - try: - # NOTE: We only limit the max_dets in test config. - lvis_dt = LVISResults(lvis_gt, result_files[metric], max_dets=1000) - except IndexError: - print_log( - 'The testing results of the whole dataset is empty.', - logger=logger, - level=logging.ERROR) - break - - iou_type = 'bbox' if metric == 'proposal' else metric - lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type) - lvis_eval.params.imgIds = self.img_ids - if metric == 'proposal': - lvis_eval.params.useCats = 0 - lvis_eval.params.maxDets = list(proposal_nums) - lvis_eval.evaluate() - lvis_eval.accumulate() - lvis_eval.summarize() - for k, v in lvis_eval.get_results().items(): - if k.startswith('AR'): - val = float('{:.3f}'.format(float(v))) - eval_results[k] = val - else: - lvis_eval.evaluate() - lvis_eval.accumulate() - lvis_eval.summarize() - lvis_results = lvis_eval.get_results() - if classwise: # Compute per-category AP - # Compute per-category AP - # from https://github.com/facebookresearch/detectron2/ - precisions = lvis_eval.eval['precision'] - # precision: (iou, recall, cls, area range, max dets) - assert len(self.cat_ids) == precisions.shape[2] - - results_per_category = [] - for idx, catId in enumerate(self.cat_ids): - # area range index 0: all area ranges - # max dets index -1: typically 100 per image - # the dimensions of precisions are - # [num_thrs, num_recalls, num_cats, num_area_rngs] - nm = self.coco.load_cats([catId])[0] - precision = precisions[:, :, idx, 0] - precision = precision[precision > -1] - if precision.size: - ap = np.mean(precision) - else: - ap = float('nan') - results_per_category.append( - (f'{nm["name"]}', f'{float(ap):0.3f}')) - - num_columns = min(6, len(results_per_category) * 2) - results_flatten = list( - itertools.chain(*results_per_category)) - headers = ['category', 'AP'] * (num_columns // 2) - results_2d = itertools.zip_longest(*[ - results_flatten[i::num_columns] - for i in range(num_columns) - ]) - table_data = [headers] - table_data += [result for result in results_2d] - table = AsciiTable(table_data) - print_log('\n' + table.table, logger=logger) - - for k, v in lvis_results.items(): - if k.startswith('AP'): - key = '{}_{}'.format(metric, k) - val = 
float('{:.3f}'.format(float(v))) - eval_results[key] = val - ap_summary = ' '.join([ - '{}:{:.3f}'.format(k, float(v)) - for k, v in lvis_results.items() if k.startswith('AP') - ]) - eval_results['{}_mAP_copypaste'.format(metric)] = ap_summary - lvis_eval.print_results() - if tmp_dir is not None: - tmp_dir.cleanup() - return eval_results - - -LVISDataset = LVISV05Dataset -DATASETS.register_module(name='LVISDataset', module=LVISDataset) - - -@DATASETS.register_module() -class LVISV1Dataset(LVISDataset): - - CLASSES = ( - 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol', - 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna', - 'apple', 'applesauce', 'apricot', 'apron', 'aquarium', - 'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor', - 'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer', - 'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy', - 'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel', - 'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon', - 'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo', - 'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow', - 'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap', - 'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)', - 'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)', - 'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie', - 'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper', - 'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt', - 'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor', - 'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath', - 'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card', - 'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket', - 'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry', - 'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg', - 'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase', - 'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle', - 'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)', - 'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'box', - 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere', - 'bread-bin', 'bread', 'breechcloth', 'bridal_gown', 'briefcase', - 'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts', - 'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer', - 'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn', - 'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card', - 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', - 'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf', - 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)', - 'can', 'can_opener', 'candle', 'candle_holder', 'candy_bar', - 'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup', - 'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino', - 'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car', - 'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship', - 'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton', - 'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower', - 'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone', - 'chain_mail', 'chair', 'chaise_longue', 'chalice', 'chandelier', - 'chap', 'checkbook', 
'checkerboard', 'cherry', 'chessboard', - 'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime', - 'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar', - 'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker', - 'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider', - 'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet', - 'clasp', 'cleansing_agent', 'cleat_(for_securing_rope)', 'clementine', - 'clip', 'clipboard', 'clippers_(for_plants)', 'cloak', 'clock', - 'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', - 'coat', 'coat_hanger', 'coatrack', 'cock', 'cockroach', - 'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table', - 'coffeepot', 'coil', 'coin', 'colander', 'coleslaw', - 'coloring_material', 'combination_lock', 'pacifier', 'comic_book', - 'compass', 'computer_keyboard', 'condiment', 'cone', 'control', - 'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie', - 'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)', - 'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet', - 'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall', - 'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker', - 'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib', - 'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown', - 'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch', - 'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup', - 'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain', - 'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard', - 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk', - 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux', - 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher', - 'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup', - 'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin', - 'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly', - 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit', - 'dresser', 'drill', 'drone', 'dropper', 'drum_(musical_instrument)', - 'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell', - 'dumpster', 'dustpan', 'eagle', 'earphone', 'earplug', 'earring', - 'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater', - 'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk', - 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan', - 'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)', - 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm', - 'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace', - 'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl', - 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap', - 'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)', - 'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal', - 'folding_chair', 'food_processor', 'football_(American)', - 'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car', - 'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice', - 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage', - 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic', - 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator', - 'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture', - 'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles', - 'goldfish', 'golf_club', 'golfcart', 
'gondola_(boat)', 'goose', - 'gorilla', 'gourd', 'grape', 'grater', 'gravestone', 'gravy_boat', - 'green_bean', 'green_onion', 'griddle', 'grill', 'grits', 'grizzly', - 'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet', - 'hairpin', 'halter_top', 'ham', 'hamburger', 'hammer', 'hammock', - 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel', - 'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw', - 'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband', - 'headboard', 'headlight', 'headscarf', 'headset', - 'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet', - 'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog', - 'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah', - 'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce', - 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear', - 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate', - 'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board', - 'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey', - 'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak', - 'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono', - 'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit', - 'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)', - 'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)', - 'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard', - 'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather', - 'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade', 'lettuce', - 'license_plate', 'life_buoy', 'life_jacket', 'lightbulb', - 'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor', - 'lizard', 'log', 'lollipop', 'speaker_(stereo_equipment)', 'loveseat', - 'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)', - 'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange', 'manger', - 'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato', - 'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox', - 'mattress', 'measuring_cup', 'measuring_stick', 'meatball', 'medicine', - 'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone', - 'milk', 'milk_can', 'milkshake', 'minivan', 'mint_candy', 'mirror', - 'mitten', 'mixer_(kitchen_tool)', 'money', - 'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor', - 'motor_scooter', 'motor_vehicle', 'motorcycle', 'mound_(baseball)', - 'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom', - 'music_stool', 'musical_instrument', 'nailfile', 'napkin', - 'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newspaper', - 'newsstand', 'nightshirt', 'nosebag_(for_animals)', - 'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker', - 'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil', - 'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'ostrich', - 'ottoman', 'oven', 'overalls_(clothing)', 'owl', 'packet', 'inkpad', - 'pad', 'paddle', 'padlock', 'paintbrush', 'painting', 'pajamas', - 'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', - 'pantyhose', 'papaya', 'paper_plate', 'paper_towel', 'paperback_book', - 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parasol', - 'parchment', 'parka', 'parking_meter', 'parrot', - 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport', - 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter', - 'pear', 
'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg', - 'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box', - 'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)', - 'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet', - 'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano', - 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow', - 'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball', - 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)', - 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat', - 'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)', - 'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)', - 'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)', - 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato', - 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'pretzel', - 'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune', - 'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', - 'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', - 'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish', - 'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat', - 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt', - 'recliner', 'record_player', 'reflector', 'remote_control', - 'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map', - 'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade', - 'rolling_pin', 'root_beer', 'router_(computer_equipment)', - 'rubber_band', 'runner_(carpet)', 'plastic_bag', - 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin', - 'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)', - 'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)', - 'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse', - 'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf', - 'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver', - 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane', - 'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark', - 'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl', - 'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt', - 'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass', - 'shoulder_bag', 'shovel', 'shower_head', 'shower_cap', - 'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink', - 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole', - 'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)', - 'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman', - 'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball', - 'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon', - 'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)', - 'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish', - 'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)', - 'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish', - 'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel', - 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer', - 'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer', - 'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign', - 'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl', - 'sugarcane_(plant)', 
'suit_(clothing)', 'sunflower', 'sunglasses', - 'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband', - 'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword', - 'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table', - 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight', - 'tambourine', 'army_tank', 'tank_(storage_vessel)', - 'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure', - 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup', - 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth', - 'telephone_pole', 'telephoto_lens', 'television_camera', - 'television_set', 'tennis_ball', 'tennis_racket', 'tequila', - 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread', - 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil', - 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven', - 'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush', - 'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel', - 'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light', - 'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', - 'tray', 'trench_coat', 'triangle_(musical_instrument)', 'tricycle', - 'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat', - 'turban', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)', - 'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn', - 'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest', - 'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture', - 'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick', - 'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe', - 'washbasin', 'automatic_washer', 'watch', 'water_bottle', - 'water_cooler', 'water_faucet', 'water_heater', 'water_jug', - 'water_gun', 'water_scooter', 'water_ski', 'water_tower', - 'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake', - 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream', - 'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)', - 'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket', - 'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon', - 'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt', - 'yoke_(animal_equipment)', 'zebra', 'zucchini') - - def load_annotations(self, ann_file): - try: - import lvis - if getattr(lvis, '__version__', '0') >= '10.5.3': - warnings.warn( - 'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501 - UserWarning) - from lvis import LVIS - except ImportError: - raise ImportError( - 'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' # noqa: E501 - ) - self.coco = LVIS(ann_file) - self.cat_ids = self.coco.get_cat_ids() - self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} - self.img_ids = self.coco.get_img_ids() - data_infos = [] - for i in self.img_ids: - info = self.coco.load_imgs([i])[0] - # coco_url is used in LVISv1 instead of file_name - # e.g. 
http://images.cocodataset.org/train2017/000000391895.jpg - # train/val split in specified in url - info['filename'] = info['coco_url'].replace( - 'http://images.cocodataset.org/', '') - data_infos.append(info) - return data_infos diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/openimages.py b/cv/detection/co-detr/pytorch/mmdet/datasets/openimages.py deleted file mode 100644 index fba660c398b825f6f6df9ba6452cf50f82384394..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/openimages.py +++ /dev/null @@ -1,891 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import csv -import json -import os.path as osp -import warnings -from collections import OrderedDict, defaultdict - -import mmcv -import numpy as np -import torch.distributed as dist -from mmcv.runner import get_dist_info -from mmcv.utils import print_log - -from mmdet.core import eval_map -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class OpenImagesDataset(CustomDataset): - """Open Images dataset for detection. - - Args: - ann_file (str): Annotation file path. - label_file (str): File path of the label description file that - maps the classes names in MID format to their short - descriptions. - image_level_ann_file (str): Image level annotation, which is used - in evaluation. - get_supercategory (bool): Whether to get parent class of the - current class. Default: True. - hierarchy_file (str): The file path of the class hierarchy. - Default: None. - get_metas (bool): Whether to get image metas in testing or - validation time. This should be `True` during evaluation. - Default: True. The OpenImages annotations do not have image - metas (width and height of the image), which will be used - during evaluation. We provide two ways to get image metas - in `OpenImagesDataset`: - - - 1. `load from file`: Load image metas from pkl file, which - is suggested to use. We provided a script to get image metas: - `tools/misc/get_image_metas.py`, which need to run - this script before training/testing. Please refer to - `config/openimages/README.md` for more details. - - - 2. `load from pipeline`, which will get image metas during - test time. However, this may reduce the inference speed, - especially when using distribution. - - load_from_file (bool): Whether to get image metas from pkl file. - meta_file (str): File path to get image metas. - filter_labels (bool): Whether filter unannotated classes. - Default: True. - load_image_level_labels (bool): Whether load and consider image - level labels during evaluation. Default: True. - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - """ - - def __init__(self, - ann_file, - label_file='', - image_level_ann_file='', - get_supercategory=True, - hierarchy_file=None, - get_metas=True, - load_from_file=True, - meta_file='', - filter_labels=True, - load_image_level_labels=True, - file_client_args=dict(backend='disk'), - **kwargs): - # may get error if use other file_client - self.file_client_args = file_client_args - - self.cat2label = defaultdict(str) - self.index_dict = {} - - # Although it will init file_client in `CustomDataset`, - # it needs to be init here. 
- file_client = mmcv.FileClient(**file_client_args) - # need get `index_dict` before load annotations - assert label_file.endswith('csv') - if hasattr(file_client, 'get_local_path'): - with file_client.get_local_path(label_file) as local_path: - class_names = self.get_classes_from_csv(local_path) - else: - class_names = self.get_classes_from_csv(label_file) - super(OpenImagesDataset, self).__init__( - ann_file=ann_file, file_client_args=file_client_args, **kwargs) - self.CLASSES = class_names - self.image_level_ann_file = image_level_ann_file - self.load_image_level_labels = load_image_level_labels - if get_supercategory is True: - assert hierarchy_file is not None - if self.__class__.__name__ == 'OpenImagesDataset': - assert hierarchy_file.endswith('json') - elif self.__class__.__name__ == 'OpenImagesChallengeDataset': - assert hierarchy_file.endswith('np') - else: - raise NotImplementedError - if hasattr(self.file_client, 'get_local_path'): - with self.file_client.get_local_path( - hierarchy_file) as local_path: - self.class_label_tree = self.get_relation_matrix( - local_path) - else: - self.class_label_tree = self.get_relation_matrix( - hierarchy_file) - self.get_supercategory = get_supercategory - self.get_metas = get_metas - self.load_from_file = load_from_file - self.meta_file = meta_file - if self.data_root is not None: - if not osp.isabs(self.meta_file): - self.meta_file = osp.join(self.data_root, self.meta_file) - self.filter_labels = filter_labels - self.rank, self.world_size = get_dist_info() - self.temp_img_metas = [] - self.test_img_metas = [] - self.test_img_shapes = [] - self.load_from_pipeline = False if load_from_file else True - - def get_classes_from_csv(self, label_file): - """Get classes name from file. - - Args: - label_file (str): File path of the label description file that - maps the classes names in MID format to their short - descriptions. - - Returns: - list[str]: Class name of OpenImages. - """ - - index_list = [] - classes_names = [] - with open(label_file, 'r') as f: - reader = csv.reader(f) - for line in reader: - self.cat2label[line[0]] = line[1] - classes_names.append(line[1]) - index_list.append(line[0]) - self.index_dict = {index: i for i, index in enumerate(index_list)} - return classes_names - - def load_annotations(self, ann_file): - """Load annotation from annotation file. - - Special described `self.data_infos` (defaultdict[list[dict]]) - in this function: Annotations where item of the defaultdict - indicates an image, each of which has (n) dicts. Keys of dicts are: - - - `bbox` (list): coordinates of the box, in normalized image - coordinates, of shape 4. - - `label` (int): the label id. - - `is_group_of` (bool): Indicates that the box spans a group - of objects (e.g., a bed of flowers or a crowd of people). - - `is_occluded` (bool): Indicates that the object is occluded - by another object in the image. - - `is_truncated` (bool): Indicates that the object extends - beyond the boundary of the image. - - `is_depiction` (bool): Indicates that the object is a - depiction. - - `is_inside` (bool): Indicates a picture taken from the - inside of the object. - - Args: - ann_file (str): CSV style annotation file path. - - Returns: - list[dict]: Data infos where each item of the list - indicates an image. Keys of annotations are: - - - `img_id` (str): Image name. - - `filename` (str): Image name with suffix. 
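The label description file consumed by `get_classes_from_csv` above is a two-column CSV that maps MID identifiers to readable class names; `index_dict` then maps each MID to a contiguous label index used by the box annotations. A small sketch of that mapping, with invented rows (the MIDs and names below are illustrative only, not real OpenImages entries):

```
import csv
import io

# Two illustrative rows in the style of the label description file:
# column 0 is the MID, column 1 the readable name.
label_csv = io.StringIO("/m/000001,Cat\n/m/000002,Dog\n")

cat2label, index_list, classes_names = {}, [], []
for line in csv.reader(label_csv):
    cat2label[line[0]] = line[1]
    classes_names.append(line[1])
    index_list.append(line[0])
index_dict = {mid: i for i, mid in enumerate(index_list)}

print(classes_names)  # ['Cat', 'Dog']
print(index_dict)     # {'/m/000001': 0, '/m/000002': 1}
```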
- """ - self.ann_infos = defaultdict(list) - data_infos = [] - cp_filename = None - with open(ann_file, 'r') as f: - reader = csv.reader(f) - for i, line in enumerate(reader): - if i == 0: - continue - img_id = line[0] - filename = f'{img_id}.jpg' - label_id = line[2] - assert label_id in self.index_dict - label = int(self.index_dict[label_id]) - bbox = [ - float(line[4]), # xmin - float(line[6]), # ymin - float(line[5]), # xmax - float(line[7]) # ymax - ] - is_occluded = True if int(line[8]) == 1 else False - is_truncated = True if int(line[9]) == 1 else False - is_group_of = True if int(line[10]) == 1 else False - is_depiction = True if int(line[11]) == 1 else False - is_inside = True if int(line[12]) == 1 else False - - self.ann_infos[img_id].append( - dict( - bbox=bbox, - label=label, - is_occluded=is_occluded, - is_truncated=is_truncated, - is_group_of=is_group_of, - is_depiction=is_depiction, - is_inside=is_inside)) - if filename != cp_filename: - data_infos.append(dict(img_id=img_id, filename=filename)) - cp_filename = filename - return data_infos - - def get_ann_info(self, idx): - """Get OpenImages annotation by index. - - Args: - idx (int): Index of data. - - Returns: - dict: Annotation info of specified index. - """ - img_id = self.data_infos[idx]['img_id'] - bboxes = [] - labels = [] - bboxes_ignore = [] - labels_ignore = [] - is_occludeds = [] - is_truncateds = [] - is_group_ofs = [] - is_depictions = [] - is_insides = [] - for obj in self.ann_infos[img_id]: - label = int(obj['label']) - bbox = [ - float(obj['bbox'][0]), - float(obj['bbox'][1]), - float(obj['bbox'][2]), - float(obj['bbox'][3]) - ] - bboxes.append(bbox) - labels.append(label) - - # Other parameters - is_occludeds.append(obj['is_occluded']) - is_truncateds.append(obj['is_truncated']) - is_group_ofs.append(obj['is_group_of']) - is_depictions.append(obj['is_depiction']) - is_insides.append(obj['is_inside']) - if not bboxes: - bboxes = np.zeros((0, 4)) - labels = np.zeros((0, )) - else: - bboxes = np.array(bboxes) - labels = np.array(labels) - if not bboxes_ignore: - bboxes_ignore = np.zeros((0, 4)) - labels_ignore = np.zeros((0, )) - else: - bboxes_ignore = np.array(bboxes_ignore) - labels_ignore = np.array(labels_ignore) - - assert len(is_group_ofs) == len(labels) == len(bboxes) - gt_is_group_ofs = np.array(is_group_ofs, dtype=np.bool) - - # These parameters is not used yet. 
- is_occludeds = np.array(is_occludeds, dtype=np.bool) - is_truncateds = np.array(is_truncateds, dtype=np.bool) - is_depictions = np.array(is_depictions, dtype=np.bool) - is_insides = np.array(is_insides, dtype=np.bool) - - ann = dict( - bboxes=bboxes.astype(np.float32), - labels=labels.astype(np.int64), - bboxes_ignore=bboxes_ignore.astype(np.float32), - labels_ignore=labels_ignore.astype(np.int64), - gt_is_group_ofs=gt_is_group_ofs, - is_occludeds=is_occludeds, - is_truncateds=is_truncateds, - is_depictions=is_depictions, - is_insides=is_insides) - - return ann - - def get_meta_from_file(self, meta_file=''): - """Get image metas from pkl file.""" - metas = mmcv.load( - meta_file, - file_format='pkl', - file_client_args=self.file_client_args) - assert len(metas) == len(self) - for i in range(len(metas)): - file_name = osp.split(metas[i]['filename'])[-1] - img_info = self.data_infos[i].get('img_info', None) - if img_info is not None: - assert file_name == osp.split(img_info['filename'])[-1] - else: - assert file_name == self.data_infos[i]['filename'] - hw = metas[i]['ori_shape'][:2] - self.test_img_shapes.append(hw) - - def get_meta_from_pipeline(self, results): - """Get image metas from pipeline.""" - self.temp_img_metas.extend(results['img_metas']) - if dist.is_available() and self.world_size > 1: - from mmdet.apis.test import collect_results_cpu - - self.test_img_metas = collect_results_cpu(self.temp_img_metas, - len(self)) - else: - self.test_img_metas = self.temp_img_metas - - def get_img_shape(self, metas): - """Set images original shape into data_infos.""" - assert len(metas) == len(self) - for i in range(len(metas)): - file_name = osp.split(metas[i].data['ori_filename'])[-1] - img_info = self.data_infos[i].get('img_info', None) - if img_info is not None: - assert file_name == osp.split(img_info['filename'])[-1] - else: - assert file_name == self.data_infos[i]['filename'] - hw = metas[i].data['ori_shape'][:2] - self.test_img_shapes.append(hw) - - def prepare_test_img(self, idx): - """Get testing data after pipeline.""" - img_info = self.data_infos[idx] - results = dict(img_info=img_info) - if self.proposals is not None: - results['proposals'] = self.proposals[idx] - self.pre_pipeline(results) - results = self.pipeline(results) - if self.get_metas and self.load_from_pipeline: - self.get_meta_from_pipeline(results) - return results - - def _filter_imgs(self, min_size=32): - """Filter images too small.""" - if self.filter_empty_gt: - warnings.warn('OpenImageDatasets does not support ' - 'filtering empty gt images.') - valid_inds = [i for i in range(len(self))] - return valid_inds - - def _set_group_flag(self): - """Set flag according to image aspect ratio.""" - self.flag = np.zeros(len(self), dtype=np.uint8) - # TODO: set flag without width and height - - def get_relation_matrix(self, hierarchy_file): - """Get hierarchy for classes. - - Args: - hierarchy_file (sty): File path to the hierarchy for classes. - - Returns: - ndarray: The matrix of the corresponding relationship between - the parent class and the child class, of shape - (class_num, class_num). 
- """ - - if self.data_root is not None: - if not osp.isabs(hierarchy_file): - hierarchy_file = osp.join(self.data_root, hierarchy_file) - with open(hierarchy_file, 'r') as f: - hierarchy = json.load(f) - class_num = len(self.CLASSES) - class_label_tree = np.eye(class_num, class_num) - class_label_tree = self._convert_hierarchy_tree( - hierarchy, class_label_tree) - return class_label_tree - - def _convert_hierarchy_tree(self, - hierarchy_map, - class_label_tree, - parents=[], - get_all_parents=True): - """Get matrix of the corresponding relationship between the parent - class and the child class. - - Args: - hierarchy_map (dict): Including label name and corresponding - subcategory. Keys of dicts are: - - - `LabeName` (str): Name of the label. - - `Subcategory` (dict | list): Corresponding subcategory(ies). - class_label_tree (ndarray): The matrix of the corresponding - relationship between the parent class and the child class, - of shape (class_num, class_num). - parents (list): Corresponding parent class. - get_all_parents (bool): Whether get all parent names. - Default: True - - Returns: - ndarray: The matrix of the corresponding relationship between - the parent class and the child class, of shape - (class_num, class_num). - """ - - if 'Subcategory' in hierarchy_map: - for node in hierarchy_map['Subcategory']: - if 'LabelName' in node: - children_name = node['LabelName'] - children_index = self.index_dict[children_name] - children = [children_index] - else: - continue - if len(parents) > 0: - for parent_index in parents: - if get_all_parents: - children.append(parent_index) - class_label_tree[children_index, parent_index] = 1 - - class_label_tree = self._convert_hierarchy_tree( - node, class_label_tree, parents=children) - - return class_label_tree - - def add_supercategory_ann(self, annotations): - """Add parent classes of the corresponding class of the ground truth - bboxes.""" - for i, ann in enumerate(annotations): - assert len(ann['labels']) == len(ann['bboxes']) == \ - len(ann['gt_is_group_ofs']) - gt_bboxes = [] - gt_is_group_ofs = [] - gt_labels = [] - for j in range(len(ann['labels'])): - label = ann['labels'][j] - bbox = ann['bboxes'][j] - is_group = ann['gt_is_group_ofs'][j] - label = np.where(self.class_label_tree[label])[0] - if len(label) > 1: - for k in range(len(label)): - gt_bboxes.append(bbox) - gt_is_group_ofs.append(is_group) - gt_labels.append(label[k]) - else: - gt_bboxes.append(bbox) - gt_is_group_ofs.append(is_group) - gt_labels.append(label[0]) - annotations[i] = dict( - bboxes=np.array(gt_bboxes).astype(np.float32), - labels=np.array(gt_labels).astype(np.int64), - bboxes_ignore=ann['bboxes_ignore'], - gt_is_group_ofs=np.array(gt_is_group_ofs).astype(np.bool)) - - return annotations - - def process_results(self, det_results, annotations, - image_level_annotations): - """Process results of the corresponding class of the detection bboxes. - - Note: It will choose to do the following two processing according to - the parameters: - - 1. Whether to add parent classes of the corresponding class of the - detection bboxes. - - 2. Whether to ignore the classes that unannotated on that image. 
- """ - if image_level_annotations is not None: - assert len(annotations) == \ - len(image_level_annotations) == \ - len(det_results) - else: - assert len(annotations) == len(det_results) - for i in range(len(det_results)): - results = copy.deepcopy(det_results[i]) - valid_classes = np.where( - np.array([[bbox.shape[0]] for bbox in det_results[i]]) != 0)[0] - if image_level_annotations is not None: - labels = annotations[i]['labels'] - image_level_labels = \ - image_level_annotations[i]['image_level_labels'] - allowed_labeles = np.unique( - np.append(labels, image_level_labels)) - else: - allowed_labeles = np.unique(annotations[i]['labels']) - - for valid_class in valid_classes: - det_cls = np.where(self.class_label_tree[valid_class])[0] - for index in det_cls: - if index in allowed_labeles and \ - index != valid_class and \ - self.get_supercategory: - det_results[i][index] = \ - np.concatenate((det_results[i][index], - results[valid_class])) - elif index not in allowed_labeles and self.filter_labels: - # Remove useless parts - det_results[i][index] = np.empty( - (0, 5)).astype(np.float32) - return det_results - - def load_image_label_from_csv(self, image_level_ann_file): - """Load image level annotations from csv style ann_file. - - Args: - image_level_ann_file (str): CSV style image level annotation - file path. - - Returns: - defaultdict[list[dict]]: Annotations where item of the defaultdict - indicates an image, each of which has (n) dicts. - Keys of dicts are: - - - `image_level_label` (int): Label id. - - `confidence` (float): Labels that are human-verified to be - present in an image have confidence = 1 (positive labels). - Labels that are human-verified to be absent from an image - have confidence = 0 (negative labels). Machine-generated - labels have fractional confidences, generally >= 0.5. - The higher the confidence, the smaller the chance for - the label to be a false positive. - """ - - item_lists = defaultdict(list) - with open(image_level_ann_file, 'r') as f: - reader = csv.reader(f) - for i, line in enumerate(reader): - if i == 0: - continue - img_id = line[0] - item_lists[img_id].append( - dict( - image_level_label=int(self.index_dict[line[2]]), - confidence=float(line[3]))) - return item_lists - - def get_image_level_ann(self, image_level_ann_file): - """Get OpenImages annotation by index. - - Args: - image_level_ann_file (str): CSV style image level annotation - file path. - - Returns: - dict: Annotation info of specified index. 
- """ - - if hasattr(self.file_client, 'get_local_path'): - with self.file_client.get_local_path(image_level_ann_file) \ - as local_path: - item_lists = self.load_image_label_from_csv(local_path) - else: - item_lists = self.load_image_label_from_csv(image_level_ann_file) - image_level_annotations = [] - for i in range(len(self)): - img_info = self.data_infos[i].get('img_info', None) - if img_info is not None: - # for Open Images Challenges - img_id = osp.split(img_info['filename'])[-1][:-4] - else: - # for Open Images v6 - img_id = self.data_infos[i]['img_id'] - item_list = item_lists.get(img_id, None) - if item_list is not None: - image_level_labels = [] - confidences = [] - for obj in item_list: - image_level_label = int(obj['image_level_label']) - confidence = float(obj['confidence']) - - image_level_labels.append(image_level_label) - confidences.append(confidence) - - if not image_level_labels: - image_level_labels = np.zeros((0, )) - confidences = np.zeros((0, )) - else: - image_level_labels = np.array(image_level_labels) - confidences = np.array(confidences) - else: - image_level_labels = np.zeros((0, )) - confidences = np.zeros((0, )) - ann = dict( - image_level_labels=image_level_labels.astype(np.int64), - confidences=confidences.astype(np.float32)) - image_level_annotations.append(ann) - - return image_level_annotations - - def denormalize_gt_bboxes(self, annotations): - """Convert ground truth bboxes from relative position to absolute - position. - - Only used in evaluating time. - """ - assert len(self.test_img_shapes) == len(annotations) - for i in range(len(annotations)): - h, w = self.test_img_shapes[i] - annotations[i]['bboxes'][:, 0::2] *= w - annotations[i]['bboxes'][:, 1::2] *= h - return annotations - - def get_cat_ids(self, idx): - """Get category ids by index. - - Args: - idx (int): Index of data. - - Returns: - list[int]: All categories in the image of specified index. - """ - return self.get_ann_info(idx)['labels'].astype(np.int).tolist() - - def evaluate(self, - results, - metric='mAP', - logger=None, - iou_thr=0.5, - ioa_thr=0.5, - scale_ranges=None, - denorm_gt_bbox=True, - use_group_of=True): - """Evaluate in OpenImages. - - Args: - results (list[list | tuple]): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. Option is - 'mAP'. Default: 'mAP'. - logger (logging.Logger | str, optional): Logger used for printing - related information during evaluation. Default: None. - iou_thr (float | list[float]): IoU threshold. Default: 0.5. - ioa_thr (float | list[float]): IoA threshold. Default: 0.5. - scale_ranges (list[tuple], optional): Scale ranges for evaluating - mAP. If not specified, all bounding boxes would be included in - evaluation. Default: None - denorm_gt_bbox (bool): Whether to denorm ground truth bboxes from - relative position to absolute position. Default: True - use_group_of (bool): Whether consider group of groud truth bboxes - during evaluating. Default: True. - - Returns: - dict[str, float]: AP metrics. 
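-
- Example (illustrative sketch; ``results`` would come from running the
- test pipeline, e.g. via ``single_gpu_test``, which also collects the
- image metas this method relies on):
-
- >>> eval_results = dataset.evaluate(results, metric='mAP',
- ... iou_thr=0.5, ioa_thr=0.5)
- >>> sorted(eval_results.keys())
- ['AP50', 'mAP']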
- """ - - if not isinstance(metric, str): - assert len(metric) == 1 - metric = metric[0] - allowed_metrics = ['mAP'] - if metric not in allowed_metrics: - raise KeyError(f'metric {metric} is not supported') - annotations = [self.get_ann_info(i) for i in range(len(self))] - - if self.load_image_level_labels: - image_level_annotations = \ - self.get_image_level_ann(self.image_level_ann_file) - else: - image_level_annotations = None - - # load metas from file - if self.get_metas and self.load_from_file: - assert self.meta_file.endswith( - 'pkl'), 'File name must be pkl suffix' - self.get_meta_from_file(self.meta_file) - # load metas from pipeline - else: - self.get_img_shape(self.test_img_metas) - - if len(self.test_img_shapes) > len(self): - self.test_img_shapes = self.test_img_shapes[:len(self)] - - if denorm_gt_bbox: - annotations = self.denormalize_gt_bboxes(annotations) - - # Reset test_image_metas, temp_image_metas and test_img_shapes - # to avoid potential error - self.temp_img_metas = [] - self.test_img_shapes = [] - self.test_img_metas = [] - if self.get_supercategory: - annotations = self.add_supercategory_ann(annotations) - - results = self.process_results(results, annotations, - image_level_annotations) - if use_group_of: - assert ioa_thr is not None, \ - 'ioa_thr must have value when using group_of in evaluation.' - - eval_results = OrderedDict() - iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr - ioa_thrs = [ioa_thr] if isinstance(ioa_thr, float) or ioa_thr is None \ - else ioa_thr - - # get dataset type - if len(self.CLASSES) == 500: - ds_name = 'oid_challenge' - elif len(self.CLASSES) == 601: - ds_name = 'oid_v6' - else: - ds_name = self.CLASSES - warnings.warn('Cannot infer dataset type from the length of the ' - 'classes. Set `oid_v6` as dataset type.') - - if metric == 'mAP': - assert isinstance(iou_thrs, list) and isinstance(ioa_thrs, list) - assert len(ioa_thrs) == len(iou_thrs) - mean_aps = [] - for iou_thr, ioa_thr in zip(iou_thrs, ioa_thrs): - print_log(f'\n{"-" * 15}iou_thr, ioa_thr: {iou_thr}, {ioa_thr}' - f'{"-" * 15}') - mean_ap, _ = eval_map( - results, - annotations, - scale_ranges=scale_ranges, - iou_thr=iou_thr, - ioa_thr=ioa_thr, - dataset=ds_name, - logger=logger, - use_group_of=use_group_of) - mean_aps.append(mean_ap) - eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3) - eval_results['mAP'] = sum(mean_aps) / len(mean_aps) - return eval_results - - -@DATASETS.register_module() -class OpenImagesChallengeDataset(OpenImagesDataset): - """Open Images Challenge dataset for detection.""" - - def __init__(self, ann_file, **kwargs): - assert ann_file.endswith('txt') - super(OpenImagesChallengeDataset, self).__init__( - ann_file=ann_file, **kwargs) - - def get_classes_from_csv(self, label_file): - """Get classes name from file. - - Args: - label_file (str): File path of the label description file that - maps the classes names in MID format to their short - descriptions. - - Returns: - list: Class name of OpenImages. 
- """ - - label_list = [] - id_list = [] - with open(label_file, 'r') as f: - reader = csv.reader(f) - for line in reader: - label_name = line[0] - label_id = int(line[2]) - - label_list.append(line[1]) - id_list.append(label_id) - self.index_dict[label_name] = label_id - 1 - - indexes = np.argsort(id_list) - classes_names = [] - for index in indexes: - classes_names.append(label_list[index]) - return classes_names - - def load_annotations(self, ann_file): - """Load annotation from annotation file.""" - with open(ann_file) as f: - lines = f.readlines() - i = 0 - ann_infos = [] - while i < len(lines): - bboxes = [] - labels = [] - is_group_ofs = [] - filename = lines[i].rstrip() - i += 2 - img_gt_size = int(lines[i]) - i += 1 - for j in range(img_gt_size): - sp = lines[i + j].split() - bboxes.append( - [float(sp[1]), - float(sp[2]), - float(sp[3]), - float(sp[4])]) - labels.append(int(sp[0]) - 1) # labels begin from 1 - is_group_ofs.append(True if int(sp[5]) == 1 else False) - i += img_gt_size - - gt_bboxes = np.array(bboxes, dtype=np.float32) - gt_labels = np.array(labels, dtype=np.int64) - gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) - gt_is_group_ofs = np.array(is_group_ofs, dtype=np.bool) - - img_info = dict(filename=filename) - ann_info = dict( - bboxes=gt_bboxes, - labels=gt_labels, - bboxes_ignore=gt_bboxes_ignore, - gt_is_group_ofs=gt_is_group_ofs) - ann_infos.append(dict(img_info=img_info, ann_info=ann_info)) - - return ann_infos - - def prepare_train_img(self, idx): - """Get training data and annotations after pipeline.""" - ann_info = self.data_infos[idx] - results = dict( - img_info=ann_info['img_info'], - ann_info=ann_info['ann_info'], - ) - if self.proposals is not None: - results['proposals'] = self.proposals[idx] - self.pre_pipeline(results) - return self.pipeline(results) - - def prepare_test_img(self, idx): - """Get testing data after pipeline.""" - ann_info = self.data_infos[idx] - results = dict(img_info=ann_info['img_info']) - if self.proposals is not None: - results['proposals'] = self.proposals[idx] - self.pre_pipeline(results) - - results = self.pipeline(results) - if self.get_metas and self.load_from_pipeline: - self.get_meta_from_pipeline(results) - return results - - def get_relation_matrix(self, hierarchy_file): - """Get hierarchy for classes. - - Args: - hierarchy_file (str): File path to the hierarchy for classes. - - Returns: - ndarray: The matrix of the corresponding - relationship between the parent class and the child class, - of shape (class_num, class_num). - """ - class_label_tree = np.load(hierarchy_file, allow_pickle=True) - return class_label_tree[1:, 1:] - - def get_ann_info(self, idx): - """Get OpenImages annotation by index. - - Args: - idx (int): Index of data. - - Returns: - dict: Annotation info of specified index. - """ - # avoid some potential error - data_infos = copy.deepcopy(self.data_infos[idx]['ann_info']) - return data_infos - - def load_image_label_from_csv(self, image_level_ann_file): - """Load image level annotations from csv style ann_file. - - Args: - image_level_ann_file (str): CSV style image level annotation - file path. - - Returns: - defaultdict[list[dict]]: Annotations where item of the defaultdict - indicates an image, each of which has (n) dicts. - Keys of dicts are: - - - `image_level_label` (int): of shape 1. - - `confidence` (float): of shape 1. 
- """ - - item_lists = defaultdict(list) - with open(image_level_ann_file, 'r') as f: - reader = csv.reader(f) - i = -1 - for line in reader: - i += 1 - if i == 0: - continue - else: - img_id = line[0] - label_id = line[1] - assert label_id in self.index_dict - image_level_label = int(self.index_dict[label_id]) - confidence = float(line[2]) - item_lists[img_id].append( - dict( - image_level_label=image_level_label, - confidence=confidence)) - return item_lists diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/__init__.py b/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/__init__.py deleted file mode 100644 index 8260da642682e3ea509c544170b0b4d1f5f23199..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform, - ContrastTransform, EqualizeTransform, Rotate, Shear, - Translate) -from .compose import Compose -from .formatting import (Collect, DefaultFormatBundle, ImageToTensor, - ToDataContainer, ToTensor, Transpose, to_tensor) -from .instaboost import InstaBoost -from .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromFile, - LoadImageFromWebcam, LoadMultiChannelImageFromFiles, - LoadPanopticAnnotations, LoadProposals) -from .test_time_aug import MultiScaleFlipAug -from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop, - MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion, - RandomAffine, RandomCenterCropPad, RandomCrop, - RandomFlip, RandomShift, Resize, SegRescale, - YOLOXHSVRandomAug) - -__all__ = [ - 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', - 'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations', - 'LoadImageFromFile', 'LoadImageFromWebcam', 'LoadPanopticAnnotations', - 'LoadMultiChannelImageFromFiles', 'LoadProposals', 'FilterAnnotations', - 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', - 'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand', - 'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad', - 'AutoAugment', 'CutOut', 'Shear', 'Rotate', 'ColorTransform', - 'EqualizeTransform', 'BrightnessTransform', 'ContrastTransform', - 'Translate', 'RandomShift', 'Mosaic', 'MixUp', 'RandomAffine', - 'YOLOXHSVRandomAug', 'CopyPaste' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/auto_augment.py b/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/auto_augment.py deleted file mode 100644 index b0ff67dbdd99c1889c424b59a9f0f12cfb216ba4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/auto_augment.py +++ /dev/null @@ -1,894 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
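The transforms re-exported by the `__init__.py` above are all registered in mmdet's `PIPELINES` registry and are normally assembled from config dicts rather than instantiated directly. A minimal, hypothetical training pipeline built from a few of them might look like the sketch below (the scale, flip ratio and normalization values are illustrative, not taken from this repository); `Compose`, defined later in this diff, is what turns such a list into callable transform objects.

```python
# Hypothetical mmdet-style training pipeline using the transforms exported
# above; every numeric value is illustrative.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
```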
-import copy - -import cv2 -import mmcv -import numpy as np - -from ..builder import PIPELINES -from .compose import Compose - -_MAX_LEVEL = 10 - - -def level_to_value(level, max_value): - """Map from level to values based on max_value.""" - return (level / _MAX_LEVEL) * max_value - - -def enhance_level_to_value(level, a=1.8, b=0.1): - """Map from level to values.""" - return (level / _MAX_LEVEL) * a + b - - -def random_negative(value, random_negative_prob): - """Randomly negate value based on random_negative_prob.""" - return -value if np.random.rand() < random_negative_prob else value - - -def bbox2fields(): - """The key correspondence from bboxes to labels, masks and - segmentations.""" - bbox2label = { - 'gt_bboxes': 'gt_labels', - 'gt_bboxes_ignore': 'gt_labels_ignore' - } - bbox2mask = { - 'gt_bboxes': 'gt_masks', - 'gt_bboxes_ignore': 'gt_masks_ignore' - } - bbox2seg = { - 'gt_bboxes': 'gt_semantic_seg', - } - return bbox2label, bbox2mask, bbox2seg - - -@PIPELINES.register_module() -class AutoAugment: - """Auto augmentation. - - This data augmentation is proposed in `Learning Data Augmentation - Strategies for Object Detection `_. - - TODO: Implement 'Shear', 'Sharpness' and 'Rotate' transforms - - Args: - policies (list[list[dict]]): The policies of auto augmentation. Each - policy in ``policies`` is a specific augmentation policy, and is - composed by several augmentations (dict). When AutoAugment is - called, a random policy in ``policies`` will be selected to - augment images. - - Examples: - >>> replace = (104, 116, 124) - >>> policies = [ - >>> [ - >>> dict(type='Sharpness', prob=0.0, level=8), - >>> dict( - >>> type='Shear', - >>> prob=0.4, - >>> level=0, - >>> replace=replace, - >>> axis='x') - >>> ], - >>> [ - >>> dict( - >>> type='Rotate', - >>> prob=0.6, - >>> level=10, - >>> replace=replace), - >>> dict(type='Color', prob=1.0, level=6) - >>> ] - >>> ] - >>> augmentation = AutoAugment(policies) - >>> img = np.ones(100, 100, 3) - >>> gt_bboxes = np.ones(10, 4) - >>> results = dict(img=img, gt_bboxes=gt_bboxes) - >>> results = augmentation(results) - """ - - def __init__(self, policies): - assert isinstance(policies, list) and len(policies) > 0, \ - 'Policies must be a non-empty list.' - for policy in policies: - assert isinstance(policy, list) and len(policy) > 0, \ - 'Each policy in policies must be a non-empty list.' - for augment in policy: - assert isinstance(augment, dict) and 'type' in augment, \ - 'Each specific augmentation must be a dict with key' \ - ' "type".' - - self.policies = copy.deepcopy(policies) - self.transforms = [Compose(policy) for policy in self.policies] - - def __call__(self, results): - transform = np.random.choice(self.transforms) - return transform(results) - - def __repr__(self): - return f'{self.__class__.__name__}(policies={self.policies})' - - -@PIPELINES.register_module() -class Shear: - """Apply Shear Transformation to image (and its corresponding bbox, mask, - segmentation). - - Args: - level (int | float): The level should be in range [0,_MAX_LEVEL]. - img_fill_val (int | float | tuple): The filled values for image border. - If float, the same fill value will be used for all the three - channels of image. If tuple, the should be 3 elements. - seg_ignore_label (int): The fill value used for segmentation map. - Note this value must equals ``ignore_label`` in ``semantic_head`` - of the corresponding config. Default 255. - prob (float): The probability for performing Shear and should be in - range [0, 1]. 
- direction (str): The direction for shear, either "horizontal" - or "vertical". - max_shear_magnitude (float): The maximum magnitude for Shear - transformation. - random_negative_prob (float): The probability that turns the - offset negative. Should be in range [0,1] - interpolation (str): Same as in :func:`mmcv.imshear`. - """ - - def __init__(self, - level, - img_fill_val=128, - seg_ignore_label=255, - prob=0.5, - direction='horizontal', - max_shear_magnitude=0.3, - random_negative_prob=0.5, - interpolation='bilinear'): - assert isinstance(level, (int, float)), 'The level must be type ' \ - f'int or float, got {type(level)}.' - assert 0 <= level <= _MAX_LEVEL, 'The level should be in range ' \ - f'[0,{_MAX_LEVEL}], got {level}.' - if isinstance(img_fill_val, (float, int)): - img_fill_val = tuple([float(img_fill_val)] * 3) - elif isinstance(img_fill_val, tuple): - assert len(img_fill_val) == 3, 'img_fill_val as tuple must ' \ - f'have 3 elements. got {len(img_fill_val)}.' - img_fill_val = tuple([float(val) for val in img_fill_val]) - else: - raise ValueError( - 'img_fill_val must be float or tuple with 3 elements.') - assert np.all([0 <= val <= 255 for val in img_fill_val]), 'all ' \ - 'elements of img_fill_val should between range [0,255].' \ - f'got {img_fill_val}.' - assert 0 <= prob <= 1.0, 'The probability of shear should be in ' \ - f'range [0,1]. got {prob}.' - assert direction in ('horizontal', 'vertical'), 'direction must ' \ - f'in be either "horizontal" or "vertical". got {direction}.' - assert isinstance(max_shear_magnitude, float), 'max_shear_magnitude ' \ - f'should be type float. got {type(max_shear_magnitude)}.' - assert 0. <= max_shear_magnitude <= 1., 'Defaultly ' \ - 'max_shear_magnitude should be in range [0,1]. ' \ - f'got {max_shear_magnitude}.' - self.level = level - self.magnitude = level_to_value(level, max_shear_magnitude) - self.img_fill_val = img_fill_val - self.seg_ignore_label = seg_ignore_label - self.prob = prob - self.direction = direction - self.max_shear_magnitude = max_shear_magnitude - self.random_negative_prob = random_negative_prob - self.interpolation = interpolation - - def _shear_img(self, - results, - magnitude, - direction='horizontal', - interpolation='bilinear'): - """Shear the image. - - Args: - results (dict): Result dict from loading pipeline. - magnitude (int | float): The magnitude used for shear. - direction (str): The direction for shear, either "horizontal" - or "vertical". - interpolation (str): Same as in :func:`mmcv.imshear`. 
- """ - for key in results.get('img_fields', ['img']): - img = results[key] - img_sheared = mmcv.imshear( - img, - magnitude, - direction, - border_value=self.img_fill_val, - interpolation=interpolation) - results[key] = img_sheared.astype(img.dtype) - results['img_shape'] = results[key].shape - - def _shear_bboxes(self, results, magnitude): - """Shear the bboxes.""" - h, w, c = results['img_shape'] - if self.direction == 'horizontal': - shear_matrix = np.stack([[1, magnitude], - [0, 1]]).astype(np.float32) # [2, 2] - else: - shear_matrix = np.stack([[1, 0], [magnitude, - 1]]).astype(np.float32) - for key in results.get('bbox_fields', []): - min_x, min_y, max_x, max_y = np.split( - results[key], results[key].shape[-1], axis=-1) - coordinates = np.stack([[min_x, min_y], [max_x, min_y], - [min_x, max_y], - [max_x, max_y]]) # [4, 2, nb_box, 1] - coordinates = coordinates[..., 0].transpose( - (2, 1, 0)).astype(np.float32) # [nb_box, 2, 4] - new_coords = np.matmul(shear_matrix[None, :, :], - coordinates) # [nb_box, 2, 4] - min_x = np.min(new_coords[:, 0, :], axis=-1) - min_y = np.min(new_coords[:, 1, :], axis=-1) - max_x = np.max(new_coords[:, 0, :], axis=-1) - max_y = np.max(new_coords[:, 1, :], axis=-1) - min_x = np.clip(min_x, a_min=0, a_max=w) - min_y = np.clip(min_y, a_min=0, a_max=h) - max_x = np.clip(max_x, a_min=min_x, a_max=w) - max_y = np.clip(max_y, a_min=min_y, a_max=h) - results[key] = np.stack([min_x, min_y, max_x, max_y], - axis=-1).astype(results[key].dtype) - - def _shear_masks(self, - results, - magnitude, - direction='horizontal', - fill_val=0, - interpolation='bilinear'): - """Shear the masks.""" - h, w, c = results['img_shape'] - for key in results.get('mask_fields', []): - masks = results[key] - results[key] = masks.shear((h, w), - magnitude, - direction, - border_value=fill_val, - interpolation=interpolation) - - def _shear_seg(self, - results, - magnitude, - direction='horizontal', - fill_val=255, - interpolation='bilinear'): - """Shear the segmentation maps.""" - for key in results.get('seg_fields', []): - seg = results[key] - results[key] = mmcv.imshear( - seg, - magnitude, - direction, - border_value=fill_val, - interpolation=interpolation).astype(seg.dtype) - - def _filter_invalid(self, results, min_bbox_size=0): - """Filter bboxes and corresponding masks too small after shear - augmentation.""" - bbox2label, bbox2mask, _ = bbox2fields() - for key in results.get('bbox_fields', []): - bbox_w = results[key][:, 2] - results[key][:, 0] - bbox_h = results[key][:, 3] - results[key][:, 1] - valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size) - valid_inds = np.nonzero(valid_inds)[0] - results[key] = results[key][valid_inds] - # label fields. e.g. gt_labels and gt_labels_ignore - label_key = bbox2label.get(key) - if label_key in results: - results[label_key] = results[label_key][valid_inds] - # mask fields, e.g. gt_masks and gt_masks_ignore - mask_key = bbox2mask.get(key) - if mask_key in results: - results[mask_key] = results[mask_key][valid_inds] - - def __call__(self, results): - """Call function to shear images, bounding boxes, masks and semantic - segmentation maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Sheared results. - """ - if np.random.rand() > self.prob: - return results - magnitude = random_negative(self.magnitude, self.random_negative_prob) - self._shear_img(results, magnitude, self.direction, self.interpolation) - self._shear_bboxes(results, magnitude) - # fill_val set to 0 for background of mask. 
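- # segmentation maps, by contrast, are filled with ``seg_ignore_label``
- # below, so pixels sheared in from outside the image are ignored by the
- # semantic head rather than treated as background.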
- self._shear_masks( - results, - magnitude, - self.direction, - fill_val=0, - interpolation=self.interpolation) - self._shear_seg( - results, - magnitude, - self.direction, - fill_val=self.seg_ignore_label, - interpolation=self.interpolation) - self._filter_invalid(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(level={self.level}, ' - repr_str += f'img_fill_val={self.img_fill_val}, ' - repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' - repr_str += f'prob={self.prob}, ' - repr_str += f'direction={self.direction}, ' - repr_str += f'max_shear_magnitude={self.max_shear_magnitude}, ' - repr_str += f'random_negative_prob={self.random_negative_prob}, ' - repr_str += f'interpolation={self.interpolation})' - return repr_str - - -@PIPELINES.register_module() -class Rotate: - """Apply Rotate Transformation to image (and its corresponding bbox, mask, - segmentation). - - Args: - level (int | float): The level should be in range (0,_MAX_LEVEL]. - scale (int | float): Isotropic scale factor. Same in - ``mmcv.imrotate``. - center (int | float | tuple[float]): Center point (w, h) of the - rotation in the source image. If None, the center of the - image will be used. Same in ``mmcv.imrotate``. - img_fill_val (int | float | tuple): The fill value for image border. - If float, the same value will be used for all the three - channels of image. If tuple, the should be 3 elements (e.g. - equals the number of channels for image). - seg_ignore_label (int): The fill value used for segmentation map. - Note this value must equals ``ignore_label`` in ``semantic_head`` - of the corresponding config. Default 255. - prob (float): The probability for perform transformation and - should be in range 0 to 1. - max_rotate_angle (int | float): The maximum angles for rotate - transformation. - random_negative_prob (float): The probability that turns the - offset negative. - """ - - def __init__(self, - level, - scale=1, - center=None, - img_fill_val=128, - seg_ignore_label=255, - prob=0.5, - max_rotate_angle=30, - random_negative_prob=0.5): - assert isinstance(level, (int, float)), \ - f'The level must be type int or float. got {type(level)}.' - assert 0 <= level <= _MAX_LEVEL, \ - f'The level should be in range (0,{_MAX_LEVEL}]. got {level}.' - assert isinstance(scale, (int, float)), \ - f'The scale must be type int or float. got type {type(scale)}.' - if isinstance(center, (int, float)): - center = (center, center) - elif isinstance(center, tuple): - assert len(center) == 2, 'center with type tuple must have '\ - f'2 elements. got {len(center)} elements.' - else: - assert center is None, 'center must be None or type int, '\ - f'float or tuple, got type {type(center)}.' - if isinstance(img_fill_val, (float, int)): - img_fill_val = tuple([float(img_fill_val)] * 3) - elif isinstance(img_fill_val, tuple): - assert len(img_fill_val) == 3, 'img_fill_val as tuple must '\ - f'have 3 elements. got {len(img_fill_val)}.' - img_fill_val = tuple([float(val) for val in img_fill_val]) - else: - raise ValueError( - 'img_fill_val must be float or tuple with 3 elements.') - assert np.all([0 <= val <= 255 for val in img_fill_val]), \ - 'all elements of img_fill_val should between range [0,255]. '\ - f'got {img_fill_val}.' - assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. '\ - f'got {prob}.' - assert isinstance(max_rotate_angle, (int, float)), 'max_rotate_angle '\ - f'should be type int or float. got type {type(max_rotate_angle)}.' 
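- # ``level`` is mapped linearly onto [0, max_rotate_angle]; e.g. with the
- # default max_rotate_angle=30, level=5 yields a 15 degree rotation, and
- # the sign is flipped with probability ``random_negative_prob`` at call
- # time.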
- self.level = level - self.scale = scale - # Rotation angle in degrees. Positive values mean - # clockwise rotation. - self.angle = level_to_value(level, max_rotate_angle) - self.center = center - self.img_fill_val = img_fill_val - self.seg_ignore_label = seg_ignore_label - self.prob = prob - self.max_rotate_angle = max_rotate_angle - self.random_negative_prob = random_negative_prob - - def _rotate_img(self, results, angle, center=None, scale=1.0): - """Rotate the image. - - Args: - results (dict): Result dict from loading pipeline. - angle (float): Rotation angle in degrees, positive values - mean clockwise rotation. Same in ``mmcv.imrotate``. - center (tuple[float], optional): Center point (w, h) of the - rotation. Same in ``mmcv.imrotate``. - scale (int | float): Isotropic scale factor. Same in - ``mmcv.imrotate``. - """ - for key in results.get('img_fields', ['img']): - img = results[key].copy() - img_rotated = mmcv.imrotate( - img, angle, center, scale, border_value=self.img_fill_val) - results[key] = img_rotated.astype(img.dtype) - results['img_shape'] = results[key].shape - - def _rotate_bboxes(self, results, rotate_matrix): - """Rotate the bboxes.""" - h, w, c = results['img_shape'] - for key in results.get('bbox_fields', []): - min_x, min_y, max_x, max_y = np.split( - results[key], results[key].shape[-1], axis=-1) - coordinates = np.stack([[min_x, min_y], [max_x, min_y], - [min_x, max_y], - [max_x, max_y]]) # [4, 2, nb_bbox, 1] - # pad 1 to convert from format [x, y] to homogeneous - # coordinates format [x, y, 1] - coordinates = np.concatenate( - (coordinates, - np.ones((4, 1, coordinates.shape[2], 1), coordinates.dtype)), - axis=1) # [4, 3, nb_bbox, 1] - coordinates = coordinates.transpose( - (2, 0, 1, 3)) # [nb_bbox, 4, 3, 1] - rotated_coords = np.matmul(rotate_matrix, - coordinates) # [nb_bbox, 4, 2, 1] - rotated_coords = rotated_coords[..., 0] # [nb_bbox, 4, 2] - min_x, min_y = np.min( - rotated_coords[:, :, 0], axis=1), np.min( - rotated_coords[:, :, 1], axis=1) - max_x, max_y = np.max( - rotated_coords[:, :, 0], axis=1), np.max( - rotated_coords[:, :, 1], axis=1) - min_x, min_y = np.clip( - min_x, a_min=0, a_max=w), np.clip( - min_y, a_min=0, a_max=h) - max_x, max_y = np.clip( - max_x, a_min=min_x, a_max=w), np.clip( - max_y, a_min=min_y, a_max=h) - results[key] = np.stack([min_x, min_y, max_x, max_y], - axis=-1).astype(results[key].dtype) - - def _rotate_masks(self, - results, - angle, - center=None, - scale=1.0, - fill_val=0): - """Rotate the masks.""" - h, w, c = results['img_shape'] - for key in results.get('mask_fields', []): - masks = results[key] - results[key] = masks.rotate((h, w), angle, center, scale, fill_val) - - def _rotate_seg(self, - results, - angle, - center=None, - scale=1.0, - fill_val=255): - """Rotate the segmentation map.""" - for key in results.get('seg_fields', []): - seg = results[key].copy() - results[key] = mmcv.imrotate( - seg, angle, center, scale, - border_value=fill_val).astype(seg.dtype) - - def _filter_invalid(self, results, min_bbox_size=0): - """Filter bboxes and corresponding masks too small after rotate - augmentation.""" - bbox2label, bbox2mask, _ = bbox2fields() - for key in results.get('bbox_fields', []): - bbox_w = results[key][:, 2] - results[key][:, 0] - bbox_h = results[key][:, 3] - results[key][:, 1] - valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size) - valid_inds = np.nonzero(valid_inds)[0] - results[key] = results[key][valid_inds] - # label fields. e.g. 
gt_labels and gt_labels_ignore - label_key = bbox2label.get(key) - if label_key in results: - results[label_key] = results[label_key][valid_inds] - # mask fields, e.g. gt_masks and gt_masks_ignore - mask_key = bbox2mask.get(key) - if mask_key in results: - results[mask_key] = results[mask_key][valid_inds] - - def __call__(self, results): - """Call function to rotate images, bounding boxes, masks and semantic - segmentation maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Rotated results. - """ - if np.random.rand() > self.prob: - return results - h, w = results['img'].shape[:2] - center = self.center - if center is None: - center = ((w - 1) * 0.5, (h - 1) * 0.5) - angle = random_negative(self.angle, self.random_negative_prob) - self._rotate_img(results, angle, center, self.scale) - rotate_matrix = cv2.getRotationMatrix2D(center, -angle, self.scale) - self._rotate_bboxes(results, rotate_matrix) - self._rotate_masks(results, angle, center, self.scale, fill_val=0) - self._rotate_seg( - results, angle, center, self.scale, fill_val=self.seg_ignore_label) - self._filter_invalid(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(level={self.level}, ' - repr_str += f'scale={self.scale}, ' - repr_str += f'center={self.center}, ' - repr_str += f'img_fill_val={self.img_fill_val}, ' - repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' - repr_str += f'prob={self.prob}, ' - repr_str += f'max_rotate_angle={self.max_rotate_angle}, ' - repr_str += f'random_negative_prob={self.random_negative_prob})' - return repr_str - - -@PIPELINES.register_module() -class Translate: - """Translate the images, bboxes, masks and segmentation maps horizontally - or vertically. - - Args: - level (int | float): The level for Translate and should be in - range [0,_MAX_LEVEL]. - prob (float): The probability for performing translation and - should be in range [0, 1]. - img_fill_val (int | float | tuple): The filled value for image - border. If float, the same fill value will be used for all - the three channels of image. If tuple, the should be 3 - elements (e.g. equals the number of channels for image). - seg_ignore_label (int): The fill value used for segmentation map. - Note this value must equals ``ignore_label`` in ``semantic_head`` - of the corresponding config. Default 255. - direction (str): The translate direction, either "horizontal" - or "vertical". - max_translate_offset (int | float): The maximum pixel's offset for - Translate. - random_negative_prob (float): The probability that turns the - offset negative. - min_size (int | float): The minimum pixel for filtering - invalid bboxes after the translation. - """ - - def __init__(self, - level, - prob=0.5, - img_fill_val=128, - seg_ignore_label=255, - direction='horizontal', - max_translate_offset=250., - random_negative_prob=0.5, - min_size=0): - assert isinstance(level, (int, float)), \ - 'The level must be type int or float.' - assert 0 <= level <= _MAX_LEVEL, \ - 'The level used for calculating Translate\'s offset should be ' \ - 'in range [0,_MAX_LEVEL]' - assert 0 <= prob <= 1.0, \ - 'The probability of translation should be in range [0, 1].' - if isinstance(img_fill_val, (float, int)): - img_fill_val = tuple([float(img_fill_val)] * 3) - elif isinstance(img_fill_val, tuple): - assert len(img_fill_val) == 3, \ - 'img_fill_val as tuple must have 3 elements.' 
- img_fill_val = tuple([float(val) for val in img_fill_val]) - else: - raise ValueError('img_fill_val must be type float or tuple.') - assert np.all([0 <= val <= 255 for val in img_fill_val]), \ - 'all elements of img_fill_val should between range [0,255].' - assert direction in ('horizontal', 'vertical'), \ - 'direction should be "horizontal" or "vertical".' - assert isinstance(max_translate_offset, (int, float)), \ - 'The max_translate_offset must be type int or float.' - # the offset used for translation - self.offset = int(level_to_value(level, max_translate_offset)) - self.level = level - self.prob = prob - self.img_fill_val = img_fill_val - self.seg_ignore_label = seg_ignore_label - self.direction = direction - self.max_translate_offset = max_translate_offset - self.random_negative_prob = random_negative_prob - self.min_size = min_size - - def _translate_img(self, results, offset, direction='horizontal'): - """Translate the image. - - Args: - results (dict): Result dict from loading pipeline. - offset (int | float): The offset for translate. - direction (str): The translate direction, either "horizontal" - or "vertical". - """ - for key in results.get('img_fields', ['img']): - img = results[key].copy() - results[key] = mmcv.imtranslate( - img, offset, direction, self.img_fill_val).astype(img.dtype) - results['img_shape'] = results[key].shape - - def _translate_bboxes(self, results, offset): - """Shift bboxes horizontally or vertically, according to offset.""" - h, w, c = results['img_shape'] - for key in results.get('bbox_fields', []): - min_x, min_y, max_x, max_y = np.split( - results[key], results[key].shape[-1], axis=-1) - if self.direction == 'horizontal': - min_x = np.maximum(0, min_x + offset) - max_x = np.minimum(w, max_x + offset) - elif self.direction == 'vertical': - min_y = np.maximum(0, min_y + offset) - max_y = np.minimum(h, max_y + offset) - - # the boxes translated outside of image will be filtered along with - # the corresponding masks, by invoking ``_filter_invalid``. - results[key] = np.concatenate([min_x, min_y, max_x, max_y], - axis=-1) - - def _translate_masks(self, - results, - offset, - direction='horizontal', - fill_val=0): - """Translate masks horizontally or vertically.""" - h, w, c = results['img_shape'] - for key in results.get('mask_fields', []): - masks = results[key] - results[key] = masks.translate((h, w), offset, direction, fill_val) - - def _translate_seg(self, - results, - offset, - direction='horizontal', - fill_val=255): - """Translate segmentation maps horizontally or vertically.""" - for key in results.get('seg_fields', []): - seg = results[key].copy() - results[key] = mmcv.imtranslate(seg, offset, direction, - fill_val).astype(seg.dtype) - - def _filter_invalid(self, results, min_size=0): - """Filter bboxes and masks too small or translated out of image.""" - bbox2label, bbox2mask, _ = bbox2fields() - for key in results.get('bbox_fields', []): - bbox_w = results[key][:, 2] - results[key][:, 0] - bbox_h = results[key][:, 3] - results[key][:, 1] - valid_inds = (bbox_w > min_size) & (bbox_h > min_size) - valid_inds = np.nonzero(valid_inds)[0] - results[key] = results[key][valid_inds] - # label fields. e.g. gt_labels and gt_labels_ignore - label_key = bbox2label.get(key) - if label_key in results: - results[label_key] = results[label_key][valid_inds] - # mask fields, e.g. 
gt_masks and gt_masks_ignore - mask_key = bbox2mask.get(key) - if mask_key in results: - results[mask_key] = results[mask_key][valid_inds] - return results - - def __call__(self, results): - """Call function to translate images, bounding boxes, masks and - semantic segmentation maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Translated results. - """ - if np.random.rand() > self.prob: - return results - offset = random_negative(self.offset, self.random_negative_prob) - self._translate_img(results, offset, self.direction) - self._translate_bboxes(results, offset) - # fill_val defaultly 0 for BitmapMasks and None for PolygonMasks. - self._translate_masks(results, offset, self.direction) - # fill_val set to ``seg_ignore_label`` for the ignored value - # of segmentation map. - self._translate_seg( - results, offset, self.direction, fill_val=self.seg_ignore_label) - self._filter_invalid(results, min_size=self.min_size) - return results - - -@PIPELINES.register_module() -class ColorTransform: - """Apply Color transformation to image. The bboxes, masks, and - segmentations are not modified. - - Args: - level (int | float): Should be in range [0,_MAX_LEVEL]. - prob (float): The probability for performing Color transformation. - """ - - def __init__(self, level, prob=0.5): - assert isinstance(level, (int, float)), \ - 'The level must be type int or float.' - assert 0 <= level <= _MAX_LEVEL, \ - 'The level should be in range [0,_MAX_LEVEL].' - assert 0 <= prob <= 1.0, \ - 'The probability should be in range [0,1].' - self.level = level - self.prob = prob - self.factor = enhance_level_to_value(level) - - def _adjust_color_img(self, results, factor=1.0): - """Apply Color transformation to image.""" - for key in results.get('img_fields', ['img']): - # NOTE defaultly the image should be BGR format - img = results[key] - results[key] = mmcv.adjust_color(img, factor).astype(img.dtype) - - def __call__(self, results): - """Call function for Color transformation. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Colored results. - """ - if np.random.rand() > self.prob: - return results - self._adjust_color_img(results, self.factor) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(level={self.level}, ' - repr_str += f'prob={self.prob})' - return repr_str - - -@PIPELINES.register_module() -class EqualizeTransform: - """Apply Equalize transformation to image. The bboxes, masks and - segmentations are not modified. - - Args: - prob (float): The probability for performing Equalize transformation. - """ - - def __init__(self, prob=0.5): - assert 0 <= prob <= 1.0, \ - 'The probability should be in range [0,1].' - self.prob = prob - - def _imequalize(self, results): - """Equalizes the histogram of one image.""" - for key in results.get('img_fields', ['img']): - img = results[key] - results[key] = mmcv.imequalize(img).astype(img.dtype) - - def __call__(self, results): - """Call function for Equalize transformation. - - Args: - results (dict): Results dict from loading pipeline. - - Returns: - dict: Results after the transformation. - """ - if np.random.rand() > self.prob: - return results - self._imequalize(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(prob={self.prob})' - - -@PIPELINES.register_module() -class BrightnessTransform: - """Apply Brightness transformation to image. The bboxes, masks and - segmentations are not modified. 
- - Args: - level (int | float): Should be in range [0,_MAX_LEVEL]. - prob (float): The probability for performing Brightness transformation. - """ - - def __init__(self, level, prob=0.5): - assert isinstance(level, (int, float)), \ - 'The level must be type int or float.' - assert 0 <= level <= _MAX_LEVEL, \ - 'The level should be in range [0,_MAX_LEVEL].' - assert 0 <= prob <= 1.0, \ - 'The probability should be in range [0,1].' - self.level = level - self.prob = prob - self.factor = enhance_level_to_value(level) - - def _adjust_brightness_img(self, results, factor=1.0): - """Adjust the brightness of image.""" - for key in results.get('img_fields', ['img']): - img = results[key] - results[key] = mmcv.adjust_brightness(img, - factor).astype(img.dtype) - - def __call__(self, results): - """Call function for Brightness transformation. - - Args: - results (dict): Results dict from loading pipeline. - - Returns: - dict: Results after the transformation. - """ - if np.random.rand() > self.prob: - return results - self._adjust_brightness_img(results, self.factor) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(level={self.level}, ' - repr_str += f'prob={self.prob})' - return repr_str - - -@PIPELINES.register_module() -class ContrastTransform: - """Apply Contrast transformation to image. The bboxes, masks and - segmentations are not modified. - - Args: - level (int | float): Should be in range [0,_MAX_LEVEL]. - prob (float): The probability for performing Contrast transformation. - """ - - def __init__(self, level, prob=0.5): - assert isinstance(level, (int, float)), \ - 'The level must be type int or float.' - assert 0 <= level <= _MAX_LEVEL, \ - 'The level should be in range [0,_MAX_LEVEL].' - assert 0 <= prob <= 1.0, \ - 'The probability should be in range [0,1].' - self.level = level - self.prob = prob - self.factor = enhance_level_to_value(level) - - def _adjust_contrast_img(self, results, factor=1.0): - """Adjust the image contrast.""" - for key in results.get('img_fields', ['img']): - img = results[key] - results[key] = mmcv.adjust_contrast(img, factor).astype(img.dtype) - - def __call__(self, results): - """Call function for Contrast transformation. - - Args: - results (dict): Results dict from loading pipeline. - - Returns: - dict: Results after the transformation. - """ - if np.random.rand() > self.prob: - return results - self._adjust_contrast_img(results, self.factor) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(level={self.level}, ' - repr_str += f'prob={self.prob})' - return repr_str diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/compose.py b/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/compose.py deleted file mode 100644 index d759220098440c769b8f53c1e3b902c046450ff4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/compose.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import collections - -from mmcv.utils import build_from_cfg - -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class Compose: - """Compose multiple transforms sequentially. - - Args: - transforms (Sequence[dict | callable]): Sequence of transform object or - config dict to be composed. 
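-
- Example (illustrative sketch; the transform configs are hypothetical):
-
- >>> pipeline = Compose([
- ... dict(type='LoadImageFromFile'),
- ... dict(type='RandomFlip', flip_ratio=0.5),
- ... ])
- >>> # ``pipeline(results)`` applies both transforms in order, where
- >>> # ``results`` is the dict prepared by the dataset.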
- """ - - def __init__(self, transforms): - assert isinstance(transforms, collections.abc.Sequence) - self.transforms = [] - for transform in transforms: - if isinstance(transform, dict): - transform = build_from_cfg(transform, PIPELINES) - self.transforms.append(transform) - elif callable(transform): - self.transforms.append(transform) - else: - raise TypeError('transform must be callable or a dict') - - def __call__(self, data): - """Call function to apply transforms sequentially. - - Args: - data (dict): A result dict contains the data to transform. - - Returns: - dict: Transformed data. - """ - - for t in self.transforms: - data = t(data) - if data is None: - return None - return data - - def __repr__(self): - format_string = self.__class__.__name__ + '(' - for t in self.transforms: - str_ = t.__repr__() - if 'Compose(' in str_: - str_ = str_.replace('\n', '\n ') - format_string += '\n' - format_string += f' {str_}' - format_string += '\n)' - return format_string diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/formating.py b/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/formating.py deleted file mode 100644 index 3b3e45abbb0714db18700ba9a12618a5aaa638d8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/formating.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# flake8: noqa -import warnings - -from .formatting import * - -warnings.warn('DeprecationWarning: mmdet.datasets.pipelines.formating will be ' - 'deprecated, please replace it with ' - 'mmdet.datasets.pipelines.formatting.') diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/formatting.py b/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/formatting.py deleted file mode 100644 index 45ca69cfc6f400d0b80577ce2e19dfd9fd9ed204..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/formatting.py +++ /dev/null @@ -1,392 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from collections.abc import Sequence - -import mmcv -import numpy as np -import torch -from mmcv.parallel import DataContainer as DC - -from ..builder import PIPELINES - - -def to_tensor(data): - """Convert objects of various python types to :obj:`torch.Tensor`. - - Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, - :class:`Sequence`, :class:`int` and :class:`float`. - - Args: - data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to - be converted. - """ - - if isinstance(data, torch.Tensor): - return data - elif isinstance(data, np.ndarray): - return torch.from_numpy(data) - elif isinstance(data, Sequence) and not mmcv.is_str(data): - return torch.tensor(data) - elif isinstance(data, int): - return torch.LongTensor([data]) - elif isinstance(data, float): - return torch.FloatTensor([data]) - else: - raise TypeError(f'type {type(data)} cannot be converted to tensor.') - - -@PIPELINES.register_module() -class ToTensor: - """Convert some results to :obj:`torch.Tensor` by given keys. - - Args: - keys (Sequence[str]): Keys that need to be converted to Tensor. - """ - - def __init__(self, keys): - self.keys = keys - - def __call__(self, results): - """Call function to convert data in results to :obj:`torch.Tensor`. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data converted - to :obj:`torch.Tensor`. 
- """ - for key in self.keys: - results[key] = to_tensor(results[key]) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(keys={self.keys})' - - -@PIPELINES.register_module() -class ImageToTensor: - """Convert image to :obj:`torch.Tensor` by given keys. - - The dimension order of input image is (H, W, C). The pipeline will convert - it to (C, H, W). If only 2 dimension (H, W) is given, the output would be - (1, H, W). - - Args: - keys (Sequence[str]): Key of images to be converted to Tensor. - """ - - def __init__(self, keys): - self.keys = keys - - def __call__(self, results): - """Call function to convert image in results to :obj:`torch.Tensor` and - transpose the channel order. - - Args: - results (dict): Result dict contains the image data to convert. - - Returns: - dict: The result dict contains the image converted - to :obj:`torch.Tensor` and transposed to (C, H, W) order. - """ - for key in self.keys: - img = results[key] - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - results[key] = (to_tensor(img.transpose(2, 0, 1))).contiguous() - return results - - def __repr__(self): - return self.__class__.__name__ + f'(keys={self.keys})' - - -@PIPELINES.register_module() -class Transpose: - """Transpose some results by given keys. - - Args: - keys (Sequence[str]): Keys of results to be transposed. - order (Sequence[int]): Order of transpose. - """ - - def __init__(self, keys, order): - self.keys = keys - self.order = order - - def __call__(self, results): - """Call function to transpose the channel order of data in results. - - Args: - results (dict): Result dict contains the data to transpose. - - Returns: - dict: The result dict contains the data transposed to \ - ``self.order``. - """ - for key in self.keys: - results[key] = results[key].transpose(self.order) - return results - - def __repr__(self): - return self.__class__.__name__ + \ - f'(keys={self.keys}, order={self.order})' - - -@PIPELINES.register_module() -class ToDataContainer: - """Convert results to :obj:`mmcv.DataContainer` by given fields. - - Args: - fields (Sequence[dict]): Each field is a dict like - ``dict(key='xxx', **kwargs)``. The ``key`` in result will - be converted to :obj:`mmcv.DataContainer` with ``**kwargs``. - Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'), - dict(key='gt_labels'))``. - """ - - def __init__(self, - fields=(dict(key='img', stack=True), dict(key='gt_bboxes'), - dict(key='gt_labels'))): - self.fields = fields - - def __call__(self, results): - """Call function to convert data in results to - :obj:`mmcv.DataContainer`. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data converted to \ - :obj:`mmcv.DataContainer`. - """ - - for field in self.fields: - field = field.copy() - key = field.pop('key') - results[key] = DC(results[key], **field) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(fields={self.fields})' - - -@PIPELINES.register_module() -class DefaultFormatBundle: - """Default formatting bundle. - - It simplifies the pipeline of formatting common fields, including "img", - "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg". - These fields are formatted as follows. 
- - - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) - - proposals: (1)to tensor, (2)to DataContainer - - gt_bboxes: (1)to tensor, (2)to DataContainer - - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer - - gt_labels: (1)to tensor, (2)to DataContainer - - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True) - - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \ - (3)to DataContainer (stack=True) - - Args: - img_to_float (bool): Whether to force the image to be converted to - float type. Default: True. - pad_val (dict): A dict for padding value in batch collating, - the default value is `dict(img=0, masks=0, seg=255)`. - Without this argument, the padding value of "gt_semantic_seg" - will be set to 0 by default, which should be 255. - """ - - def __init__(self, - img_to_float=True, - pad_val=dict(img=0, masks=0, seg=255)): - self.img_to_float = img_to_float - self.pad_val = pad_val - - def __call__(self, results): - """Call function to transform and format common fields in results. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data that is formatted with \ - default bundle. - """ - - if 'img' in results: - img = results['img'] - if self.img_to_float is True and img.dtype == np.uint8: - # Normally, image is of uint8 type without normalization. - # At this time, it needs to be forced to be converted to - # flot32, otherwise the model training and inference - # will be wrong. Only used for YOLOX currently . - img = img.astype(np.float32) - # add default meta keys - results = self._add_default_meta_keys(results) - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - img = np.ascontiguousarray(img.transpose(2, 0, 1)) - results['img'] = DC( - to_tensor(img), padding_value=self.pad_val['img'], stack=True) - for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']: - if key not in results: - continue - results[key] = DC(to_tensor(results[key])) - if 'gt_masks' in results: - results['gt_masks'] = DC( - results['gt_masks'], - padding_value=self.pad_val['masks'], - cpu_only=True) - if 'gt_semantic_seg' in results: - results['gt_semantic_seg'] = DC( - to_tensor(results['gt_semantic_seg'][None, ...]), - padding_value=self.pad_val['seg'], - stack=True) - return results - - def _add_default_meta_keys(self, results): - """Add default meta keys. - - We set default meta keys including `pad_shape`, `scale_factor` and - `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and - `Pad` are implemented during the whole pipeline. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - results (dict): Updated result dict contains the data to convert. - """ - img = results['img'] - results.setdefault('pad_shape', img.shape) - results.setdefault('scale_factor', 1.0) - num_channels = 1 if len(img.shape) < 3 else img.shape[2] - results.setdefault( - 'img_norm_cfg', - dict( - mean=np.zeros(num_channels, dtype=np.float32), - std=np.ones(num_channels, dtype=np.float32), - to_rgb=False)) - return results - - def __repr__(self): - return self.__class__.__name__ + \ - f'(img_to_float={self.img_to_float})' - - -@PIPELINES.register_module() -class Collect: - """Collect data from the loader relevant to the specific task. - - This is usually the last stage of the data loader pipeline. Typically keys - is set to some subset of "img", "proposals", "gt_bboxes", - "gt_bboxes_ignore", "gt_labels", and/or "gt_masks". - - The "img_meta" item is always populated. 
The contents of the "img_meta" - dictionary depends on "meta_keys". By default this includes: - - - "img_shape": shape of the image input to the network as a tuple \ - (h, w, c). Note that images may be zero padded on the \ - bottom/right if the batch tensor is larger than this shape. - - - "scale_factor": a float indicating the preprocessing scale - - - "flip": a boolean indicating if image flip transform was used - - - "filename": path to the image file - - - "ori_shape": original shape of the image as a tuple (h, w, c) - - - "pad_shape": image shape after padding - - - "img_norm_cfg": a dict of normalization information: - - - mean - per channel mean subtraction - - std - per channel std divisor - - to_rgb - bool indicating if bgr was converted to rgb - - Args: - keys (Sequence[str]): Keys of results to be collected in ``data``. - meta_keys (Sequence[str], optional): Meta keys to be converted to - ``mmcv.DataContainer`` and collected in ``data[img_metas]``. - Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape', - 'pad_shape', 'scale_factor', 'flip', 'flip_direction', - 'img_norm_cfg')`` - """ - - def __init__(self, - keys, - meta_keys=('filename', 'ori_filename', 'ori_shape', - 'img_shape', 'pad_shape', 'scale_factor', 'flip', - 'flip_direction', 'img_norm_cfg')): - self.keys = keys - self.meta_keys = meta_keys - - def __call__(self, results): - """Call function to collect keys in results. The keys in ``meta_keys`` - will be converted to :obj:mmcv.DataContainer. - - Args: - results (dict): Result dict contains the data to collect. - - Returns: - dict: The result dict contains the following keys - - - keys in``self.keys`` - - ``img_metas`` - """ - - data = {} - img_meta = {} - for key in self.meta_keys: - img_meta[key] = results[key] - data['img_metas'] = DC(img_meta, cpu_only=True) - for key in self.keys: - data[key] = results[key] - return data - - def __repr__(self): - return self.__class__.__name__ + \ - f'(keys={self.keys}, meta_keys={self.meta_keys})' - - -@PIPELINES.register_module() -class WrapFieldsToLists: - """Wrap fields of the data dictionary into lists for evaluation. - - This class can be used as a last step of a test or validation - pipeline for single image evaluation or inference. - - Example: - >>> test_pipeline = [ - >>> dict(type='LoadImageFromFile'), - >>> dict(type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - >>> dict(type='Pad', size_divisor=32), - >>> dict(type='ImageToTensor', keys=['img']), - >>> dict(type='Collect', keys=['img']), - >>> dict(type='WrapFieldsToLists') - >>> ] - """ - - def __call__(self, results): - """Call function to wrap fields into lists. - - Args: - results (dict): Result dict contains the data to wrap. - - Returns: - dict: The result dict where value of ``self.keys`` are wrapped \ - into list. - """ - - # Wrap dict fields into lists - for key, val in results.items(): - results[key] = [val] - return results - - def __repr__(self): - return f'{self.__class__.__name__}()' diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/instaboost.py b/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/instaboost.py deleted file mode 100644 index ca10c4c751f5309e37822fbe61ea3c7ed5de1b83..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/instaboost.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
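The `InstaBoost` wrapper defined below rewrites `results['ann_info']` in place, so in a config it has to sit between image loading and `LoadAnnotations`, which is what decodes `ann_info` into the `gt_*` fields. A hedged sketch of such a pipeline fragment, using the default parameter values visible in the constructor below:

```python
# Illustrative placement of InstaBoost in a training pipeline; it must run
# before LoadAnnotations because it edits results['ann_info'] in place.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='InstaBoost',
        action_candidate=('normal', 'horizontal', 'skip'),
        action_prob=(1, 0, 0),
        scale=(0.8, 1.2),
        dx=15,
        dy=15,
        theta=(-1, 1),
        color_prob=0.5,
        hflag=False,
        aug_ratio=0.5),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
]
```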
-import numpy as np - -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class InstaBoost: - r"""Data augmentation method in `InstaBoost: Boosting Instance - Segmentation Via Probability Map Guided Copy-Pasting - `_. - - Refer to https://github.com/GothicAi/Instaboost for implementation details. - - Args: - action_candidate (tuple): Action candidates. "normal", "horizontal", \ - "vertical", "skip" are supported. Default: ('normal', \ - 'horizontal', 'skip'). - action_prob (tuple): Corresponding action probabilities. Should be \ - the same length as action_candidate. Default: (1, 0, 0). - scale (tuple): (min scale, max scale). Default: (0.8, 1.2). - dx (int): The maximum x-axis shift will be (instance width) / dx. - Default 15. - dy (int): The maximum y-axis shift will be (instance height) / dy. - Default 15. - theta (tuple): (min rotation degree, max rotation degree). \ - Default: (-1, 1). - color_prob (float): Probability of images for color augmentation. - Default 0.5. - heatmap_flag (bool): Whether to use heatmap guided. Default False. - aug_ratio (float): Probability of applying this transformation. \ - Default 0.5. - """ - - def __init__(self, - action_candidate=('normal', 'horizontal', 'skip'), - action_prob=(1, 0, 0), - scale=(0.8, 1.2), - dx=15, - dy=15, - theta=(-1, 1), - color_prob=0.5, - hflag=False, - aug_ratio=0.5): - try: - import instaboostfast as instaboost - except ImportError: - raise ImportError( - 'Please run "pip install instaboostfast" ' - 'to install instaboostfast first for instaboost augmentation.') - self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob, - scale, dx, dy, theta, - color_prob, hflag) - self.aug_ratio = aug_ratio - - def _load_anns(self, results): - labels = results['ann_info']['labels'] - masks = results['ann_info']['masks'] - bboxes = results['ann_info']['bboxes'] - n = len(labels) - - anns = [] - for i in range(n): - label = labels[i] - bbox = bboxes[i] - mask = masks[i] - x1, y1, x2, y2 = bbox - # assert (x2 - x1) >= 1 and (y2 - y1) >= 1 - bbox = [x1, y1, x2 - x1, y2 - y1] - anns.append({ - 'category_id': label, - 'segmentation': mask, - 'bbox': bbox - }) - - return anns - - def _parse_anns(self, results, anns, img): - gt_bboxes = [] - gt_labels = [] - gt_masks_ann = [] - for ann in anns: - x1, y1, w, h = ann['bbox'] - # TODO: more essential bug need to be fixed in instaboost - if w <= 0 or h <= 0: - continue - bbox = [x1, y1, x1 + w, y1 + h] - gt_bboxes.append(bbox) - gt_labels.append(ann['category_id']) - gt_masks_ann.append(ann['segmentation']) - gt_bboxes = np.array(gt_bboxes, dtype=np.float32) - gt_labels = np.array(gt_labels, dtype=np.int64) - results['ann_info']['labels'] = gt_labels - results['ann_info']['bboxes'] = gt_bboxes - results['ann_info']['masks'] = gt_masks_ann - results['img'] = img - return results - - def __call__(self, results): - img = results['img'] - ori_type = img.dtype - anns = self._load_anns(results) - if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]): - try: - import instaboostfast as instaboost - except ImportError: - raise ImportError('Please run "pip install instaboostfast" ' - 'to install instaboostfast first.') - anns, img = instaboost.get_new_data( - anns, img.astype(np.uint8), self.cfg, background=None) - - results = self._parse_anns(results, anns, img.astype(ori_type)) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})' - return repr_str diff --git 
a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/loading.py b/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/loading.py deleted file mode 100644 index 8af8cf352ca4298fca4d50f0f5760daa869a6aeb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/loading.py +++ /dev/null @@ -1,645 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp - -import mmcv -import numpy as np -import pycocotools.mask as maskUtils - -from mmdet.core import BitmapMasks, PolygonMasks -from ..builder import PIPELINES - -try: - from panopticapi.utils import rgb2id -except ImportError: - rgb2id = None - - -@PIPELINES.register_module() -class LoadImageFromFile: - """Load an image from file. - - Required keys are "img_prefix" and "img_info" (a dict that must contain the - key "filename"). Added or updated keys are "filename", "img", "img_shape", - "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), - "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). - - Args: - to_float32 (bool): Whether to convert the loaded image to a float32 - numpy array. If set to False, the loaded image is an uint8 array. - Defaults to False. - color_type (str): The flag argument for :func:`mmcv.imfrombytes`. - Defaults to 'color'. - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - """ - - def __init__(self, - to_float32=False, - color_type='color', - channel_order='bgr', - file_client_args=dict(backend='disk')): - self.to_float32 = to_float32 - self.color_type = color_type - self.channel_order = channel_order - self.file_client_args = file_client_args.copy() - self.file_client = None - - def __call__(self, results): - """Call functions to load image and get image meta information. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded image and meta information. - """ - - if self.file_client is None: - self.file_client = mmcv.FileClient(**self.file_client_args) - - if results['img_prefix'] is not None: - filename = osp.join(results['img_prefix'], - results['img_info']['filename']) - else: - filename = results['img_info']['filename'] - - img_bytes = self.file_client.get(filename) - img = mmcv.imfrombytes( - img_bytes, flag=self.color_type, channel_order=self.channel_order) - if self.to_float32: - img = img.astype(np.float32) - - results['filename'] = filename - results['ori_filename'] = results['img_info']['filename'] - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - results['img_fields'] = ['img'] - return results - - def __repr__(self): - repr_str = (f'{self.__class__.__name__}(' - f'to_float32={self.to_float32}, ' - f"color_type='{self.color_type}', " - f"channel_order='{self.channel_order}', " - f'file_client_args={self.file_client_args})') - return repr_str - - -@PIPELINES.register_module() -class LoadImageFromWebcam(LoadImageFromFile): - """Load an image from webcam. - - Similar with :obj:`LoadImageFromFile`, but the image read from webcam is in - ``results['img']``. - """ - - def __call__(self, results): - """Call functions to add image meta information. - - Args: - results (dict): Result dict with Webcam read image in - ``results['img']``. - - Returns: - dict: The dict contains loaded image and meta information. 
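For orientation, a minimal sketch of how the image-loading transform above usually opens a detection pipeline, together with the annotation-loading transform documented further below; the flags shown are common defaults, not values taken from this repository:

```python
# Hypothetical head of a detection pipeline; LoadAnnotations is described
# later in this file, and the flags here are just common defaults.
load_stage = [
    dict(type='LoadImageFromFile', to_float32=False, color_type='color'),
    dict(type='LoadAnnotations', with_bbox=True, with_label=True,
         with_mask=False),
]
```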
- """ - - img = results['img'] - if self.to_float32: - img = img.astype(np.float32) - - results['filename'] = None - results['ori_filename'] = None - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - results['img_fields'] = ['img'] - return results - - -@PIPELINES.register_module() -class LoadMultiChannelImageFromFiles: - """Load multi-channel images from a list of separate channel files. - - Required keys are "img_prefix" and "img_info" (a dict that must contain the - key "filename", which is expected to be a list of filenames). - Added or updated keys are "filename", "img", "img_shape", - "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), - "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). - - Args: - to_float32 (bool): Whether to convert the loaded image to a float32 - numpy array. If set to False, the loaded image is an uint8 array. - Defaults to False. - color_type (str): The flag argument for :func:`mmcv.imfrombytes`. - Defaults to 'color'. - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - """ - - def __init__(self, - to_float32=False, - color_type='unchanged', - file_client_args=dict(backend='disk')): - self.to_float32 = to_float32 - self.color_type = color_type - self.file_client_args = file_client_args.copy() - self.file_client = None - - def __call__(self, results): - """Call functions to load multiple images and get images meta - information. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded images and meta information. - """ - - if self.file_client is None: - self.file_client = mmcv.FileClient(**self.file_client_args) - - if results['img_prefix'] is not None: - filename = [ - osp.join(results['img_prefix'], fname) - for fname in results['img_info']['filename'] - ] - else: - filename = results['img_info']['filename'] - - img = [] - for name in filename: - img_bytes = self.file_client.get(name) - img.append(mmcv.imfrombytes(img_bytes, flag=self.color_type)) - img = np.stack(img, axis=-1) - if self.to_float32: - img = img.astype(np.float32) - - results['filename'] = filename - results['ori_filename'] = results['img_info']['filename'] - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - # Set initial values for default meta_keys - results['pad_shape'] = img.shape - results['scale_factor'] = 1.0 - num_channels = 1 if len(img.shape) < 3 else img.shape[2] - results['img_norm_cfg'] = dict( - mean=np.zeros(num_channels, dtype=np.float32), - std=np.ones(num_channels, dtype=np.float32), - to_rgb=False) - return results - - def __repr__(self): - repr_str = (f'{self.__class__.__name__}(' - f'to_float32={self.to_float32}, ' - f"color_type='{self.color_type}', " - f'file_client_args={self.file_client_args})') - return repr_str - - -@PIPELINES.register_module() -class LoadAnnotations: - """Load multiple types of annotations. - - Args: - with_bbox (bool): Whether to parse and load the bbox annotation. - Default: True. - with_label (bool): Whether to parse and load the label annotation. - Default: True. - with_mask (bool): Whether to parse and load the mask annotation. - Default: False. - with_seg (bool): Whether to parse and load the semantic segmentation - annotation. Default: False. - poly2mask (bool): Whether to convert the instance masks from polygons - to bitmaps. Default: True. 
- denorm_bbox (bool): Whether to convert bbox from relative value to - absolute value. Only used in OpenImage Dataset. - Default: False. - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - """ - - def __init__(self, - with_bbox=True, - with_label=True, - with_mask=False, - with_seg=False, - poly2mask=True, - denorm_bbox=False, - file_client_args=dict(backend='disk')): - self.with_bbox = with_bbox - self.with_label = with_label - self.with_mask = with_mask - self.with_seg = with_seg - self.poly2mask = poly2mask - self.denorm_bbox = denorm_bbox - self.file_client_args = file_client_args.copy() - self.file_client = None - - def _load_bboxes(self, results): - """Private function to load bounding box annotations. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded bounding box annotations. - """ - - ann_info = results['ann_info'] - results['gt_bboxes'] = ann_info['bboxes'].copy() - - if self.denorm_bbox: - bbox_num = results['gt_bboxes'].shape[0] - if bbox_num != 0: - h, w = results['img_shape'][:2] - results['gt_bboxes'][:, 0::2] *= w - results['gt_bboxes'][:, 1::2] *= h - - gt_bboxes_ignore = ann_info.get('bboxes_ignore', None) - if gt_bboxes_ignore is not None: - results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy() - results['bbox_fields'].append('gt_bboxes_ignore') - results['bbox_fields'].append('gt_bboxes') - - gt_is_group_ofs = ann_info.get('gt_is_group_ofs', None) - if gt_is_group_ofs is not None: - results['gt_is_group_ofs'] = gt_is_group_ofs.copy() - - return results - - def _load_labels(self, results): - """Private function to load label annotations. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded label annotations. - """ - - results['gt_labels'] = results['ann_info']['labels'].copy() - return results - - def _poly2mask(self, mask_ann, img_h, img_w): - """Private function to convert masks represented with polygon to - bitmaps. - - Args: - mask_ann (list | dict): Polygon mask annotation input. - img_h (int): The height of output mask. - img_w (int): The width of output mask. - - Returns: - numpy.ndarray: The decode bitmap mask of shape (img_h, img_w). - """ - - if isinstance(mask_ann, list): - # polygon -- a single object might consist of multiple parts - # we merge all parts into one mask rle code - rles = maskUtils.frPyObjects(mask_ann, img_h, img_w) - rle = maskUtils.merge(rles) - elif isinstance(mask_ann['counts'], list): - # uncompressed RLE - rle = maskUtils.frPyObjects(mask_ann, img_h, img_w) - else: - # rle - rle = mask_ann - mask = maskUtils.decode(rle) - return mask - - def process_polygons(self, polygons): - """Convert polygons to list of ndarray and filter invalid polygons. - - Args: - polygons (list[list]): Polygons of one instance. - - Returns: - list[numpy.ndarray]: Processed polygons. - """ - - polygons = [np.array(p) for p in polygons] - valid_polygons = [] - for polygon in polygons: - if len(polygon) % 2 == 0 and len(polygon) >= 6: - valid_polygons.append(polygon) - return valid_polygons - - def _load_masks(self, results): - """Private function to load mask annotations. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded mask annotations. - If ``self.poly2mask`` is set ``True``, `gt_mask` will contain - :obj:`PolygonMasks`. 
Otherwise, :obj:`BitmapMasks` is used. - """ - - h, w = results['img_info']['height'], results['img_info']['width'] - gt_masks = results['ann_info']['masks'] - if self.poly2mask: - gt_masks = BitmapMasks( - [self._poly2mask(mask, h, w) for mask in gt_masks], h, w) - else: - gt_masks = PolygonMasks( - [self.process_polygons(polygons) for polygons in gt_masks], h, - w) - results['gt_masks'] = gt_masks - results['mask_fields'].append('gt_masks') - return results - - def _load_semantic_seg(self, results): - """Private function to load semantic segmentation annotations. - - Args: - results (dict): Result dict from :obj:`dataset`. - - Returns: - dict: The dict contains loaded semantic segmentation annotations. - """ - - if self.file_client is None: - self.file_client = mmcv.FileClient(**self.file_client_args) - - filename = osp.join(results['seg_prefix'], - results['ann_info']['seg_map']) - img_bytes = self.file_client.get(filename) - results['gt_semantic_seg'] = mmcv.imfrombytes( - img_bytes, flag='unchanged').squeeze() - results['seg_fields'].append('gt_semantic_seg') - return results - - def __call__(self, results): - """Call function to load multiple types annotations. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded bounding box, label, mask and - semantic segmentation annotations. - """ - - if self.with_bbox: - results = self._load_bboxes(results) - if results is None: - return None - if self.with_label: - results = self._load_labels(results) - if self.with_mask: - results = self._load_masks(results) - if self.with_seg: - results = self._load_semantic_seg(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(with_bbox={self.with_bbox}, ' - repr_str += f'with_label={self.with_label}, ' - repr_str += f'with_mask={self.with_mask}, ' - repr_str += f'with_seg={self.with_seg}, ' - repr_str += f'poly2mask={self.poly2mask}, ' - repr_str += f'file_client_args={self.file_client_args})' - return repr_str - - -@PIPELINES.register_module() -class LoadPanopticAnnotations(LoadAnnotations): - """Load multiple types of panoptic annotations. - - Args: - with_bbox (bool): Whether to parse and load the bbox annotation. - Default: True. - with_label (bool): Whether to parse and load the label annotation. - Default: True. - with_mask (bool): Whether to parse and load the mask annotation. - Default: True. - with_seg (bool): Whether to parse and load the semantic segmentation - annotation. Default: True. - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - """ - - def __init__(self, - with_bbox=True, - with_label=True, - with_mask=True, - with_seg=True, - file_client_args=dict(backend='disk')): - if rgb2id is None: - raise RuntimeError( - 'panopticapi is not installed, please install it by: ' - 'pip install git+https://github.com/cocodataset/' - 'panopticapi.git.') - - super(LoadPanopticAnnotations, self).__init__( - with_bbox=with_bbox, - with_label=with_label, - with_mask=with_mask, - with_seg=with_seg, - poly2mask=True, - denorm_bbox=False, - file_client_args=file_client_args) - - def _load_masks_and_semantic_segs(self, results): - """Private function to load mask and semantic segmentation annotations. 
- - In gt_semantic_seg, the foreground label is from `0` to - `num_things - 1`, the background label is from `num_things` to - `num_things + num_stuff - 1`, 255 means the ignored label (`VOID`). - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded mask and semantic segmentation - annotations. `BitmapMasks` is used for mask annotations. - """ - - if self.file_client is None: - self.file_client = mmcv.FileClient(**self.file_client_args) - - filename = osp.join(results['seg_prefix'], - results['ann_info']['seg_map']) - img_bytes = self.file_client.get(filename) - pan_png = mmcv.imfrombytes( - img_bytes, flag='color', channel_order='rgb').squeeze() - pan_png = rgb2id(pan_png) - - gt_masks = [] - gt_seg = np.zeros_like(pan_png) + 255 # 255 as ignore - - for mask_info in results['ann_info']['masks']: - mask = (pan_png == mask_info['id']) - gt_seg = np.where(mask, mask_info['category'], gt_seg) - - # The legal thing masks - if mask_info.get('is_thing'): - gt_masks.append(mask.astype(np.uint8)) - - if self.with_mask: - h, w = results['img_info']['height'], results['img_info']['width'] - gt_masks = BitmapMasks(gt_masks, h, w) - results['gt_masks'] = gt_masks - results['mask_fields'].append('gt_masks') - - if self.with_seg: - results['gt_semantic_seg'] = gt_seg - results['seg_fields'].append('gt_semantic_seg') - return results - - def __call__(self, results): - """Call function to load multiple types panoptic annotations. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded bounding box, label, mask and - semantic segmentation annotations. - """ - - if self.with_bbox: - results = self._load_bboxes(results) - if results is None: - return None - if self.with_label: - results = self._load_labels(results) - if self.with_mask or self.with_seg: - # The tasks completed by '_load_masks' and '_load_semantic_segs' - # in LoadAnnotations are merged to one function. - results = self._load_masks_and_semantic_segs(results) - - return results - - -@PIPELINES.register_module() -class LoadProposals: - """Load proposal pipeline. - - Required key is "proposals". Updated keys are "proposals", "bbox_fields". - - Args: - num_max_proposals (int, optional): Maximum number of proposals to load. - If not specified, all proposals will be loaded. - """ - - def __init__(self, num_max_proposals=None): - self.num_max_proposals = num_max_proposals - - def __call__(self, results): - """Call function to load proposals from file. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded proposal annotations. - """ - - proposals = results['proposals'] - if proposals.shape[1] not in (4, 5): - raise AssertionError( - 'proposals should have shapes (n, 4) or (n, 5), ' - f'but found {proposals.shape}') - proposals = proposals[:, :4] - - if self.num_max_proposals is not None: - proposals = proposals[:self.num_max_proposals] - - if len(proposals) == 0: - proposals = np.array([[0, 0, 0, 0]], dtype=np.float32) - results['proposals'] = proposals - results['bbox_fields'].append('proposals') - return results - - def __repr__(self): - return self.__class__.__name__ + \ - f'(num_max_proposals={self.num_max_proposals})' - - -@PIPELINES.register_module() -class FilterAnnotations: - """Filter invalid annotations. - - Args: - min_gt_bbox_wh (tuple[float]): Minimum width and height of ground truth - boxes. Default: (1., 1.) 
- min_gt_mask_area (int): Minimum foreground area of ground truth masks. - Default: 1 - by_box (bool): Filter instances with bounding boxes not meeting the - min_gt_bbox_wh threshold. Default: True - by_mask (bool): Filter instances with masks not meeting - min_gt_mask_area threshold. Default: False - keep_empty (bool): Whether to return None when it - becomes an empty bbox after filtering. Default: True - """ - - def __init__(self, - min_gt_bbox_wh=(1., 1.), - min_gt_mask_area=1, - by_box=True, - by_mask=False, - keep_empty=True): - # TODO: add more filter options - assert by_box or by_mask - self.min_gt_bbox_wh = min_gt_bbox_wh - self.min_gt_mask_area = min_gt_mask_area - self.by_box = by_box - self.by_mask = by_mask - self.keep_empty = keep_empty - - def __call__(self, results): - if self.by_box: - assert 'gt_bboxes' in results - gt_bboxes = results['gt_bboxes'] - instance_num = gt_bboxes.shape[0] - if self.by_mask: - assert 'gt_masks' in results - gt_masks = results['gt_masks'] - instance_num = len(gt_masks) - - if instance_num == 0: - return results - - tests = [] - if self.by_box: - w = gt_bboxes[:, 2] - gt_bboxes[:, 0] - h = gt_bboxes[:, 3] - gt_bboxes[:, 1] - tests.append((w > self.min_gt_bbox_wh[0]) - & (h > self.min_gt_bbox_wh[1])) - if self.by_mask: - gt_masks = results['gt_masks'] - tests.append(gt_masks.areas >= self.min_gt_mask_area) - - keep = tests[0] - for t in tests[1:]: - keep = keep & t - - keep = keep.nonzero()[0] - - keys = ('gt_bboxes', 'gt_labels', 'gt_masks') - for key in keys: - if key in results: - results[key] = results[key][keep] - if keep.size == 0: - if self.keep_empty: - return None - return results - - def __repr__(self): - return self.__class__.__name__ + \ - f'(min_gt_bbox_wh={self.min_gt_bbox_wh},' \ - f'min_gt_mask_area={self.min_gt_mask_area},' \ - f'by_box={self.by_box},' \ - f'by_mask={self.by_mask},' \ - f'always_keep={self.always_keep})' diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/test_time_aug.py b/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/test_time_aug.py deleted file mode 100644 index 5f1ab7b7cc81891dd14d136a24cec5228495d2f0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/test_time_aug.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import mmcv - -from ..builder import PIPELINES -from .compose import Compose - - -@PIPELINES.register_module() -class MultiScaleFlipAug: - """Test-time augmentation with multiple scales and flipping. - - An example configuration is as followed: - - .. code-block:: - - img_scale=[(1333, 400), (1333, 800)], - flip=True, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ] - - After MultiScaleFLipAug with above configuration, the results are wrapped - into lists of the same length as followed: - - .. code-block:: - - dict( - img=[...], - img_shape=[...], - scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)] - flip=[False, True, False, True] - ... - ) - - Args: - transforms (list[dict]): Transforms to apply in each augmentation. - img_scale (tuple | list[tuple] | None): Images scales for resizing. - scale_factor (float | list[float] | None): Scale factors for resizing. - flip (bool): Whether apply flip augmentation. Default: False. 
- flip_direction (str | list[str]): Flip augmentation directions, - options are "horizontal", "vertical" and "diagonal". If - flip_direction is a list, multiple flip augmentations will be - applied. It has no effect when flip == False. Default: - "horizontal". - """ - - def __init__(self, - transforms, - img_scale=None, - scale_factor=None, - flip=False, - flip_direction='horizontal'): - self.transforms = Compose(transforms) - assert (img_scale is None) ^ (scale_factor is None), ( - 'Must have but only one variable can be set') - if img_scale is not None: - self.img_scale = img_scale if isinstance(img_scale, - list) else [img_scale] - self.scale_key = 'scale' - assert mmcv.is_list_of(self.img_scale, tuple) - else: - self.img_scale = scale_factor if isinstance( - scale_factor, list) else [scale_factor] - self.scale_key = 'scale_factor' - - self.flip = flip - self.flip_direction = flip_direction if isinstance( - flip_direction, list) else [flip_direction] - assert mmcv.is_list_of(self.flip_direction, str) - if not self.flip and self.flip_direction != ['horizontal']: - warnings.warn( - 'flip_direction has no effect when flip is set to False') - if (self.flip - and not any([t['type'] == 'RandomFlip' for t in transforms])): - warnings.warn( - 'flip has no effect when RandomFlip is not in transforms') - - def __call__(self, results): - """Call function to apply test time augment transforms on results. - - Args: - results (dict): Result dict contains the data to transform. - - Returns: - dict[str: list]: The augmented data, where each value is wrapped - into a list. - """ - - aug_data = [] - flip_args = [(False, None)] - if self.flip: - flip_args += [(True, direction) - for direction in self.flip_direction] - for scale in self.img_scale: - for flip, direction in flip_args: - _results = results.copy() - _results[self.scale_key] = scale - _results['flip'] = flip - _results['flip_direction'] = direction - data = self.transforms(_results) - aug_data.append(data) - # list of dict to dict of list - aug_data_dict = {key: [] for key in aug_data[0]} - for data in aug_data: - for key, val in data.items(): - aug_data_dict[key].append(val) - return aug_data_dict - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(transforms={self.transforms}, ' - repr_str += f'img_scale={self.img_scale}, flip={self.flip}, ' - repr_str += f'flip_direction={self.flip_direction})' - return repr_str diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/transforms.py b/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/transforms.py deleted file mode 100644 index 0a1b3891128e341cc14136b2721397becf0c8529..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/pipelines/transforms.py +++ /dev/null @@ -1,2919 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import inspect -import math -import warnings - -import cv2 -import mmcv -import numpy as np -from numpy import random - -from mmdet.core import BitmapMasks, PolygonMasks, find_inside_bboxes -from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps -from mmdet.utils import log_img_scale -from ..builder import PIPELINES - -try: - from imagecorruptions import corrupt -except ImportError: - corrupt = None - -try: - import albumentations - from albumentations import Compose -except ImportError: - albumentations = None - Compose = None - - -@PIPELINES.register_module() -class Resize: - """Resize images & bbox & mask. - - This transform resizes the input image to some scale. 
Bboxes and masks are - then resized with the same scale factor. If the input dict contains the key - "scale", then the scale in the input dict is used, otherwise the specified - scale in the init method is used. If the input dict contains the key - "scale_factor" (if MultiScaleFlipAug does not give img_scale but - scale_factor), the actual scale will be computed by image shape and - scale_factor. - - `img_scale` can either be a tuple (single-scale) or a list of tuple - (multi-scale). There are 3 multiscale modes: - - - ``ratio_range is not None``: randomly sample a ratio from the ratio \ - range and multiply it with the image scale. - - ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \ - sample a scale from the multiscale range. - - ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \ - sample a scale from multiple scales. - - Args: - img_scale (tuple or list[tuple]): Images scales for resizing. - multiscale_mode (str): Either "range" or "value". - ratio_range (tuple[float]): (min_ratio, max_ratio) - keep_ratio (bool): Whether to keep the aspect ratio when resizing the - image. - bbox_clip_border (bool, optional): Whether to clip the objects outside - the border of the image. In some dataset like MOT17, the gt bboxes - are allowed to cross the border of images. Therefore, we don't - need to clip the gt bboxes in these cases. Defaults to True. - backend (str): Image resize backend, choices are 'cv2' and 'pillow'. - These two backends generates slightly different results. Defaults - to 'cv2'. - interpolation (str): Interpolation method, accepted values are - "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' - backend, "nearest", "bilinear" for 'pillow' backend. - override (bool, optional): Whether to override `scale` and - `scale_factor` so as to call resize twice. Default False. If True, - after the first resizing, the existed `scale` and `scale_factor` - will be ignored so the second resizing can be allowed. - This option is a work-around for multiple times of resize in DETR. - Defaults to False. - """ - - def __init__(self, - img_scale=None, - multiscale_mode='range', - ratio_range=None, - keep_ratio=True, - bbox_clip_border=True, - backend='cv2', - interpolation='bilinear', - override=False): - if img_scale is None: - self.img_scale = None - else: - if isinstance(img_scale, list): - self.img_scale = img_scale - else: - self.img_scale = [img_scale] - assert mmcv.is_list_of(self.img_scale, tuple) - - if ratio_range is not None: - # mode 1: given a scale and a range of image ratio - assert len(self.img_scale) == 1 - else: - # mode 2: given multiple scales or a range of scales - assert multiscale_mode in ['value', 'range'] - - self.backend = backend - self.multiscale_mode = multiscale_mode - self.ratio_range = ratio_range - self.keep_ratio = keep_ratio - # TODO: refactor the override option in Resize - self.interpolation = interpolation - self.override = override - self.bbox_clip_border = bbox_clip_border - - @staticmethod - def random_select(img_scales): - """Randomly select an img_scale from given candidates. - - Args: - img_scales (list[tuple]): Images scales for selection. - - Returns: - (tuple, int): Returns a tuple ``(img_scale, scale_dix)``, \ - where ``img_scale`` is the selected image scale and \ - ``scale_idx`` is the selected index in the given candidates. 
- """ - - assert mmcv.is_list_of(img_scales, tuple) - scale_idx = np.random.randint(len(img_scales)) - img_scale = img_scales[scale_idx] - return img_scale, scale_idx - - @staticmethod - def random_sample(img_scales): - """Randomly sample an img_scale when ``multiscale_mode=='range'``. - - Args: - img_scales (list[tuple]): Images scale range for sampling. - There must be two tuples in img_scales, which specify the lower - and upper bound of image scales. - - Returns: - (tuple, None): Returns a tuple ``(img_scale, None)``, where \ - ``img_scale`` is sampled scale and None is just a placeholder \ - to be consistent with :func:`random_select`. - """ - - assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2 - img_scale_long = [max(s) for s in img_scales] - img_scale_short = [min(s) for s in img_scales] - long_edge = np.random.randint( - min(img_scale_long), - max(img_scale_long) + 1) - short_edge = np.random.randint( - min(img_scale_short), - max(img_scale_short) + 1) - img_scale = (long_edge, short_edge) - return img_scale, None - - @staticmethod - def random_sample_ratio(img_scale, ratio_range): - """Randomly sample an img_scale when ``ratio_range`` is specified. - - A ratio will be randomly sampled from the range specified by - ``ratio_range``. Then it would be multiplied with ``img_scale`` to - generate sampled scale. - - Args: - img_scale (tuple): Images scale base to multiply with ratio. - ratio_range (tuple[float]): The minimum and maximum ratio to scale - the ``img_scale``. - - Returns: - (tuple, None): Returns a tuple ``(scale, None)``, where \ - ``scale`` is sampled ratio multiplied with ``img_scale`` and \ - None is just a placeholder to be consistent with \ - :func:`random_select`. - """ - - assert isinstance(img_scale, tuple) and len(img_scale) == 2 - min_ratio, max_ratio = ratio_range - assert min_ratio <= max_ratio - ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio - scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio) - return scale, None - - def _random_scale(self, results): - """Randomly sample an img_scale according to ``ratio_range`` and - ``multiscale_mode``. - - If ``ratio_range`` is specified, a ratio will be sampled and be - multiplied with ``img_scale``. - If multiple scales are specified by ``img_scale``, a scale will be - sampled according to ``multiscale_mode``. - Otherwise, single scale will be used. - - Args: - results (dict): Result dict from :obj:`dataset`. - - Returns: - dict: Two new keys 'scale` and 'scale_idx` are added into \ - ``results``, which would be used by subsequent pipelines. 
- """ - - if self.ratio_range is not None: - scale, scale_idx = self.random_sample_ratio( - self.img_scale[0], self.ratio_range) - elif len(self.img_scale) == 1: - scale, scale_idx = self.img_scale[0], 0 - elif self.multiscale_mode == 'range': - scale, scale_idx = self.random_sample(self.img_scale) - elif self.multiscale_mode == 'value': - scale, scale_idx = self.random_select(self.img_scale) - else: - raise NotImplementedError - - results['scale'] = scale - results['scale_idx'] = scale_idx - - def _resize_img(self, results): - """Resize images with ``results['scale']``.""" - for key in results.get('img_fields', ['img']): - if self.keep_ratio: - img, scale_factor = mmcv.imrescale( - results[key], - results['scale'], - return_scale=True, - interpolation=self.interpolation, - backend=self.backend) - # the w_scale and h_scale has minor difference - # a real fix should be done in the mmcv.imrescale in the future - new_h, new_w = img.shape[:2] - h, w = results[key].shape[:2] - w_scale = new_w / w - h_scale = new_h / h - else: - img, w_scale, h_scale = mmcv.imresize( - results[key], - results['scale'], - return_scale=True, - interpolation=self.interpolation, - backend=self.backend) - results[key] = img - - scale_factor = np.array([w_scale, h_scale, w_scale, h_scale], - dtype=np.float32) - results['img_shape'] = img.shape - # in case that there is no padding - results['pad_shape'] = img.shape - results['scale_factor'] = scale_factor - results['keep_ratio'] = self.keep_ratio - - def _resize_bboxes(self, results): - """Resize bounding boxes with ``results['scale_factor']``.""" - for key in results.get('bbox_fields', []): - bboxes = results[key] * results['scale_factor'] - if self.bbox_clip_border: - img_shape = results['img_shape'] - bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1]) - bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0]) - results[key] = bboxes - - def _resize_masks(self, results): - """Resize masks with ``results['scale']``""" - for key in results.get('mask_fields', []): - if results[key] is None: - continue - if self.keep_ratio: - results[key] = results[key].rescale(results['scale']) - else: - results[key] = results[key].resize(results['img_shape'][:2]) - - def _resize_seg(self, results): - """Resize semantic segmentation map with ``results['scale']``.""" - for key in results.get('seg_fields', []): - if self.keep_ratio: - gt_seg = mmcv.imrescale( - results[key], - results['scale'], - interpolation='nearest', - backend=self.backend) - else: - gt_seg = mmcv.imresize( - results[key], - results['scale'], - interpolation='nearest', - backend=self.backend) - results[key] = gt_seg - - def __call__(self, results): - """Call function to resize images, bounding boxes, masks, semantic - segmentation map. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \ - 'keep_ratio' keys are added into result dict. 
- """ - - if 'scale' not in results: - if 'scale_factor' in results: - img_shape = results['img'].shape[:2] - scale_factor = results['scale_factor'] - assert isinstance(scale_factor, float) - results['scale'] = tuple( - [int(x * scale_factor) for x in img_shape][::-1]) - else: - self._random_scale(results) - else: - if not self.override: - assert 'scale_factor' not in results, ( - 'scale and scale_factor cannot be both set.') - else: - results.pop('scale') - if 'scale_factor' in results: - results.pop('scale_factor') - self._random_scale(results) - - self._resize_img(results) - self._resize_bboxes(results) - self._resize_masks(results) - self._resize_seg(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(img_scale={self.img_scale}, ' - repr_str += f'multiscale_mode={self.multiscale_mode}, ' - repr_str += f'ratio_range={self.ratio_range}, ' - repr_str += f'keep_ratio={self.keep_ratio}, ' - repr_str += f'bbox_clip_border={self.bbox_clip_border})' - return repr_str - - -@PIPELINES.register_module() -class RandomFlip: - """Flip the image & bbox & mask. - - If the input dict contains the key "flip", then the flag will be used, - otherwise it will be randomly decided by a ratio specified in the init - method. - - When random flip is enabled, ``flip_ratio``/``direction`` can either be a - float/string or tuple of float/string. There are 3 flip modes: - - - ``flip_ratio`` is float, ``direction`` is string: the image will be - ``direction``ly flipped with probability of ``flip_ratio`` . - E.g., ``flip_ratio=0.5``, ``direction='horizontal'``, - then image will be horizontally flipped with probability of 0.5. - - ``flip_ratio`` is float, ``direction`` is list of string: the image will - be ``direction[i]``ly flipped with probability of - ``flip_ratio/len(direction)``. - E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``, - then image will be horizontally flipped with probability of 0.25, - vertically with probability of 0.25. - - ``flip_ratio`` is list of float, ``direction`` is list of string: - given ``len(flip_ratio) == len(direction)``, the image will - be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``. - E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal', - 'vertical']``, then image will be horizontally flipped with probability - of 0.3, vertically with probability of 0.5. - - Args: - flip_ratio (float | list[float], optional): The flipping probability. - Default: None. - direction(str | list[str], optional): The flipping direction. Options - are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'. - If input is a list, the length must equal ``flip_ratio``. Each - element in ``flip_ratio`` indicates the flip probability of - corresponding direction. 
- """ - - def __init__(self, flip_ratio=None, direction='horizontal'): - if isinstance(flip_ratio, list): - assert mmcv.is_list_of(flip_ratio, float) - assert 0 <= sum(flip_ratio) <= 1 - elif isinstance(flip_ratio, float): - assert 0 <= flip_ratio <= 1 - elif flip_ratio is None: - pass - else: - raise ValueError('flip_ratios must be None, float, ' - 'or list of float') - self.flip_ratio = flip_ratio - - valid_directions = ['horizontal', 'vertical', 'diagonal'] - if isinstance(direction, str): - assert direction in valid_directions - elif isinstance(direction, list): - assert mmcv.is_list_of(direction, str) - assert set(direction).issubset(set(valid_directions)) - else: - raise ValueError('direction must be either str or list of str') - self.direction = direction - - if isinstance(flip_ratio, list): - assert len(self.flip_ratio) == len(self.direction) - - def bbox_flip(self, bboxes, img_shape, direction): - """Flip bboxes horizontally. - - Args: - bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k) - img_shape (tuple[int]): Image shape (height, width) - direction (str): Flip direction. Options are 'horizontal', - 'vertical'. - - Returns: - numpy.ndarray: Flipped bounding boxes. - """ - - assert bboxes.shape[-1] % 4 == 0 - flipped = bboxes.copy() - if direction == 'horizontal': - w = img_shape[1] - flipped[..., 0::4] = w - bboxes[..., 2::4] - flipped[..., 2::4] = w - bboxes[..., 0::4] - elif direction == 'vertical': - h = img_shape[0] - flipped[..., 1::4] = h - bboxes[..., 3::4] - flipped[..., 3::4] = h - bboxes[..., 1::4] - elif direction == 'diagonal': - w = img_shape[1] - h = img_shape[0] - flipped[..., 0::4] = w - bboxes[..., 2::4] - flipped[..., 1::4] = h - bboxes[..., 3::4] - flipped[..., 2::4] = w - bboxes[..., 0::4] - flipped[..., 3::4] = h - bboxes[..., 1::4] - else: - raise ValueError(f"Invalid flipping direction '{direction}'") - return flipped - - def __call__(self, results): - """Call function to flip bounding boxes, masks, semantic segmentation - maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Flipped results, 'flip', 'flip_direction' keys are added \ - into result dict. 
- """ - - if 'flip' not in results: - if isinstance(self.direction, list): - # None means non-flip - direction_list = self.direction + [None] - else: - # None means non-flip - direction_list = [self.direction, None] - - if isinstance(self.flip_ratio, list): - non_flip_ratio = 1 - sum(self.flip_ratio) - flip_ratio_list = self.flip_ratio + [non_flip_ratio] - else: - non_flip_ratio = 1 - self.flip_ratio - # exclude non-flip - single_ratio = self.flip_ratio / (len(direction_list) - 1) - flip_ratio_list = [single_ratio] * (len(direction_list) - - 1) + [non_flip_ratio] - - cur_dir = np.random.choice(direction_list, p=flip_ratio_list) - - results['flip'] = cur_dir is not None - if 'flip_direction' not in results: - results['flip_direction'] = cur_dir - if results['flip']: - # flip image - for key in results.get('img_fields', ['img']): - results[key] = mmcv.imflip( - results[key], direction=results['flip_direction']) - # flip bboxes - for key in results.get('bbox_fields', []): - results[key] = self.bbox_flip(results[key], - results['img_shape'], - results['flip_direction']) - # flip masks - for key in results.get('mask_fields', []): - results[key] = results[key].flip(results['flip_direction']) - - # flip segs - for key in results.get('seg_fields', []): - results[key] = mmcv.imflip( - results[key], direction=results['flip_direction']) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})' - - -@PIPELINES.register_module() -class RandomShift: - """Shift the image and box given shift pixels and probability. - - Args: - shift_ratio (float): Probability of shifts. Default 0.5. - max_shift_px (int): The max pixels for shifting. Default 32. - filter_thr_px (int): The width and height threshold for filtering. - The bbox and the rest of the targets below the width and - height threshold will be filtered. Default 1. - """ - - def __init__(self, shift_ratio=0.5, max_shift_px=32, filter_thr_px=1): - assert 0 <= shift_ratio <= 1 - assert max_shift_px >= 0 - self.shift_ratio = shift_ratio - self.max_shift_px = max_shift_px - self.filter_thr_px = int(filter_thr_px) - # The key correspondence from bboxes to labels. - self.bbox2label = { - 'gt_bboxes': 'gt_labels', - 'gt_bboxes_ignore': 'gt_labels_ignore' - } - - def __call__(self, results): - """Call function to random shift images, bounding boxes. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Shift results. - """ - if random.random() < self.shift_ratio: - img_shape = results['img'].shape[:2] - - random_shift_x = random.randint(-self.max_shift_px, - self.max_shift_px) - random_shift_y = random.randint(-self.max_shift_px, - self.max_shift_px) - new_x = max(0, random_shift_x) - ori_x = max(0, -random_shift_x) - new_y = max(0, random_shift_y) - ori_y = max(0, -random_shift_y) - - # TODO: support mask and semantic segmentation maps. - for key in results.get('bbox_fields', []): - bboxes = results[key].copy() - bboxes[..., 0::2] += random_shift_x - bboxes[..., 1::2] += random_shift_y - - # clip border - bboxes[..., 0::2] = np.clip(bboxes[..., 0::2], 0, img_shape[1]) - bboxes[..., 1::2] = np.clip(bboxes[..., 1::2], 0, img_shape[0]) - - # remove invalid bboxes - bbox_w = bboxes[..., 2] - bboxes[..., 0] - bbox_h = bboxes[..., 3] - bboxes[..., 1] - valid_inds = (bbox_w > self.filter_thr_px) & ( - bbox_h > self.filter_thr_px) - # If the shift does not contain any gt-bbox area, skip this - # image. 
- if key == 'gt_bboxes' and not valid_inds.any(): - return results - bboxes = bboxes[valid_inds] - results[key] = bboxes - - # label fields. e.g. gt_labels and gt_labels_ignore - label_key = self.bbox2label.get(key) - if label_key in results: - results[label_key] = results[label_key][valid_inds] - - for key in results.get('img_fields', ['img']): - img = results[key] - new_img = np.zeros_like(img) - img_h, img_w = img.shape[:2] - new_h = img_h - np.abs(random_shift_y) - new_w = img_w - np.abs(random_shift_x) - new_img[new_y:new_y + new_h, new_x:new_x + new_w] \ - = img[ori_y:ori_y + new_h, ori_x:ori_x + new_w] - results[key] = new_img - - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(max_shift_px={self.max_shift_px}, ' - return repr_str - - -@PIPELINES.register_module() -class Pad: - """Pad the image & masks & segmentation map. - - There are two padding modes: (1) pad to a fixed size and (2) pad to the - minimum size that is divisible by some number. - Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor", - - Args: - size (tuple, optional): Fixed padding size. - size_divisor (int, optional): The divisor of padded size. - pad_to_square (bool): Whether to pad the image into a square. - Currently only used for YOLOX. Default: False. - pad_val (dict, optional): A dict for padding value, the default - value is `dict(img=0, masks=0, seg=255)`. - """ - - def __init__(self, - size=None, - size_divisor=None, - pad_to_square=False, - pad_val=dict(img=0, masks=0, seg=255)): - self.size = size - self.size_divisor = size_divisor - if isinstance(pad_val, float) or isinstance(pad_val, int): - warnings.warn( - 'pad_val of float type is deprecated now, ' - f'please use pad_val=dict(img={pad_val}, ' - f'masks={pad_val}, seg=255) instead.', DeprecationWarning) - pad_val = dict(img=pad_val, masks=pad_val, seg=255) - assert isinstance(pad_val, dict) - self.pad_val = pad_val - self.pad_to_square = pad_to_square - - if pad_to_square: - assert size is None and size_divisor is None, \ - 'The size and size_divisor must be None ' \ - 'when pad2square is True' - else: - assert size is not None or size_divisor is not None, \ - 'only one of size and size_divisor should be valid' - assert size is None or size_divisor is None - - def _pad_img(self, results): - """Pad images according to ``self.size``.""" - pad_val = self.pad_val.get('img', 0) - for key in results.get('img_fields', ['img']): - if self.pad_to_square: - max_size = max(results[key].shape[:2]) - self.size = (max_size, max_size) - if self.size is not None: - padded_img = mmcv.impad( - results[key], shape=self.size, pad_val=pad_val) - elif self.size_divisor is not None: - padded_img = mmcv.impad_to_multiple( - results[key], self.size_divisor, pad_val=pad_val) - results[key] = padded_img - results['pad_shape'] = padded_img.shape - results['pad_fixed_size'] = self.size - results['pad_size_divisor'] = self.size_divisor - - def _pad_masks(self, results): - """Pad masks according to ``results['pad_shape']``.""" - pad_shape = results['pad_shape'][:2] - pad_val = self.pad_val.get('masks', 0) - for key in results.get('mask_fields', []): - results[key] = results[key].pad(pad_shape, pad_val=pad_val) - - def _pad_seg(self, results): - """Pad semantic segmentation map according to - ``results['pad_shape']``.""" - pad_val = self.pad_val.get('seg', 255) - for key in results.get('seg_fields', []): - results[key] = mmcv.impad( - results[key], shape=results['pad_shape'][:2], pad_val=pad_val) - - def __call__(self, 
results): - """Call function to pad images, masks, semantic segmentation maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Updated result dict. - """ - self._pad_img(results) - self._pad_masks(results) - self._pad_seg(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(size={self.size}, ' - repr_str += f'size_divisor={self.size_divisor}, ' - repr_str += f'pad_to_square={self.pad_to_square}, ' - repr_str += f'pad_val={self.pad_val})' - return repr_str - - -@PIPELINES.register_module() -class Normalize: - """Normalize the image. - - Added key is "img_norm_cfg". - - Args: - mean (sequence): Mean values of 3 channels. - std (sequence): Std values of 3 channels. - to_rgb (bool): Whether to convert the image from BGR to RGB, - default is true. - """ - - def __init__(self, mean, std, to_rgb=True): - self.mean = np.array(mean, dtype=np.float32) - self.std = np.array(std, dtype=np.float32) - self.to_rgb = to_rgb - - def __call__(self, results): - """Call function to normalize images. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Normalized results, 'img_norm_cfg' key is added into - result dict. - """ - for key in results.get('img_fields', ['img']): - results[key] = mmcv.imnormalize(results[key], self.mean, self.std, - self.to_rgb) - results['img_norm_cfg'] = dict( - mean=self.mean, std=self.std, to_rgb=self.to_rgb) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})' - return repr_str - - -@PIPELINES.register_module() -class RandomCrop: - """Random crop the image & bboxes & masks. - - The absolute `crop_size` is sampled based on `crop_type` and `image_size`, - then the cropped results are generated. - - Args: - crop_size (tuple): The relative ratio or absolute pixels of - height and width. - crop_type (str, optional): one of "relative_range", "relative", - "absolute", "absolute_range". "relative" randomly crops - (h * crop_size[0], w * crop_size[1]) part from an input of size - (h, w). "relative_range" uniformly samples relative crop size from - range [crop_size[0], 1] and [crop_size[1], 1] for height and width - respectively. "absolute" crops from an input with absolute size - (crop_size[0], crop_size[1]). "absolute_range" uniformly samples - crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w - in range [crop_size[0], min(w, crop_size[1])]. Default "absolute". - allow_negative_crop (bool, optional): Whether to allow a crop that does - not contain any bbox area. Default False. - recompute_bbox (bool, optional): Whether to re-compute the boxes based - on cropped instance masks. Default False. - bbox_clip_border (bool, optional): Whether clip the objects outside - the border of the image. Defaults to True. - - Note: - - If the image is smaller than the absolute crop size, return the - original image. - - The keys for bboxes, labels and masks must be aligned. That is, - `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and - `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and - `gt_masks_ignore`. - - If the crop does not contain any gt-bbox region and - `allow_negative_crop` is set to False, skip this image. 
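To illustrate the crop_type modes just described, a hedged configuration sketch with assumed crop sizes:

```python
# Two illustrative RandomCrop settings; the sizes are assumptions only.
# 'absolute' crops a fixed (h, w) patch, 'relative_range' samples the crop
# size as a fraction of the input image.
absolute_crop = dict(type='RandomCrop', crop_size=(600, 600),
                     crop_type='absolute', allow_negative_crop=False)
relative_crop = dict(type='RandomCrop', crop_size=(0.6, 0.6),
                     crop_type='relative_range')
```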
- """ - - def __init__(self, - crop_size, - crop_type='absolute', - allow_negative_crop=False, - recompute_bbox=False, - bbox_clip_border=True): - if crop_type not in [ - 'relative_range', 'relative', 'absolute', 'absolute_range' - ]: - raise ValueError(f'Invalid crop_type {crop_type}.') - if crop_type in ['absolute', 'absolute_range']: - assert crop_size[0] > 0 and crop_size[1] > 0 - assert isinstance(crop_size[0], int) and isinstance( - crop_size[1], int) - else: - assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1 - self.crop_size = crop_size - self.crop_type = crop_type - self.allow_negative_crop = allow_negative_crop - self.bbox_clip_border = bbox_clip_border - self.recompute_bbox = recompute_bbox - # The key correspondence from bboxes to labels and masks. - self.bbox2label = { - 'gt_bboxes': 'gt_labels', - 'gt_bboxes_ignore': 'gt_labels_ignore' - } - self.bbox2mask = { - 'gt_bboxes': 'gt_masks', - 'gt_bboxes_ignore': 'gt_masks_ignore' - } - - def _crop_data(self, results, crop_size, allow_negative_crop): - """Function to randomly crop images, bounding boxes, masks, semantic - segmentation maps. - - Args: - results (dict): Result dict from loading pipeline. - crop_size (tuple): Expected absolute size after cropping, (h, w). - allow_negative_crop (bool): Whether to allow a crop that does not - contain any bbox area. Default to False. - - Returns: - dict: Randomly cropped results, 'img_shape' key in result dict is - updated according to crop size. - """ - assert crop_size[0] > 0 and crop_size[1] > 0 - for key in results.get('img_fields', ['img']): - img = results[key] - margin_h = max(img.shape[0] - crop_size[0], 0) - margin_w = max(img.shape[1] - crop_size[1], 0) - offset_h = np.random.randint(0, margin_h + 1) - offset_w = np.random.randint(0, margin_w + 1) - crop_y1, crop_y2 = offset_h, offset_h + crop_size[0] - crop_x1, crop_x2 = offset_w, offset_w + crop_size[1] - - # crop the image - img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] - img_shape = img.shape - results[key] = img - results['img_shape'] = img_shape - - # crop bboxes accordingly and clip to the image boundary - for key in results.get('bbox_fields', []): - # e.g. gt_bboxes and gt_bboxes_ignore - bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h], - dtype=np.float32) - bboxes = results[key] - bbox_offset - if self.bbox_clip_border: - bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1]) - bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0]) - valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & ( - bboxes[:, 3] > bboxes[:, 1]) - # If the crop does not contain any gt-bbox area and - # allow_negative_crop is False, skip this image. - if (key == 'gt_bboxes' and not valid_inds.any() - and not allow_negative_crop): - return None - results[key] = bboxes[valid_inds, :] - # label fields. e.g. gt_labels and gt_labels_ignore - label_key = self.bbox2label.get(key) - if label_key in results: - results[label_key] = results[label_key][valid_inds] - - # mask fields, e.g. 
gt_masks and gt_masks_ignore - mask_key = self.bbox2mask.get(key) - if mask_key in results: - results[mask_key] = results[mask_key][ - valid_inds.nonzero()[0]].crop( - np.asarray([crop_x1, crop_y1, crop_x2, crop_y2])) - if self.recompute_bbox: - results[key] = results[mask_key].get_bboxes() - - # crop semantic seg - for key in results.get('seg_fields', []): - results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2] - - return results - - def _get_crop_size(self, image_size): - """Randomly generates the absolute crop size based on `crop_type` and - `image_size`. - - Args: - image_size (tuple): (h, w). - - Returns: - crop_size (tuple): (crop_h, crop_w) in absolute pixels. - """ - h, w = image_size - if self.crop_type == 'absolute': - return (min(self.crop_size[0], h), min(self.crop_size[1], w)) - elif self.crop_type == 'absolute_range': - assert self.crop_size[0] <= self.crop_size[1] - crop_h = np.random.randint( - min(h, self.crop_size[0]), - min(h, self.crop_size[1]) + 1) - crop_w = np.random.randint( - min(w, self.crop_size[0]), - min(w, self.crop_size[1]) + 1) - return crop_h, crop_w - elif self.crop_type == 'relative': - crop_h, crop_w = self.crop_size - return int(h * crop_h + 0.5), int(w * crop_w + 0.5) - elif self.crop_type == 'relative_range': - crop_size = np.asarray(self.crop_size, dtype=np.float32) - crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size) - return int(h * crop_h + 0.5), int(w * crop_w + 0.5) - - def __call__(self, results): - """Call function to randomly crop images, bounding boxes, masks, - semantic segmentation maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Randomly cropped results, 'img_shape' key in result dict is - updated according to crop size. - """ - image_size = results['img'].shape[:2] - crop_size = self._get_crop_size(image_size) - results = self._crop_data(results, crop_size, self.allow_negative_crop) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(crop_size={self.crop_size}, ' - repr_str += f'crop_type={self.crop_type}, ' - repr_str += f'allow_negative_crop={self.allow_negative_crop}, ' - repr_str += f'bbox_clip_border={self.bbox_clip_border})' - return repr_str - - -@PIPELINES.register_module() -class SegRescale: - """Rescale semantic segmentation maps. - - Args: - scale_factor (float): The scale factor of the final output. - backend (str): Image rescale backend, choices are 'cv2' and 'pillow'. - These two backends generates slightly different results. Defaults - to 'cv2'. - """ - - def __init__(self, scale_factor=1, backend='cv2'): - self.scale_factor = scale_factor - self.backend = backend - - def __call__(self, results): - """Call function to scale the semantic segmentation map. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Result dict with semantic segmentation map scaled. - """ - - for key in results.get('seg_fields', []): - if self.scale_factor != 1: - results[key] = mmcv.imrescale( - results[key], - self.scale_factor, - interpolation='nearest', - backend=self.backend) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(scale_factor={self.scale_factor})' - - -@PIPELINES.register_module() -class PhotoMetricDistortion: - """Apply photometric distortion to image sequentially, every transformation - is applied with a probability of 0.5. The position of random contrast is in - second or second to last. - - 1. random brightness - 2. random contrast (mode 0) - 3. 
convert color from BGR to HSV - 4. random saturation - 5. random hue - 6. convert color from HSV to BGR - 7. random contrast (mode 1) - 8. randomly swap channels - - Args: - brightness_delta (int): delta of brightness. - contrast_range (tuple): range of contrast. - saturation_range (tuple): range of saturation. - hue_delta (int): delta of hue. - """ - - def __init__(self, - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18): - self.brightness_delta = brightness_delta - self.contrast_lower, self.contrast_upper = contrast_range - self.saturation_lower, self.saturation_upper = saturation_range - self.hue_delta = hue_delta - - def __call__(self, results): - """Call function to perform photometric distortion on images. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Result dict with images distorted. - """ - - if 'img_fields' in results: - assert results['img_fields'] == ['img'], \ - 'Only single img_fields is allowed' - img = results['img'] - img = img.astype(np.float32) - # random brightness - if random.randint(2): - delta = random.uniform(-self.brightness_delta, - self.brightness_delta) - img += delta - - # mode == 0 --> do random contrast first - # mode == 1 --> do random contrast last - mode = random.randint(2) - if mode == 1: - if random.randint(2): - alpha = random.uniform(self.contrast_lower, - self.contrast_upper) - img *= alpha - - # convert color from BGR to HSV - img = mmcv.bgr2hsv(img) - - # random saturation - if random.randint(2): - img[..., 1] *= random.uniform(self.saturation_lower, - self.saturation_upper) - - # random hue - if random.randint(2): - img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta) - img[..., 0][img[..., 0] > 360] -= 360 - img[..., 0][img[..., 0] < 0] += 360 - - # convert color from HSV to BGR - img = mmcv.hsv2bgr(img) - - # random contrast - if mode == 0: - if random.randint(2): - alpha = random.uniform(self.contrast_lower, - self.contrast_upper) - img *= alpha - - # randomly swap channels - if random.randint(2): - img = img[..., random.permutation(3)] - - results['img'] = img - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(\nbrightness_delta={self.brightness_delta},\n' - repr_str += 'contrast_range=' - repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n' - repr_str += 'saturation_range=' - repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n' - repr_str += f'hue_delta={self.hue_delta})' - return repr_str - - -@PIPELINES.register_module() -class Expand: - """Random expand the image & bboxes. - - Randomly place the original image on a canvas of 'ratio' x original image - size filled with mean values. The ratio is in the range of ratio_range. - - Args: - mean (tuple): mean value of dataset. - to_rgb (bool): if need to convert the order of mean to align with RGB. - ratio_range (tuple): range of expand ratio. - prob (float): probability of applying this transformation - """ - - def __init__(self, - mean=(0, 0, 0), - to_rgb=True, - ratio_range=(1, 4), - seg_ignore_label=None, - prob=0.5): - self.to_rgb = to_rgb - self.ratio_range = ratio_range - if to_rgb: - self.mean = mean[::-1] - else: - self.mean = mean - self.min_ratio, self.max_ratio = ratio_range - self.seg_ignore_label = seg_ignore_label - self.prob = prob - - def __call__(self, results): - """Call function to expand images, bounding boxes. - - Args: - results (dict): Result dict from loading pipeline. 
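PhotoMetricDistortion applies each sub-operation with probability 0.5 and places the contrast step either second or second-to-last. A minimal NumPy sketch of just the brightness/contrast ordering, with the HSV saturation/hue steps omitted (assumed helper, not part of the deleted code):

```python
import numpy as np

rng = np.random.default_rng(0)

def brightness_contrast_sketch(img, brightness_delta=32, contrast_range=(0.5, 1.5)):
    """Each op fires with probability 0.5; contrast runs either before or
    after the (omitted) HSV saturation/hue jitter, chosen by a coin flip."""
    img = img.astype(np.float32)
    if rng.integers(2):                       # random brightness
        img += rng.uniform(-brightness_delta, brightness_delta)
    contrast_last = bool(rng.integers(2))     # position of the contrast step
    if not contrast_last and rng.integers(2):
        img *= rng.uniform(*contrast_range)
    # ... BGR->HSV, saturation/hue jitter, HSV->BGR would go here ...
    if contrast_last and rng.integers(2):
        img *= rng.uniform(*contrast_range)
    return np.clip(img, 0, 255).astype(np.uint8)
```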
- - Returns: - dict: Result dict with images, bounding boxes expanded - """ - - if random.uniform(0, 1) > self.prob: - return results - - if 'img_fields' in results: - assert results['img_fields'] == ['img'], \ - 'Only single img_fields is allowed' - img = results['img'] - - h, w, c = img.shape - ratio = random.uniform(self.min_ratio, self.max_ratio) - # speedup expand when meets large image - if np.all(self.mean == self.mean[0]): - expand_img = np.empty((int(h * ratio), int(w * ratio), c), - img.dtype) - expand_img.fill(self.mean[0]) - else: - expand_img = np.full((int(h * ratio), int(w * ratio), c), - self.mean, - dtype=img.dtype) - left = int(random.uniform(0, w * ratio - w)) - top = int(random.uniform(0, h * ratio - h)) - expand_img[top:top + h, left:left + w] = img - - results['img'] = expand_img - # expand bboxes - for key in results.get('bbox_fields', []): - results[key] = results[key] + np.tile( - (left, top), 2).astype(results[key].dtype) - - # expand masks - for key in results.get('mask_fields', []): - results[key] = results[key].expand( - int(h * ratio), int(w * ratio), top, left) - - # expand segs - for key in results.get('seg_fields', []): - gt_seg = results[key] - expand_gt_seg = np.full((int(h * ratio), int(w * ratio)), - self.seg_ignore_label, - dtype=gt_seg.dtype) - expand_gt_seg[top:top + h, left:left + w] = gt_seg - results[key] = expand_gt_seg - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, ' - repr_str += f'ratio_range={self.ratio_range}, ' - repr_str += f'seg_ignore_label={self.seg_ignore_label})' - return repr_str - - -@PIPELINES.register_module() -class MinIoURandomCrop: - """Random crop the image & bboxes, the cropped patches have minimum IoU - requirement with original image & bboxes, the IoU threshold is randomly - selected from min_ious. - - Args: - min_ious (tuple): minimum IoU threshold for all intersections with - bounding boxes - min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w, - where a >= min_crop_size). - bbox_clip_border (bool, optional): Whether clip the objects outside - the border of the image. Defaults to True. - - Note: - The keys for bboxes, labels and masks should be paired. That is, \ - `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \ - `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`. - """ - - def __init__(self, - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3, - bbox_clip_border=True): - # 1: return ori img - self.min_ious = min_ious - self.sample_mode = (1, *min_ious, 0) - self.min_crop_size = min_crop_size - self.bbox_clip_border = bbox_clip_border - self.bbox2label = { - 'gt_bboxes': 'gt_labels', - 'gt_bboxes_ignore': 'gt_labels_ignore' - } - self.bbox2mask = { - 'gt_bboxes': 'gt_masks', - 'gt_bboxes_ignore': 'gt_masks_ignore' - } - - def __call__(self, results): - """Call function to crop images and bounding boxes with minimum IoU - constraint. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Result dict with images and bounding boxes cropped, \ - 'img_shape' key is updated. 
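Expand pastes the original image at a random offset on a mean-filled canvas that is `ratio` times larger and shifts the boxes by the same offset. A stand-alone NumPy sketch of that core step, assuming a 3-channel image and an (N, 4) xyxy box array (masks and segmentation handling are omitted):

```python
import numpy as np

rng = np.random.default_rng(0)

def expand_sketch(img, bboxes, mean=(123.675, 116.28, 103.53), ratio_range=(1, 4)):
    """Paste `img` at a random (left, top) on a larger mean-filled canvas and
    translate the boxes by the same offset."""
    h, w, c = img.shape
    ratio = rng.uniform(*ratio_range)
    canvas = np.full((int(h * ratio), int(w * ratio), c), mean, dtype=img.dtype)
    left = int(rng.uniform(0, w * ratio - w))
    top = int(rng.uniform(0, h * ratio - h))
    canvas[top:top + h, left:left + w] = img
    return canvas, bboxes + np.tile((left, top), 2).astype(bboxes.dtype)
```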
- """ - - if 'img_fields' in results: - assert results['img_fields'] == ['img'], \ - 'Only single img_fields is allowed' - img = results['img'] - assert 'bbox_fields' in results - boxes = [results[key] for key in results['bbox_fields']] - boxes = np.concatenate(boxes, 0) - h, w, c = img.shape - while True: - mode = random.choice(self.sample_mode) - self.mode = mode - if mode == 1: - return results - - min_iou = mode - for i in range(50): - new_w = random.uniform(self.min_crop_size * w, w) - new_h = random.uniform(self.min_crop_size * h, h) - - # h / w in [0.5, 2] - if new_h / new_w < 0.5 or new_h / new_w > 2: - continue - - left = random.uniform(w - new_w) - top = random.uniform(h - new_h) - - patch = np.array( - (int(left), int(top), int(left + new_w), int(top + new_h))) - # Line or point crop is not allowed - if patch[2] == patch[0] or patch[3] == patch[1]: - continue - overlaps = bbox_overlaps( - patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1) - if len(overlaps) > 0 and overlaps.min() < min_iou: - continue - - # center of boxes should inside the crop img - # only adjust boxes and instance masks when the gt is not empty - if len(overlaps) > 0: - # adjust boxes - def is_center_of_bboxes_in_patch(boxes, patch): - center = (boxes[:, :2] + boxes[:, 2:]) / 2 - mask = ((center[:, 0] > patch[0]) * - (center[:, 1] > patch[1]) * - (center[:, 0] < patch[2]) * - (center[:, 1] < patch[3])) - return mask - - mask = is_center_of_bboxes_in_patch(boxes, patch) - if not mask.any(): - continue - for key in results.get('bbox_fields', []): - boxes = results[key].copy() - mask = is_center_of_bboxes_in_patch(boxes, patch) - boxes = boxes[mask] - if self.bbox_clip_border: - boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:]) - boxes[:, :2] = boxes[:, :2].clip(min=patch[:2]) - boxes -= np.tile(patch[:2], 2) - - results[key] = boxes - # labels - label_key = self.bbox2label.get(key) - if label_key in results: - results[label_key] = results[label_key][mask] - - # mask fields - mask_key = self.bbox2mask.get(key) - if mask_key in results: - results[mask_key] = results[mask_key][ - mask.nonzero()[0]].crop(patch) - # adjust the img no matter whether the gt is empty before crop - img = img[patch[1]:patch[3], patch[0]:patch[2]] - results['img'] = img - results['img_shape'] = img.shape - - # seg fields - for key in results.get('seg_fields', []): - results[key] = results[key][patch[1]:patch[3], - patch[0]:patch[2]] - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(min_ious={self.min_ious}, ' - repr_str += f'min_crop_size={self.min_crop_size}, ' - repr_str += f'bbox_clip_border={self.bbox_clip_border})' - return repr_str - - -@PIPELINES.register_module() -class Corrupt: - """Corruption augmentation. - - Corruption transforms implemented based on - `imagecorruptions `_. - - Args: - corruption (str): Corruption name. - severity (int, optional): The severity of corruption. Default: 1. - """ - - def __init__(self, corruption, severity=1): - self.corruption = corruption - self.severity = severity - - def __call__(self, results): - """Call function to corrupt image. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Result dict with images corrupted. 
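MinIoURandomCrop keeps re-sampling patches until every ground-truth box overlaps the patch with IoU at least the threshold drawn from `min_ious`, then keeps only the boxes whose centers fall inside the patch. An illustrative SSD-style pipeline fragment (values are placeholders, not taken from this diff):

```python
train_pipeline = [
    dict(type='PhotoMetricDistortion'),
    dict(
        type='Expand',
        mean=[123.675, 116.28, 103.53],
        to_rgb=True,
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),  # one threshold drawn per attempt
        min_crop_size=0.3),                  # crop sides >= 0.3 * original
]
```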
- """ - - if corrupt is None: - raise RuntimeError('imagecorruptions is not installed') - if 'img_fields' in results: - assert results['img_fields'] == ['img'], \ - 'Only single img_fields is allowed' - results['img'] = corrupt( - results['img'].astype(np.uint8), - corruption_name=self.corruption, - severity=self.severity) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(corruption={self.corruption}, ' - repr_str += f'severity={self.severity})' - return repr_str - - -@PIPELINES.register_module() -class Albu: - """Albumentation augmentation. - - Adds custom transformations from Albumentations library. - Please, visit `https://albumentations.readthedocs.io` - to get more information. - - An example of ``transforms`` is as followed: - - .. code-block:: - - [ - dict( - type='ShiftScaleRotate', - shift_limit=0.0625, - scale_limit=0.0, - rotate_limit=0, - interpolation=1, - p=0.5), - dict( - type='RandomBrightnessContrast', - brightness_limit=[0.1, 0.3], - contrast_limit=[0.1, 0.3], - p=0.2), - dict(type='ChannelShuffle', p=0.1), - dict( - type='OneOf', - transforms=[ - dict(type='Blur', blur_limit=3, p=1.0), - dict(type='MedianBlur', blur_limit=3, p=1.0) - ], - p=0.1), - ] - - Args: - transforms (list[dict]): A list of albu transformations - bbox_params (dict): Bbox_params for albumentation `Compose` - keymap (dict): Contains {'input key':'albumentation-style key'} - skip_img_without_anno (bool): Whether to skip the image if no ann left - after aug - """ - - def __init__(self, - transforms, - bbox_params=None, - keymap=None, - update_pad_shape=False, - skip_img_without_anno=False): - if Compose is None: - raise RuntimeError('albumentations is not installed') - - # Args will be modified later, copying it will be safer - transforms = copy.deepcopy(transforms) - if bbox_params is not None: - bbox_params = copy.deepcopy(bbox_params) - if keymap is not None: - keymap = copy.deepcopy(keymap) - self.transforms = transforms - self.filter_lost_elements = False - self.update_pad_shape = update_pad_shape - self.skip_img_without_anno = skip_img_without_anno - - # A simple workaround to remove masks without boxes - if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params - and 'filter_lost_elements' in bbox_params): - self.filter_lost_elements = True - self.origin_label_fields = bbox_params['label_fields'] - bbox_params['label_fields'] = ['idx_mapper'] - del bbox_params['filter_lost_elements'] - - self.bbox_params = ( - self.albu_builder(bbox_params) if bbox_params else None) - self.aug = Compose([self.albu_builder(t) for t in self.transforms], - bbox_params=self.bbox_params) - - if not keymap: - self.keymap_to_albu = { - 'img': 'image', - 'gt_masks': 'masks', - 'gt_bboxes': 'bboxes' - } - else: - self.keymap_to_albu = keymap - self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()} - - def albu_builder(self, cfg): - """Import a module from albumentations. - - It inherits some of :func:`build_from_cfg` logic. - - Args: - cfg (dict): Config dict. It should at least contain the key "type". - - Returns: - obj: The constructed object. 
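Corrupt is a thin wrapper around the optional `imagecorruptions` package; the corruption name and severity (1-5) are passed straight through. A one-line illustrative config entry (the corruption name comes from that package):

```python
corrupt_step = dict(type='Corrupt', corruption='gaussian_noise', severity=3)
```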
- """ - - assert isinstance(cfg, dict) and 'type' in cfg - args = cfg.copy() - - obj_type = args.pop('type') - if mmcv.is_str(obj_type): - if albumentations is None: - raise RuntimeError('albumentations is not installed') - obj_cls = getattr(albumentations, obj_type) - elif inspect.isclass(obj_type): - obj_cls = obj_type - else: - raise TypeError( - f'type must be a str or valid type, but got {type(obj_type)}') - - if 'transforms' in args: - args['transforms'] = [ - self.albu_builder(transform) - for transform in args['transforms'] - ] - - return obj_cls(**args) - - @staticmethod - def mapper(d, keymap): - """Dictionary mapper. Renames keys according to keymap provided. - - Args: - d (dict): old dict - keymap (dict): {'old_key':'new_key'} - Returns: - dict: new dict. - """ - - updated_dict = {} - for k, v in zip(d.keys(), d.values()): - new_k = keymap.get(k, k) - updated_dict[new_k] = d[k] - return updated_dict - - def __call__(self, results): - # dict to albumentations format - results = self.mapper(results, self.keymap_to_albu) - # TODO: add bbox_fields - if 'bboxes' in results: - # to list of boxes - if isinstance(results['bboxes'], np.ndarray): - results['bboxes'] = [x for x in results['bboxes']] - # add pseudo-field for filtration - if self.filter_lost_elements: - results['idx_mapper'] = np.arange(len(results['bboxes'])) - - # TODO: Support mask structure in albu - if 'masks' in results: - if isinstance(results['masks'], PolygonMasks): - raise NotImplementedError( - 'Albu only supports BitMap masks now') - ori_masks = results['masks'] - if albumentations.__version__ < '0.5': - results['masks'] = results['masks'].masks - else: - results['masks'] = [mask for mask in results['masks'].masks] - - results = self.aug(**results) - - if 'bboxes' in results: - if isinstance(results['bboxes'], list): - results['bboxes'] = np.array( - results['bboxes'], dtype=np.float32) - results['bboxes'] = results['bboxes'].reshape(-1, 4) - - # filter label_fields - if self.filter_lost_elements: - - for label in self.origin_label_fields: - results[label] = np.array( - [results[label][i] for i in results['idx_mapper']]) - if 'masks' in results: - results['masks'] = np.array( - [results['masks'][i] for i in results['idx_mapper']]) - results['masks'] = ori_masks.__class__( - results['masks'], results['image'].shape[0], - results['image'].shape[1]) - - if (not len(results['idx_mapper']) - and self.skip_img_without_anno): - return None - - if 'gt_labels' in results: - if isinstance(results['gt_labels'], list): - results['gt_labels'] = np.array(results['gt_labels']) - results['gt_labels'] = results['gt_labels'].astype(np.int64) - - # back to the original format - results = self.mapper(results, self.keymap_back) - - # update final shape - if self.update_pad_shape: - results['pad_shape'] = results['img'].shape - - return results - - def __repr__(self): - repr_str = self.__class__.__name__ + f'(transforms={self.transforms})' - return repr_str - - -@PIPELINES.register_module() -class RandomCenterCropPad: - """Random center crop and random around padding for CornerNet. - - This operation generates randomly cropped image from the original image and - pads it simultaneously. Different from :class:`RandomCrop`, the output - shape may not equal to ``crop_size`` strictly. We choose a random value - from ``ratios`` and the output shape could be larger or smaller than - ``crop_size``. The padding operation is also different from :class:`Pad`, - here we use around padding instead of right-bottom padding. 
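The Albu wrapper builds an albumentations `Compose` from the dicts in `transforms`, forwards box metadata through `bbox_params`, and renames result keys via `keymap`. An illustrative config assuming xyxy ('pascal_voc') boxes; the concrete limits are placeholders:

```python
albu_step = dict(
    type='Albu',
    transforms=[
        dict(type='ShiftScaleRotate', shift_limit=0.0625, scale_limit=0.0,
             rotate_limit=0, interpolation=1, p=0.5),
        dict(type='ChannelShuffle', p=0.1),
    ],
    bbox_params=dict(
        type='BboxParams',
        format='pascal_voc',          # xyxy, matching mmdet gt_bboxes
        label_fields=['gt_labels'],
        min_visibility=0.0,
        filter_lost_elements=True),   # drop labels/masks of removed boxes
    keymap=dict(img='image', gt_bboxes='bboxes'),
    skip_img_without_anno=True)
```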
- - The relation between output image (padding image) and original image: - - .. code:: text - - output image - - +----------------------------+ - | padded area | - +------|----------------------------|----------+ - | | cropped area | | - | | +---------------+ | | - | | | . center | | | original image - | | | range | | | - | | +---------------+ | | - +------|----------------------------|----------+ - | padded area | - +----------------------------+ - - There are 5 main areas in the figure: - - - output image: output image of this operation, also called padding - image in following instruction. - - original image: input image of this operation. - - padded area: non-intersect area of output image and original image. - - cropped area: the overlap of output image and original image. - - center range: a smaller area where random center chosen from. - center range is computed by ``border`` and original image's shape - to avoid our random center is too close to original image's border. - - Also this operation act differently in train and test mode, the summary - pipeline is listed below. - - Train pipeline: - - 1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image - will be ``random_ratio * crop_size``. - 2. Choose a ``random_center`` in center range. - 3. Generate padding image with center matches the ``random_center``. - 4. Initialize the padding image with pixel value equals to ``mean``. - 5. Copy the cropped area to padding image. - 6. Refine annotations. - - Test pipeline: - - 1. Compute output shape according to ``test_pad_mode``. - 2. Generate padding image with center matches the original image - center. - 3. Initialize the padding image with pixel value equals to ``mean``. - 4. Copy the ``cropped area`` to padding image. - - Args: - crop_size (tuple | None): expected size after crop, final size will - computed according to ratio. Requires (h, w) in train mode, and - None in test mode. - ratios (tuple): random select a ratio from tuple and crop image to - (crop_size[0] * ratio) * (crop_size[1] * ratio). - Only available in train mode. - border (int): max distance from center select area to image border. - Only available in train mode. - mean (sequence): Mean values of 3 channels. - std (sequence): Std values of 3 channels. - to_rgb (bool): Whether to convert the image from BGR to RGB. - test_mode (bool): whether involve random variables in transform. - In train mode, crop_size is fixed, center coords and ratio is - random selected from predefined lists. In test mode, crop_size - is image's original shape, center coords and ratio is fixed. - test_pad_mode (tuple): padding method and padding shape value, only - available in test mode. Default is using 'logical_or' with - 127 as padding shape value. - - - 'logical_or': final_shape = input_shape | padding_shape_value - - 'size_divisor': final_shape = int( - ceil(input_shape / padding_shape_value) * padding_shape_value) - test_pad_add_pix (int): Extra padding pixel in test mode. Default 0. - bbox_clip_border (bool, optional): Whether clip the objects outside - the border of the image. Defaults to True. 
- """ - - def __init__(self, - crop_size=None, - ratios=(0.9, 1.0, 1.1), - border=128, - mean=None, - std=None, - to_rgb=None, - test_mode=False, - test_pad_mode=('logical_or', 127), - test_pad_add_pix=0, - bbox_clip_border=True): - if test_mode: - assert crop_size is None, 'crop_size must be None in test mode' - assert ratios is None, 'ratios must be None in test mode' - assert border is None, 'border must be None in test mode' - assert isinstance(test_pad_mode, (list, tuple)) - assert test_pad_mode[0] in ['logical_or', 'size_divisor'] - else: - assert isinstance(crop_size, (list, tuple)) - assert crop_size[0] > 0 and crop_size[1] > 0, ( - 'crop_size must > 0 in train mode') - assert isinstance(ratios, (list, tuple)) - assert test_pad_mode is None, ( - 'test_pad_mode must be None in train mode') - - self.crop_size = crop_size - self.ratios = ratios - self.border = border - # We do not set default value to mean, std and to_rgb because these - # hyper-parameters are easy to forget but could affect the performance. - # Please use the same setting as Normalize for performance assurance. - assert mean is not None and std is not None and to_rgb is not None - self.to_rgb = to_rgb - self.input_mean = mean - self.input_std = std - if to_rgb: - self.mean = mean[::-1] - self.std = std[::-1] - else: - self.mean = mean - self.std = std - self.test_mode = test_mode - self.test_pad_mode = test_pad_mode - self.test_pad_add_pix = test_pad_add_pix - self.bbox_clip_border = bbox_clip_border - - def _get_border(self, border, size): - """Get final border for the target size. - - This function generates a ``final_border`` according to image's shape. - The area between ``final_border`` and ``size - final_border`` is the - ``center range``. We randomly choose center from the ``center range`` - to avoid our random center is too close to original image's border. - Also ``center range`` should be larger than 0. - - Args: - border (int): The initial border, default is 128. - size (int): The width or height of original image. - Returns: - int: The final border. - """ - k = 2 * border / size - i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k))) - return border // i - - def _filter_boxes(self, patch, boxes): - """Check whether the center of each box is in the patch. - - Args: - patch (list[int]): The cropped area, [left, top, right, bottom]. - boxes (numpy array, (N x 4)): Ground truth boxes. - - Returns: - mask (numpy array, (N,)): Each box is inside or outside the patch. - """ - center = (boxes[:, :2] + boxes[:, 2:]) / 2 - mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * ( - center[:, 0] < patch[2]) * ( - center[:, 1] < patch[3]) - return mask - - def _crop_image_and_paste(self, image, center, size): - """Crop image with a given center and size, then paste the cropped - image to a blank image with two centers align. - - This function is equivalent to generating a blank image with ``size`` - as its shape. Then cover it on the original image with two centers ( - the center of blank image and the random center of original image) - aligned. The overlap area is paste from the original image and the - outside area is filled with ``mean pixel``. - - Args: - image (np array, H x W x C): Original image. - center (list[int]): Target crop center coord. - size (list[int]): Target crop size. [target_h, target_w] - - Returns: - cropped_img (np array, target_h x target_w x C): Cropped image. 
- border (np array, 4): The distance of four border of - ``cropped_img`` to the original image area, [top, bottom, - left, right] - patch (list[int]): The cropped area, [left, top, right, bottom]. - """ - center_y, center_x = center - target_h, target_w = size - img_h, img_w, img_c = image.shape - - x0 = max(0, center_x - target_w // 2) - x1 = min(center_x + target_w // 2, img_w) - y0 = max(0, center_y - target_h // 2) - y1 = min(center_y + target_h // 2, img_h) - patch = np.array((int(x0), int(y0), int(x1), int(y1))) - - left, right = center_x - x0, x1 - center_x - top, bottom = center_y - y0, y1 - center_y - - cropped_center_y, cropped_center_x = target_h // 2, target_w // 2 - cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype) - for i in range(img_c): - cropped_img[:, :, i] += self.mean[i] - y_slice = slice(cropped_center_y - top, cropped_center_y + bottom) - x_slice = slice(cropped_center_x - left, cropped_center_x + right) - cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :] - - border = np.array([ - cropped_center_y - top, cropped_center_y + bottom, - cropped_center_x - left, cropped_center_x + right - ], - dtype=np.float32) - - return cropped_img, border, patch - - def _train_aug(self, results): - """Random crop and around padding the original image. - - Args: - results (dict): Image infomations in the augment pipeline. - - Returns: - results (dict): The updated dict. - """ - img = results['img'] - h, w, c = img.shape - boxes = results['gt_bboxes'] - while True: - scale = random.choice(self.ratios) - new_h = int(self.crop_size[0] * scale) - new_w = int(self.crop_size[1] * scale) - h_border = self._get_border(self.border, h) - w_border = self._get_border(self.border, w) - - for i in range(50): - center_x = random.randint(low=w_border, high=w - w_border) - center_y = random.randint(low=h_border, high=h - h_border) - - cropped_img, border, patch = self._crop_image_and_paste( - img, [center_y, center_x], [new_h, new_w]) - - mask = self._filter_boxes(patch, boxes) - # if image do not have valid bbox, any crop patch is valid. - if not mask.any() and len(boxes) > 0: - continue - - results['img'] = cropped_img - results['img_shape'] = cropped_img.shape - results['pad_shape'] = cropped_img.shape - - x0, y0, x1, y1 = patch - - left_w, top_h = center_x - x0, center_y - y0 - cropped_center_x, cropped_center_y = new_w // 2, new_h // 2 - - # crop bboxes accordingly and clip to the image boundary - for key in results.get('bbox_fields', []): - mask = self._filter_boxes(patch, results[key]) - bboxes = results[key][mask] - bboxes[:, 0:4:2] += cropped_center_x - left_w - x0 - bboxes[:, 1:4:2] += cropped_center_y - top_h - y0 - if self.bbox_clip_border: - bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w) - bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h) - keep = (bboxes[:, 2] > bboxes[:, 0]) & ( - bboxes[:, 3] > bboxes[:, 1]) - bboxes = bboxes[keep] - results[key] = bboxes - if key in ['gt_bboxes']: - if 'gt_labels' in results: - labels = results['gt_labels'][mask] - labels = labels[keep] - results['gt_labels'] = labels - if 'gt_masks' in results: - raise NotImplementedError( - 'RandomCenterCropPad only supports bbox.') - - # crop semantic seg - for key in results.get('seg_fields', []): - raise NotImplementedError( - 'RandomCenterCropPad only supports bbox.') - return results - - def _test_aug(self, results): - """Around padding the original image without cropping. - - The padding mode and value are from ``test_pad_mode``. 
- - Args: - results (dict): Image infomations in the augment pipeline. - - Returns: - results (dict): The updated dict. - """ - img = results['img'] - h, w, c = img.shape - results['img_shape'] = img.shape - if self.test_pad_mode[0] in ['logical_or']: - # self.test_pad_add_pix is only used for centernet - target_h = (h | self.test_pad_mode[1]) + self.test_pad_add_pix - target_w = (w | self.test_pad_mode[1]) + self.test_pad_add_pix - elif self.test_pad_mode[0] in ['size_divisor']: - divisor = self.test_pad_mode[1] - target_h = int(np.ceil(h / divisor)) * divisor - target_w = int(np.ceil(w / divisor)) * divisor - else: - raise NotImplementedError( - 'RandomCenterCropPad only support two testing pad mode:' - 'logical-or and size_divisor.') - - cropped_img, border, _ = self._crop_image_and_paste( - img, [h // 2, w // 2], [target_h, target_w]) - results['img'] = cropped_img - results['pad_shape'] = cropped_img.shape - results['border'] = border - return results - - def __call__(self, results): - img = results['img'] - assert img.dtype == np.float32, ( - 'RandomCenterCropPad needs the input image of dtype np.float32,' - ' please set "to_float32=True" in "LoadImageFromFile" pipeline') - h, w, c = img.shape - assert c == len(self.mean) - if self.test_mode: - return self._test_aug(results) - else: - return self._train_aug(results) - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(crop_size={self.crop_size}, ' - repr_str += f'ratios={self.ratios}, ' - repr_str += f'border={self.border}, ' - repr_str += f'mean={self.input_mean}, ' - repr_str += f'std={self.input_std}, ' - repr_str += f'to_rgb={self.to_rgb}, ' - repr_str += f'test_mode={self.test_mode}, ' - repr_str += f'test_pad_mode={self.test_pad_mode}, ' - repr_str += f'bbox_clip_border={self.bbox_clip_border})' - return repr_str - - -@PIPELINES.register_module() -class CutOut: - """CutOut operation. - - Randomly drop some regions of image used in - `Cutout `_. - - Args: - n_holes (int | tuple[int, int]): Number of regions to be dropped. - If it is given as a list, number of holes will be randomly - selected from the closed interval [`n_holes[0]`, `n_holes[1]`]. - cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate - shape of dropped regions. It can be `tuple[int, int]` to use a - fixed cutout shape, or `list[tuple[int, int]]` to randomly choose - shape from the list. - cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The - candidate ratio of dropped regions. It can be `tuple[float, float]` - to use a fixed ratio or `list[tuple[float, float]]` to randomly - choose ratio from the list. Please note that `cutout_shape` - and `cutout_ratio` cannot be both given at the same time. - fill_in (tuple[float, float, float] | tuple[int, int, int]): The value - of pixel to fill in the dropped regions. Default: (0, 0, 0). - """ - - def __init__(self, - n_holes, - cutout_shape=None, - cutout_ratio=None, - fill_in=(0, 0, 0)): - - assert (cutout_shape is None) ^ (cutout_ratio is None), \ - 'Either cutout_shape or cutout_ratio should be specified.' 
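In test mode RandomCenterCropPad only pads: the output shape is computed from `test_pad_mode`. A small sketch of that shape rule (the helper name is an assumption):

```python
import numpy as np

def test_pad_shape(h, w, test_pad_mode=('logical_or', 127), add_pix=0):
    """'logical_or' bitwise-ORs each side with the pad value; 'size_divisor'
    rounds each side up to a multiple of the value."""
    mode, value = test_pad_mode
    if mode == 'logical_or':
        return (h | value) + add_pix, (w | value) + add_pix
    if mode == 'size_divisor':
        return int(np.ceil(h / value)) * value, int(np.ceil(w / value)) * value
    raise NotImplementedError(mode)

print(test_pad_shape(480, 640))  # -> (511, 767)
```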
- assert (isinstance(cutout_shape, (list, tuple)) - or isinstance(cutout_ratio, (list, tuple))) - if isinstance(n_holes, tuple): - assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1] - else: - n_holes = (n_holes, n_holes) - self.n_holes = n_holes - self.fill_in = fill_in - self.with_ratio = cutout_ratio is not None - self.candidates = cutout_ratio if self.with_ratio else cutout_shape - if not isinstance(self.candidates, list): - self.candidates = [self.candidates] - - def __call__(self, results): - """Call function to drop some regions of image.""" - h, w, c = results['img'].shape - n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1) - for _ in range(n_holes): - x1 = np.random.randint(0, w) - y1 = np.random.randint(0, h) - index = np.random.randint(0, len(self.candidates)) - if not self.with_ratio: - cutout_w, cutout_h = self.candidates[index] - else: - cutout_w = int(self.candidates[index][0] * w) - cutout_h = int(self.candidates[index][1] * h) - - x2 = np.clip(x1 + cutout_w, 0, w) - y2 = np.clip(y1 + cutout_h, 0, h) - results['img'][y1:y2, x1:x2, :] = self.fill_in - - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(n_holes={self.n_holes}, ' - repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio - else f'cutout_shape={self.candidates}, ') - repr_str += f'fill_in={self.fill_in})' - return repr_str - - -@PIPELINES.register_module() -class Mosaic: - """Mosaic augmentation. - - Given 4 images, mosaic transform combines them into - one output image. The output image is composed of the parts from each sub- - image. - - .. code:: text - - mosaic transform - center_x - +------------------------------+ - | pad | pad | - | +-----------+ | - | | | | - | | image1 |--------+ | - | | | | | - | | | image2 | | - center_y |----+-------------+-----------| - | | cropped | | - |pad | image3 | image4 | - | | | | - +----|-------------+-----------+ - | | - +-------------+ - - The mosaic transform steps are as follows: - - 1. Choose the mosaic center as the intersections of 4 images - 2. Get the left top image according to the index, and randomly - sample another 3 images from the custom dataset. - 3. Sub image will be cropped if image is larger than mosaic patch - - Args: - img_scale (Sequence[int]): Image size after mosaic pipeline of single - image. The shape order should be (height, width). - Default to (640, 640). - center_ratio_range (Sequence[float]): Center ratio range of mosaic - output. Default to (0.5, 1.5). - min_bbox_size (int | float): The minimum pixel for filtering - invalid bboxes after the mosaic pipeline. Default to 0. - bbox_clip_border (bool, optional): Whether to clip the objects outside - the border of the image. In some dataset like MOT17, the gt bboxes - are allowed to cross the border of images. Therefore, we don't - need to clip the gt bboxes in these cases. Defaults to True. - skip_filter (bool): Whether to skip filtering rules. If it - is True, the filter rule will not be applied, and the - `min_bbox_size` is invalid. Default to True. - pad_val (int): Pad value. Default to 114. - prob (float): Probability of applying this transformation. - Default to 1.0. - """ - - def __init__(self, - img_scale=(640, 640), - center_ratio_range=(0.5, 1.5), - min_bbox_size=0, - bbox_clip_border=True, - skip_filter=True, - pad_val=114, - prob=1.0): - assert isinstance(img_scale, tuple) - assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. '\ - f'got {prob}.' 
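CutOut accepts either fixed pixel shapes or ratios relative to the image size, but not both at once. Two illustrative config entries (values are placeholders):

```python
cutout_fixed = dict(type='CutOut', n_holes=(1, 3), cutout_shape=(8, 8))
cutout_ratio = dict(type='CutOut', n_holes=2, cutout_ratio=(0.05, 0.1),
                    fill_in=(114, 114, 114))
```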
- - log_img_scale(img_scale, skip_square=True) - self.img_scale = img_scale - self.center_ratio_range = center_ratio_range - self.min_bbox_size = min_bbox_size - self.bbox_clip_border = bbox_clip_border - self.skip_filter = skip_filter - self.pad_val = pad_val - self.prob = prob - - def __call__(self, results): - """Call function to make a mosaic of image. - - Args: - results (dict): Result dict. - - Returns: - dict: Result dict with mosaic transformed. - """ - - if random.uniform(0, 1) > self.prob: - return results - - results = self._mosaic_transform(results) - return results - - def get_indexes(self, dataset): - """Call function to collect indexes. - - Args: - dataset (:obj:`MultiImageMixDataset`): The dataset. - - Returns: - list: indexes. - """ - - indexes = [random.randint(0, len(dataset)) for _ in range(3)] - return indexes - - def _mosaic_transform(self, results): - """Mosaic transform function. - - Args: - results (dict): Result dict. - - Returns: - dict: Updated result dict. - """ - - assert 'mix_results' in results - mosaic_labels = [] - mosaic_bboxes = [] - if len(results['img'].shape) == 3: - mosaic_img = np.full( - (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2), 3), - self.pad_val, - dtype=results['img'].dtype) - else: - mosaic_img = np.full( - (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2)), - self.pad_val, - dtype=results['img'].dtype) - - # mosaic center x, y - center_x = int( - random.uniform(*self.center_ratio_range) * self.img_scale[1]) - center_y = int( - random.uniform(*self.center_ratio_range) * self.img_scale[0]) - center_position = (center_x, center_y) - - loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right') - for i, loc in enumerate(loc_strs): - if loc == 'top_left': - results_patch = copy.deepcopy(results) - else: - results_patch = copy.deepcopy(results['mix_results'][i - 1]) - - img_i = results_patch['img'] - h_i, w_i = img_i.shape[:2] - # keep_ratio resize - scale_ratio_i = min(self.img_scale[0] / h_i, - self.img_scale[1] / w_i) - img_i = mmcv.imresize( - img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i))) - - # compute the combine parameters - paste_coord, crop_coord = self._mosaic_combine( - loc, center_position, img_i.shape[:2][::-1]) - x1_p, y1_p, x2_p, y2_p = paste_coord - x1_c, y1_c, x2_c, y2_c = crop_coord - - # crop and paste image - mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c] - - # adjust coordinate - gt_bboxes_i = results_patch['gt_bboxes'] - gt_labels_i = results_patch['gt_labels'] - - if gt_bboxes_i.shape[0] > 0: - padw = x1_p - x1_c - padh = y1_p - y1_c - gt_bboxes_i[:, 0::2] = \ - scale_ratio_i * gt_bboxes_i[:, 0::2] + padw - gt_bboxes_i[:, 1::2] = \ - scale_ratio_i * gt_bboxes_i[:, 1::2] + padh - - mosaic_bboxes.append(gt_bboxes_i) - mosaic_labels.append(gt_labels_i) - - if len(mosaic_labels) > 0: - mosaic_bboxes = np.concatenate(mosaic_bboxes, 0) - mosaic_labels = np.concatenate(mosaic_labels, 0) - - if self.bbox_clip_border: - mosaic_bboxes[:, 0::2] = np.clip(mosaic_bboxes[:, 0::2], 0, - 2 * self.img_scale[1]) - mosaic_bboxes[:, 1::2] = np.clip(mosaic_bboxes[:, 1::2], 0, - 2 * self.img_scale[0]) - - if not self.skip_filter: - mosaic_bboxes, mosaic_labels = \ - self._filter_box_candidates(mosaic_bboxes, mosaic_labels) - - # remove outside bboxes - inside_inds = find_inside_bboxes(mosaic_bboxes, 2 * self.img_scale[0], - 2 * self.img_scale[1]) - mosaic_bboxes = mosaic_bboxes[inside_inds] - mosaic_labels = mosaic_labels[inside_inds] - - results['img'] = mosaic_img - results['img_shape'] = 
mosaic_img.shape - results['gt_bboxes'] = mosaic_bboxes - results['gt_labels'] = mosaic_labels - - return results - - def _mosaic_combine(self, loc, center_position_xy, img_shape_wh): - """Calculate global coordinate of mosaic image and local coordinate of - cropped sub-image. - - Args: - loc (str): Index for the sub-image, loc in ('top_left', - 'top_right', 'bottom_left', 'bottom_right'). - center_position_xy (Sequence[float]): Mixing center for 4 images, - (x, y). - img_shape_wh (Sequence[int]): Width and height of sub-image - - Returns: - tuple[tuple[float]]: Corresponding coordinate of pasting and - cropping - - paste_coord (tuple): paste corner coordinate in mosaic image. - - crop_coord (tuple): crop corner coordinate in mosaic image. - """ - assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right') - if loc == 'top_left': - # index0 to top left part of image - x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ - max(center_position_xy[1] - img_shape_wh[1], 0), \ - center_position_xy[0], \ - center_position_xy[1] - crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - ( - y2 - y1), img_shape_wh[0], img_shape_wh[1] - - elif loc == 'top_right': - # index1 to top right part of image - x1, y1, x2, y2 = center_position_xy[0], \ - max(center_position_xy[1] - img_shape_wh[1], 0), \ - min(center_position_xy[0] + img_shape_wh[0], - self.img_scale[1] * 2), \ - center_position_xy[1] - crop_coord = 0, img_shape_wh[1] - (y2 - y1), min( - img_shape_wh[0], x2 - x1), img_shape_wh[1] - - elif loc == 'bottom_left': - # index2 to bottom left part of image - x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \ - center_position_xy[1], \ - center_position_xy[0], \ - min(self.img_scale[0] * 2, center_position_xy[1] + - img_shape_wh[1]) - crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min( - y2 - y1, img_shape_wh[1]) - - else: - # index3 to bottom right part of image - x1, y1, x2, y2 = center_position_xy[0], \ - center_position_xy[1], \ - min(center_position_xy[0] + img_shape_wh[0], - self.img_scale[1] * 2), \ - min(self.img_scale[0] * 2, center_position_xy[1] + - img_shape_wh[1]) - crop_coord = 0, 0, min(img_shape_wh[0], - x2 - x1), min(y2 - y1, img_shape_wh[1]) - - paste_coord = x1, y1, x2, y2 - return paste_coord, crop_coord - - def _filter_box_candidates(self, bboxes, labels): - """Filter out bboxes too small after Mosaic.""" - bbox_w = bboxes[:, 2] - bboxes[:, 0] - bbox_h = bboxes[:, 3] - bboxes[:, 1] - valid_inds = (bbox_w > self.min_bbox_size) & \ - (bbox_h > self.min_bbox_size) - valid_inds = np.nonzero(valid_inds)[0] - return bboxes[valid_inds], labels[valid_inds] - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'img_scale={self.img_scale}, ' - repr_str += f'center_ratio_range={self.center_ratio_range}, ' - repr_str += f'pad_val={self.pad_val}, ' - repr_str += f'min_bbox_size={self.min_bbox_size}, ' - repr_str += f'skip_filter={self.skip_filter})' - return repr_str - - -@PIPELINES.register_module() -class MixUp: - """MixUp data augmentation. - - .. code:: text - - mixup transform - +------------------------------+ - | mixup image | | - | +--------|--------+ | - | | | | | - |---------------+ | | - | | | | - | | image | | - | | | | - | | | | - | |-----------------+ | - | pad | - +------------------------------+ - - The mixup transform steps are as follows: - - 1. Another random image is picked by dataset and embedded in - the top left patch(after padding and resizing) - 2. 
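Mosaic (like MixUp and CopyPaste below) relies on a wrapper dataset to fill `results['mix_results']` with the extra images returned by `get_indexes`. A sketch of how it is typically wired, assuming the MultiImageMixDataset wrapper and COCO-style paths, neither of which is part of this diff:

```python
img_scale = (640, 640)  # (height, width) of one sub-image slot

train_pipeline = [
    dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
    dict(
        type='RandomAffine',
        scaling_ratio_range=(0.1, 2),
        border=(-img_scale[0] // 2, -img_scale[1] // 2)),  # crop back to img_scale
]

train_dataset = dict(
    type='MultiImageMixDataset',
    dataset=dict(
        type='CocoDataset',
        ann_file='data/coco/annotations/instances_train2017.json',
        img_prefix='data/coco/train2017/',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
        ],
        filter_empty_gt=False),
    pipeline=train_pipeline)
```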
The target of mixup transform is the weighted average of mixup - image and origin image. - - Args: - img_scale (Sequence[int]): Image output size after mixup pipeline. - The shape order should be (height, width). Default: (640, 640). - ratio_range (Sequence[float]): Scale ratio of mixup image. - Default: (0.5, 1.5). - flip_ratio (float): Horizontal flip ratio of mixup image. - Default: 0.5. - pad_val (int): Pad value. Default: 114. - max_iters (int): The maximum number of iterations. If the number of - iterations is greater than `max_iters`, but gt_bbox is still - empty, then the iteration is terminated. Default: 15. - min_bbox_size (float): Width and height threshold to filter bboxes. - If the height or width of a box is smaller than this value, it - will be removed. Default: 5. - min_area_ratio (float): Threshold of area ratio between - original bboxes and wrapped bboxes. If smaller than this value, - the box will be removed. Default: 0.2. - max_aspect_ratio (float): Aspect ratio of width and height - threshold to filter bboxes. If max(h/w, w/h) larger than this - value, the box will be removed. Default: 20. - bbox_clip_border (bool, optional): Whether to clip the objects outside - the border of the image. In some dataset like MOT17, the gt bboxes - are allowed to cross the border of images. Therefore, we don't - need to clip the gt bboxes in these cases. Defaults to True. - skip_filter (bool): Whether to skip filtering rules. If it - is True, the filter rule will not be applied, and the - `min_bbox_size` and `min_area_ratio` and `max_aspect_ratio` - is invalid. Default to True. - """ - - def __init__(self, - img_scale=(640, 640), - ratio_range=(0.5, 1.5), - flip_ratio=0.5, - pad_val=114, - max_iters=15, - min_bbox_size=5, - min_area_ratio=0.2, - max_aspect_ratio=20, - bbox_clip_border=True, - skip_filter=True): - assert isinstance(img_scale, tuple) - log_img_scale(img_scale, skip_square=True) - self.dynamic_scale = img_scale - self.ratio_range = ratio_range - self.flip_ratio = flip_ratio - self.pad_val = pad_val - self.max_iters = max_iters - self.min_bbox_size = min_bbox_size - self.min_area_ratio = min_area_ratio - self.max_aspect_ratio = max_aspect_ratio - self.bbox_clip_border = bbox_clip_border - self.skip_filter = skip_filter - - def __call__(self, results): - """Call function to make a mixup of image. - - Args: - results (dict): Result dict. - - Returns: - dict: Result dict with mixup transformed. - """ - - results = self._mixup_transform(results) - return results - - def get_indexes(self, dataset): - """Call function to collect indexes. - - Args: - dataset (:obj:`MultiImageMixDataset`): The dataset. - - Returns: - list: indexes. - """ - - for i in range(self.max_iters): - index = random.randint(0, len(dataset)) - gt_bboxes_i = dataset.get_ann_info(index)['bboxes'] - if len(gt_bboxes_i) != 0: - break - - return index - - def _mixup_transform(self, results): - """MixUp transform function. - - Args: - results (dict): Result dict. - - Returns: - dict: Updated result dict. - """ - - assert 'mix_results' in results - assert len( - results['mix_results']) == 1, 'MixUp only support 2 images now !' 
- - if results['mix_results'][0]['gt_bboxes'].shape[0] == 0: - # empty bbox - return results - - retrieve_results = results['mix_results'][0] - retrieve_img = retrieve_results['img'] - - jit_factor = random.uniform(*self.ratio_range) - is_filp = random.uniform(0, 1) > self.flip_ratio - - if len(retrieve_img.shape) == 3: - out_img = np.ones( - (self.dynamic_scale[0], self.dynamic_scale[1], 3), - dtype=retrieve_img.dtype) * self.pad_val - else: - out_img = np.ones( - self.dynamic_scale, dtype=retrieve_img.dtype) * self.pad_val - - # 1. keep_ratio resize - scale_ratio = min(self.dynamic_scale[0] / retrieve_img.shape[0], - self.dynamic_scale[1] / retrieve_img.shape[1]) - retrieve_img = mmcv.imresize( - retrieve_img, (int(retrieve_img.shape[1] * scale_ratio), - int(retrieve_img.shape[0] * scale_ratio))) - - # 2. paste - out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img - - # 3. scale jit - scale_ratio *= jit_factor - out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor), - int(out_img.shape[0] * jit_factor))) - - # 4. flip - if is_filp: - out_img = out_img[:, ::-1, :] - - # 5. random crop - ori_img = results['img'] - origin_h, origin_w = out_img.shape[:2] - target_h, target_w = ori_img.shape[:2] - padded_img = np.zeros( - (max(origin_h, target_h), max(origin_w, - target_w), 3)).astype(np.uint8) - padded_img[:origin_h, :origin_w] = out_img - - x_offset, y_offset = 0, 0 - if padded_img.shape[0] > target_h: - y_offset = random.randint(0, padded_img.shape[0] - target_h) - if padded_img.shape[1] > target_w: - x_offset = random.randint(0, padded_img.shape[1] - target_w) - padded_cropped_img = padded_img[y_offset:y_offset + target_h, - x_offset:x_offset + target_w] - - # 6. adjust bbox - retrieve_gt_bboxes = retrieve_results['gt_bboxes'] - retrieve_gt_bboxes[:, 0::2] = retrieve_gt_bboxes[:, 0::2] * scale_ratio - retrieve_gt_bboxes[:, 1::2] = retrieve_gt_bboxes[:, 1::2] * scale_ratio - if self.bbox_clip_border: - retrieve_gt_bboxes[:, 0::2] = np.clip(retrieve_gt_bboxes[:, 0::2], - 0, origin_w) - retrieve_gt_bboxes[:, 1::2] = np.clip(retrieve_gt_bboxes[:, 1::2], - 0, origin_h) - - if is_filp: - retrieve_gt_bboxes[:, 0::2] = ( - origin_w - retrieve_gt_bboxes[:, 0::2][:, ::-1]) - - # 7. filter - cp_retrieve_gt_bboxes = retrieve_gt_bboxes.copy() - cp_retrieve_gt_bboxes[:, 0::2] = \ - cp_retrieve_gt_bboxes[:, 0::2] - x_offset - cp_retrieve_gt_bboxes[:, 1::2] = \ - cp_retrieve_gt_bboxes[:, 1::2] - y_offset - if self.bbox_clip_border: - cp_retrieve_gt_bboxes[:, 0::2] = np.clip( - cp_retrieve_gt_bboxes[:, 0::2], 0, target_w) - cp_retrieve_gt_bboxes[:, 1::2] = np.clip( - cp_retrieve_gt_bboxes[:, 1::2], 0, target_h) - - # 8. 
mix up - ori_img = ori_img.astype(np.float32) - mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32) - - retrieve_gt_labels = retrieve_results['gt_labels'] - if not self.skip_filter: - keep_list = self._filter_box_candidates(retrieve_gt_bboxes.T, - cp_retrieve_gt_bboxes.T) - - retrieve_gt_labels = retrieve_gt_labels[keep_list] - cp_retrieve_gt_bboxes = cp_retrieve_gt_bboxes[keep_list] - - mixup_gt_bboxes = np.concatenate( - (results['gt_bboxes'], cp_retrieve_gt_bboxes), axis=0) - mixup_gt_labels = np.concatenate( - (results['gt_labels'], retrieve_gt_labels), axis=0) - - # remove outside bbox - inside_inds = find_inside_bboxes(mixup_gt_bboxes, target_h, target_w) - mixup_gt_bboxes = mixup_gt_bboxes[inside_inds] - mixup_gt_labels = mixup_gt_labels[inside_inds] - - results['img'] = mixup_img.astype(np.uint8) - results['img_shape'] = mixup_img.shape - results['gt_bboxes'] = mixup_gt_bboxes - results['gt_labels'] = mixup_gt_labels - - return results - - def _filter_box_candidates(self, bbox1, bbox2): - """Compute candidate boxes which include following 5 things: - - bbox1 before augment, bbox2 after augment, min_bbox_size (pixels), - min_area_ratio, max_aspect_ratio. - """ - - w1, h1 = bbox1[2] - bbox1[0], bbox1[3] - bbox1[1] - w2, h2 = bbox2[2] - bbox2[0], bbox2[3] - bbox2[1] - ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) - return ((w2 > self.min_bbox_size) - & (h2 > self.min_bbox_size) - & (w2 * h2 / (w1 * h1 + 1e-16) > self.min_area_ratio) - & (ar < self.max_aspect_ratio)) - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'dynamic_scale={self.dynamic_scale}, ' - repr_str += f'ratio_range={self.ratio_range}, ' - repr_str += f'flip_ratio={self.flip_ratio}, ' - repr_str += f'pad_val={self.pad_val}, ' - repr_str += f'max_iters={self.max_iters}, ' - repr_str += f'min_bbox_size={self.min_bbox_size}, ' - repr_str += f'min_area_ratio={self.min_area_ratio}, ' - repr_str += f'max_aspect_ratio={self.max_aspect_ratio}, ' - repr_str += f'skip_filter={self.skip_filter})' - return repr_str - - -@PIPELINES.register_module() -class RandomAffine: - """Random affine transform data augmentation. - - This operation randomly generates affine transform matrix which including - rotation, translation, shear and scaling transforms. - - Args: - max_rotate_degree (float): Maximum degrees of rotation transform. - Default: 10. - max_translate_ratio (float): Maximum ratio of translation. - Default: 0.1. - scaling_ratio_range (tuple[float]): Min and max ratio of - scaling transform. Default: (0.5, 1.5). - max_shear_degree (float): Maximum degrees of shear - transform. Default: 2. - border (tuple[int]): Distance from height and width sides of input - image to adjust output shape. Only used in mosaic dataset. - Default: (0, 0). - border_val (tuple[int]): Border padding values of 3 channels. - Default: (114, 114, 114). - min_bbox_size (float): Width and height threshold to filter bboxes. - If the height or width of a box is smaller than this value, it - will be removed. Default: 2. - min_area_ratio (float): Threshold of area ratio between - original bboxes and wrapped bboxes. If smaller than this value, - the box will be removed. Default: 0.2. - max_aspect_ratio (float): Aspect ratio of width and height - threshold to filter bboxes. If max(h/w, w/h) larger than this - value, the box will be removed. - bbox_clip_border (bool, optional): Whether to clip the objects outside - the border of the image. 
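Step 8 of MixUp blends the two images with fixed equal weights rather than a sampled Beta coefficient. A minimal NumPy sketch of that blending step alone (assumed helper; the box and label concatenation is omitted):

```python
import numpy as np

def mixup_blend(ori_img, padded_cropped_img):
    """Unweighted 0.5/0.5 average of the original image and the padded,
    cropped retrieved image; both must already have the same shape."""
    assert ori_img.shape == padded_cropped_img.shape
    mixed = 0.5 * ori_img.astype(np.float32) \
        + 0.5 * padded_cropped_img.astype(np.float32)
    return mixed.astype(np.uint8)
```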
In some dataset like MOT17, the gt bboxes - are allowed to cross the border of images. Therefore, we don't - need to clip the gt bboxes in these cases. Defaults to True. - skip_filter (bool): Whether to skip filtering rules. If it - is True, the filter rule will not be applied, and the - `min_bbox_size` and `min_area_ratio` and `max_aspect_ratio` - is invalid. Default to True. - """ - - def __init__(self, - max_rotate_degree=10.0, - max_translate_ratio=0.1, - scaling_ratio_range=(0.5, 1.5), - max_shear_degree=2.0, - border=(0, 0), - border_val=(114, 114, 114), - min_bbox_size=2, - min_area_ratio=0.2, - max_aspect_ratio=20, - bbox_clip_border=True, - skip_filter=True): - assert 0 <= max_translate_ratio <= 1 - assert scaling_ratio_range[0] <= scaling_ratio_range[1] - assert scaling_ratio_range[0] > 0 - self.max_rotate_degree = max_rotate_degree - self.max_translate_ratio = max_translate_ratio - self.scaling_ratio_range = scaling_ratio_range - self.max_shear_degree = max_shear_degree - self.border = border - self.border_val = border_val - self.min_bbox_size = min_bbox_size - self.min_area_ratio = min_area_ratio - self.max_aspect_ratio = max_aspect_ratio - self.bbox_clip_border = bbox_clip_border - self.skip_filter = skip_filter - - def __call__(self, results): - img = results['img'] - height = img.shape[0] + self.border[0] * 2 - width = img.shape[1] + self.border[1] * 2 - - # Rotation - rotation_degree = random.uniform(-self.max_rotate_degree, - self.max_rotate_degree) - rotation_matrix = self._get_rotation_matrix(rotation_degree) - - # Scaling - scaling_ratio = random.uniform(self.scaling_ratio_range[0], - self.scaling_ratio_range[1]) - scaling_matrix = self._get_scaling_matrix(scaling_ratio) - - # Shear - x_degree = random.uniform(-self.max_shear_degree, - self.max_shear_degree) - y_degree = random.uniform(-self.max_shear_degree, - self.max_shear_degree) - shear_matrix = self._get_shear_matrix(x_degree, y_degree) - - # Translation - trans_x = random.uniform(-self.max_translate_ratio, - self.max_translate_ratio) * width - trans_y = random.uniform(-self.max_translate_ratio, - self.max_translate_ratio) * height - translate_matrix = self._get_translation_matrix(trans_x, trans_y) - - warp_matrix = ( - translate_matrix @ shear_matrix @ rotation_matrix @ scaling_matrix) - - img = cv2.warpPerspective( - img, - warp_matrix, - dsize=(width, height), - borderValue=self.border_val) - results['img'] = img - results['img_shape'] = img.shape - - for key in results.get('bbox_fields', []): - bboxes = results[key] - num_bboxes = len(bboxes) - if num_bboxes: - # homogeneous coordinates - xs = bboxes[:, [0, 0, 2, 2]].reshape(num_bboxes * 4) - ys = bboxes[:, [1, 3, 3, 1]].reshape(num_bboxes * 4) - ones = np.ones_like(xs) - points = np.vstack([xs, ys, ones]) - - warp_points = warp_matrix @ points - warp_points = warp_points[:2] / warp_points[2] - xs = warp_points[0].reshape(num_bboxes, 4) - ys = warp_points[1].reshape(num_bboxes, 4) - - warp_bboxes = np.vstack( - (xs.min(1), ys.min(1), xs.max(1), ys.max(1))).T - - if self.bbox_clip_border: - warp_bboxes[:, [0, 2]] = \ - warp_bboxes[:, [0, 2]].clip(0, width) - warp_bboxes[:, [1, 3]] = \ - warp_bboxes[:, [1, 3]].clip(0, height) - - # remove outside bbox - valid_index = find_inside_bboxes(warp_bboxes, height, width) - if not self.skip_filter: - # filter bboxes - filter_index = self.filter_gt_bboxes( - bboxes * scaling_ratio, warp_bboxes) - valid_index = valid_index & filter_index - - results[key] = warp_bboxes[valid_index] - if key in ['gt_bboxes']: - if 
'gt_labels' in results: - results['gt_labels'] = results['gt_labels'][ - valid_index] - - if 'gt_masks' in results: - raise NotImplementedError( - 'RandomAffine only supports bbox.') - return results - - def filter_gt_bboxes(self, origin_bboxes, wrapped_bboxes): - origin_w = origin_bboxes[:, 2] - origin_bboxes[:, 0] - origin_h = origin_bboxes[:, 3] - origin_bboxes[:, 1] - wrapped_w = wrapped_bboxes[:, 2] - wrapped_bboxes[:, 0] - wrapped_h = wrapped_bboxes[:, 3] - wrapped_bboxes[:, 1] - aspect_ratio = np.maximum(wrapped_w / (wrapped_h + 1e-16), - wrapped_h / (wrapped_w + 1e-16)) - - wh_valid_idx = (wrapped_w > self.min_bbox_size) & \ - (wrapped_h > self.min_bbox_size) - area_valid_idx = wrapped_w * wrapped_h / (origin_w * origin_h + - 1e-16) > self.min_area_ratio - aspect_ratio_valid_idx = aspect_ratio < self.max_aspect_ratio - return wh_valid_idx & area_valid_idx & aspect_ratio_valid_idx - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(max_rotate_degree={self.max_rotate_degree}, ' - repr_str += f'max_translate_ratio={self.max_translate_ratio}, ' - repr_str += f'scaling_ratio={self.scaling_ratio_range}, ' - repr_str += f'max_shear_degree={self.max_shear_degree}, ' - repr_str += f'border={self.border}, ' - repr_str += f'border_val={self.border_val}, ' - repr_str += f'min_bbox_size={self.min_bbox_size}, ' - repr_str += f'min_area_ratio={self.min_area_ratio}, ' - repr_str += f'max_aspect_ratio={self.max_aspect_ratio}, ' - repr_str += f'skip_filter={self.skip_filter})' - return repr_str - - @staticmethod - def _get_rotation_matrix(rotate_degrees): - radian = math.radians(rotate_degrees) - rotation_matrix = np.array( - [[np.cos(radian), -np.sin(radian), 0.], - [np.sin(radian), np.cos(radian), 0.], [0., 0., 1.]], - dtype=np.float32) - return rotation_matrix - - @staticmethod - def _get_scaling_matrix(scale_ratio): - scaling_matrix = np.array( - [[scale_ratio, 0., 0.], [0., scale_ratio, 0.], [0., 0., 1.]], - dtype=np.float32) - return scaling_matrix - - @staticmethod - def _get_share_matrix(scale_ratio): - scaling_matrix = np.array( - [[scale_ratio, 0., 0.], [0., scale_ratio, 0.], [0., 0., 1.]], - dtype=np.float32) - return scaling_matrix - - @staticmethod - def _get_shear_matrix(x_shear_degrees, y_shear_degrees): - x_radian = math.radians(x_shear_degrees) - y_radian = math.radians(y_shear_degrees) - shear_matrix = np.array([[1, np.tan(x_radian), 0.], - [np.tan(y_radian), 1, 0.], [0., 0., 1.]], - dtype=np.float32) - return shear_matrix - - @staticmethod - def _get_translation_matrix(x, y): - translation_matrix = np.array([[1, 0., x], [0., 1, y], [0., 0., 1.]], - dtype=np.float32) - return translation_matrix - - -@PIPELINES.register_module() -class YOLOXHSVRandomAug: - """Apply HSV augmentation to image sequentially. It is referenced from - https://github.com/Megvii- - BaseDetection/YOLOX/blob/main/yolox/data/data_augment.py#L21. - - Args: - hue_delta (int): delta of hue. Default: 5. - saturation_delta (int): delta of saturation. Default: 30. - value_delta (int): delat of value. Default: 30. 
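RandomAffine composes its 3x3 warp as translate @ shear @ rotate @ scale in homogeneous coordinates and then warps the four corners of every box. A small sketch of that matrix composition under the definitions above (the helper name is an assumption):

```python
import math

import numpy as np

def compose_warp(rot_deg, scale, shear_x_deg, shear_y_deg, trans_x, trans_y):
    r = math.radians(rot_deg)
    rotate = np.array([[math.cos(r), -math.sin(r), 0.],
                       [math.sin(r), math.cos(r), 0.],
                       [0., 0., 1.]], dtype=np.float32)
    scale_m = np.diag([scale, scale, 1.]).astype(np.float32)
    shear = np.array([[1., math.tan(math.radians(shear_x_deg)), 0.],
                      [math.tan(math.radians(shear_y_deg)), 1., 0.],
                      [0., 0., 1.]], dtype=np.float32)
    translate = np.array([[1., 0., trans_x],
                          [0., 1., trans_y],
                          [0., 0., 1.]], dtype=np.float32)
    # Same composition order as the deleted __call__ above.
    return translate @ shear @ rotate @ scale_m
```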
- """ - - def __init__(self, hue_delta=5, saturation_delta=30, value_delta=30): - self.hue_delta = hue_delta - self.saturation_delta = saturation_delta - self.value_delta = value_delta - - def __call__(self, results): - img = results['img'] - hsv_gains = np.random.uniform(-1, 1, 3) * [ - self.hue_delta, self.saturation_delta, self.value_delta - ] - # random selection of h, s, v - hsv_gains *= np.random.randint(0, 2, 3) - # prevent overflow - hsv_gains = hsv_gains.astype(np.int16) - img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16) - - img_hsv[..., 0] = (img_hsv[..., 0] + hsv_gains[0]) % 180 - img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_gains[1], 0, 255) - img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_gains[2], 0, 255) - cv2.cvtColor(img_hsv.astype(img.dtype), cv2.COLOR_HSV2BGR, dst=img) - - results['img'] = img - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(hue_delta={self.hue_delta}, ' - repr_str += f'saturation_delta={self.saturation_delta}, ' - repr_str += f'value_delta={self.value_delta})' - return repr_str - - -@PIPELINES.register_module() -class CopyPaste: - """Simple Copy-Paste is a Strong Data Augmentation Method for Instance - Segmentation The simple copy-paste transform steps are as follows: - - 1. The destination image is already resized with aspect ratio kept, - cropped and padded. - 2. Randomly select a source image, which is also already resized - with aspect ratio kept, cropped and padded in a similar way - as the destination image. - 3. Randomly select some objects from the source image. - 4. Paste these source objects to the destination image directly, - due to the source and destination image have the same size. - 5. Update object masks of the destination image, for some origin objects - may be occluded. - 6. Generate bboxes from the updated destination masks and - filter some objects which are totally occluded, and adjust bboxes - which are partly occluded. - 7. Append selected source bboxes, masks, and labels. - - Args: - max_num_pasted (int): The maximum number of pasted objects. - Default: 100. - bbox_occluded_thr (int): The threshold of occluded bbox. - Default: 10. - mask_occluded_thr (int): The threshold of occluded mask. - Default: 300. - selected (bool): Whether select objects or not. If select is False, - all objects of the source image will be pasted to the - destination image. - Default: True. - """ - - def __init__( - self, - max_num_pasted=100, - bbox_occluded_thr=10, - mask_occluded_thr=300, - selected=True, - ): - self.max_num_pasted = max_num_pasted - self.bbox_occluded_thr = bbox_occluded_thr - self.mask_occluded_thr = mask_occluded_thr - self.selected = selected - - def get_indexes(self, dataset): - """Call function to collect indexes.s. - - Args: - dataset (:obj:`MultiImageMixDataset`): The dataset. - Returns: - list: Indexes. - """ - return random.randint(0, len(dataset)) - - def __call__(self, results): - """Call function to make a copy-paste of image. - - Args: - results (dict): Result dict. - Returns: - dict: Result dict with copy-paste transformed. 
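YOLOXHSVRandomAug draws one gain per HSV channel in [-delta, delta], randomly zeroes some of them, wraps hue modulo 180, and clips saturation and value. A stand-alone OpenCV/NumPy sketch of the same jitter (the function name is an assumption):

```python
import cv2
import numpy as np

def hsv_jitter(img_bgr, hue_delta=5, saturation_delta=30, value_delta=30):
    gains = np.random.uniform(-1, 1, 3) * [hue_delta, saturation_delta, value_delta]
    gains = (gains * np.random.randint(0, 2, 3)).astype(np.int16)  # zero some channels
    hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV).astype(np.int16)
    hsv[..., 0] = (hsv[..., 0] + gains[0]) % 180           # hue wraps around
    hsv[..., 1] = np.clip(hsv[..., 1] + gains[1], 0, 255)  # saturation
    hsv[..., 2] = np.clip(hsv[..., 2] + gains[2], 0, 255)  # value
    return cv2.cvtColor(hsv.astype(img_bgr.dtype), cv2.COLOR_HSV2BGR)
```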
- """ - - assert 'mix_results' in results - num_images = len(results['mix_results']) - assert num_images == 1, \ - f'CopyPaste only supports processing 2 images, got {num_images}' - if self.selected: - selected_results = self._select_object(results['mix_results'][0]) - else: - selected_results = results['mix_results'][0] - return self._copy_paste(results, selected_results) - - def _select_object(self, results): - """Select some objects from the source results.""" - bboxes = results['gt_bboxes'] - labels = results['gt_labels'] - masks = results['gt_masks'] - max_num_pasted = min(bboxes.shape[0] + 1, self.max_num_pasted) - num_pasted = np.random.randint(0, max_num_pasted) - selected_inds = np.random.choice( - bboxes.shape[0], size=num_pasted, replace=False) - - selected_bboxes = bboxes[selected_inds] - selected_labels = labels[selected_inds] - selected_masks = masks[selected_inds] - - results['gt_bboxes'] = selected_bboxes - results['gt_labels'] = selected_labels - results['gt_masks'] = selected_masks - return results - - def _copy_paste(self, dst_results, src_results): - """CopyPaste transform function. - - Args: - dst_results (dict): Result dict of the destination image. - src_results (dict): Result dict of the source image. - Returns: - dict: Updated result dict. - """ - dst_img = dst_results['img'] - dst_bboxes = dst_results['gt_bboxes'] - dst_labels = dst_results['gt_labels'] - dst_masks = dst_results['gt_masks'] - - src_img = src_results['img'] - src_bboxes = src_results['gt_bboxes'] - src_labels = src_results['gt_labels'] - src_masks = src_results['gt_masks'] - - if len(src_bboxes) == 0: - return dst_results - - # update masks and generate bboxes from updated masks - composed_mask = np.where(np.any(src_masks.masks, axis=0), 1, 0) - updated_dst_masks = self.get_updated_masks(dst_masks, composed_mask) - updated_dst_bboxes = updated_dst_masks.get_bboxes() - assert len(updated_dst_bboxes) == len(updated_dst_masks) - - # filter totally occluded objects - bboxes_inds = np.all( - np.abs( - (updated_dst_bboxes - dst_bboxes)) <= self.bbox_occluded_thr, - axis=-1) - masks_inds = updated_dst_masks.masks.sum( - axis=(1, 2)) > self.mask_occluded_thr - valid_inds = bboxes_inds | masks_inds - - # Paste source objects to destination image directly - img = dst_img * (1 - composed_mask[..., np.newaxis] - ) + src_img * composed_mask[..., np.newaxis] - bboxes = np.concatenate([updated_dst_bboxes[valid_inds], src_bboxes]) - labels = np.concatenate([dst_labels[valid_inds], src_labels]) - masks = np.concatenate( - [updated_dst_masks.masks[valid_inds], src_masks.masks]) - - dst_results['img'] = img - dst_results['gt_bboxes'] = bboxes - dst_results['gt_labels'] = labels - dst_results['gt_masks'] = BitmapMasks(masks, masks.shape[1], - masks.shape[2]) - - return dst_results - - def get_updated_masks(self, masks, composed_mask): - assert masks.masks.shape[-2:] == composed_mask.shape[-2:], \ - 'Cannot compare two arrays of different size' - masks.masks = np.where(composed_mask, 0, masks.masks) - return masks - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'max_num_pasted={self.max_num_pasted}, ' - repr_str += f'bbox_occluded_thr={self.bbox_occluded_thr}, ' - repr_str += f'mask_occluded_thr={self.mask_occluded_thr}, ' - repr_str += f'selected={self.selected}, ' - return repr_str diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/__init__.py b/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/__init__.py deleted file mode 100644 index 
a4c7ea135af652712e5a9f14a2002c516c44a16b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .class_aware_sampler import ClassAwareSampler -from .distributed_sampler import DistributedSampler -from .group_sampler import DistributedGroupSampler, GroupSampler -from .infinite_sampler import InfiniteBatchSampler, InfiniteGroupBatchSampler - -__all__ = [ - 'DistributedSampler', 'DistributedGroupSampler', 'GroupSampler', - 'InfiniteGroupBatchSampler', 'InfiniteBatchSampler', 'ClassAwareSampler' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/class_aware_sampler.py b/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/class_aware_sampler.py deleted file mode 100644 index c52708eb8b98d85b3fac3ee55c7519be60681896..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/class_aware_sampler.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import torch -from mmcv.runner import get_dist_info -from torch.utils.data import Sampler - -from mmdet.core.utils import sync_random_seed - - -class ClassAwareSampler(Sampler): - r"""Sampler that restricts data loading to the label of the dataset. - - A class-aware sampling strategy to effectively tackle the - non-uniform class distribution. The length of the training data is - consistent with source data. Simple improvements based on `Relay - Backpropagation for Effective Learning of Deep Convolutional - Neural Networks `_ - - The implementation logic is referred to - https://github.com/Sense-X/TSD/blob/master/mmdet/datasets/samplers/distributed_classaware_sampler.py - - Args: - dataset: Dataset used for sampling. - samples_per_gpu (int): When model is :obj:`DistributedDataParallel`, - it is the number of training samples on each GPU. - When model is :obj:`DataParallel`, it is - `num_gpus * samples_per_gpu`. - Default : 1. - num_replicas (optional): Number of processes participating in - distributed training. - rank (optional): Rank of the current process within num_replicas. - seed (int, optional): random seed used to shuffle the sampler if - ``shuffle=True``. This number should be identical across all - processes in the distributed group. Default: 0. - num_sample_class (int): The number of samples taken from each - per-label list. Default: 1 - """ - - def __init__(self, - dataset, - samples_per_gpu=1, - num_replicas=None, - rank=None, - seed=0, - num_sample_class=1): - _rank, _num_replicas = get_dist_info() - if num_replicas is None: - num_replicas = _num_replicas - if rank is None: - rank = _rank - - self.dataset = dataset - self.num_replicas = num_replicas - self.samples_per_gpu = samples_per_gpu - self.rank = rank - self.epoch = 0 - # Must be the same across all workers. 
If None, will use a - # random seed shared among workers - # (require synchronization among all workers) - self.seed = sync_random_seed(seed) - - # The number of samples taken from each per-label list - assert num_sample_class > 0 and isinstance(num_sample_class, int) - self.num_sample_class = num_sample_class - # Get per-label image list from dataset - assert hasattr(dataset, 'get_cat2imgs'), \ - 'dataset must have `get_cat2imgs` function' - self.cat_dict = dataset.get_cat2imgs() - - self.num_samples = int( - math.ceil( - len(self.dataset) * 1.0 / self.num_replicas / - self.samples_per_gpu)) * self.samples_per_gpu - self.total_size = self.num_samples * self.num_replicas - - # get number of images containing each category - self.num_cat_imgs = [len(x) for x in self.cat_dict.values()] - # filter labels without images - self.valid_cat_inds = [ - i for i, length in enumerate(self.num_cat_imgs) if length != 0 - ] - self.num_classes = len(self.valid_cat_inds) - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch + self.seed) - - # initialize label list - label_iter_list = RandomCycleIter(self.valid_cat_inds, generator=g) - # initialize each per-label image list - data_iter_dict = dict() - for i in self.valid_cat_inds: - data_iter_dict[i] = RandomCycleIter(self.cat_dict[i], generator=g) - - def gen_cat_img_inds(cls_list, data_dict, num_sample_cls): - """Traverse the categories and extract `num_sample_cls` image - indexes of the corresponding categories one by one.""" - id_indices = [] - for _ in range(len(cls_list)): - cls_idx = next(cls_list) - for _ in range(num_sample_cls): - id = next(data_dict[cls_idx]) - id_indices.append(id) - return id_indices - - # deterministically shuffle based on epoch - num_bins = int( - math.ceil(self.total_size * 1.0 / self.num_classes / - self.num_sample_class)) - indices = [] - for i in range(num_bins): - indices += gen_cat_img_inds(label_iter_list, data_iter_dict, - self.num_sample_class) - - # fix extra samples to make it evenly divisible - if len(indices) >= self.total_size: - indices = indices[:self.total_size] - else: - indices += indices[:(self.total_size - len(indices))] - assert len(indices) == self.total_size - - # subsample - offset = self.num_samples * self.rank - indices = indices[offset:offset + self.num_samples] - assert len(indices) == self.num_samples - - return iter(indices) - - def __len__(self): - return self.num_samples - - def set_epoch(self, epoch): - self.epoch = epoch - - -class RandomCycleIter: - """Shuffle the list and do it again after the list have traversed. - - The implementation logic is referred to - https://github.com/wutong16/DistributionBalancedLoss/blob/master/mllt/datasets/loader/sampler.py - - Example: - >>> label_list = [0, 1, 2, 4, 5] - >>> g = torch.Generator() - >>> g.manual_seed(0) - >>> label_iter_list = RandomCycleIter(label_list, generator=g) - >>> index = next(label_iter_list) - Args: - data (list or ndarray): The data that needs to be shuffled. - generator: An torch.Generator object, which is used in setting the seed - for generating random numbers. 
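Taken together, the sampler above interleaves categories: it walks a shuffled cycle of class ids and, for each class, pulls a fixed number of image indices from that class's own cyclic iterator. A stripped-down sketch with plain Python iterators (illustrative names, no distributed bookkeeping or per-cycle reshuffling):

```
import itertools
import random

def class_aware_indices(cat2imgs, total, num_sample_class=1):
    """Yield `total` image indices, visiting non-empty categories round-robin."""
    cats = [c for c, imgs in cat2imgs.items() if imgs]
    random.shuffle(cats)
    cat_cycle = itertools.cycle(cats)
    img_cycles = {c: itertools.cycle(random.sample(imgs, len(imgs)))
                  for c, imgs in cat2imgs.items() if imgs}

    out = []
    while len(out) < total:
        cls = next(cat_cycle)
        out.extend(next(img_cycles[cls]) for _ in range(num_sample_class))
    return out[:total]

print(class_aware_indices({0: [10, 11], 1: [12], 2: []}, total=6))
```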
- """ # noqa: W605 - - def __init__(self, data, generator=None): - self.data = data - self.length = len(data) - self.index = torch.randperm(self.length, generator=generator).numpy() - self.i = 0 - self.generator = generator - - def __iter__(self): - return self - - def __len__(self): - return len(self.data) - - def __next__(self): - if self.i == self.length: - self.index = torch.randperm( - self.length, generator=self.generator).numpy() - self.i = 0 - idx = self.data[self.index[self.i]] - self.i += 1 - return idx diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/distributed_sampler.py b/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/distributed_sampler.py deleted file mode 100644 index 1bc8b7c3602cee288e4ab8d661819c0a2490d4ee..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/distributed_sampler.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import torch -from torch.utils.data import DistributedSampler as _DistributedSampler - -from mmdet.core.utils import sync_random_seed -from mmdet.utils import get_device - - -class DistributedSampler(_DistributedSampler): - - def __init__(self, - dataset, - num_replicas=None, - rank=None, - shuffle=True, - seed=0): - super().__init__( - dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - - # In distributed sampling, different ranks should sample - # non-overlapped data in the dataset. Therefore, this function - # is used to make sure that each rank shuffles the data indices - # in the same order based on the same seed. Then different ranks - # could use different indices to select non-overlapped data from the - # same data list. - device = get_device() - self.seed = sync_random_seed(seed, device) - - def __iter__(self): - # deterministically shuffle based on epoch - if self.shuffle: - g = torch.Generator() - # When :attr:`shuffle=True`, this ensures all replicas - # use a different random ordering for each epoch. - # Otherwise, the next iteration of this sampler will - # yield the same ordering. - g.manual_seed(self.epoch + self.seed) - indices = torch.randperm(len(self.dataset), generator=g).tolist() - else: - indices = torch.arange(len(self.dataset)).tolist() - - # add extra samples to make it evenly divisible - # in case that indices is shorter than half of total_size - indices = (indices * - math.ceil(self.total_size / len(indices)))[:self.total_size] - assert len(indices) == self.total_size - - # subsample - indices = indices[self.rank:self.total_size:self.num_replicas] - assert len(indices) == self.num_samples - - return iter(indices) diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/group_sampler.py b/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/group_sampler.py deleted file mode 100644 index 783d2b21cca753f12a7a617f049f84a2b6541dd9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/group_sampler.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import math - -import numpy as np -import torch -from mmcv.runner import get_dist_info -from torch.utils.data import Sampler - - -class GroupSampler(Sampler): - - def __init__(self, dataset, samples_per_gpu=1): - assert hasattr(dataset, 'flag') - self.dataset = dataset - self.samples_per_gpu = samples_per_gpu - self.flag = dataset.flag.astype(np.int64) - self.group_sizes = np.bincount(self.flag) - self.num_samples = 0 - for i, size in enumerate(self.group_sizes): - self.num_samples += int(np.ceil( - size / self.samples_per_gpu)) * self.samples_per_gpu - - def __iter__(self): - indices = [] - for i, size in enumerate(self.group_sizes): - if size == 0: - continue - indice = np.where(self.flag == i)[0] - assert len(indice) == size - np.random.shuffle(indice) - num_extra = int(np.ceil(size / self.samples_per_gpu) - ) * self.samples_per_gpu - len(indice) - indice = np.concatenate( - [indice, np.random.choice(indice, num_extra)]) - indices.append(indice) - indices = np.concatenate(indices) - indices = [ - indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu] - for i in np.random.permutation( - range(len(indices) // self.samples_per_gpu)) - ] - indices = np.concatenate(indices) - indices = indices.astype(np.int64).tolist() - assert len(indices) == self.num_samples - return iter(indices) - - def __len__(self): - return self.num_samples - - -class DistributedGroupSampler(Sampler): - """Sampler that restricts data loading to a subset of the dataset. - - It is especially useful in conjunction with - :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each - process can pass a DistributedSampler instance as a DataLoader sampler, - and load a subset of the original dataset that is exclusive to it. - - .. note:: - Dataset is assumed to be of constant size. - - Arguments: - dataset: Dataset used for sampling. - num_replicas (optional): Number of processes participating in - distributed training. - rank (optional): Rank of the current process within num_replicas. - seed (int, optional): random seed used to shuffle the sampler if - ``shuffle=True``. This number should be identical across all - processes in the distributed group. Default: 0. - """ - - def __init__(self, - dataset, - samples_per_gpu=1, - num_replicas=None, - rank=None, - seed=0): - _rank, _num_replicas = get_dist_info() - if num_replicas is None: - num_replicas = _num_replicas - if rank is None: - rank = _rank - self.dataset = dataset - self.samples_per_gpu = samples_per_gpu - self.num_replicas = num_replicas - self.rank = rank - self.epoch = 0 - self.seed = seed if seed is not None else 0 - - assert hasattr(self.dataset, 'flag') - self.flag = self.dataset.flag - self.group_sizes = np.bincount(self.flag) - - self.num_samples = 0 - for i, j in enumerate(self.group_sizes): - self.num_samples += int( - math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu / - self.num_replicas)) * self.samples_per_gpu - self.total_size = self.num_samples * self.num_replicas - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch + self.seed) - - indices = [] - for i, size in enumerate(self.group_sizes): - if size > 0: - indice = np.where(self.flag == i)[0] - assert len(indice) == size - # add .numpy() to avoid bug when selecting indice in parrots. - # TODO: check whether torch.randperm() can be replaced by - # numpy.random.permutation(). 
- indice = indice[list( - torch.randperm(int(size), generator=g).numpy())].tolist() - extra = int( - math.ceil( - size * 1.0 / self.samples_per_gpu / self.num_replicas) - ) * self.samples_per_gpu * self.num_replicas - len(indice) - # pad indice - tmp = indice.copy() - for _ in range(extra // size): - indice.extend(tmp) - indice.extend(tmp[:extra % size]) - indices.extend(indice) - - assert len(indices) == self.total_size - - indices = [ - indices[j] for i in list( - torch.randperm( - len(indices) // self.samples_per_gpu, generator=g)) - for j in range(i * self.samples_per_gpu, (i + 1) * - self.samples_per_gpu) - ] - - # subsample - offset = self.num_samples * self.rank - indices = indices[offset:offset + self.num_samples] - assert len(indices) == self.num_samples - - return iter(indices) - - def __len__(self): - return self.num_samples - - def set_epoch(self, epoch): - self.epoch = epoch diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/infinite_sampler.py b/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/infinite_sampler.py deleted file mode 100644 index d42487e6ac0c3e63cd8c4a0bb5ead9644b09a0ea..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/samplers/infinite_sampler.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import itertools - -import numpy as np -import torch -from mmcv.runner import get_dist_info -from torch.utils.data.sampler import Sampler - -from mmdet.core.utils import sync_random_seed - - -class InfiniteGroupBatchSampler(Sampler): - """Similar to `BatchSampler` warping a `GroupSampler. It is designed for - iteration-based runners like `IterBasedRunner` and yields a mini-batch - indices each time, all indices in a batch should be in the same group. - - The implementation logic is referred to - https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/samplers/grouped_batch_sampler.py - - Args: - dataset (object): The dataset. - batch_size (int): When model is :obj:`DistributedDataParallel`, - it is the number of training samples on each GPU. - When model is :obj:`DataParallel`, it is - `num_gpus * samples_per_gpu`. - Default : 1. - world_size (int, optional): Number of processes participating in - distributed training. Default: None. - rank (int, optional): Rank of current process. Default: None. - seed (int): Random seed. Default: 0. - shuffle (bool): Whether shuffle the indices of a dummy `epoch`, it - should be noted that `shuffle` can not guarantee that you can - generate sequential indices because it need to ensure - that all indices in a batch is in a group. Default: True. - """ # noqa: W605 - - def __init__(self, - dataset, - batch_size=1, - world_size=None, - rank=None, - seed=0, - shuffle=True): - _rank, _world_size = get_dist_info() - if world_size is None: - world_size = _world_size - if rank is None: - rank = _rank - self.rank = rank - self.world_size = world_size - self.dataset = dataset - self.batch_size = batch_size - # In distributed sampling, different ranks should sample - # non-overlapped data in the dataset. Therefore, this function - # is used to make sure that each rank shuffles the data indices - # in the same order based on the same seed. Then different ranks - # could use different indices to select non-overlapped data from the - # same data list. 
- self.seed = sync_random_seed(seed) - self.shuffle = shuffle - - assert hasattr(self.dataset, 'flag') - self.flag = self.dataset.flag - self.group_sizes = np.bincount(self.flag) - # buffer used to save indices of each group - self.buffer_per_group = {k: [] for k in range(len(self.group_sizes))} - - self.size = len(dataset) - self.indices = self._indices_of_rank() - - def _infinite_indices(self): - """Infinitely yield a sequence of indices.""" - g = torch.Generator() - g.manual_seed(self.seed) - while True: - if self.shuffle: - yield from torch.randperm(self.size, generator=g).tolist() - - else: - yield from torch.arange(self.size).tolist() - - def _indices_of_rank(self): - """Slice the infinite indices by rank.""" - yield from itertools.islice(self._infinite_indices(), self.rank, None, - self.world_size) - - def __iter__(self): - # once batch size is reached, yield the indices - for idx in self.indices: - flag = self.flag[idx] - group_buffer = self.buffer_per_group[flag] - group_buffer.append(idx) - if len(group_buffer) == self.batch_size: - yield group_buffer[:] - del group_buffer[:] - - def __len__(self): - """Length of base dataset.""" - return self.size - - def set_epoch(self, epoch): - """Not supported in `IterationBased` runner.""" - raise NotImplementedError - - -class InfiniteBatchSampler(Sampler): - """Similar to `BatchSampler` warping a `DistributedSampler. It is designed - iteration-based runners like `IterBasedRunner` and yields a mini-batch - indices each time. - - The implementation logic is referred to - https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/samplers/grouped_batch_sampler.py - - Args: - dataset (object): The dataset. - batch_size (int): When model is :obj:`DistributedDataParallel`, - it is the number of training samples on each GPU, - When model is :obj:`DataParallel`, it is - `num_gpus * samples_per_gpu`. - Default : 1. - world_size (int, optional): Number of processes participating in - distributed training. Default: None. - rank (int, optional): Rank of current process. Default: None. - seed (int): Random seed. Default: 0. - shuffle (bool): Whether shuffle the dataset or not. Default: True. - """ # noqa: W605 - - def __init__(self, - dataset, - batch_size=1, - world_size=None, - rank=None, - seed=0, - shuffle=True): - _rank, _world_size = get_dist_info() - if world_size is None: - world_size = _world_size - if rank is None: - rank = _rank - self.rank = rank - self.world_size = world_size - self.dataset = dataset - self.batch_size = batch_size - # In distributed sampling, different ranks should sample - # non-overlapped data in the dataset. Therefore, this function - # is used to make sure that each rank shuffles the data indices - # in the same order based on the same seed. Then different ranks - # could use different indices to select non-overlapped data from the - # same data list. 
- self.seed = sync_random_seed(seed) - self.shuffle = shuffle - self.size = len(dataset) - self.indices = self._indices_of_rank() - - def _infinite_indices(self): - """Infinitely yield a sequence of indices.""" - g = torch.Generator() - g.manual_seed(self.seed) - while True: - if self.shuffle: - yield from torch.randperm(self.size, generator=g).tolist() - - else: - yield from torch.arange(self.size).tolist() - - def _indices_of_rank(self): - """Slice the infinite indices by rank.""" - yield from itertools.islice(self._infinite_indices(), self.rank, None, - self.world_size) - - def __iter__(self): - # once batch size is reached, yield the indices - batch_buffer = [] - for idx in self.indices: - batch_buffer.append(idx) - if len(batch_buffer) == self.batch_size: - yield batch_buffer - batch_buffer = [] - - def __len__(self): - """Length of base dataset.""" - return self.size - - def set_epoch(self, epoch): - """Not supported in `IterationBased` runner.""" - raise NotImplementedError diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/utils.py b/cv/detection/co-detr/pytorch/mmdet/datasets/utils.py deleted file mode 100644 index 26e922d2ba8edb5dd0a1242a96f32ad56505393f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/utils.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import warnings - -from mmcv.cnn import VGG -from mmcv.runner.hooks import HOOKS, Hook - -from mmdet.datasets.builder import PIPELINES -from mmdet.datasets.pipelines import (LoadAnnotations, LoadImageFromFile, - LoadPanopticAnnotations) -from mmdet.models.dense_heads import GARPNHead, RPNHead -from mmdet.models.roi_heads.mask_heads import FusedSemanticHead - - -def replace_ImageToTensor(pipelines): - """Replace the ImageToTensor transform in a data pipeline to - DefaultFormatBundle, which is normally useful in batch inference. - - Args: - pipelines (list[dict]): Data pipeline configs. - - Returns: - list: The new pipeline list with all ImageToTensor replaced by - DefaultFormatBundle. - - Examples: - >>> pipelines = [ - ... dict(type='LoadImageFromFile'), - ... dict( - ... type='MultiScaleFlipAug', - ... img_scale=(1333, 800), - ... flip=False, - ... transforms=[ - ... dict(type='Resize', keep_ratio=True), - ... dict(type='RandomFlip'), - ... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), - ... dict(type='Pad', size_divisor=32), - ... dict(type='ImageToTensor', keys=['img']), - ... dict(type='Collect', keys=['img']), - ... ]) - ... ] - >>> expected_pipelines = [ - ... dict(type='LoadImageFromFile'), - ... dict( - ... type='MultiScaleFlipAug', - ... img_scale=(1333, 800), - ... flip=False, - ... transforms=[ - ... dict(type='Resize', keep_ratio=True), - ... dict(type='RandomFlip'), - ... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), - ... dict(type='Pad', size_divisor=32), - ... dict(type='DefaultFormatBundle'), - ... dict(type='Collect', keys=['img']), - ... ]) - ... ] - >>> assert expected_pipelines == replace_ImageToTensor(pipelines) - """ - pipelines = copy.deepcopy(pipelines) - for i, pipeline in enumerate(pipelines): - if pipeline['type'] == 'MultiScaleFlipAug': - assert 'transforms' in pipeline - pipeline['transforms'] = replace_ImageToTensor( - pipeline['transforms']) - elif pipeline['type'] == 'ImageToTensor': - warnings.warn( - '"ImageToTensor" pipeline is replaced by ' - '"DefaultFormatBundle" for batch inference. 
It is ' - 'recommended to manually replace it in the test ' - 'data pipeline in your config file.', UserWarning) - pipelines[i] = {'type': 'DefaultFormatBundle'} - return pipelines - - -def get_loading_pipeline(pipeline): - """Only keep loading image and annotations related configuration. - - Args: - pipeline (list[dict]): Data pipeline configs. - - Returns: - list[dict]: The new pipeline list with only keep - loading image and annotations related configuration. - - Examples: - >>> pipelines = [ - ... dict(type='LoadImageFromFile'), - ... dict(type='LoadAnnotations', with_bbox=True), - ... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - ... dict(type='RandomFlip', flip_ratio=0.5), - ... dict(type='Normalize', **img_norm_cfg), - ... dict(type='Pad', size_divisor=32), - ... dict(type='DefaultFormatBundle'), - ... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) - ... ] - >>> expected_pipelines = [ - ... dict(type='LoadImageFromFile'), - ... dict(type='LoadAnnotations', with_bbox=True) - ... ] - >>> assert expected_pipelines ==\ - ... get_loading_pipeline(pipelines) - """ - loading_pipeline_cfg = [] - for cfg in pipeline: - obj_cls = PIPELINES.get(cfg['type']) - # TODO:use more elegant way to distinguish loading modules - if obj_cls is not None and obj_cls in (LoadImageFromFile, - LoadAnnotations, - LoadPanopticAnnotations): - loading_pipeline_cfg.append(cfg) - assert len(loading_pipeline_cfg) == 2, \ - 'The data pipeline in your config file must include ' \ - 'loading image and annotations related pipeline.' - return loading_pipeline_cfg - - -@HOOKS.register_module() -class NumClassCheckHook(Hook): - - def _check_head(self, runner): - """Check whether the `num_classes` in head matches the length of - `CLASSES` in `dataset`. - - Args: - runner (obj:`EpochBasedRunner`): Epoch based Runner. - """ - model = runner.model - dataset = runner.data_loader.dataset - if dataset.CLASSES is None: - runner.logger.warning( - f'Please set `CLASSES` ' - f'in the {dataset.__class__.__name__} and' - f'check if it is consistent with the `num_classes` ' - f'of head') - else: - assert type(dataset.CLASSES) is not str, \ - (f'`CLASSES` in {dataset.__class__.__name__}' - f'should be a tuple of str.' - f'Add comma if number of classes is 1 as ' - f'CLASSES = ({dataset.CLASSES},)') - for name, module in model.named_modules(): - if hasattr(module, 'num_classes') and not isinstance( - module, (RPNHead, VGG, FusedSemanticHead, GARPNHead)): - assert module.num_classes == len(dataset.CLASSES), \ - (f'The `num_classes` ({module.num_classes}) in ' - f'{module.__class__.__name__} of ' - f'{model.__class__.__name__} does not matches ' - f'the length of `CLASSES` ' - f'{len(dataset.CLASSES)}) in ' - f'{dataset.__class__.__name__}') - - def before_train_epoch(self, runner): - """Check whether the training dataset is compatible with head. - - Args: - runner (obj:`EpochBasedRunner`): Epoch based Runner. - """ - self._check_head(runner) - - def before_val_epoch(self, runner): - """Check whether the dataset in val epoch is compatible with head. - - Args: - runner (obj:`EpochBasedRunner`): Epoch based Runner. - """ - self._check_head(runner) diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/voc.py b/cv/detection/co-detr/pytorch/mmdet/datasets/voc.py deleted file mode 100644 index 0a3ea7aac75c7ef3ee1576ec05f251fd47412b72..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/voc.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
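One detail the hook above calls out deserves an explicit illustration: a single-class `CLASSES` must be a one-element tuple, otherwise Python treats it as a plain string and the length check misfires:

```
CLASSES = ('face')    # parentheses only: still a str, len(CLASSES) == 4
CLASSES = ('face', )  # a 1-tuple, len(CLASSES) == 1, as NumClassCheckHook expects
```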
-from collections import OrderedDict - -from mmcv.utils import print_log - -from mmdet.core import eval_map, eval_recalls -from .builder import DATASETS -from .xml_style import XMLDataset - - -@DATASETS.register_module() -class VOCDataset(XMLDataset): - - CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', - 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', - 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', - 'tvmonitor') - - PALETTE = [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192), - (197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255), - (153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252), - (182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0), - (0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)] - - def __init__(self, **kwargs): - super(VOCDataset, self).__init__(**kwargs) - if 'VOC2007' in self.img_prefix: - self.year = 2007 - elif 'VOC2012' in self.img_prefix: - self.year = 2012 - else: - raise ValueError('Cannot infer dataset year from img_prefix') - - def evaluate(self, - results, - metric='mAP', - logger=None, - proposal_nums=(100, 300, 1000), - iou_thr=0.5, - scale_ranges=None): - """Evaluate in VOC protocol. - - Args: - results (list[list | tuple]): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. Options are - 'mAP', 'recall'. - logger (logging.Logger | str, optional): Logger used for printing - related information during evaluation. Default: None. - proposal_nums (Sequence[int]): Proposal number used for evaluating - recalls, such as recall@100, recall@1000. - Default: (100, 300, 1000). - iou_thr (float | list[float]): IoU threshold. Default: 0.5. - scale_ranges (list[tuple], optional): Scale ranges for evaluating - mAP. If not specified, all bounding boxes would be included in - evaluation. Default: None. - - Returns: - dict[str, float]: AP/recall metrics. 
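As a usage note, `iou_thr` may be a float or a list of floats; each threshold contributes its own `APxx` entry and `mAP` is their mean. A hedged example (the `dataset` and `results` objects are assumed to exist already):

```
# `dataset` is a built VOCDataset, `results` the per-image detections of a model
metrics = dataset.evaluate(results, metric='mAP', iou_thr=[0.5, 0.75])
print(metrics['AP50'], metrics['AP75'], metrics['mAP'])
```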
- """ - - if not isinstance(metric, str): - assert len(metric) == 1 - metric = metric[0] - allowed_metrics = ['mAP', 'recall'] - if metric not in allowed_metrics: - raise KeyError(f'metric {metric} is not supported') - annotations = [self.get_ann_info(i) for i in range(len(self))] - eval_results = OrderedDict() - iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr - if metric == 'mAP': - assert isinstance(iou_thrs, list) - if self.year == 2007: - ds_name = 'voc07' - else: - ds_name = self.CLASSES - mean_aps = [] - for iou_thr in iou_thrs: - print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}') - # Follow the official implementation, - # http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar - # we should use the legacy coordinate system in mmdet 1.x, - # which means w, h should be computed as 'x2 - x1 + 1` and - # `y2 - y1 + 1` - mean_ap, _ = eval_map( - results, - annotations, - scale_ranges=None, - iou_thr=iou_thr, - dataset=ds_name, - logger=logger, - use_legacy_coordinate=True) - mean_aps.append(mean_ap) - eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3) - eval_results['mAP'] = sum(mean_aps) / len(mean_aps) - eval_results.move_to_end('mAP', last=False) - elif metric == 'recall': - gt_bboxes = [ann['bboxes'] for ann in annotations] - recalls = eval_recalls( - gt_bboxes, - results, - proposal_nums, - iou_thrs, - logger=logger, - use_legacy_coordinate=True) - for i, num in enumerate(proposal_nums): - for j, iou_thr in enumerate(iou_thrs): - eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j] - if recalls.shape[1] > 1: - ar = recalls.mean(axis=1) - for i, num in enumerate(proposal_nums): - eval_results[f'AR@{num}'] = ar[i] - return eval_results diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/wider_face.py b/cv/detection/co-detr/pytorch/mmdet/datasets/wider_face.py deleted file mode 100644 index 85a5fdc549659f9cf72e4511de28cc0ccb4a9f4c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/wider_face.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -import xml.etree.ElementTree as ET - -import mmcv - -from .builder import DATASETS -from .xml_style import XMLDataset - - -@DATASETS.register_module() -class WIDERFaceDataset(XMLDataset): - """Reader for the WIDER Face dataset in PASCAL VOC format. - - Conversion scripts can be found in - https://github.com/sovrasov/wider-face-pascal-voc-annotations - """ - CLASSES = ('face', ) - - PALETTE = [(0, 255, 0)] - - def __init__(self, **kwargs): - super(WIDERFaceDataset, self).__init__(**kwargs) - - def load_annotations(self, ann_file): - """Load annotation from WIDERFace XML style annotation file. - - Args: - ann_file (str): Path of XML file. - - Returns: - list[dict]: Annotation info from XML file. 
- """ - - data_infos = [] - img_ids = mmcv.list_from_file(ann_file) - for img_id in img_ids: - filename = f'{img_id}.jpg' - xml_path = osp.join(self.img_prefix, 'Annotations', - f'{img_id}.xml') - tree = ET.parse(xml_path) - root = tree.getroot() - size = root.find('size') - width = int(size.find('width').text) - height = int(size.find('height').text) - folder = root.find('folder').text - data_infos.append( - dict( - id=img_id, - filename=osp.join(folder, filename), - width=width, - height=height)) - - return data_infos diff --git a/cv/detection/co-detr/pytorch/mmdet/datasets/xml_style.py b/cv/detection/co-detr/pytorch/mmdet/datasets/xml_style.py deleted file mode 100644 index 039d5d7d08fc9874b7378444c0ff63b5d8dd2ade..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/datasets/xml_style.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -import xml.etree.ElementTree as ET - -import mmcv -import numpy as np -from PIL import Image - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class XMLDataset(CustomDataset): - """XML dataset for detection. - - Args: - min_size (int | float, optional): The minimum size of bounding - boxes in the images. If the size of a bounding box is less than - ``min_size``, it would be add to ignored field. - img_subdir (str): Subdir where images are stored. Default: JPEGImages. - ann_subdir (str): Subdir where annotations are. Default: Annotations. - """ - - def __init__(self, - min_size=None, - img_subdir='JPEGImages', - ann_subdir='Annotations', - **kwargs): - assert self.CLASSES or kwargs.get( - 'classes', None), 'CLASSES in `XMLDataset` can not be None.' - self.img_subdir = img_subdir - self.ann_subdir = ann_subdir - super(XMLDataset, self).__init__(**kwargs) - self.cat2label = {cat: i for i, cat in enumerate(self.CLASSES)} - self.min_size = min_size - - def load_annotations(self, ann_file): - """Load annotation from XML style ann_file. - - Args: - ann_file (str): Path of XML file. - - Returns: - list[dict]: Annotation info from XML file. - """ - - data_infos = [] - img_ids = mmcv.list_from_file(ann_file) - for img_id in img_ids: - filename = osp.join(self.img_subdir, f'{img_id}.jpg') - xml_path = osp.join(self.img_prefix, self.ann_subdir, - f'{img_id}.xml') - tree = ET.parse(xml_path) - root = tree.getroot() - size = root.find('size') - if size is not None: - width = int(size.find('width').text) - height = int(size.find('height').text) - else: - img_path = osp.join(self.img_prefix, filename) - img = Image.open(img_path) - width, height = img.size - data_infos.append( - dict(id=img_id, filename=filename, width=width, height=height)) - - return data_infos - - def _filter_imgs(self, min_size=32): - """Filter images too small or without annotation.""" - valid_inds = [] - for i, img_info in enumerate(self.data_infos): - if min(img_info['width'], img_info['height']) < min_size: - continue - if self.filter_empty_gt: - img_id = img_info['id'] - xml_path = osp.join(self.img_prefix, self.ann_subdir, - f'{img_id}.xml') - tree = ET.parse(xml_path) - root = tree.getroot() - for obj in root.findall('object'): - name = obj.find('name').text - if name in self.CLASSES: - valid_inds.append(i) - break - else: - valid_inds.append(i) - return valid_inds - - def get_ann_info(self, idx): - """Get annotation from XML file by index. - - Args: - idx (int): Index of data. - - Returns: - dict: Annotation info of specified index. 
- """ - - img_id = self.data_infos[idx]['id'] - xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml') - tree = ET.parse(xml_path) - root = tree.getroot() - bboxes = [] - labels = [] - bboxes_ignore = [] - labels_ignore = [] - for obj in root.findall('object'): - name = obj.find('name').text - if name not in self.CLASSES: - continue - label = self.cat2label[name] - difficult = obj.find('difficult') - difficult = 0 if difficult is None else int(difficult.text) - bnd_box = obj.find('bndbox') - # TODO: check whether it is necessary to use int - # Coordinates may be float type - bbox = [ - int(float(bnd_box.find('xmin').text)), - int(float(bnd_box.find('ymin').text)), - int(float(bnd_box.find('xmax').text)), - int(float(bnd_box.find('ymax').text)) - ] - ignore = False - if self.min_size: - assert not self.test_mode - w = bbox[2] - bbox[0] - h = bbox[3] - bbox[1] - if w < self.min_size or h < self.min_size: - ignore = True - if difficult or ignore: - bboxes_ignore.append(bbox) - labels_ignore.append(label) - else: - bboxes.append(bbox) - labels.append(label) - if not bboxes: - bboxes = np.zeros((0, 4)) - labels = np.zeros((0, )) - else: - bboxes = np.array(bboxes, ndmin=2) - 1 - labels = np.array(labels) - if not bboxes_ignore: - bboxes_ignore = np.zeros((0, 4)) - labels_ignore = np.zeros((0, )) - else: - bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1 - labels_ignore = np.array(labels_ignore) - ann = dict( - bboxes=bboxes.astype(np.float32), - labels=labels.astype(np.int64), - bboxes_ignore=bboxes_ignore.astype(np.float32), - labels_ignore=labels_ignore.astype(np.int64)) - return ann - - def get_cat_ids(self, idx): - """Get category ids in XML file by index. - - Args: - idx (int): Index of data. - - Returns: - list[int]: All categories in the image of specified index. - """ - - cat_ids = [] - img_id = self.data_infos[idx]['id'] - xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml') - tree = ET.parse(xml_path) - root = tree.getroot() - for obj in root.findall('object'): - name = obj.find('name').text - if name not in self.CLASSES: - continue - label = self.cat2label[name] - cat_ids.append(label) - - return cat_ids diff --git a/cv/detection/co-detr/pytorch/mmdet/models/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/__init__.py deleted file mode 100644 index 12efb013d26d6b7ee27226a0f205d7e009e4b5f3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .backbones import * # noqa: F401,F403 -from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, - ROI_EXTRACTORS, SHARED_HEADS, build_backbone, - build_detector, build_head, build_loss, build_neck, - build_roi_extractor, build_shared_head) -from .dense_heads import * # noqa: F401,F403 -from .detectors import * # noqa: F401,F403 -from .losses import * # noqa: F401,F403 -from .necks import * # noqa: F401,F403 -from .plugins import * # noqa: F401,F403 -from .roi_heads import * # noqa: F401,F403 -from .seg_heads import * # noqa: F401,F403 - -__all__ = [ - 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES', - 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor', - 'build_shared_head', 'build_head', 'build_loss', 'build_detector' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/__init__.py deleted file mode 100644 index 91b50d254a8866c7376286470c47e4de936d07ad..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .csp_darknet import CSPDarknet -from .darknet import Darknet -from .detectors_resnet import DetectoRS_ResNet -from .detectors_resnext import DetectoRS_ResNeXt -from .efficientnet import EfficientNet -from .hourglass import HourglassNet -from .hrnet import HRNet -from .mobilenet_v2 import MobileNetV2 -from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2 -from .regnet import RegNet -from .res2net import Res2Net -from .resnest import ResNeSt -from .resnet import ResNet, ResNetV1d -from .resnext import ResNeXt -from .ssd_vgg import SSDVGG -from .swin import SwinTransformer -from .trident_resnet import TridentResNet - -__all__ = [ - 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', - 'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet', - 'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet', - 'SwinTransformer', 'PyramidVisionTransformer', - 'PyramidVisionTransformerV2', 'EfficientNet' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/csp_darknet.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/csp_darknet.py deleted file mode 100644 index 2bbf3968a818ad9c1d27d82e3ef17e9c2f8072bc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/csp_darknet.py +++ /dev/null @@ -1,284 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule -from mmcv.runner import BaseModule -from torch.nn.modules.batchnorm import _BatchNorm - -from ..builder import BACKBONES -from ..utils import CSPLayer - - -class Focus(nn.Module): - """Focus width and height information into channel space. - - Args: - in_channels (int): The input channels of this Module. - out_channels (int): The output channels of this Module. - kernel_size (int): The kernel size of the convolution. Default: 1 - stride (int): The stride of the convolution. Default: 1 - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN', momentum=0.03, eps=0.001). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='Swish'). 
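The Focus stem's slicing trick is easy to sanity-check on its own: sampling every other pixel at four phase offsets turns an `(N, C, H, W)` tensor into `(N, 4C, H/2, W/2)` with no information lost. A minimal shape check (plain torch, no ConvModule):

```
import torch

x = torch.rand(1, 3, 416, 416)
patches = torch.cat(
    (x[..., ::2, ::2],     # top-left phase
     x[..., 1::2, ::2],    # bottom-left phase
     x[..., ::2, 1::2],    # top-right phase
     x[..., 1::2, 1::2]),  # bottom-right phase
    dim=1)
assert patches.shape == (1, 12, 208, 208)   # (N, 4C, H/2, W/2)
```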
- """ - - def __init__(self, - in_channels, - out_channels, - kernel_size=1, - stride=1, - conv_cfg=None, - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='Swish')): - super().__init__() - self.conv = ConvModule( - in_channels * 4, - out_channels, - kernel_size, - stride, - padding=(kernel_size - 1) // 2, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - def forward(self, x): - # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2) - patch_top_left = x[..., ::2, ::2] - patch_top_right = x[..., ::2, 1::2] - patch_bot_left = x[..., 1::2, ::2] - patch_bot_right = x[..., 1::2, 1::2] - x = torch.cat( - ( - patch_top_left, - patch_bot_left, - patch_top_right, - patch_bot_right, - ), - dim=1, - ) - return self.conv(x) - - -class SPPBottleneck(BaseModule): - """Spatial pyramid pooling layer used in YOLOv3-SPP. - - Args: - in_channels (int): The input channels of this Module. - out_channels (int): The output channels of this Module. - kernel_sizes (tuple[int]): Sequential of kernel sizes of pooling - layers. Default: (5, 9, 13). - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='Swish'). - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. - """ - - def __init__(self, - in_channels, - out_channels, - kernel_sizes=(5, 9, 13), - conv_cfg=None, - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='Swish'), - init_cfg=None): - super().__init__(init_cfg) - mid_channels = in_channels // 2 - self.conv1 = ConvModule( - in_channels, - mid_channels, - 1, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.poolings = nn.ModuleList([ - nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2) - for ks in kernel_sizes - ]) - conv2_channels = mid_channels * (len(kernel_sizes) + 1) - self.conv2 = ConvModule( - conv2_channels, - out_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - def forward(self, x): - x = self.conv1(x) - x = torch.cat([x] + [pooling(x) for pooling in self.poolings], dim=1) - x = self.conv2(x) - return x - - -@BACKBONES.register_module() -class CSPDarknet(BaseModule): - """CSP-Darknet backbone used in YOLOv5 and YOLOX. - - Args: - arch (str): Architecture of CSP-Darknet, from {P5, P6}. - Default: P5. - deepen_factor (float): Depth multiplier, multiply number of - blocks in CSP layer by this amount. Default: 1.0. - widen_factor (float): Width multiplier, multiply number of - channels in each layer by this amount. Default: 1.0. - out_indices (Sequence[int]): Output from which stages. - Default: (2, 3, 4). - frozen_stages (int): Stages to be frozen (stop grad and set eval - mode). -1 means not freezing any parameters. Default: -1. - use_depthwise (bool): Whether to use depthwise separable convolution. - Default: False. - arch_ovewrite(list): Overwrite default arch settings. Default: None. - spp_kernal_sizes: (tuple[int]): Sequential of kernel sizes of SPP - layers. Default: (5, 9, 13). - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN', requires_grad=True). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='LeakyReLU', negative_slope=0.1). 
- norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. - Example: - >>> from mmdet.models import CSPDarknet - >>> import torch - >>> self = CSPDarknet(depth=53) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 416, 416) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - ... - (1, 256, 52, 52) - (1, 512, 26, 26) - (1, 1024, 13, 13) - """ - # From left to right: - # in_channels, out_channels, num_blocks, add_identity, use_spp - arch_settings = { - 'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False], - [256, 512, 9, True, False], [512, 1024, 3, False, True]], - 'P6': [[64, 128, 3, True, False], [128, 256, 9, True, False], - [256, 512, 9, True, False], [512, 768, 3, True, False], - [768, 1024, 3, False, True]] - } - - def __init__(self, - arch='P5', - deepen_factor=1.0, - widen_factor=1.0, - out_indices=(2, 3, 4), - frozen_stages=-1, - use_depthwise=False, - arch_ovewrite=None, - spp_kernal_sizes=(5, 9, 13), - conv_cfg=None, - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='Swish'), - norm_eval=False, - init_cfg=dict( - type='Kaiming', - layer='Conv2d', - a=math.sqrt(5), - distribution='uniform', - mode='fan_in', - nonlinearity='leaky_relu')): - super().__init__(init_cfg) - arch_setting = self.arch_settings[arch] - if arch_ovewrite: - arch_setting = arch_ovewrite - assert set(out_indices).issubset( - i for i in range(len(arch_setting) + 1)) - if frozen_stages not in range(-1, len(arch_setting) + 1): - raise ValueError('frozen_stages must be in range(-1, ' - 'len(arch_setting) + 1). 
But received ' - f'{frozen_stages}') - - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.use_depthwise = use_depthwise - self.norm_eval = norm_eval - conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule - - self.stem = Focus( - 3, - int(arch_setting[0][0] * widen_factor), - kernel_size=3, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.layers = ['stem'] - - for i, (in_channels, out_channels, num_blocks, add_identity, - use_spp) in enumerate(arch_setting): - in_channels = int(in_channels * widen_factor) - out_channels = int(out_channels * widen_factor) - num_blocks = max(round(num_blocks * deepen_factor), 1) - stage = [] - conv_layer = conv( - in_channels, - out_channels, - 3, - stride=2, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - stage.append(conv_layer) - if use_spp: - spp = SPPBottleneck( - out_channels, - out_channels, - kernel_sizes=spp_kernal_sizes, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - stage.append(spp) - csp_layer = CSPLayer( - out_channels, - out_channels, - num_blocks=num_blocks, - add_identity=add_identity, - use_depthwise=use_depthwise, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - stage.append(csp_layer) - self.add_module(f'stage{i + 1}', nn.Sequential(*stage)) - self.layers.append(f'stage{i + 1}') - - def _freeze_stages(self): - if self.frozen_stages >= 0: - for i in range(self.frozen_stages + 1): - m = getattr(self, self.layers[i]) - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def train(self, mode=True): - super(CSPDarknet, self).train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, _BatchNorm): - m.eval() - - def forward(self, x): - outs = [] - for i, layer_name in enumerate(self.layers): - layer = getattr(self, layer_name) - x = layer(x) - if i in self.out_indices: - outs.append(x) - return tuple(outs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/darknet.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/darknet.py deleted file mode 100644 index adfb1159b507d9fdc5bc6af20fe64411a8b55f92..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/darknet.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# Copyright (c) 2019 Western Digital Corporation or its affiliates. - -import warnings - -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule -from torch.nn.modules.batchnorm import _BatchNorm - -from ..builder import BACKBONES - - -class ResBlock(BaseModule): - """The basic residual block used in Darknet. Each ResBlock consists of two - ConvModules and the input is added to the final output. Each ConvModule is - composed of Conv, BN, and LeakyReLU. In YoloV3 paper, the first convLayer - has half of the number of the filters as much as the second convLayer. The - first convLayer has filter size of 1x1 and the second one has the filter - size of 3x3. - - Args: - in_channels (int): The input channels. Must be even. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN', requires_grad=True) - act_cfg (dict): Config dict for activation layer. - Default: dict(type='LeakyReLU', negative_slope=0.1). - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - in_channels, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - init_cfg=None): - super(ResBlock, self).__init__(init_cfg) - assert in_channels % 2 == 0 # ensure the in_channels is even - half_in_channels = in_channels // 2 - - # shortcut - cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) - - self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg) - self.conv2 = ConvModule( - half_in_channels, in_channels, 3, padding=1, **cfg) - - def forward(self, x): - residual = x - out = self.conv1(x) - out = self.conv2(out) - out = out + residual - - return out - - -@BACKBONES.register_module() -class Darknet(BaseModule): - """Darknet backbone. - - Args: - depth (int): Depth of Darknet. Currently only support 53. - out_indices (Sequence[int]): Output from which stages. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. Default: -1. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN', requires_grad=True) - act_cfg (dict): Config dict for activation layer. - Default: dict(type='LeakyReLU', negative_slope=0.1). - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - pretrained (str, optional): model pretrained path. Default: None - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - - Example: - >>> from mmdet.models import Darknet - >>> import torch - >>> self = Darknet(depth=53) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 416, 416) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - ... 
- (1, 256, 52, 52) - (1, 512, 26, 26) - (1, 1024, 13, 13) - """ - - # Dict(depth: (layers, channels)) - arch_settings = { - 53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512), - (512, 1024))) - } - - def __init__(self, - depth=53, - out_indices=(3, 4, 5), - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - norm_eval=True, - pretrained=None, - init_cfg=None): - super(Darknet, self).__init__(init_cfg) - if depth not in self.arch_settings: - raise KeyError(f'invalid depth {depth} for darknet') - - self.depth = depth - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.layers, self.channels = self.arch_settings[depth] - - cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) - - self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg) - - self.cr_blocks = ['conv1'] - for i, n_layers in enumerate(self.layers): - layer_name = f'conv_res_block{i + 1}' - in_c, out_c = self.channels[i] - self.add_module( - layer_name, - self.make_conv_res_block(in_c, out_c, n_layers, **cfg)) - self.cr_blocks.append(layer_name) - - self.norm_eval = norm_eval - - assert not (init_cfg and pretrained), \ - 'init_cfg and pretrained cannot be specified at the same time' - if isinstance(pretrained, str): - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - elif pretrained is None: - if init_cfg is None: - self.init_cfg = [ - dict(type='Kaiming', layer='Conv2d'), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ] - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - outs = [] - for i, layer_name in enumerate(self.cr_blocks): - cr_block = getattr(self, layer_name) - x = cr_block(x) - if i in self.out_indices: - outs.append(x) - - return tuple(outs) - - def _freeze_stages(self): - if self.frozen_stages >= 0: - for i in range(self.frozen_stages): - m = getattr(self, self.cr_blocks[i]) - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def train(self, mode=True): - super(Darknet, self).train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, _BatchNorm): - m.eval() - - @staticmethod - def make_conv_res_block(in_channels, - out_channels, - res_repeat, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='LeakyReLU', - negative_slope=0.1)): - """In Darknet backbone, ConvLayer is usually followed by ResBlock. This - function will make that. The Conv layers always have 3x3 filters with - stride=2. The number of the filters in Conv layer is the same as the - out channels of the ResBlock. - - Args: - in_channels (int): The number of input channels. - out_channels (int): The number of output channels. - res_repeat (int): The number of ResBlocks. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN', requires_grad=True) - act_cfg (dict): Config dict for activation layer. - Default: dict(type='LeakyReLU', negative_slope=0.1). 
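A quick shape check of the helper documented above, under the assumption that the `Darknet` class defined in this file is importable and mmcv provides `ConvModule`: the stride-2 conv halves the spatial size once and the residual blocks keep it fixed.

```
import torch

# hypothetical check; names are taken from the class above
block = Darknet.make_conv_res_block(in_channels=32, out_channels=64, res_repeat=1)
out = block(torch.rand(1, 32, 64, 64))
assert out.shape == (1, 64, 32, 32)   # stride-2 conv halves H and W
```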
- """ - - cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) - - model = nn.Sequential() - model.add_module( - 'conv', - ConvModule( - in_channels, out_channels, 3, stride=2, padding=1, **cfg)) - for idx in range(res_repeat): - model.add_module('res{}'.format(idx), - ResBlock(out_channels, **cfg)) - return model diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/detectors_resnet.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/detectors_resnet.py deleted file mode 100644 index a3c0d40b4284c1c2d5006df28620d230d93646cd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/detectors_resnet.py +++ /dev/null @@ -1,353 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init, - kaiming_init) -from mmcv.runner import Sequential, load_checkpoint -from torch.nn.modules.batchnorm import _BatchNorm - -from mmdet.utils import get_root_logger -from ..builder import BACKBONES -from .resnet import BasicBlock -from .resnet import Bottleneck as _Bottleneck -from .resnet import ResNet - - -class Bottleneck(_Bottleneck): - r"""Bottleneck for the ResNet backbone in `DetectoRS - `_. - - This bottleneck allows the users to specify whether to use - SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid). - - Args: - inplanes (int): The number of input channels. - planes (int): The number of output channels before expansion. - rfp_inplanes (int, optional): The number of channels from RFP. - Default: None. If specified, an additional conv layer will be - added for ``rfp_feat``. Otherwise, the structure is the same as - base class. - sac (dict, optional): Dictionary to construct SAC. Default: None. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - expansion = 4 - - def __init__(self, - inplanes, - planes, - rfp_inplanes=None, - sac=None, - init_cfg=None, - **kwargs): - super(Bottleneck, self).__init__( - inplanes, planes, init_cfg=init_cfg, **kwargs) - - assert sac is None or isinstance(sac, dict) - self.sac = sac - self.with_sac = sac is not None - if self.with_sac: - self.conv2 = build_conv_layer( - self.sac, - planes, - planes, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - bias=False) - - self.rfp_inplanes = rfp_inplanes - if self.rfp_inplanes: - self.rfp_conv = build_conv_layer( - None, - self.rfp_inplanes, - planes * self.expansion, - 1, - stride=1, - bias=True) - if init_cfg is None: - self.init_cfg = dict( - type='Constant', val=0, override=dict(name='rfp_conv')) - - def rfp_forward(self, x, rfp_feat): - """The forward function that also takes the RFP features as input.""" - - def _inner_forward(x): - identity = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv1_plugin_names) - - out = self.conv2(out) - out = self.norm2(out) - out = self.relu(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv2_plugin_names) - - out = self.conv3(out) - out = self.norm3(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv3_plugin_names) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - if self.rfp_inplanes: - rfp_feat = self.rfp_conv(rfp_feat) - out = out + rfp_feat - - out = self.relu(out) - - return out - - -class ResLayer(Sequential): - """ResLayer to build ResNet style backbone for RFP in DetectoRS. - - The difference between this module and base class is that we pass - ``rfp_inplanes`` to the first block. - - Args: - block (nn.Module): block used to build ResLayer. - inplanes (int): inplanes of block. - planes (int): planes of block. - num_blocks (int): number of blocks. - stride (int): stride of the first block. Default: 1 - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default: False - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - downsample_first (bool): Downsample at the first block or last block. - False for Hourglass, True for ResNet. Default: True - rfp_inplanes (int, optional): The number of channels from RFP. - Default: None. If specified, an additional conv layer will be - added for ``rfp_feat``. Otherwise, the structure is the same as - base class.
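- **kwargs: Remaining keyword arguments (e.g. ``dilation``, ``style``,
- ``with_cp``, ``dcn``, ``sac``, ``plugins``) that are passed on to
- every block in the layer.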
- """ - - def __init__(self, - block, - inplanes, - planes, - num_blocks, - stride=1, - avg_down=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - downsample_first=True, - rfp_inplanes=None, - **kwargs): - self.block = block - assert downsample_first, f'downsample_first={downsample_first} is ' \ - 'not supported in DetectoRS' - - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = [] - conv_stride = stride - if avg_down and stride != 1: - conv_stride = 1 - downsample.append( - nn.AvgPool2d( - kernel_size=stride, - stride=stride, - ceil_mode=True, - count_include_pad=False)) - downsample.extend([ - build_conv_layer( - conv_cfg, - inplanes, - planes * block.expansion, - kernel_size=1, - stride=conv_stride, - bias=False), - build_norm_layer(norm_cfg, planes * block.expansion)[1] - ]) - downsample = nn.Sequential(*downsample) - - layers = [] - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - rfp_inplanes=rfp_inplanes, - **kwargs)) - inplanes = planes * block.expansion - for _ in range(1, num_blocks): - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - - super(ResLayer, self).__init__(*layers) - - -@BACKBONES.register_module() -class DetectoRS_ResNet(ResNet): - """ResNet backbone for DetectoRS. - - Args: - sac (dict, optional): Dictionary to construct SAC (Switchable Atrous - Convolution). Default: None. - stage_with_sac (list): Which stage to use sac. Default: (False, False, - False, False). - rfp_inplanes (int, optional): The number of channels from RFP. - Default: None. If specified, an additional conv layer will be - added for ``rfp_feat``. Otherwise, the structure is the same as - base class. - output_img (bool): If ``True``, the input image will be inserted into - the starting position of output. Default: False. 
- """ - - arch_settings = { - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, - sac=None, - stage_with_sac=(False, False, False, False), - rfp_inplanes=None, - output_img=False, - pretrained=None, - init_cfg=None, - **kwargs): - assert not (init_cfg and pretrained), \ - 'init_cfg and pretrained cannot be specified at the same time' - self.pretrained = pretrained - if init_cfg is not None: - assert isinstance(init_cfg, dict), \ - f'init_cfg must be a dict, but got {type(init_cfg)}' - if 'type' in init_cfg: - assert init_cfg.get('type') == 'Pretrained', \ - 'Only can initialize module by loading a pretrained model' - else: - raise KeyError('`init_cfg` must contain the key "type"') - self.pretrained = init_cfg.get('checkpoint') - self.sac = sac - self.stage_with_sac = stage_with_sac - self.rfp_inplanes = rfp_inplanes - self.output_img = output_img - super(DetectoRS_ResNet, self).__init__(**kwargs) - - self.inplanes = self.stem_channels - self.res_layers = [] - for i, num_blocks in enumerate(self.stage_blocks): - stride = self.strides[i] - dilation = self.dilations[i] - dcn = self.dcn if self.stage_with_dcn[i] else None - sac = self.sac if self.stage_with_sac[i] else None - if self.plugins is not None: - stage_plugins = self.make_stage_plugins(self.plugins, i) - else: - stage_plugins = None - planes = self.base_channels * 2**i - res_layer = self.make_res_layer( - block=self.block, - inplanes=self.inplanes, - planes=planes, - num_blocks=num_blocks, - stride=stride, - dilation=dilation, - style=self.style, - avg_down=self.avg_down, - with_cp=self.with_cp, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - dcn=dcn, - sac=sac, - rfp_inplanes=rfp_inplanes if i > 0 else None, - plugins=stage_plugins) - self.inplanes = planes * self.block.expansion - layer_name = f'layer{i + 1}' - self.add_module(layer_name, res_layer) - self.res_layers.append(layer_name) - - self._freeze_stages() - - # In order to be properly initialized by RFP - def init_weights(self): - # Calling this method will cause parameter initialization exception - # super(DetectoRS_ResNet, self).init_weights() - - if isinstance(self.pretrained, str): - logger = get_root_logger() - load_checkpoint(self, self.pretrained, strict=False, logger=logger) - elif self.pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, (_BatchNorm, nn.GroupNorm)): - constant_init(m, 1) - - if self.dcn is not None: - for m in self.modules(): - if isinstance(m, Bottleneck) and hasattr( - m.conv2, 'conv_offset'): - constant_init(m.conv2.conv_offset, 0) - - if self.zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - constant_init(m.norm3, 0) - elif isinstance(m, BasicBlock): - constant_init(m.norm2, 0) - else: - raise TypeError('pretrained must be a str or None') - - def make_res_layer(self, **kwargs): - """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS.""" - return ResLayer(**kwargs) - - def forward(self, x): - """Forward function.""" - outs = list(super(DetectoRS_ResNet, self).forward(x)) - if self.output_img: - outs.insert(0, x) - return tuple(outs) - - def rfp_forward(self, x, rfp_feats): - """Forward function for RFP.""" - if self.deep_stem: - x = self.stem(x) - else: - x = self.conv1(x) - x = self.norm1(x) - x = self.relu(x) - x = self.maxpool(x) - outs = [] - for i, layer_name in enumerate(self.res_layers): - res_layer = getattr(self, layer_name) - rfp_feat = rfp_feats[i] 
if i > 0 else None - for layer in res_layer: - x = layer.rfp_forward(x, rfp_feat) - if i in self.out_indices: - outs.append(x) - return tuple(outs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/detectors_resnext.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/detectors_resnext.py deleted file mode 100644 index 5e8b20a0266a9d7e37ff1d39b3a160abef565c85..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/detectors_resnext.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -from mmcv.cnn import build_conv_layer, build_norm_layer - -from ..builder import BACKBONES -from .detectors_resnet import Bottleneck as _Bottleneck -from .detectors_resnet import DetectoRS_ResNet - - -class Bottleneck(_Bottleneck): - expansion = 4 - - def __init__(self, - inplanes, - planes, - groups=1, - base_width=4, - base_channels=64, - **kwargs): - """Bottleneck block for ResNeXt. - - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer. - """ - super(Bottleneck, self).__init__(inplanes, planes, **kwargs) - - if groups == 1: - width = self.planes - else: - width = math.floor(self.planes * - (base_width / base_channels)) * groups - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, width, postfix=1) - self.norm2_name, norm2 = build_norm_layer( - self.norm_cfg, width, postfix=2) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.planes * self.expansion, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.inplanes, - width, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - fallback_on_stride = False - self.with_modulated_dcn = False - if self.with_dcn: - fallback_on_stride = self.dcn.pop('fallback_on_stride', False) - if self.with_sac: - self.conv2 = build_conv_layer( - self.sac, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - elif not self.with_dcn or fallback_on_stride: - self.conv2 = build_conv_layer( - self.conv_cfg, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - else: - assert self.conv_cfg is None, 'conv_cfg must be None for DCN' - self.conv2 = build_conv_layer( - self.dcn, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.conv3 = build_conv_layer( - self.conv_cfg, - width, - self.planes * self.expansion, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - -@BACKBONES.register_module() -class DetectoRS_ResNeXt(DetectoRS_ResNet): - """ResNeXt backbone for DetectoRS. - - Args: - groups (int): The number of groups in ResNeXt. - base_width (int): The base width of ResNeXt. 
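- **kwargs: Remaining keyword arguments passed to ``DetectoRS_ResNet``.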
- """ - - arch_settings = { - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, groups=1, base_width=4, **kwargs): - self.groups = groups - self.base_width = base_width - super(DetectoRS_ResNeXt, self).__init__(**kwargs) - - def make_res_layer(self, **kwargs): - return super().make_res_layer( - groups=self.groups, - base_width=self.base_width, - base_channels=self.base_channels, - **kwargs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/efficientnet.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/efficientnet.py deleted file mode 100644 index 7ee359567d91d0e42aa09dd2ad3be4ba006176c0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/efficientnet.py +++ /dev/null @@ -1,417 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import math -from functools import partial - -import torch -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn.bricks import ConvModule, DropPath -from mmcv.runner import BaseModule, Sequential - -from ..builder import BACKBONES -from ..utils import InvertedResidual, SELayer, make_divisible - - -class EdgeResidual(BaseModule): - """Edge Residual Block. - - Args: - in_channels (int): The input channels of this module. - out_channels (int): The output channels of this module. - mid_channels (int): The input channels of the second convolution. - kernel_size (int): The kernel size of the first convolution. - Defaults to 3. - stride (int): The stride of the first convolution. Defaults to 1. - se_cfg (dict, optional): Config dict for se layer. Defaults to None, - which means no se layer. - with_residual (bool): Use residual connection. Defaults to True. - conv_cfg (dict, optional): Config dict for convolution layer. - Defaults to None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Defaults to ``dict(type='BN')``. - act_cfg (dict): Config dict for activation layer. - Defaults to ``dict(type='ReLU')``. - drop_path_rate (float): stochastic depth rate. Defaults to 0. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Defaults to False. - init_cfg (dict | list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - in_channels, - out_channels, - mid_channels, - kernel_size=3, - stride=1, - se_cfg=None, - with_residual=True, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - drop_path_rate=0., - with_cp=False, - init_cfg=None, - **kwargs): - super(EdgeResidual, self).__init__(init_cfg=init_cfg) - assert stride in [1, 2] - self.with_cp = with_cp - self.drop_path = DropPath( - drop_path_rate) if drop_path_rate > 0 else nn.Identity() - self.with_se = se_cfg is not None - self.with_residual = ( - stride == 1 and in_channels == out_channels and with_residual) - - if self.with_se: - assert isinstance(se_cfg, dict) - - self.conv1 = ConvModule( - in_channels=in_channels, - out_channels=mid_channels, - kernel_size=kernel_size, - stride=1, - padding=kernel_size // 2, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - if self.with_se: - self.se = SELayer(**se_cfg) - - self.conv2 = ConvModule( - in_channels=mid_channels, - out_channels=out_channels, - kernel_size=1, - stride=stride, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - def forward(self, x): - - def _inner_forward(x): - out = x - out = self.conv1(out) - - if self.with_se: - out = self.se(out) - - out = self.conv2(out) - - if self.with_residual: - return x + self.drop_path(out) - else: - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out - - -def model_scaling(layer_setting, arch_setting): - """Scaling operation to the layer's parameters according to the - arch_setting.""" - # scale width - new_layer_setting = copy.deepcopy(layer_setting) - for layer_cfg in new_layer_setting: - for block_cfg in layer_cfg: - block_cfg[1] = make_divisible(block_cfg[1] * arch_setting[0], 8) - - # scale depth - split_layer_setting = [new_layer_setting[0]] - for layer_cfg in new_layer_setting[1:-1]: - tmp_index = [0] - for i in range(len(layer_cfg) - 1): - if layer_cfg[i + 1][1] != layer_cfg[i][1]: - tmp_index.append(i + 1) - tmp_index.append(len(layer_cfg)) - for i in range(len(tmp_index) - 1): - split_layer_setting.append(layer_cfg[tmp_index[i]:tmp_index[i + - 1]]) - split_layer_setting.append(new_layer_setting[-1]) - - num_of_layers = [len(layer_cfg) for layer_cfg in split_layer_setting[1:-1]] - new_layers = [ - int(math.ceil(arch_setting[1] * num)) for num in num_of_layers - ] - - merge_layer_setting = [split_layer_setting[0]] - for i, layer_cfg in enumerate(split_layer_setting[1:-1]): - if new_layers[i] <= num_of_layers[i]: - tmp_layer_cfg = layer_cfg[:new_layers[i]] - else: - tmp_layer_cfg = copy.deepcopy(layer_cfg) + [layer_cfg[-1]] * ( - new_layers[i] - num_of_layers[i]) - if tmp_layer_cfg[0][3] == 1 and i != 0: - merge_layer_setting[-1] += tmp_layer_cfg.copy() - else: - merge_layer_setting.append(tmp_layer_cfg.copy()) - merge_layer_setting.append(split_layer_setting[-1]) - - return merge_layer_setting - - -@BACKBONES.register_module() -class EfficientNet(BaseModule): - """EfficientNet backbone. - - Args: - arch (str): Architecture of efficientnet. Defaults to b0. - out_indices (Sequence[int]): Output from which stages. - Defaults to (6, ). - frozen_stages (int): Stages to be frozen (all param fixed). - Defaults to 0, which means not freezing any parameters. - conv_cfg (dict): Config dict for convolution layer. - Defaults to None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Defaults to dict(type='BN'). 
- act_cfg (dict): Config dict for activation layer. - Defaults to dict(type='Swish'). - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Defaults to False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Defaults to False. - """ - - # Parameters to build layers. - # 'b' represents the architecture of normal EfficientNet family includes - # 'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8'. - # 'e' represents the architecture of EfficientNet-EdgeTPU including 'es', - # 'em', 'el'. - # 6 parameters are needed to construct a layer, From left to right: - # - kernel_size: The kernel size of the block - # - out_channel: The number of out_channels of the block - # - se_ratio: The sequeeze ratio of SELayer. - # - stride: The stride of the block - # - expand_ratio: The expand_ratio of the mid_channels - # - block_type: -1: Not a block, 0: InvertedResidual, 1: EdgeResidual - layer_settings = { - 'b': [[[3, 32, 0, 2, 0, -1]], - [[3, 16, 4, 1, 1, 0]], - [[3, 24, 4, 2, 6, 0], - [3, 24, 4, 1, 6, 0]], - [[5, 40, 4, 2, 6, 0], - [5, 40, 4, 1, 6, 0]], - [[3, 80, 4, 2, 6, 0], - [3, 80, 4, 1, 6, 0], - [3, 80, 4, 1, 6, 0], - [5, 112, 4, 1, 6, 0], - [5, 112, 4, 1, 6, 0], - [5, 112, 4, 1, 6, 0]], - [[5, 192, 4, 2, 6, 0], - [5, 192, 4, 1, 6, 0], - [5, 192, 4, 1, 6, 0], - [5, 192, 4, 1, 6, 0], - [3, 320, 4, 1, 6, 0]], - [[1, 1280, 0, 1, 0, -1]] - ], - 'e': [[[3, 32, 0, 2, 0, -1]], - [[3, 24, 0, 1, 3, 1]], - [[3, 32, 0, 2, 8, 1], - [3, 32, 0, 1, 8, 1]], - [[3, 48, 0, 2, 8, 1], - [3, 48, 0, 1, 8, 1], - [3, 48, 0, 1, 8, 1], - [3, 48, 0, 1, 8, 1]], - [[5, 96, 0, 2, 8, 0], - [5, 96, 0, 1, 8, 0], - [5, 96, 0, 1, 8, 0], - [5, 96, 0, 1, 8, 0], - [5, 96, 0, 1, 8, 0], - [5, 144, 0, 1, 8, 0], - [5, 144, 0, 1, 8, 0], - [5, 144, 0, 1, 8, 0], - [5, 144, 0, 1, 8, 0]], - [[5, 192, 0, 2, 8, 0], - [5, 192, 0, 1, 8, 0]], - [[1, 1280, 0, 1, 0, -1]] - ] - } # yapf: disable - - # Parameters to build different kinds of architecture. - # From left to right: scaling factor for width, scaling factor for depth, - # resolution. - arch_settings = { - 'b0': (1.0, 1.0, 224), - 'b1': (1.0, 1.1, 240), - 'b2': (1.1, 1.2, 260), - 'b3': (1.2, 1.4, 300), - 'b4': (1.4, 1.8, 380), - 'b5': (1.6, 2.2, 456), - 'b6': (1.8, 2.6, 528), - 'b7': (2.0, 3.1, 600), - 'b8': (2.2, 3.6, 672), - 'es': (1.0, 1.0, 224), - 'em': (1.0, 1.1, 240), - 'el': (1.2, 1.4, 300) - } - - def __init__(self, - arch='b0', - drop_path_rate=0., - out_indices=(6, ), - frozen_stages=0, - conv_cfg=dict(type='Conv2dAdaptivePadding'), - norm_cfg=dict(type='BN', eps=1e-3), - act_cfg=dict(type='Swish'), - norm_eval=False, - with_cp=False, - init_cfg=[ - dict(type='Kaiming', layer='Conv2d'), - dict( - type='Constant', - layer=['_BatchNorm', 'GroupNorm'], - val=1) - ]): - super(EfficientNet, self).__init__(init_cfg) - assert arch in self.arch_settings, \ - f'"{arch}" is not one of the arch_settings ' \ - f'({", ".join(self.arch_settings.keys())})' - self.arch_setting = self.arch_settings[arch] - self.layer_setting = self.layer_settings[arch[:1]] - for index in out_indices: - if index not in range(0, len(self.layer_setting)): - raise ValueError('the item in out_indices must in ' - f'range(0, {len(self.layer_setting)}). ' - f'But received {index}') - - if frozen_stages not in range(len(self.layer_setting) + 1): - raise ValueError('frozen_stages must be in range(0, ' - f'{len(self.layer_setting) + 1}). 
' - f'But received {frozen_stages}') - self.drop_path_rate = drop_path_rate - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.norm_eval = norm_eval - self.with_cp = with_cp - - self.layer_setting = model_scaling(self.layer_setting, - self.arch_setting) - block_cfg_0 = self.layer_setting[0][0] - block_cfg_last = self.layer_setting[-1][0] - self.in_channels = make_divisible(block_cfg_0[1], 8) - self.out_channels = block_cfg_last[1] - self.layers = nn.ModuleList() - self.layers.append( - ConvModule( - in_channels=3, - out_channels=self.in_channels, - kernel_size=block_cfg_0[0], - stride=block_cfg_0[3], - padding=block_cfg_0[0] // 2, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - self.make_layer() - # Avoid building unused layers in mmdetection. - if len(self.layers) < max(self.out_indices) + 1: - self.layers.append( - ConvModule( - in_channels=self.in_channels, - out_channels=self.out_channels, - kernel_size=block_cfg_last[0], - stride=block_cfg_last[3], - padding=block_cfg_last[0] // 2, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - - def make_layer(self): - # Without the first and the final conv block. - layer_setting = self.layer_setting[1:-1] - - total_num_blocks = sum([len(x) for x in layer_setting]) - block_idx = 0 - dpr = [ - x.item() - for x in torch.linspace(0, self.drop_path_rate, total_num_blocks) - ] # stochastic depth decay rule - - for i, layer_cfg in enumerate(layer_setting): - # Avoid building unused layers in mmdetection. - if i > max(self.out_indices) - 1: - break - layer = [] - for i, block_cfg in enumerate(layer_cfg): - (kernel_size, out_channels, se_ratio, stride, expand_ratio, - block_type) = block_cfg - - mid_channels = int(self.in_channels * expand_ratio) - out_channels = make_divisible(out_channels, 8) - if se_ratio <= 0: - se_cfg = None - else: - # In mmdetection, the `divisor` is deleted to align - # the logic of SELayer with mmcls. - se_cfg = dict( - channels=mid_channels, - ratio=expand_ratio * se_ratio, - act_cfg=(self.act_cfg, dict(type='Sigmoid'))) - if block_type == 1: # edge tpu - if i > 0 and expand_ratio == 3: - with_residual = False - expand_ratio = 4 - else: - with_residual = True - mid_channels = int(self.in_channels * expand_ratio) - if se_cfg is not None: - # In mmdetection, the `divisor` is deleted to align - # the logic of SELayer with mmcls. - se_cfg = dict( - channels=mid_channels, - ratio=se_ratio * expand_ratio, - act_cfg=(self.act_cfg, dict(type='Sigmoid'))) - block = partial(EdgeResidual, with_residual=with_residual) - else: - block = InvertedResidual - layer.append( - block( - in_channels=self.in_channels, - out_channels=out_channels, - mid_channels=mid_channels, - kernel_size=kernel_size, - stride=stride, - se_cfg=se_cfg, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - drop_path_rate=dpr[block_idx], - with_cp=self.with_cp, - # In mmdetection, `with_expand_conv` is set to align - # the logic of InvertedResidual with mmcls. 
- with_expand_conv=(mid_channels != self.in_channels))) - self.in_channels = out_channels - block_idx += 1 - self.layers.append(Sequential(*layer)) - - def forward(self, x): - outs = [] - for i, layer in enumerate(self.layers): - x = layer(x) - if i in self.out_indices: - outs.append(x) - - return tuple(outs) - - def _freeze_stages(self): - for i in range(self.frozen_stages): - m = self.layers[i] - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def train(self, mode=True): - super(EfficientNet, self).train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, nn.BatchNorm2d): - m.eval() diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/hourglass.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/hourglass.py deleted file mode 100644 index f0dfb434f8508d34d37a831230a8f794f0c354b4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/hourglass.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule - -from ..builder import BACKBONES -from ..utils import ResLayer -from .resnet import BasicBlock - - -class HourglassModule(BaseModule): - """Hourglass Module for HourglassNet backbone. - - Generate module recursively and use BasicBlock as the base unit. - - Args: - depth (int): Depth of current HourglassModule. - stage_channels (list[int]): Feature channels of sub-modules in current - and follow-up HourglassModule. - stage_blocks (list[int]): Number of sub-modules stacked in current and - follow-up HourglassModule. - norm_cfg (dict): Dictionary to construct and config norm layer. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - upsample_cfg (dict, optional): Config dict for interpolate layer. - Default: `dict(mode='nearest')` - """ - - def __init__(self, - depth, - stage_channels, - stage_blocks, - norm_cfg=dict(type='BN', requires_grad=True), - init_cfg=None, - upsample_cfg=dict(mode='nearest')): - super(HourglassModule, self).__init__(init_cfg) - - self.depth = depth - - cur_block = stage_blocks[0] - next_block = stage_blocks[1] - - cur_channel = stage_channels[0] - next_channel = stage_channels[1] - - self.up1 = ResLayer( - BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg) - - self.low1 = ResLayer( - BasicBlock, - cur_channel, - next_channel, - cur_block, - stride=2, - norm_cfg=norm_cfg) - - if self.depth > 1: - self.low2 = HourglassModule(depth - 1, stage_channels[1:], - stage_blocks[1:]) - else: - self.low2 = ResLayer( - BasicBlock, - next_channel, - next_channel, - next_block, - norm_cfg=norm_cfg) - - self.low3 = ResLayer( - BasicBlock, - next_channel, - cur_channel, - cur_block, - norm_cfg=norm_cfg, - downsample_first=False) - - self.up2 = F.interpolate - self.upsample_cfg = upsample_cfg - - def forward(self, x): - """Forward function.""" - up1 = self.up1(x) - low1 = self.low1(x) - low2 = self.low2(low1) - low3 = self.low3(low2) - # Fixing `scale factor` (e.g. 2) is common for upsampling, but - # in some cases the spatial size is mismatched and error will arise. 
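- # Illustrative (hypothetical) shapes: with a 5x5 ``up1``, the stride-2
- # branch produces a 3x3 ``low3``; nearest upsampling by a fixed factor
- # of 2 would yield 6x6 and the ``up1 + up2`` addition below would fail,
- # so the target size is taken from ``up1`` when no scale_factor is set.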
- if 'scale_factor' in self.upsample_cfg: - up2 = self.up2(low3, **self.upsample_cfg) - else: - shape = up1.shape[2:] - up2 = self.up2(low3, size=shape, **self.upsample_cfg) - return up1 + up2 - - -@BACKBONES.register_module() -class HourglassNet(BaseModule): - """HourglassNet backbone. - - Stacked Hourglass Networks for Human Pose Estimation. - More details can be found in the `paper - `_ . - - Args: - downsample_times (int): Downsample times in a HourglassModule. - num_stacks (int): Number of HourglassModule modules stacked, - 1 for Hourglass-52, 2 for Hourglass-104. - stage_channels (list[int]): Feature channel of each sub-module in a - HourglassModule. - stage_blocks (list[int]): Number of sub-modules stacked in a - HourglassModule. - feat_channel (int): Feature channel of conv after a HourglassModule. - norm_cfg (dict): Dictionary to construct and config norm layer. - pretrained (str, optional): model pretrained path. Default: None - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - - Example: - >>> from mmdet.models import HourglassNet - >>> import torch - >>> self = HourglassNet() - >>> self.eval() - >>> inputs = torch.rand(1, 3, 511, 511) - >>> level_outputs = self.forward(inputs) - >>> for level_output in level_outputs: - ... print(tuple(level_output.shape)) - (1, 256, 128, 128) - (1, 256, 128, 128) - """ - - def __init__(self, - downsample_times=5, - num_stacks=2, - stage_channels=(256, 256, 384, 384, 384, 512), - stage_blocks=(2, 2, 2, 2, 2, 4), - feat_channel=256, - norm_cfg=dict(type='BN', requires_grad=True), - pretrained=None, - init_cfg=None): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - super(HourglassNet, self).__init__(init_cfg) - - self.num_stacks = num_stacks - assert self.num_stacks >= 1 - assert len(stage_channels) == len(stage_blocks) - assert len(stage_channels) > downsample_times - - cur_channel = stage_channels[0] - - self.stem = nn.Sequential( - ConvModule( - 3, cur_channel // 2, 7, padding=3, stride=2, - norm_cfg=norm_cfg), - ResLayer( - BasicBlock, - cur_channel // 2, - cur_channel, - 1, - stride=2, - norm_cfg=norm_cfg)) - - self.hourglass_modules = nn.ModuleList([ - HourglassModule(downsample_times, stage_channels, stage_blocks) - for _ in range(num_stacks) - ]) - - self.inters = ResLayer( - BasicBlock, - cur_channel, - cur_channel, - num_stacks - 1, - norm_cfg=norm_cfg) - - self.conv1x1s = nn.ModuleList([ - ConvModule( - cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) - for _ in range(num_stacks - 1) - ]) - - self.out_convs = nn.ModuleList([ - ConvModule( - cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg) - for _ in range(num_stacks) - ]) - - self.remap_convs = nn.ModuleList([ - ConvModule( - feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) - for _ in range(num_stacks - 1) - ]) - - self.relu = nn.ReLU(inplace=True) - - def init_weights(self): - """Init module weights.""" - # Training Centripetal Model needs to reset parameters for Conv2d - super(HourglassNet, self).init_weights() - for m in self.modules(): - if isinstance(m, nn.Conv2d): - m.reset_parameters() - - def forward(self, x): - """Forward function.""" - inter_feat = self.stem(x) - out_feats = [] - - for ind in range(self.num_stacks): - single_hourglass = self.hourglass_modules[ind] - out_conv = self.out_convs[ind] - - hourglass_feat = single_hourglass(inter_feat) - out_feat = out_conv(hourglass_feat) - out_feats.append(out_feat) - - if ind < 
self.num_stacks - 1: - inter_feat = self.conv1x1s[ind]( - inter_feat) + self.remap_convs[ind]( - out_feat) - inter_feat = self.inters[ind](self.relu(inter_feat)) - - return out_feats diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/hrnet.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/hrnet.py deleted file mode 100644 index 06c210a6d422ccc0f55c96dc6c29be052af5494f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/hrnet.py +++ /dev/null @@ -1,589 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch.nn as nn -from mmcv.cnn import build_conv_layer, build_norm_layer -from mmcv.runner import BaseModule, ModuleList, Sequential -from torch.nn.modules.batchnorm import _BatchNorm - -from ..builder import BACKBONES -from .resnet import BasicBlock, Bottleneck - - -class HRModule(BaseModule): - """High-Resolution Module for HRNet. - - In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange - is in this module. - """ - - def __init__(self, - num_branches, - blocks, - num_blocks, - in_channels, - num_channels, - multiscale_output=True, - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - block_init_cfg=None, - init_cfg=None): - super(HRModule, self).__init__(init_cfg) - self.block_init_cfg = block_init_cfg - self._check_branches(num_branches, num_blocks, in_channels, - num_channels) - - self.in_channels = in_channels - self.num_branches = num_branches - - self.multiscale_output = multiscale_output - self.norm_cfg = norm_cfg - self.conv_cfg = conv_cfg - self.with_cp = with_cp - self.branches = self._make_branches(num_branches, blocks, num_blocks, - num_channels) - self.fuse_layers = self._make_fuse_layers() - self.relu = nn.ReLU(inplace=False) - - def _check_branches(self, num_branches, num_blocks, in_channels, - num_channels): - if num_branches != len(num_blocks): - error_msg = f'NUM_BRANCHES({num_branches}) ' \ - f'!= NUM_BLOCKS({len(num_blocks)})' - raise ValueError(error_msg) - - if num_branches != len(num_channels): - error_msg = f'NUM_BRANCHES({num_branches}) ' \ - f'!= NUM_CHANNELS({len(num_channels)})' - raise ValueError(error_msg) - - if num_branches != len(in_channels): - error_msg = f'NUM_BRANCHES({num_branches}) ' \ - f'!= NUM_INCHANNELS({len(in_channels)})' - raise ValueError(error_msg) - - def _make_one_branch(self, - branch_index, - block, - num_blocks, - num_channels, - stride=1): - downsample = None - if stride != 1 or \ - self.in_channels[branch_index] != \ - num_channels[branch_index] * block.expansion: - downsample = nn.Sequential( - build_conv_layer( - self.conv_cfg, - self.in_channels[branch_index], - num_channels[branch_index] * block.expansion, - kernel_size=1, - stride=stride, - bias=False), - build_norm_layer(self.norm_cfg, num_channels[branch_index] * - block.expansion)[1]) - - layers = [] - layers.append( - block( - self.in_channels[branch_index], - num_channels[branch_index], - stride, - downsample=downsample, - with_cp=self.with_cp, - norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg, - init_cfg=self.block_init_cfg)) - self.in_channels[branch_index] = \ - num_channels[branch_index] * block.expansion - for i in range(1, num_blocks[branch_index]): - layers.append( - block( - self.in_channels[branch_index], - num_channels[branch_index], - with_cp=self.with_cp, - norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg, - init_cfg=self.block_init_cfg)) - - return Sequential(*layers) - - def _make_branches(self, num_branches, block, num_blocks, 
num_channels): - branches = [] - - for i in range(num_branches): - branches.append( - self._make_one_branch(i, block, num_blocks, num_channels)) - - return ModuleList(branches) - - def _make_fuse_layers(self): - if self.num_branches == 1: - return None - - num_branches = self.num_branches - in_channels = self.in_channels - fuse_layers = [] - num_out_branches = num_branches if self.multiscale_output else 1 - for i in range(num_out_branches): - fuse_layer = [] - for j in range(num_branches): - if j > i: - fuse_layer.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels[j], - in_channels[i], - kernel_size=1, - stride=1, - padding=0, - bias=False), - build_norm_layer(self.norm_cfg, in_channels[i])[1], - nn.Upsample( - scale_factor=2**(j - i), mode='nearest'))) - elif j == i: - fuse_layer.append(None) - else: - conv_downsamples = [] - for k in range(i - j): - if k == i - j - 1: - conv_downsamples.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels[j], - in_channels[i], - kernel_size=3, - stride=2, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, - in_channels[i])[1])) - else: - conv_downsamples.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels[j], - in_channels[j], - kernel_size=3, - stride=2, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, - in_channels[j])[1], - nn.ReLU(inplace=False))) - fuse_layer.append(nn.Sequential(*conv_downsamples)) - fuse_layers.append(nn.ModuleList(fuse_layer)) - - return nn.ModuleList(fuse_layers) - - def forward(self, x): - """Forward function.""" - if self.num_branches == 1: - return [self.branches[0](x[0])] - - for i in range(self.num_branches): - x[i] = self.branches[i](x[i]) - - x_fuse = [] - for i in range(len(self.fuse_layers)): - y = 0 - for j in range(self.num_branches): - if i == j: - y += x[j] - else: - y += self.fuse_layers[i][j](x[j]) - x_fuse.append(self.relu(y)) - return x_fuse - - -@BACKBONES.register_module() -class HRNet(BaseModule): - """HRNet backbone. - - `High-Resolution Representations for Labeling Pixels and Regions - arXiv: `_. - - Args: - extra (dict): Detailed configuration for each stage of HRNet. - There must be 4 stages, the configuration for each stage must have - 5 keys: - - - num_modules(int): The number of HRModule in this stage. - - num_branches(int): The number of branches in the HRModule. - - block(str): The type of convolution block. - - num_blocks(tuple): The number of blocks in each branch. - The length must be equal to num_branches. - - num_channels(tuple): The number of channels in each branch. - The length must be equal to num_branches. - in_channels (int): Number of input image channels. Default: 3. - conv_cfg (dict): Dictionary to construct and config conv layer. - norm_cfg (dict): Dictionary to construct and config norm layer. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: True. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. Default: False. - multiscale_output (bool): Whether to output multi-level features - produced by multiple branches. If False, only the first level - feature will be output. Default: True. - pretrained (str, optional): Model pretrained path. Default: None. 
- init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. - - Example: - >>> from mmdet.models import HRNet - >>> import torch - >>> extra = dict( - >>> stage1=dict( - >>> num_modules=1, - >>> num_branches=1, - >>> block='BOTTLENECK', - >>> num_blocks=(4, ), - >>> num_channels=(64, )), - >>> stage2=dict( - >>> num_modules=1, - >>> num_branches=2, - >>> block='BASIC', - >>> num_blocks=(4, 4), - >>> num_channels=(32, 64)), - >>> stage3=dict( - >>> num_modules=4, - >>> num_branches=3, - >>> block='BASIC', - >>> num_blocks=(4, 4, 4), - >>> num_channels=(32, 64, 128)), - >>> stage4=dict( - >>> num_modules=3, - >>> num_branches=4, - >>> block='BASIC', - >>> num_blocks=(4, 4, 4, 4), - >>> num_channels=(32, 64, 128, 256))) - >>> self = HRNet(extra, in_channels=1) - >>> self.eval() - >>> inputs = torch.rand(1, 1, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - (1, 32, 8, 8) - (1, 64, 4, 4) - (1, 128, 2, 2) - (1, 256, 1, 1) - """ - - blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} - - def __init__(self, - extra, - in_channels=3, - conv_cfg=None, - norm_cfg=dict(type='BN'), - norm_eval=True, - with_cp=False, - zero_init_residual=False, - multiscale_output=True, - pretrained=None, - init_cfg=None): - super(HRNet, self).__init__(init_cfg) - - self.pretrained = pretrained - assert not (init_cfg and pretrained), \ - 'init_cfg and pretrained cannot be specified at the same time' - if isinstance(pretrained, str): - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - elif pretrained is None: - if init_cfg is None: - self.init_cfg = [ - dict(type='Kaiming', layer='Conv2d'), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ] - else: - raise TypeError('pretrained must be a str or None') - - # Assert configurations of 4 stages are in extra - assert 'stage1' in extra and 'stage2' in extra \ - and 'stage3' in extra and 'stage4' in extra - # Assert whether the length of `num_blocks` and `num_channels` are - # equal to `num_branches` - for i in range(4): - cfg = extra[f'stage{i + 1}'] - assert len(cfg['num_blocks']) == cfg['num_branches'] and \ - len(cfg['num_channels']) == cfg['num_branches'] - - self.extra = extra - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.norm_eval = norm_eval - self.with_cp = with_cp - self.zero_init_residual = zero_init_residual - - # stem net - self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) - self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) - - self.conv1 = build_conv_layer( - self.conv_cfg, - in_channels, - 64, - kernel_size=3, - stride=2, - padding=1, - bias=False) - - self.add_module(self.norm1_name, norm1) - self.conv2 = build_conv_layer( - self.conv_cfg, - 64, - 64, - kernel_size=3, - stride=2, - padding=1, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.relu = nn.ReLU(inplace=True) - - # stage 1 - self.stage1_cfg = self.extra['stage1'] - num_channels = self.stage1_cfg['num_channels'][0] - block_type = self.stage1_cfg['block'] - num_blocks = self.stage1_cfg['num_blocks'][0] - - block = self.blocks_dict[block_type] - stage1_out_channels = num_channels * block.expansion - self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) - - # stage 2 - self.stage2_cfg = self.extra['stage2'] - num_channels = self.stage2_cfg['num_channels'] - 
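- # The per-branch widths below are scaled by the block expansion factor
- # (1 for BasicBlock, 4 for Bottleneck) before building the transition
- # layer and the stage itself; stages 3 and 4 repeat the same pattern.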
block_type = self.stage2_cfg['block'] - - block = self.blocks_dict[block_type] - num_channels = [channel * block.expansion for channel in num_channels] - self.transition1 = self._make_transition_layer([stage1_out_channels], - num_channels) - self.stage2, pre_stage_channels = self._make_stage( - self.stage2_cfg, num_channels) - - # stage 3 - self.stage3_cfg = self.extra['stage3'] - num_channels = self.stage3_cfg['num_channels'] - block_type = self.stage3_cfg['block'] - - block = self.blocks_dict[block_type] - num_channels = [channel * block.expansion for channel in num_channels] - self.transition2 = self._make_transition_layer(pre_stage_channels, - num_channels) - self.stage3, pre_stage_channels = self._make_stage( - self.stage3_cfg, num_channels) - - # stage 4 - self.stage4_cfg = self.extra['stage4'] - num_channels = self.stage4_cfg['num_channels'] - block_type = self.stage4_cfg['block'] - - block = self.blocks_dict[block_type] - num_channels = [channel * block.expansion for channel in num_channels] - self.transition3 = self._make_transition_layer(pre_stage_channels, - num_channels) - self.stage4, pre_stage_channels = self._make_stage( - self.stage4_cfg, num_channels, multiscale_output=multiscale_output) - - @property - def norm1(self): - """nn.Module: the normalization layer named "norm1" """ - return getattr(self, self.norm1_name) - - @property - def norm2(self): - """nn.Module: the normalization layer named "norm2" """ - return getattr(self, self.norm2_name) - - def _make_transition_layer(self, num_channels_pre_layer, - num_channels_cur_layer): - num_branches_cur = len(num_channels_cur_layer) - num_branches_pre = len(num_channels_pre_layer) - - transition_layers = [] - for i in range(num_branches_cur): - if i < num_branches_pre: - if num_channels_cur_layer[i] != num_channels_pre_layer[i]: - transition_layers.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - num_channels_pre_layer[i], - num_channels_cur_layer[i], - kernel_size=3, - stride=1, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, - num_channels_cur_layer[i])[1], - nn.ReLU(inplace=True))) - else: - transition_layers.append(None) - else: - conv_downsamples = [] - for j in range(i + 1 - num_branches_pre): - in_channels = num_channels_pre_layer[-1] - out_channels = num_channels_cur_layer[i] \ - if j == i - num_branches_pre else in_channels - conv_downsamples.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels, - out_channels, - kernel_size=3, - stride=2, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, out_channels)[1], - nn.ReLU(inplace=True))) - transition_layers.append(nn.Sequential(*conv_downsamples)) - - return nn.ModuleList(transition_layers) - - def _make_layer(self, block, inplanes, planes, blocks, stride=1): - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = nn.Sequential( - build_conv_layer( - self.conv_cfg, - inplanes, - planes * block.expansion, - kernel_size=1, - stride=stride, - bias=False), - build_norm_layer(self.norm_cfg, planes * block.expansion)[1]) - - layers = [] - block_init_cfg = None - if self.pretrained is None and not hasattr( - self, 'init_cfg') and self.zero_init_residual: - if block is BasicBlock: - block_init_cfg = dict( - type='Constant', val=0, override=dict(name='norm2')) - elif block is Bottleneck: - block_init_cfg = dict( - type='Constant', val=0, override=dict(name='norm3')) - layers.append( - block( - inplanes, - planes, - stride, - downsample=downsample, - with_cp=self.with_cp, - 
norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg, - init_cfg=block_init_cfg, - )) - inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append( - block( - inplanes, - planes, - with_cp=self.with_cp, - norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg, - init_cfg=block_init_cfg)) - - return Sequential(*layers) - - def _make_stage(self, layer_config, in_channels, multiscale_output=True): - num_modules = layer_config['num_modules'] - num_branches = layer_config['num_branches'] - num_blocks = layer_config['num_blocks'] - num_channels = layer_config['num_channels'] - block = self.blocks_dict[layer_config['block']] - - hr_modules = [] - block_init_cfg = None - if self.pretrained is None and not hasattr( - self, 'init_cfg') and self.zero_init_residual: - if block is BasicBlock: - block_init_cfg = dict( - type='Constant', val=0, override=dict(name='norm2')) - elif block is Bottleneck: - block_init_cfg = dict( - type='Constant', val=0, override=dict(name='norm3')) - - for i in range(num_modules): - # multi_scale_output is only used for the last module - if not multiscale_output and i == num_modules - 1: - reset_multiscale_output = False - else: - reset_multiscale_output = True - - hr_modules.append( - HRModule( - num_branches, - block, - num_blocks, - in_channels, - num_channels, - reset_multiscale_output, - with_cp=self.with_cp, - norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg, - block_init_cfg=block_init_cfg)) - - return Sequential(*hr_modules), in_channels - - def forward(self, x): - """Forward function.""" - x = self.conv1(x) - x = self.norm1(x) - x = self.relu(x) - x = self.conv2(x) - x = self.norm2(x) - x = self.relu(x) - x = self.layer1(x) - - x_list = [] - for i in range(self.stage2_cfg['num_branches']): - if self.transition1[i] is not None: - x_list.append(self.transition1[i](x)) - else: - x_list.append(x) - y_list = self.stage2(x_list) - - x_list = [] - for i in range(self.stage3_cfg['num_branches']): - if self.transition2[i] is not None: - x_list.append(self.transition2[i](y_list[-1])) - else: - x_list.append(y_list[i]) - y_list = self.stage3(x_list) - - x_list = [] - for i in range(self.stage4_cfg['num_branches']): - if self.transition3[i] is not None: - x_list.append(self.transition3[i](y_list[-1])) - else: - x_list.append(y_list[i]) - y_list = self.stage4(x_list) - - return y_list - - def train(self, mode=True): - """Convert the model into training mode while keeping the normalization - layers frozen.""" - super(HRNet, self).train(mode) - if mode and self.norm_eval: - for m in self.modules(): - # trick: eval has an effect on BatchNorm only - if isinstance(m, _BatchNorm): - m.eval() diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/mobilenet_v2.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/mobilenet_v2.py deleted file mode 100644 index 8c6fcfaaa4c550b3568343f6b9baf1512d41b4db..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/mobilenet_v2.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule -from torch.nn.modules.batchnorm import _BatchNorm - -from ..builder import BACKBONES -from ..utils import InvertedResidual, make_divisible - - -@BACKBONES.register_module() -class MobileNetV2(BaseModule): - """MobileNetV2 backbone. - - Args: - widen_factor (float): Width multiplier, multiply number of - channels in each layer by this amount. Default: 1.0.
- out_indices (Sequence[int], optional): Output from which stages. - Default: (1, 2, 4, 7). - frozen_stages (int): Stages to be frozen (all param fixed). - Default: -1, which means not freezing any parameters. - conv_cfg (dict, optional): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU6'). - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - pretrained (str, optional): model pretrained path. Default: None - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - # Parameters to build layers. 4 parameters are needed to construct a - # layer, from left to right: expand_ratio, channel, num_blocks, stride. - arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], - [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], - [6, 320, 1, 1]] - - def __init__(self, - widen_factor=1., - out_indices=(1, 2, 4, 7), - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU6'), - norm_eval=False, - with_cp=False, - pretrained=None, - init_cfg=None): - super(MobileNetV2, self).__init__(init_cfg) - - self.pretrained = pretrained - assert not (init_cfg and pretrained), \ - 'init_cfg and pretrained cannot be specified at the same time' - if isinstance(pretrained, str): - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - elif pretrained is None: - if init_cfg is None: - self.init_cfg = [ - dict(type='Kaiming', layer='Conv2d'), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ] - else: - raise TypeError('pretrained must be a str or None') - - self.widen_factor = widen_factor - self.out_indices = out_indices - if not set(out_indices).issubset(set(range(0, 8))): - raise ValueError('out_indices must be a subset of range' - f'(0, 8). But received {out_indices}') - - if frozen_stages not in range(-1, 8): - raise ValueError('frozen_stages must be in range(-1, 8). 
' - f'But received {frozen_stages}') - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.norm_eval = norm_eval - self.with_cp = with_cp - - self.in_channels = make_divisible(32 * widen_factor, 8) - - self.conv1 = ConvModule( - in_channels=3, - out_channels=self.in_channels, - kernel_size=3, - stride=2, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - self.layers = [] - - for i, layer_cfg in enumerate(self.arch_settings): - expand_ratio, channel, num_blocks, stride = layer_cfg - out_channels = make_divisible(channel * widen_factor, 8) - inverted_res_layer = self.make_layer( - out_channels=out_channels, - num_blocks=num_blocks, - stride=stride, - expand_ratio=expand_ratio) - layer_name = f'layer{i + 1}' - self.add_module(layer_name, inverted_res_layer) - self.layers.append(layer_name) - - if widen_factor > 1.0: - self.out_channel = int(1280 * widen_factor) - else: - self.out_channel = 1280 - - layer = ConvModule( - in_channels=self.in_channels, - out_channels=self.out_channel, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.add_module('conv2', layer) - self.layers.append('conv2') - - def make_layer(self, out_channels, num_blocks, stride, expand_ratio): - """Stack InvertedResidual blocks to build a layer for MobileNetV2. - - Args: - out_channels (int): out_channels of block. - num_blocks (int): number of blocks. - stride (int): stride of the first block. Default: 1 - expand_ratio (int): Expand the number of channels of the - hidden layer in InvertedResidual by this ratio. Default: 6. - """ - layers = [] - for i in range(num_blocks): - if i >= 1: - stride = 1 - layers.append( - InvertedResidual( - self.in_channels, - out_channels, - mid_channels=int(round(self.in_channels * expand_ratio)), - stride=stride, - with_expand_conv=expand_ratio != 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - with_cp=self.with_cp)) - self.in_channels = out_channels - - return nn.Sequential(*layers) - - def _freeze_stages(self): - if self.frozen_stages >= 0: - for param in self.conv1.parameters(): - param.requires_grad = False - for i in range(1, self.frozen_stages + 1): - layer = getattr(self, f'layer{i}') - layer.eval() - for param in layer.parameters(): - param.requires_grad = False - - def forward(self, x): - """Forward function.""" - x = self.conv1(x) - outs = [] - for i, layer_name in enumerate(self.layers): - layer = getattr(self, layer_name) - x = layer(x) - if i in self.out_indices: - outs.append(x) - return tuple(outs) - - def train(self, mode=True): - """Convert the model into training mode while keep normalization layer - frozen.""" - super(MobileNetV2, self).train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - # trick: eval have effect on BatchNorm only - if isinstance(m, _BatchNorm): - m.eval() diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/pvt.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/pvt.py deleted file mode 100644 index 8b7d5d5344a7968b95a088f3c7822840016a52db..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/pvt.py +++ /dev/null @@ -1,591 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
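- """Building blocks of the PVT (Pyramid Vision Transformer) backbone:
- MixFFN, SpatialReductionAttention, PVTEncoderLayer and
- AbsolutePositionEmbedding."""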
-import math -import warnings - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import (Conv2d, build_activation_layer, build_norm_layer, - constant_init, normal_init, trunc_normal_init) -from mmcv.cnn.bricks.drop import build_dropout -from mmcv.cnn.bricks.transformer import MultiheadAttention -from mmcv.cnn.utils.weight_init import trunc_normal_ -from mmcv.runner import (BaseModule, ModuleList, Sequential, _load_checkpoint, - load_state_dict) -from torch.nn.modules.utils import _pair as to_2tuple - -from ...utils import get_root_logger -from ..builder import BACKBONES -from ..utils import PatchEmbed, nchw_to_nlc, nlc_to_nchw, pvt_convert - - -class MixFFN(BaseModule): - """An implementation of MixFFN of PVT. - - The differences between MixFFN & FFN: - 1. Use 1X1 Conv to replace Linear layer. - 2. Introduce 3X3 Depth-wise Conv to encode positional information. - - Args: - embed_dims (int): The feature dimension. Same as - `MultiheadAttention`. - feedforward_channels (int): The hidden dimension of FFNs. - act_cfg (dict, optional): The activation config for FFNs. - Default: dict(type='GELU'). - ffn_drop (float, optional): Probability of an element to be - zeroed in FFN. Default 0.0. - dropout_layer (obj:`ConfigDict`): The dropout_layer used - when adding the shortcut. - Default: None. - use_conv (bool): If True, add 3x3 DWConv between two Linear layers. - Defaults: False. - init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. - Default: None. - """ - - def __init__(self, - embed_dims, - feedforward_channels, - act_cfg=dict(type='GELU'), - ffn_drop=0., - dropout_layer=None, - use_conv=False, - init_cfg=None): - super(MixFFN, self).__init__(init_cfg=init_cfg) - - self.embed_dims = embed_dims - self.feedforward_channels = feedforward_channels - self.act_cfg = act_cfg - activate = build_activation_layer(act_cfg) - - in_channels = embed_dims - fc1 = Conv2d( - in_channels=in_channels, - out_channels=feedforward_channels, - kernel_size=1, - stride=1, - bias=True) - if use_conv: - # 3x3 depth wise conv to provide positional encode information - dw_conv = Conv2d( - in_channels=feedforward_channels, - out_channels=feedforward_channels, - kernel_size=3, - stride=1, - padding=(3 - 1) // 2, - bias=True, - groups=feedforward_channels) - fc2 = Conv2d( - in_channels=feedforward_channels, - out_channels=in_channels, - kernel_size=1, - stride=1, - bias=True) - drop = nn.Dropout(ffn_drop) - layers = [fc1, activate, drop, fc2, drop] - if use_conv: - layers.insert(1, dw_conv) - self.layers = Sequential(*layers) - self.dropout_layer = build_dropout( - dropout_layer) if dropout_layer else torch.nn.Identity() - - def forward(self, x, hw_shape, identity=None): - out = nlc_to_nchw(x, hw_shape) - out = self.layers(out) - out = nchw_to_nlc(out) - if identity is None: - identity = x - return identity + self.dropout_layer(out) - - -class SpatialReductionAttention(MultiheadAttention): - """An implementation of Spatial Reduction Attention of PVT. - - This module is modified from MultiheadAttention which is a module from - mmcv.cnn.bricks.transformer. - - Args: - embed_dims (int): The embedding dimension. - num_heads (int): Parallel attention heads. - attn_drop (float): A Dropout layer on attn_output_weights. - Default: 0.0. - proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. - Default: 0.0. - dropout_layer (obj:`ConfigDict`): The dropout_layer used - when adding the shortcut. Default: None. 
- batch_first (bool): Key, Query and Value are shape of - (batch, n, embed_dim) - or (n, batch, embed_dim). Default: False. - qkv_bias (bool): enable bias for qkv if True. Default: True. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='LN'). - sr_ratio (int): The ratio of spatial reduction of Spatial Reduction - Attention of PVT. Default: 1. - init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. - Default: None. - """ - - def __init__(self, - embed_dims, - num_heads, - attn_drop=0., - proj_drop=0., - dropout_layer=None, - batch_first=True, - qkv_bias=True, - norm_cfg=dict(type='LN'), - sr_ratio=1, - init_cfg=None): - super().__init__( - embed_dims, - num_heads, - attn_drop, - proj_drop, - batch_first=batch_first, - dropout_layer=dropout_layer, - bias=qkv_bias, - init_cfg=init_cfg) - - self.sr_ratio = sr_ratio - if sr_ratio > 1: - self.sr = Conv2d( - in_channels=embed_dims, - out_channels=embed_dims, - kernel_size=sr_ratio, - stride=sr_ratio) - # The ret[0] of build_norm_layer is norm name. - self.norm = build_norm_layer(norm_cfg, embed_dims)[1] - - # handle the BC-breaking from https://github.com/open-mmlab/mmcv/pull/1418 # noqa - from mmdet import digit_version, mmcv_version - if mmcv_version < digit_version('1.3.17'): - warnings.warn('The legacy version of forward function in' - 'SpatialReductionAttention is deprecated in' - 'mmcv>=1.3.17 and will no longer support in the' - 'future. Please upgrade your mmcv.') - self.forward = self.legacy_forward - - def forward(self, x, hw_shape, identity=None): - - x_q = x - if self.sr_ratio > 1: - x_kv = nlc_to_nchw(x, hw_shape) - x_kv = self.sr(x_kv) - x_kv = nchw_to_nlc(x_kv) - x_kv = self.norm(x_kv) - else: - x_kv = x - - if identity is None: - identity = x_q - - # Because the dataflow('key', 'query', 'value') of - # ``torch.nn.MultiheadAttention`` is (num_query, batch, - # embed_dims), We should adjust the shape of dataflow from - # batch_first (batch, num_query, embed_dims) to num_query_first - # (num_query ,batch, embed_dims), and recover ``attn_output`` - # from num_query_first to batch_first. - if self.batch_first: - x_q = x_q.transpose(0, 1) - x_kv = x_kv.transpose(0, 1) - - out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] - - if self.batch_first: - out = out.transpose(0, 1) - - return identity + self.dropout_layer(self.proj_drop(out)) - - def legacy_forward(self, x, hw_shape, identity=None): - """multi head attention forward in mmcv version < 1.3.17.""" - x_q = x - if self.sr_ratio > 1: - x_kv = nlc_to_nchw(x, hw_shape) - x_kv = self.sr(x_kv) - x_kv = nchw_to_nlc(x_kv) - x_kv = self.norm(x_kv) - else: - x_kv = x - - if identity is None: - identity = x_q - - out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] - - return identity + self.dropout_layer(self.proj_drop(out)) - - -class PVTEncoderLayer(BaseModule): - """Implements one encoder layer in PVT. - - Args: - embed_dims (int): The feature dimension. - num_heads (int): Parallel attention heads. - feedforward_channels (int): The hidden dimension for FFNs. - drop_rate (float): Probability of an element to be zeroed. - after the feed forward layer. Default: 0.0. - attn_drop_rate (float): The drop out rate for attention layer. - Default: 0.0. - drop_path_rate (float): stochastic depth rate. Default: 0.0. - qkv_bias (bool): enable bias for qkv if True. - Default: True. - act_cfg (dict): The activation config for FFNs. - Default: dict(type='GELU'). - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='LN'). 
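# Illustrative sketch, not from the deleted pvt.py: spatial-reduction
# attention keeps every query token but shrinks the key/value feature map by
# `sr_ratio` with a strided convolution before attention, cutting the cost
# from O(L^2) to roughly O(L^2 / sr_ratio^2). A minimal version on top of
# torch.nn.MultiheadAttention (batch_first), with illustrative names:
import torch
import torch.nn as nn

class TinySRAttention(nn.Module):
    def __init__(self, dim, num_heads, sr_ratio=1):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
            self.norm = nn.LayerNorm(dim)

    def forward(self, x, hw_shape):
        B, L, C = x.shape
        kv = x
        if self.sr_ratio > 1:
            H, W = hw_shape
            kv = x.transpose(1, 2).reshape(B, C, H, W)   # NLC -> NCHW
            kv = self.sr(kv).flatten(2).transpose(1, 2)  # reduced tokens
            kv = self.norm(kv)
        out, _ = self.attn(query=x, key=kv, value=kv)
        return x + out  # residual shortcut

x = torch.randn(2, 56 * 56, 64)
y = TinySRAttention(64, num_heads=1, sr_ratio=8)(x, (56, 56))  # (2, 3136, 64)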
- sr_ratio (int): The ratio of spatial reduction of Spatial Reduction - Attention of PVT. Default: 1. - use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. - Default: False. - init_cfg (dict, optional): Initialization config dict. - Default: None. - """ - - def __init__(self, - embed_dims, - num_heads, - feedforward_channels, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0., - qkv_bias=True, - act_cfg=dict(type='GELU'), - norm_cfg=dict(type='LN'), - sr_ratio=1, - use_conv_ffn=False, - init_cfg=None): - super(PVTEncoderLayer, self).__init__(init_cfg=init_cfg) - - # The ret[0] of build_norm_layer is norm name. - self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] - - self.attn = SpatialReductionAttention( - embed_dims=embed_dims, - num_heads=num_heads, - attn_drop=attn_drop_rate, - proj_drop=drop_rate, - dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), - qkv_bias=qkv_bias, - norm_cfg=norm_cfg, - sr_ratio=sr_ratio) - - # The ret[0] of build_norm_layer is norm name. - self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] - - self.ffn = MixFFN( - embed_dims=embed_dims, - feedforward_channels=feedforward_channels, - ffn_drop=drop_rate, - dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), - use_conv=use_conv_ffn, - act_cfg=act_cfg) - - def forward(self, x, hw_shape): - x = self.attn(self.norm1(x), hw_shape, identity=x) - x = self.ffn(self.norm2(x), hw_shape, identity=x) - - return x - - -class AbsolutePositionEmbedding(BaseModule): - """An implementation of the absolute position embedding in PVT. - - Args: - pos_shape (int): The shape of the absolute position embedding. - pos_dim (int): The dimension of the absolute position embedding. - drop_rate (float): Probability of an element to be zeroed. - Default: 0.0. - """ - - def __init__(self, pos_shape, pos_dim, drop_rate=0., init_cfg=None): - super().__init__(init_cfg=init_cfg) - - if isinstance(pos_shape, int): - pos_shape = to_2tuple(pos_shape) - elif isinstance(pos_shape, tuple): - if len(pos_shape) == 1: - pos_shape = to_2tuple(pos_shape[0]) - assert len(pos_shape) == 2, \ - f'The size of image should have length 1 or 2, ' \ - f'but got {len(pos_shape)}' - self.pos_shape = pos_shape - self.pos_dim = pos_dim - - self.pos_embed = nn.Parameter( - torch.zeros(1, pos_shape[0] * pos_shape[1], pos_dim)) - self.drop = nn.Dropout(p=drop_rate) - - def init_weights(self): - trunc_normal_(self.pos_embed, std=0.02) - - def resize_pos_embed(self, pos_embed, input_shape, mode='bilinear'): - """Resize pos_embed weights. - - Resize pos_embed using bilinear interpolate method. - - Args: - pos_embed (torch.Tensor): Position embedding weights. - input_shape (tuple): Tuple for (downsampled input image height, - downsampled input image width). - mode (str): Algorithm used for upsampling: - ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | - ``'trilinear'``. Default: ``'bilinear'``. - - Return: - torch.Tensor: The resized pos_embed of shape [B, L_new, C]. 
- """ - assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]' - pos_h, pos_w = self.pos_shape - pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):] - pos_embed_weight = pos_embed_weight.reshape( - 1, pos_h, pos_w, self.pos_dim).permute(0, 3, 1, 2).contiguous() - pos_embed_weight = F.interpolate( - pos_embed_weight, size=input_shape, mode=mode) - pos_embed_weight = torch.flatten(pos_embed_weight, - 2).transpose(1, 2).contiguous() - pos_embed = pos_embed_weight - - return pos_embed - - def forward(self, x, hw_shape, mode='bilinear'): - pos_embed = self.resize_pos_embed(self.pos_embed, hw_shape, mode) - return self.drop(x + pos_embed) - - -@BACKBONES.register_module() -class PyramidVisionTransformer(BaseModule): - """Pyramid Vision Transformer (PVT) - - Implementation of `Pyramid Vision Transformer: A Versatile Backbone for - Dense Prediction without Convolutions - `_. - - Args: - pretrain_img_size (int | tuple[int]): The size of input image when - pretrain. Defaults: 224. - in_channels (int): Number of input channels. Default: 3. - embed_dims (int): Embedding dimension. Default: 64. - num_stags (int): The num of stages. Default: 4. - num_layers (Sequence[int]): The layer number of each transformer encode - layer. Default: [3, 4, 6, 3]. - num_heads (Sequence[int]): The attention heads of each transformer - encode layer. Default: [1, 2, 5, 8]. - patch_sizes (Sequence[int]): The patch_size of each patch embedding. - Default: [4, 2, 2, 2]. - strides (Sequence[int]): The stride of each patch embedding. - Default: [4, 2, 2, 2]. - paddings (Sequence[int]): The padding of each patch embedding. - Default: [0, 0, 0, 0]. - sr_ratios (Sequence[int]): The spatial reduction rate of each - transformer encode layer. Default: [8, 4, 2, 1]. - out_indices (Sequence[int] | int): Output from which stages. - Default: (0, 1, 2, 3). - mlp_ratios (Sequence[int]): The ratio of the mlp hidden dim to the - embedding dim of each transformer encode layer. - Default: [8, 8, 4, 4]. - qkv_bias (bool): Enable bias for qkv if True. Default: True. - drop_rate (float): Probability of an element to be zeroed. - Default 0.0. - attn_drop_rate (float): The drop out rate for attention layer. - Default 0.0. - drop_path_rate (float): stochastic depth rate. Default 0.1. - use_abs_pos_embed (bool): If True, add absolute position embedding to - the patch embedding. Defaults: True. - use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. - Default: False. - act_cfg (dict): The activation config for FFNs. - Default: dict(type='GELU'). - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='LN'). - pretrained (str, optional): model pretrained path. Default: None. - convert_weights (bool): The flag indicates whether the - pre-trained model is from the original repo. We may need - to convert some keys to make it compatible. - Default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. 
- """ - - def __init__(self, - pretrain_img_size=224, - in_channels=3, - embed_dims=64, - num_stages=4, - num_layers=[3, 4, 6, 3], - num_heads=[1, 2, 5, 8], - patch_sizes=[4, 2, 2, 2], - strides=[4, 2, 2, 2], - paddings=[0, 0, 0, 0], - sr_ratios=[8, 4, 2, 1], - out_indices=(0, 1, 2, 3), - mlp_ratios=[8, 8, 4, 4], - qkv_bias=True, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.1, - use_abs_pos_embed=True, - norm_after_stage=False, - use_conv_ffn=False, - act_cfg=dict(type='GELU'), - norm_cfg=dict(type='LN', eps=1e-6), - pretrained=None, - convert_weights=True, - init_cfg=None): - super().__init__(init_cfg=init_cfg) - - self.convert_weights = convert_weights - if isinstance(pretrain_img_size, int): - pretrain_img_size = to_2tuple(pretrain_img_size) - elif isinstance(pretrain_img_size, tuple): - if len(pretrain_img_size) == 1: - pretrain_img_size = to_2tuple(pretrain_img_size[0]) - assert len(pretrain_img_size) == 2, \ - f'The size of image should have length 1 or 2, ' \ - f'but got {len(pretrain_img_size)}' - - assert not (init_cfg and pretrained), \ - 'init_cfg and pretrained cannot be setting at the same time' - if isinstance(pretrained, str): - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - elif pretrained is None: - self.init_cfg = init_cfg - else: - raise TypeError('pretrained must be a str or None') - - self.embed_dims = embed_dims - - self.num_stages = num_stages - self.num_layers = num_layers - self.num_heads = num_heads - self.patch_sizes = patch_sizes - self.strides = strides - self.sr_ratios = sr_ratios - assert num_stages == len(num_layers) == len(num_heads) \ - == len(patch_sizes) == len(strides) == len(sr_ratios) - - self.out_indices = out_indices - assert max(out_indices) < self.num_stages - self.pretrained = pretrained - - # transformer encoder - dpr = [ - x.item() - for x in torch.linspace(0, drop_path_rate, sum(num_layers)) - ] # stochastic num_layer decay rule - - cur = 0 - self.layers = ModuleList() - for i, num_layer in enumerate(num_layers): - embed_dims_i = embed_dims * num_heads[i] - patch_embed = PatchEmbed( - in_channels=in_channels, - embed_dims=embed_dims_i, - kernel_size=patch_sizes[i], - stride=strides[i], - padding=paddings[i], - bias=True, - norm_cfg=norm_cfg) - - layers = ModuleList() - if use_abs_pos_embed: - pos_shape = pretrain_img_size // np.prod(patch_sizes[:i + 1]) - pos_embed = AbsolutePositionEmbedding( - pos_shape=pos_shape, - pos_dim=embed_dims_i, - drop_rate=drop_rate) - layers.append(pos_embed) - layers.extend([ - PVTEncoderLayer( - embed_dims=embed_dims_i, - num_heads=num_heads[i], - feedforward_channels=mlp_ratios[i] * embed_dims_i, - drop_rate=drop_rate, - attn_drop_rate=attn_drop_rate, - drop_path_rate=dpr[cur + idx], - qkv_bias=qkv_bias, - act_cfg=act_cfg, - norm_cfg=norm_cfg, - sr_ratio=sr_ratios[i], - use_conv_ffn=use_conv_ffn) for idx in range(num_layer) - ]) - in_channels = embed_dims_i - # The ret[0] of build_norm_layer is norm name. - if norm_after_stage: - norm = build_norm_layer(norm_cfg, embed_dims_i)[1] - else: - norm = nn.Identity() - self.layers.append(ModuleList([patch_embed, layers, norm])) - cur += num_layer - - def init_weights(self): - logger = get_root_logger() - if self.init_cfg is None: - logger.warn(f'No pre-trained weights for ' - f'{self.__class__.__name__}, ' - f'training start from scratch') - for m in self.modules(): - if isinstance(m, nn.Linear): - trunc_normal_init(m, std=.02, bias=0.) 
- elif isinstance(m, nn.LayerNorm): - constant_init(m, 1.0) - elif isinstance(m, nn.Conv2d): - fan_out = m.kernel_size[0] * m.kernel_size[ - 1] * m.out_channels - fan_out //= m.groups - normal_init(m, 0, math.sqrt(2.0 / fan_out)) - elif isinstance(m, AbsolutePositionEmbedding): - m.init_weights() - else: - assert 'checkpoint' in self.init_cfg, f'Only support ' \ - f'specify `Pretrained` in ' \ - f'`init_cfg` in ' \ - f'{self.__class__.__name__} ' - checkpoint = _load_checkpoint( - self.init_cfg.checkpoint, logger=logger, map_location='cpu') - logger.warn(f'Load pre-trained model for ' - f'{self.__class__.__name__} from original repo') - if 'state_dict' in checkpoint: - state_dict = checkpoint['state_dict'] - elif 'model' in checkpoint: - state_dict = checkpoint['model'] - else: - state_dict = checkpoint - if self.convert_weights: - # Because pvt backbones are not supported by mmcls, - # so we need to convert pre-trained weights to match this - # implementation. - state_dict = pvt_convert(state_dict) - load_state_dict(self, state_dict, strict=False, logger=logger) - - def forward(self, x): - outs = [] - - for i, layer in enumerate(self.layers): - x, hw_shape = layer[0](x) - - for block in layer[1]: - x = block(x, hw_shape) - x = layer[2](x) - x = nlc_to_nchw(x, hw_shape) - if i in self.out_indices: - outs.append(x) - - return outs - - -@BACKBONES.register_module() -class PyramidVisionTransformerV2(PyramidVisionTransformer): - """Implementation of `PVTv2: Improved Baselines with Pyramid Vision - Transformer `_.""" - - def __init__(self, **kwargs): - super(PyramidVisionTransformerV2, self).__init__( - patch_sizes=[7, 3, 3, 3], - paddings=[3, 1, 1, 1], - use_abs_pos_embed=False, - norm_after_stage=True, - use_conv_ffn=True, - **kwargs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/regnet.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/regnet.py deleted file mode 100644 index 63adc3c1deb3b48193c243eb4ec5178a0b62103b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/regnet.py +++ /dev/null @@ -1,356 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import numpy as np -import torch.nn as nn -from mmcv.cnn import build_conv_layer, build_norm_layer - -from ..builder import BACKBONES -from .resnet import ResNet -from .resnext import Bottleneck - - -@BACKBONES.register_module() -class RegNet(ResNet): - """RegNet backbone. - - More details can be found in `paper `_ . - - Args: - arch (dict): The parameter of RegNets. - - - w0 (int): initial width - - wa (float): slope of width - - wm (float): quantization parameter to quantize the width - - depth (int): depth of the backbone - - group_w (int): width of group - - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck. - strides (Sequence[int]): Strides of the first block of each stage. - base_channels (int): Base channels after stem layer. - in_channels (int): Number of input image channels. Default: 3. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - frozen_stages (int): Stages to be frozen (all param fixed). -1 means - not freezing any parameters. - norm_cfg (dict): dictionary to construct and config norm layer. 
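# Illustrative sketch, not from the deleted files: when a `Pretrained`
# init_cfg is given, the backbone above loads the checkpoint, unwraps the
# usual 'state_dict' / 'model' containers, optionally remaps keys coming from
# the original upstream repo, and loads non-strictly. A plain-PyTorch version
# of that unwrapping; `convert_keys` is a hypothetical hook standing in for
# mmdet's pvt_convert:
import torch
import torch.nn as nn

def load_pretrained(model: nn.Module, ckpt_path: str, convert_keys=None):
    checkpoint = torch.load(ckpt_path, map_location='cpu')
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    elif 'model' in checkpoint:
        state_dict = checkpoint['model']
    else:
        state_dict = checkpoint
    if convert_keys is not None:
        state_dict = convert_keys(state_dict)
    # strict=False tolerates missing heads / renamed auxiliary keys
    missing, unexpected = model.load_state_dict(state_dict, strict=False)
    return missing, unexpected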
- norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - zero_init_residual (bool): whether to use zero init for last norm layer - in resblocks to let them behave as identity. - pretrained (str, optional): model pretrained path. Default: None - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - - Example: - >>> from mmdet.models import RegNet - >>> import torch - >>> self = RegNet( - arch=dict( - w0=88, - wa=26.31, - wm=2.25, - group_w=48, - depth=25, - bot_mul=1.0)) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - (1, 96, 8, 8) - (1, 192, 4, 4) - (1, 432, 2, 2) - (1, 1008, 1, 1) - """ - arch_settings = { - 'regnetx_400mf': - dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), - 'regnetx_800mf': - dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0), - 'regnetx_1.6gf': - dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0), - 'regnetx_3.2gf': - dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0), - 'regnetx_4.0gf': - dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0), - 'regnetx_6.4gf': - dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0), - 'regnetx_8.0gf': - dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0), - 'regnetx_12gf': - dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0), - } - - def __init__(self, - arch, - in_channels=3, - stem_channels=32, - base_channels=32, - strides=(2, 2, 2, 2), - dilations=(1, 1, 1, 1), - out_indices=(0, 1, 2, 3), - style='pytorch', - deep_stem=False, - avg_down=False, - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - dcn=None, - stage_with_dcn=(False, False, False, False), - plugins=None, - with_cp=False, - zero_init_residual=True, - pretrained=None, - init_cfg=None): - super(ResNet, self).__init__(init_cfg) - - # Generate RegNet parameters first - if isinstance(arch, str): - assert arch in self.arch_settings, \ - f'"arch": "{arch}" is not one of the' \ - ' arch_settings' - arch = self.arch_settings[arch] - elif not isinstance(arch, dict): - raise ValueError('Expect "arch" to be either a string ' - f'or a dict, got {type(arch)}') - - widths, num_stages = self.generate_regnet( - arch['w0'], - arch['wa'], - arch['wm'], - arch['depth'], - ) - # Convert to per stage format - stage_widths, stage_blocks = self.get_stages_from_blocks(widths) - # Generate group widths and bot muls - group_widths = [arch['group_w'] for _ in range(num_stages)] - self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)] - # Adjust the compatibility of stage_widths and group_widths - stage_widths, group_widths = self.adjust_width_group( - stage_widths, self.bottleneck_ratio, group_widths) - - # Group params by stage - self.stage_widths = stage_widths - self.group_widths = group_widths - self.depth = sum(stage_blocks) - self.stem_channels = stem_channels - self.base_channels = base_channels - self.num_stages = num_stages - assert num_stages >= 1 and num_stages <= 4 - self.strides = strides - self.dilations = dilations - assert len(strides) == len(dilations) == num_stages - self.out_indices = out_indices - assert 
max(out_indices) < num_stages - self.style = style - self.deep_stem = deep_stem - self.avg_down = avg_down - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.with_cp = with_cp - self.norm_eval = norm_eval - self.dcn = dcn - self.stage_with_dcn = stage_with_dcn - if dcn is not None: - assert len(stage_with_dcn) == num_stages - self.plugins = plugins - self.zero_init_residual = zero_init_residual - self.block = Bottleneck - expansion_bak = self.block.expansion - self.block.expansion = 1 - self.stage_blocks = stage_blocks[:num_stages] - - self._make_stem_layer(in_channels, stem_channels) - - block_init_cfg = None - assert not (init_cfg and pretrained), \ - 'init_cfg and pretrained cannot be specified at the same time' - if isinstance(pretrained, str): - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - elif pretrained is None: - if init_cfg is None: - self.init_cfg = [ - dict(type='Kaiming', layer='Conv2d'), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ] - if self.zero_init_residual: - block_init_cfg = dict( - type='Constant', val=0, override=dict(name='norm3')) - else: - raise TypeError('pretrained must be a str or None') - - self.inplanes = stem_channels - self.res_layers = [] - for i, num_blocks in enumerate(self.stage_blocks): - stride = self.strides[i] - dilation = self.dilations[i] - group_width = self.group_widths[i] - width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i])) - stage_groups = width // group_width - - dcn = self.dcn if self.stage_with_dcn[i] else None - if self.plugins is not None: - stage_plugins = self.make_stage_plugins(self.plugins, i) - else: - stage_plugins = None - - res_layer = self.make_res_layer( - block=self.block, - inplanes=self.inplanes, - planes=self.stage_widths[i], - num_blocks=num_blocks, - stride=stride, - dilation=dilation, - style=self.style, - avg_down=self.avg_down, - with_cp=self.with_cp, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - dcn=dcn, - plugins=stage_plugins, - groups=stage_groups, - base_width=group_width, - base_channels=self.stage_widths[i], - init_cfg=block_init_cfg) - self.inplanes = self.stage_widths[i] - layer_name = f'layer{i + 1}' - self.add_module(layer_name, res_layer) - self.res_layers.append(layer_name) - - self._freeze_stages() - - self.feat_dim = stage_widths[-1] - self.block.expansion = expansion_bak - - def _make_stem_layer(self, in_channels, base_channels): - self.conv1 = build_conv_layer( - self.conv_cfg, - in_channels, - base_channels, - kernel_size=3, - stride=2, - padding=1, - bias=False) - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, base_channels, postfix=1) - self.add_module(self.norm1_name, norm1) - self.relu = nn.ReLU(inplace=True) - - def generate_regnet(self, - initial_width, - width_slope, - width_parameter, - depth, - divisor=8): - """Generates per block width from RegNet parameters. - - Args: - initial_width ([int]): Initial width of the backbone - width_slope ([float]): Slope of the quantized linear function - width_parameter ([int]): Parameter used to quantize the width. - depth ([int]): Depth of the backbone. - divisor (int, optional): The divisor of channels. Defaults to 8. 
- - Returns: - list, int: return a list of widths of each stage and the number \ - of stages - """ - assert width_slope >= 0 - assert initial_width > 0 - assert width_parameter > 1 - assert initial_width % divisor == 0 - widths_cont = np.arange(depth) * width_slope + initial_width - ks = np.round( - np.log(widths_cont / initial_width) / np.log(width_parameter)) - widths = initial_width * np.power(width_parameter, ks) - widths = np.round(np.divide(widths, divisor)) * divisor - num_stages = len(np.unique(widths)) - widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() - return widths, num_stages - - @staticmethod - def quantize_float(number, divisor): - """Converts a float to closest non-zero int divisible by divisor. - - Args: - number (int): Original number to be quantized. - divisor (int): Divisor used to quantize the number. - - Returns: - int: quantized number that is divisible by devisor. - """ - return int(round(number / divisor) * divisor) - - def adjust_width_group(self, widths, bottleneck_ratio, groups): - """Adjusts the compatibility of widths and groups. - - Args: - widths (list[int]): Width of each stage. - bottleneck_ratio (float): Bottleneck ratio. - groups (int): number of groups in each stage - - Returns: - tuple(list): The adjusted widths and groups of each stage. - """ - bottleneck_width = [ - int(w * b) for w, b in zip(widths, bottleneck_ratio) - ] - groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)] - bottleneck_width = [ - self.quantize_float(w_bot, g) - for w_bot, g in zip(bottleneck_width, groups) - ] - widths = [ - int(w_bot / b) - for w_bot, b in zip(bottleneck_width, bottleneck_ratio) - ] - return widths, groups - - def get_stages_from_blocks(self, widths): - """Gets widths/stage_blocks of network at each stage. - - Args: - widths (list[int]): Width in each stage. - - Returns: - tuple(list): width and depth of each stage - """ - width_diff = [ - width != width_prev - for width, width_prev in zip(widths + [0], [0] + widths) - ] - stage_widths = [ - width for width, diff in zip(widths, width_diff[:-1]) if diff - ] - stage_blocks = np.diff([ - depth for depth, diff in zip(range(len(width_diff)), width_diff) - if diff - ]).tolist() - return stage_widths, stage_blocks - - def forward(self, x): - """Forward function.""" - x = self.conv1(x) - x = self.norm1(x) - x = self.relu(x) - - outs = [] - for i, layer_name in enumerate(self.res_layers): - res_layer = getattr(self, layer_name) - x = res_layer(x) - if i in self.out_indices: - outs.append(x) - return tuple(outs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/res2net.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/res2net.py deleted file mode 100644 index 96afb2fb2892f6e3973d48509071671bc8a5b7e0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/res2net.py +++ /dev/null @@ -1,327 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import torch -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import build_conv_layer, build_norm_layer -from mmcv.runner import Sequential - -from ..builder import BACKBONES -from .resnet import Bottleneck as _Bottleneck -from .resnet import ResNet - - -class Bottle2neck(_Bottleneck): - expansion = 4 - - def __init__(self, - inplanes, - planes, - scales=4, - base_width=26, - base_channels=64, - stage_type='normal', - **kwargs): - """Bottle2neck block for Res2Net. 
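# Illustrative sketch, not from the deleted regnet.py: the quantized-linear
# width rule implemented above, worked stand-alone in NumPy for the
# regnetx_3.2gf setting (w0=88, wa=26.31, wm=2.25, depth=25). It yields one
# width per block; runs of equal widths form the stages, and the widths are
# afterwards snapped to multiples of the group width by adjust_width_group
# (giving 96/192/432/1008 for this setting, as in the docstring example).
import numpy as np

def regnet_widths(w0, wa, wm, depth, divisor=8):
    widths_cont = np.arange(depth) * wa + w0               # linear widths
    ks = np.round(np.log(widths_cont / w0) / np.log(wm))   # quantized exponents
    widths = w0 * np.power(wm, ks)                         # snap to w0 * wm**k
    widths = (np.round(widths / divisor) * divisor).astype(int)
    return widths.tolist(), int(len(np.unique(widths)))

widths, num_stages = regnet_widths(88, 26.31, 2.25, 25)
print(num_stages)           # 4
print(sorted(set(widths)))  # [88, 200, 448, 1000] before group-width adjustment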
- - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer. - """ - super(Bottle2neck, self).__init__(inplanes, planes, **kwargs) - assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.' - width = int(math.floor(self.planes * (base_width / base_channels))) - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, width * scales, postfix=1) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.planes * self.expansion, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.inplanes, - width * scales, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - - if stage_type == 'stage' and self.conv2_stride != 1: - self.pool = nn.AvgPool2d( - kernel_size=3, stride=self.conv2_stride, padding=1) - convs = [] - bns = [] - - fallback_on_stride = False - if self.with_dcn: - fallback_on_stride = self.dcn.pop('fallback_on_stride', False) - if not self.with_dcn or fallback_on_stride: - for i in range(scales - 1): - convs.append( - build_conv_layer( - self.conv_cfg, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - bias=False)) - bns.append( - build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - else: - assert self.conv_cfg is None, 'conv_cfg must be None for DCN' - for i in range(scales - 1): - convs.append( - build_conv_layer( - self.dcn, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - bias=False)) - bns.append( - build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - - self.conv3 = build_conv_layer( - self.conv_cfg, - width * scales, - self.planes * self.expansion, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - self.stage_type = stage_type - self.scales = scales - self.width = width - delattr(self, 'conv2') - delattr(self, self.norm2_name) - - def forward(self, x): - """Forward function.""" - - def _inner_forward(x): - identity = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv1_plugin_names) - - spx = torch.split(out, self.width, 1) - sp = self.convs[0](spx[0].contiguous()) - sp = self.relu(self.bns[0](sp)) - out = sp - for i in range(1, self.scales - 1): - if self.stage_type == 'stage': - sp = spx[i] - else: - sp = sp + spx[i] - sp = self.convs[i](sp.contiguous()) - sp = self.relu(self.bns[i](sp)) - out = torch.cat((out, sp), 1) - - if self.stage_type == 'normal' or self.conv2_stride == 1: - out = torch.cat((out, spx[self.scales - 1]), 1) - elif self.stage_type == 'stage': - out = torch.cat((out, self.pool(spx[self.scales - 1])), 1) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv2_plugin_names) - - out = self.conv3(out) - out = self.norm3(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv3_plugin_names) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -class Res2Layer(Sequential): - """Res2Layer to build Res2Net style backbone. 
- - Args: - block (nn.Module): block used to build ResLayer. - inplanes (int): inplanes of block. - planes (int): planes of block. - num_blocks (int): number of blocks. - stride (int): stride of the first block. Default: 1 - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottle2neck. Default: False - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - scales (int): Scales used in Res2Net. Default: 4 - base_width (int): Basic width of each scale. Default: 26 - """ - - def __init__(self, - block, - inplanes, - planes, - num_blocks, - stride=1, - avg_down=True, - conv_cfg=None, - norm_cfg=dict(type='BN'), - scales=4, - base_width=26, - **kwargs): - self.block = block - - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.AvgPool2d( - kernel_size=stride, - stride=stride, - ceil_mode=True, - count_include_pad=False), - build_conv_layer( - conv_cfg, - inplanes, - planes * block.expansion, - kernel_size=1, - stride=1, - bias=False), - build_norm_layer(norm_cfg, planes * block.expansion)[1], - ) - - layers = [] - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - scales=scales, - base_width=base_width, - stage_type='stage', - **kwargs)) - inplanes = planes * block.expansion - for i in range(1, num_blocks): - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - scales=scales, - base_width=base_width, - **kwargs)) - super(Res2Layer, self).__init__(*layers) - - -@BACKBONES.register_module() -class Res2Net(ResNet): - """Res2Net backbone. - - Args: - scales (int): Scales used in Res2Net. Default: 4 - base_width (int): Basic width of each scale. Default: 26 - depth (int): Depth of res2net, from {50, 101, 152}. - in_channels (int): Number of input image channels. Default: 3. - num_stages (int): Res2net stages. Default: 4. - strides (Sequence[int]): Strides of the first block of each stage. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottle2neck. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. - norm_cfg (dict): Dictionary to construct and config norm layer. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - plugins (list[dict]): List of plugins for stages, each dict contains: - - - cfg (dict, required): Cfg dict to build plugin. - - position (str, required): Position inside block to insert - plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. - - stages (tuple[bool], optional): Stages to apply plugin, length - should be same as 'num_stages'. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. 
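# Illustrative sketch, not from the deleted res2net.py: when the shapes
# change, Res2Layer builds the shortcut as an AvgPool (carrying the spatial
# stride) followed by a 1x1 conv and a norm, instead of a strided 1x1 conv,
# so no activations are simply skipped on the identity path. A plain-PyTorch
# equivalent with BatchNorm:
import torch.nn as nn

def avg_down_shortcut(in_ch, out_ch, stride):
    return nn.Sequential(
        nn.AvgPool2d(kernel_size=stride, stride=stride,
                     ceil_mode=True, count_include_pad=False),
        nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, bias=False),
        nn.BatchNorm2d(out_ch),
    )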
- zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. - pretrained (str, optional): model pretrained path. Default: None - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - - Example: - >>> from mmdet.models import Res2Net - >>> import torch - >>> self = Res2Net(depth=50, scales=4, base_width=26) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - (1, 256, 8, 8) - (1, 512, 4, 4) - (1, 1024, 2, 2) - (1, 2048, 1, 1) - """ - - arch_settings = { - 50: (Bottle2neck, (3, 4, 6, 3)), - 101: (Bottle2neck, (3, 4, 23, 3)), - 152: (Bottle2neck, (3, 8, 36, 3)) - } - - def __init__(self, - scales=4, - base_width=26, - style='pytorch', - deep_stem=True, - avg_down=True, - pretrained=None, - init_cfg=None, - **kwargs): - self.scales = scales - self.base_width = base_width - super(Res2Net, self).__init__( - style='pytorch', - deep_stem=True, - avg_down=True, - pretrained=pretrained, - init_cfg=init_cfg, - **kwargs) - - def make_res_layer(self, **kwargs): - return Res2Layer( - scales=self.scales, - base_width=self.base_width, - base_channels=self.base_channels, - **kwargs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/resnest.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/resnest.py deleted file mode 100644 index 69629b96dfd44e4cbe53701fb14fb83fda4b6440..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/resnest.py +++ /dev/null @@ -1,322 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as cp -from mmcv.cnn import build_conv_layer, build_norm_layer -from mmcv.runner import BaseModule - -from ..builder import BACKBONES -from ..utils import ResLayer -from .resnet import Bottleneck as _Bottleneck -from .resnet import ResNetV1d - - -class RSoftmax(nn.Module): - """Radix Softmax module in ``SplitAttentionConv2d``. - - Args: - radix (int): Radix of input. - groups (int): Groups of input. - """ - - def __init__(self, radix, groups): - super().__init__() - self.radix = radix - self.groups = groups - - def forward(self, x): - batch = x.size(0) - if self.radix > 1: - x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) - x = F.softmax(x, dim=1) - x = x.reshape(batch, -1) - else: - x = torch.sigmoid(x) - return x - - -class SplitAttentionConv2d(BaseModule): - """Split-Attention Conv2d in ResNeSt. - - Args: - in_channels (int): Number of channels in the input feature map. - channels (int): Number of intermediate channels. - kernel_size (int | tuple[int]): Size of the convolution kernel. - stride (int | tuple[int]): Stride of the convolution. - padding (int | tuple[int]): Zero-padding added to both sides of - dilation (int | tuple[int]): Spacing between kernel elements. - groups (int): Number of blocked connections from input channels to - output channels. - groups (int): Same as nn.Conv2d. - radix (int): Radix of SpltAtConv2d. Default: 2 - reduction_factor (int): Reduction factor of inter_channels. Default: 4. - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. Default: None. - dcn (dict): Config dict for DCN. Default: None. 
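# Illustrative sketch, not from the deleted resnest.py: the radix softmax
# above normalises the attention logits across the `radix` splits (per group
# and per channel) rather than across channels; with radix == 1 it reduces to
# a plain sigmoid gate. Stand-alone version:
import torch
import torch.nn.functional as F

def radix_softmax(x: torch.Tensor, radix: int, groups: int) -> torch.Tensor:
    """x: (B, groups * radix * channels_per_group, 1, 1) attention logits."""
    batch = x.size(0)
    if radix > 1:
        x = x.view(batch, groups, radix, -1).transpose(1, 2)
        x = F.softmax(x, dim=1)          # normalise over the radix dimension
        return x.reshape(batch, -1)
    return torch.sigmoid(x)

logits = torch.randn(2, 2 * 64, 1, 1)                # radix=2, groups=1
weights = radix_softmax(logits, radix=2, groups=1)   # shape (2, 128)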
- init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - radix=2, - reduction_factor=4, - conv_cfg=None, - norm_cfg=dict(type='BN'), - dcn=None, - init_cfg=None): - super(SplitAttentionConv2d, self).__init__(init_cfg) - inter_channels = max(in_channels * radix // reduction_factor, 32) - self.radix = radix - self.groups = groups - self.channels = channels - self.with_dcn = dcn is not None - self.dcn = dcn - fallback_on_stride = False - if self.with_dcn: - fallback_on_stride = self.dcn.pop('fallback_on_stride', False) - if self.with_dcn and not fallback_on_stride: - assert conv_cfg is None, 'conv_cfg must be None for DCN' - conv_cfg = dcn - self.conv = build_conv_layer( - conv_cfg, - in_channels, - channels * radix, - kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups * radix, - bias=False) - # To be consistent with original implementation, starting from 0 - self.norm0_name, norm0 = build_norm_layer( - norm_cfg, channels * radix, postfix=0) - self.add_module(self.norm0_name, norm0) - self.relu = nn.ReLU(inplace=True) - self.fc1 = build_conv_layer( - None, channels, inter_channels, 1, groups=self.groups) - self.norm1_name, norm1 = build_norm_layer( - norm_cfg, inter_channels, postfix=1) - self.add_module(self.norm1_name, norm1) - self.fc2 = build_conv_layer( - None, inter_channels, channels * radix, 1, groups=self.groups) - self.rsoftmax = RSoftmax(radix, groups) - - @property - def norm0(self): - """nn.Module: the normalization layer named "norm0" """ - return getattr(self, self.norm0_name) - - @property - def norm1(self): - """nn.Module: the normalization layer named "norm1" """ - return getattr(self, self.norm1_name) - - def forward(self, x): - x = self.conv(x) - x = self.norm0(x) - x = self.relu(x) - - batch, rchannel = x.shape[:2] - batch = x.size(0) - if self.radix > 1: - splits = x.view(batch, self.radix, -1, *x.shape[2:]) - gap = splits.sum(dim=1) - else: - gap = x - gap = F.adaptive_avg_pool2d(gap, 1) - gap = self.fc1(gap) - - gap = self.norm1(gap) - gap = self.relu(gap) - - atten = self.fc2(gap) - atten = self.rsoftmax(atten).view(batch, -1, 1, 1) - - if self.radix > 1: - attens = atten.view(batch, self.radix, -1, *atten.shape[2:]) - out = torch.sum(attens * splits, dim=1) - else: - out = atten * x - return out.contiguous() - - -class Bottleneck(_Bottleneck): - """Bottleneck block for ResNeSt. - - Args: - inplane (int): Input planes of this block. - planes (int): Middle planes of this block. - groups (int): Groups of conv2. - base_width (int): Base of width in terms of base channels. Default: 4. - base_channels (int): Base of channels for calculating width. - Default: 64. - radix (int): Radix of SpltAtConv2d. Default: 2 - reduction_factor (int): Reduction factor of inter_channels in - SplitAttentionConv2d. Default: 4. - avg_down_stride (bool): Whether to use average pool for stride in - Bottleneck. Default: True. - kwargs (dict): Key word arguments for base class. 
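# Illustrative sketch, not from the deleted resnest.py: split attention runs
# a grouped conv producing `radix` feature splits, pools their sum, maps the
# pooled vector through two 1x1 convs into per-split attention weights
# (softmaxed over the radix dimension), and returns the attention-weighted
# sum of the splits. A compact module with the cardinality fixed to 1 and
# illustrative names:
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinySplitAttention(nn.Module):
    def __init__(self, channels, radix=2, reduction_factor=4):
        super().__init__()
        inter = max(channels * radix // reduction_factor, 32)
        self.radix = radix
        self.conv = nn.Sequential(
            nn.Conv2d(channels, channels * radix, 3, padding=1,
                      groups=radix, bias=False),
            nn.BatchNorm2d(channels * radix), nn.ReLU(inplace=True))
        self.fc1 = nn.Sequential(nn.Conv2d(channels, inter, 1),
                                 nn.BatchNorm2d(inter), nn.ReLU(inplace=True))
        self.fc2 = nn.Conv2d(inter, channels * radix, 1)

    def forward(self, x):
        b = x.size(0)
        x = self.conv(x)                                   # (B, C*radix, H, W)
        splits = x.view(b, self.radix, -1, *x.shape[2:])   # (B, radix, C, H, W)
        gap = F.adaptive_avg_pool2d(splits.sum(dim=1), 1)  # (B, C, 1, 1)
        atten = self.fc2(self.fc1(gap))                    # (B, C*radix, 1, 1)
        atten = F.softmax(atten.reshape(b, self.radix, -1), dim=1)
        atten = atten.reshape(b, self.radix, -1, 1, 1)
        return (atten * splits).sum(dim=1).contiguous()    # (B, C, H, W)

y = TinySplitAttention(64)(torch.randn(2, 64, 8, 8))       # (2, 64, 8, 8)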
- """ - expansion = 4 - - def __init__(self, - inplanes, - planes, - groups=1, - base_width=4, - base_channels=64, - radix=2, - reduction_factor=4, - avg_down_stride=True, - **kwargs): - """Bottleneck block for ResNeSt.""" - super(Bottleneck, self).__init__(inplanes, planes, **kwargs) - - if groups == 1: - width = self.planes - else: - width = math.floor(self.planes * - (base_width / base_channels)) * groups - - self.avg_down_stride = avg_down_stride and self.conv2_stride > 1 - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, width, postfix=1) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.planes * self.expansion, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.inplanes, - width, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - self.with_modulated_dcn = False - self.conv2 = SplitAttentionConv2d( - width, - width, - kernel_size=3, - stride=1 if self.avg_down_stride else self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - radix=radix, - reduction_factor=reduction_factor, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - dcn=self.dcn) - delattr(self, self.norm2_name) - - if self.avg_down_stride: - self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1) - - self.conv3 = build_conv_layer( - self.conv_cfg, - width, - self.planes * self.expansion, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - def forward(self, x): - - def _inner_forward(x): - identity = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv1_plugin_names) - - out = self.conv2(out) - - if self.avg_down_stride: - out = self.avd_layer(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv2_plugin_names) - - out = self.conv3(out) - out = self.norm3(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv3_plugin_names) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -@BACKBONES.register_module() -class ResNeSt(ResNetV1d): - """ResNeSt backbone. - - Args: - groups (int): Number of groups of Bottleneck. Default: 1 - base_width (int): Base width of Bottleneck. Default: 4 - radix (int): Radix of SplitAttentionConv2d. Default: 2 - reduction_factor (int): Reduction factor of inter_channels in - SplitAttentionConv2d. Default: 4. - avg_down_stride (bool): Whether to use average pool for stride in - Bottleneck. Default: True. - kwargs (dict): Keyword arguments for ResNet. 
- """ - - arch_settings = { - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)), - 200: (Bottleneck, (3, 24, 36, 3)) - } - - def __init__(self, - groups=1, - base_width=4, - radix=2, - reduction_factor=4, - avg_down_stride=True, - **kwargs): - self.groups = groups - self.base_width = base_width - self.radix = radix - self.reduction_factor = reduction_factor - self.avg_down_stride = avg_down_stride - super(ResNeSt, self).__init__(**kwargs) - - def make_res_layer(self, **kwargs): - """Pack all blocks in a stage into a ``ResLayer``.""" - return ResLayer( - groups=self.groups, - base_width=self.base_width, - base_channels=self.base_channels, - radix=self.radix, - reduction_factor=self.reduction_factor, - avg_down_stride=self.avg_down_stride, - **kwargs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/resnet.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/resnet.py deleted file mode 100644 index 1eaaae67c9dfab9458ce60d7ca1d7cbfe651a664..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/resnet.py +++ /dev/null @@ -1,672 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer -from mmcv.runner import BaseModule -from torch.nn.modules.batchnorm import _BatchNorm - -from ..builder import BACKBONES -from ..utils import ResLayer - - -class BasicBlock(BaseModule): - expansion = 1 - - def __init__(self, - inplanes, - planes, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - dcn=None, - plugins=None, - init_cfg=None): - super(BasicBlock, self).__init__(init_cfg) - assert dcn is None, 'Not implemented yet.' - assert plugins is None, 'Not implemented yet.' 
- - self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) - self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) - - self.conv1 = build_conv_layer( - conv_cfg, - inplanes, - planes, - 3, - stride=stride, - padding=dilation, - dilation=dilation, - bias=False) - self.add_module(self.norm1_name, norm1) - self.conv2 = build_conv_layer( - conv_cfg, planes, planes, 3, padding=1, bias=False) - self.add_module(self.norm2_name, norm2) - - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - self.dilation = dilation - self.with_cp = with_cp - - @property - def norm1(self): - """nn.Module: normalization layer after the first convolution layer""" - return getattr(self, self.norm1_name) - - @property - def norm2(self): - """nn.Module: normalization layer after the second convolution layer""" - return getattr(self, self.norm2_name) - - def forward(self, x): - """Forward function.""" - - def _inner_forward(x): - identity = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.norm2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -class Bottleneck(BaseModule): - expansion = 4 - - def __init__(self, - inplanes, - planes, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - dcn=None, - plugins=None, - init_cfg=None): - """Bottleneck block for ResNet. - - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer. 
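# Illustrative sketch, not from the deleted resnet.py: every residual block
# above wraps its body in torch.utils.checkpoint when `with_cp=True`, trading
# recomputation during the backward pass for activation memory. The pattern
# in isolation:
import torch
import torch.utils.checkpoint as cp

def block_forward(block_body, x, with_cp=False):
    """block_body: a function computing the residual branch plus identity."""
    if with_cp and x.requires_grad:
        # activations inside block_body are recomputed during backward
        out = cp.checkpoint(block_body, x)
    else:
        out = block_body(x)
    return torch.relu(out)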
- """ - super(Bottleneck, self).__init__(init_cfg) - assert style in ['pytorch', 'caffe'] - assert dcn is None or isinstance(dcn, dict) - assert plugins is None or isinstance(plugins, list) - if plugins is not None: - allowed_position = ['after_conv1', 'after_conv2', 'after_conv3'] - assert all(p['position'] in allowed_position for p in plugins) - - self.inplanes = inplanes - self.planes = planes - self.stride = stride - self.dilation = dilation - self.style = style - self.with_cp = with_cp - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.dcn = dcn - self.with_dcn = dcn is not None - self.plugins = plugins - self.with_plugins = plugins is not None - - if self.with_plugins: - # collect plugins for conv1/conv2/conv3 - self.after_conv1_plugins = [ - plugin['cfg'] for plugin in plugins - if plugin['position'] == 'after_conv1' - ] - self.after_conv2_plugins = [ - plugin['cfg'] for plugin in plugins - if plugin['position'] == 'after_conv2' - ] - self.after_conv3_plugins = [ - plugin['cfg'] for plugin in plugins - if plugin['position'] == 'after_conv3' - ] - - if self.style == 'pytorch': - self.conv1_stride = 1 - self.conv2_stride = stride - else: - self.conv1_stride = stride - self.conv2_stride = 1 - - self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) - self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) - self.norm3_name, norm3 = build_norm_layer( - norm_cfg, planes * self.expansion, postfix=3) - - self.conv1 = build_conv_layer( - conv_cfg, - inplanes, - planes, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - fallback_on_stride = False - if self.with_dcn: - fallback_on_stride = dcn.pop('fallback_on_stride', False) - if not self.with_dcn or fallback_on_stride: - self.conv2 = build_conv_layer( - conv_cfg, - planes, - planes, - kernel_size=3, - stride=self.conv2_stride, - padding=dilation, - dilation=dilation, - bias=False) - else: - assert self.conv_cfg is None, 'conv_cfg must be None for DCN' - self.conv2 = build_conv_layer( - dcn, - planes, - planes, - kernel_size=3, - stride=self.conv2_stride, - padding=dilation, - dilation=dilation, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.conv3 = build_conv_layer( - conv_cfg, - planes, - planes * self.expansion, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - - if self.with_plugins: - self.after_conv1_plugin_names = self.make_block_plugins( - planes, self.after_conv1_plugins) - self.after_conv2_plugin_names = self.make_block_plugins( - planes, self.after_conv2_plugins) - self.after_conv3_plugin_names = self.make_block_plugins( - planes * self.expansion, self.after_conv3_plugins) - - def make_block_plugins(self, in_channels, plugins): - """make plugins for block. - - Args: - in_channels (int): Input channels of plugin. - plugins (list[dict]): List of plugins cfg to build. - - Returns: - list[str]: List of the names of plugin. 
- """ - assert isinstance(plugins, list) - plugin_names = [] - for plugin in plugins: - plugin = plugin.copy() - name, layer = build_plugin_layer( - plugin, - in_channels=in_channels, - postfix=plugin.pop('postfix', '')) - assert not hasattr(self, name), f'duplicate plugin {name}' - self.add_module(name, layer) - plugin_names.append(name) - return plugin_names - - def forward_plugin(self, x, plugin_names): - out = x - for name in plugin_names: - out = getattr(self, name)(out) - return out - - @property - def norm1(self): - """nn.Module: normalization layer after the first convolution layer""" - return getattr(self, self.norm1_name) - - @property - def norm2(self): - """nn.Module: normalization layer after the second convolution layer""" - return getattr(self, self.norm2_name) - - @property - def norm3(self): - """nn.Module: normalization layer after the third convolution layer""" - return getattr(self, self.norm3_name) - - def forward(self, x): - """Forward function.""" - - def _inner_forward(x): - identity = x - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv1_plugin_names) - - out = self.conv2(out) - out = self.norm2(out) - out = self.relu(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv2_plugin_names) - - out = self.conv3(out) - out = self.norm3(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv3_plugin_names) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -@BACKBONES.register_module() -class ResNet(BaseModule): - """ResNet backbone. - - Args: - depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. - stem_channels (int | None): Number of stem channels. If not specified, - it will be the same as `base_channels`. Default: None. - base_channels (int): Number of base channels of res layer. Default: 64. - in_channels (int): Number of input image channels. Default: 3. - num_stages (int): Resnet stages. Default: 4. - strides (Sequence[int]): Strides of the first block of each stage. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. - norm_cfg (dict): Dictionary to construct and config norm layer. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - plugins (list[dict]): List of plugins for stages, each dict contains: - - - cfg (dict, required): Cfg dict to build plugin. - - position (str, required): Position inside block to insert - plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. - - stages (tuple[bool], optional): Stages to apply plugin, length - should be same as 'num_stages'. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. 
- zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. - pretrained (str, optional): model pretrained path. Default: None - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - - Example: - >>> from mmdet.models import ResNet - >>> import torch - >>> self = ResNet(depth=18) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - (1, 64, 8, 8) - (1, 128, 4, 4) - (1, 256, 2, 2) - (1, 512, 1, 1) - """ - - arch_settings = { - 18: (BasicBlock, (2, 2, 2, 2)), - 34: (BasicBlock, (3, 4, 6, 3)), - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, - depth, - in_channels=3, - stem_channels=None, - base_channels=64, - num_stages=4, - strides=(1, 2, 2, 2), - dilations=(1, 1, 1, 1), - out_indices=(0, 1, 2, 3), - style='pytorch', - deep_stem=False, - avg_down=False, - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - dcn=None, - stage_with_dcn=(False, False, False, False), - plugins=None, - with_cp=False, - zero_init_residual=True, - pretrained=None, - init_cfg=None): - super(ResNet, self).__init__(init_cfg) - self.zero_init_residual = zero_init_residual - if depth not in self.arch_settings: - raise KeyError(f'invalid depth {depth} for resnet') - - block_init_cfg = None - assert not (init_cfg and pretrained), \ - 'init_cfg and pretrained cannot be specified at the same time' - if isinstance(pretrained, str): - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - elif pretrained is None: - if init_cfg is None: - self.init_cfg = [ - dict(type='Kaiming', layer='Conv2d'), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ] - block = self.arch_settings[depth][0] - if self.zero_init_residual: - if block is BasicBlock: - block_init_cfg = dict( - type='Constant', - val=0, - override=dict(name='norm2')) - elif block is Bottleneck: - block_init_cfg = dict( - type='Constant', - val=0, - override=dict(name='norm3')) - else: - raise TypeError('pretrained must be a str or None') - - self.depth = depth - if stem_channels is None: - stem_channels = base_channels - self.stem_channels = stem_channels - self.base_channels = base_channels - self.num_stages = num_stages - assert num_stages >= 1 and num_stages <= 4 - self.strides = strides - self.dilations = dilations - assert len(strides) == len(dilations) == num_stages - self.out_indices = out_indices - assert max(out_indices) < num_stages - self.style = style - self.deep_stem = deep_stem - self.avg_down = avg_down - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.with_cp = with_cp - self.norm_eval = norm_eval - self.dcn = dcn - self.stage_with_dcn = stage_with_dcn - if dcn is not None: - assert len(stage_with_dcn) == num_stages - self.plugins = plugins - self.block, stage_blocks = self.arch_settings[depth] - self.stage_blocks = stage_blocks[:num_stages] - self.inplanes = stem_channels - - self._make_stem_layer(in_channels, stem_channels) - - self.res_layers = [] - for i, num_blocks in enumerate(self.stage_blocks): - stride = strides[i] - dilation = dilations[i] - dcn = self.dcn if self.stage_with_dcn[i] else None - if plugins is not None: - 
stage_plugins = self.make_stage_plugins(plugins, i) - else: - stage_plugins = None - planes = base_channels * 2**i - res_layer = self.make_res_layer( - block=self.block, - inplanes=self.inplanes, - planes=planes, - num_blocks=num_blocks, - stride=stride, - dilation=dilation, - style=self.style, - avg_down=self.avg_down, - with_cp=with_cp, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - dcn=dcn, - plugins=stage_plugins, - init_cfg=block_init_cfg) - self.inplanes = planes * self.block.expansion - layer_name = f'layer{i + 1}' - self.add_module(layer_name, res_layer) - self.res_layers.append(layer_name) - - self._freeze_stages() - - self.feat_dim = self.block.expansion * base_channels * 2**( - len(self.stage_blocks) - 1) - - def make_stage_plugins(self, plugins, stage_idx): - """Make plugins for ResNet ``stage_idx`` th stage. - - Currently we support to insert ``context_block``, - ``empirical_attention_block``, ``nonlocal_block`` into the backbone - like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of - Bottleneck. - - An example of plugins format could be: - - Examples: - >>> plugins=[ - ... dict(cfg=dict(type='xxx', arg1='xxx'), - ... stages=(False, True, True, True), - ... position='after_conv2'), - ... dict(cfg=dict(type='yyy'), - ... stages=(True, True, True, True), - ... position='after_conv3'), - ... dict(cfg=dict(type='zzz', postfix='1'), - ... stages=(True, True, True, True), - ... position='after_conv3'), - ... dict(cfg=dict(type='zzz', postfix='2'), - ... stages=(True, True, True, True), - ... position='after_conv3') - ... ] - >>> self = ResNet(depth=18) - >>> stage_plugins = self.make_stage_plugins(plugins, 0) - >>> assert len(stage_plugins) == 3 - - Suppose ``stage_idx=0``, the structure of blocks in the stage would be: - - .. code-block:: none - - conv1-> conv2->conv3->yyy->zzz1->zzz2 - - Suppose 'stage_idx=1', the structure of blocks in the stage would be: - - .. code-block:: none - - conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2 - - If stages is missing, the plugin would be applied to all stages. - - Args: - plugins (list[dict]): List of plugins cfg to build. The postfix is - required if multiple same type plugins are inserted. 
- stage_idx (int): Index of stage to build - - Returns: - list[dict]: Plugins for current stage - """ - stage_plugins = [] - for plugin in plugins: - plugin = plugin.copy() - stages = plugin.pop('stages', None) - assert stages is None or len(stages) == self.num_stages - # whether to insert plugin into current stage - if stages is None or stages[stage_idx]: - stage_plugins.append(plugin) - - return stage_plugins - - def make_res_layer(self, **kwargs): - """Pack all blocks in a stage into a ``ResLayer``.""" - return ResLayer(**kwargs) - - @property - def norm1(self): - """nn.Module: the normalization layer named "norm1" """ - return getattr(self, self.norm1_name) - - def _make_stem_layer(self, in_channels, stem_channels): - if self.deep_stem: - self.stem = nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels, - stem_channels // 2, - kernel_size=3, - stride=2, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, stem_channels // 2)[1], - nn.ReLU(inplace=True), - build_conv_layer( - self.conv_cfg, - stem_channels // 2, - stem_channels // 2, - kernel_size=3, - stride=1, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, stem_channels // 2)[1], - nn.ReLU(inplace=True), - build_conv_layer( - self.conv_cfg, - stem_channels // 2, - stem_channels, - kernel_size=3, - stride=1, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, stem_channels)[1], - nn.ReLU(inplace=True)) - else: - self.conv1 = build_conv_layer( - self.conv_cfg, - in_channels, - stem_channels, - kernel_size=7, - stride=2, - padding=3, - bias=False) - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, stem_channels, postfix=1) - self.add_module(self.norm1_name, norm1) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - def _freeze_stages(self): - if self.frozen_stages >= 0: - if self.deep_stem: - self.stem.eval() - for param in self.stem.parameters(): - param.requires_grad = False - else: - self.norm1.eval() - for m in [self.conv1, self.norm1]: - for param in m.parameters(): - param.requires_grad = False - - for i in range(1, self.frozen_stages + 1): - m = getattr(self, f'layer{i}') - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def forward(self, x): - """Forward function.""" - if self.deep_stem: - x = self.stem(x) - else: - x = self.conv1(x) - x = self.norm1(x) - x = self.relu(x) - x = self.maxpool(x) - outs = [] - for i, layer_name in enumerate(self.res_layers): - res_layer = getattr(self, layer_name) - x = res_layer(x) - if i in self.out_indices: - outs.append(x) - return tuple(outs) - - def train(self, mode=True): - """Convert the model into training mode while keep normalization layer - freezed.""" - super(ResNet, self).train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - # trick: eval have effect on BatchNorm only - if isinstance(m, _BatchNorm): - m.eval() - - -@BACKBONES.register_module() -class ResNetV1d(ResNet): - r"""ResNetV1d variant described in `Bag of Tricks - `_. - - Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in - the input stem with three 3x3 convs. And in the downsampling block, a 2x2 - avg_pool with stride 2 is added before conv, whose stride is changed to 1. 
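    Example (an illustrative sketch; the printed shapes assume ``depth=50``
    and a 32x32 input, mirroring the ``ResNet`` example above):

        >>> from mmdet.models import ResNetV1d
        >>> import torch
        >>> self = ResNetV1d(depth=50)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 256, 8, 8)
        (1, 512, 4, 4)
        (1, 1024, 2, 2)
        (1, 2048, 1, 1)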
- """ - - def __init__(self, **kwargs): - super(ResNetV1d, self).__init__( - deep_stem=True, avg_down=True, **kwargs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/resnext.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/resnext.py deleted file mode 100644 index 8675d7c1149a321cbbba45fa93ea3cc3b79d0bd1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/resnext.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -from mmcv.cnn import build_conv_layer, build_norm_layer - -from ..builder import BACKBONES -from ..utils import ResLayer -from .resnet import Bottleneck as _Bottleneck -from .resnet import ResNet - - -class Bottleneck(_Bottleneck): - expansion = 4 - - def __init__(self, - inplanes, - planes, - groups=1, - base_width=4, - base_channels=64, - **kwargs): - """Bottleneck block for ResNeXt. - - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer. - """ - super(Bottleneck, self).__init__(inplanes, planes, **kwargs) - - if groups == 1: - width = self.planes - else: - width = math.floor(self.planes * - (base_width / base_channels)) * groups - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, width, postfix=1) - self.norm2_name, norm2 = build_norm_layer( - self.norm_cfg, width, postfix=2) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.planes * self.expansion, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.inplanes, - width, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - fallback_on_stride = False - self.with_modulated_dcn = False - if self.with_dcn: - fallback_on_stride = self.dcn.pop('fallback_on_stride', False) - if not self.with_dcn or fallback_on_stride: - self.conv2 = build_conv_layer( - self.conv_cfg, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - else: - assert self.conv_cfg is None, 'conv_cfg must be None for DCN' - self.conv2 = build_conv_layer( - self.dcn, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.conv3 = build_conv_layer( - self.conv_cfg, - width, - self.planes * self.expansion, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - if self.with_plugins: - self._del_block_plugins(self.after_conv1_plugin_names + - self.after_conv2_plugin_names + - self.after_conv3_plugin_names) - self.after_conv1_plugin_names = self.make_block_plugins( - width, self.after_conv1_plugins) - self.after_conv2_plugin_names = self.make_block_plugins( - width, self.after_conv2_plugins) - self.after_conv3_plugin_names = self.make_block_plugins( - self.planes * self.expansion, self.after_conv3_plugins) - - def _del_block_plugins(self, plugin_names): - """delete plugins for block if exist. - - Args: - plugin_names (list[str]): List of plugins name to delete. - """ - assert isinstance(plugin_names, list) - for plugin_name in plugin_names: - del self._modules[plugin_name] - - -@BACKBONES.register_module() -class ResNeXt(ResNet): - """ResNeXt backbone. - - Args: - depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. - in_channels (int): Number of input image channels. Default: 3. - num_stages (int): Resnet stages. 
Default: 4. - groups (int): Group of resnext. - base_width (int): Base width of resnext. - strides (Sequence[int]): Strides of the first block of each stage. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - frozen_stages (int): Stages to be frozen (all param fixed). -1 means - not freezing any parameters. - norm_cfg (dict): dictionary to construct and config norm layer. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - zero_init_residual (bool): whether to use zero init for last norm layer - in resblocks to let them behave as identity. - """ - - arch_settings = { - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, groups=1, base_width=4, **kwargs): - self.groups = groups - self.base_width = base_width - super(ResNeXt, self).__init__(**kwargs) - - def make_res_layer(self, **kwargs): - """Pack all blocks in a stage into a ``ResLayer``""" - return ResLayer( - groups=self.groups, - base_width=self.base_width, - base_channels=self.base_channels, - **kwargs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/ssd_vgg.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/ssd_vgg.py deleted file mode 100644 index c15aeac00d004418a2a2c46e53add41b95a44815..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/ssd_vgg.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch.nn as nn -from mmcv.cnn import VGG -from mmcv.runner import BaseModule - -from ..builder import BACKBONES -from ..necks import ssd_neck - - -@BACKBONES.register_module() -class SSDVGG(VGG, BaseModule): - """VGG Backbone network for single-shot-detection. - - Args: - depth (int): Depth of vgg, from {11, 13, 16, 19}. - with_last_pool (bool): Whether to add a pooling layer at the last - of the model - ceil_mode (bool): When True, will use `ceil` instead of `floor` - to compute the output shape. - out_indices (Sequence[int]): Output from which stages. - out_feature_indices (Sequence[int]): Output from which feature map. - pretrained (str, optional): model pretrained path. Default: None - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - input_size (int, optional): Deprecated argumment. - Width and height of input, from {300, 512}. - l2_norm_scale (float, optional) : Deprecated argumment. - L2 normalization layer init scale. - - Example: - >>> self = SSDVGG(input_size=300, depth=11) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 300, 300) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... 
print(tuple(level_out.shape)) - (1, 1024, 19, 19) - (1, 512, 10, 10) - (1, 256, 5, 5) - (1, 256, 3, 3) - (1, 256, 1, 1) - """ - extra_setting = { - 300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256), - 512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128), - } - - def __init__(self, - depth, - with_last_pool=False, - ceil_mode=True, - out_indices=(3, 4), - out_feature_indices=(22, 34), - pretrained=None, - init_cfg=None, - input_size=None, - l2_norm_scale=None): - # TODO: in_channels for mmcv.VGG - super(SSDVGG, self).__init__( - depth, - with_last_pool=with_last_pool, - ceil_mode=ceil_mode, - out_indices=out_indices) - - self.features.add_module( - str(len(self.features)), - nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) - self.features.add_module( - str(len(self.features)), - nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)) - self.features.add_module( - str(len(self.features)), nn.ReLU(inplace=True)) - self.features.add_module( - str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1)) - self.features.add_module( - str(len(self.features)), nn.ReLU(inplace=True)) - self.out_feature_indices = out_feature_indices - - assert not (init_cfg and pretrained), \ - 'init_cfg and pretrained cannot be specified at the same time' - - if init_cfg is not None: - self.init_cfg = init_cfg - elif isinstance(pretrained, str): - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - elif pretrained is None: - self.init_cfg = [ - dict(type='Kaiming', layer='Conv2d'), - dict(type='Constant', val=1, layer='BatchNorm2d'), - dict(type='Normal', std=0.01, layer='Linear'), - ] - else: - raise TypeError('pretrained must be a str or None') - - if input_size is not None: - warnings.warn('DeprecationWarning: input_size is deprecated') - if l2_norm_scale is not None: - warnings.warn('DeprecationWarning: l2_norm_scale in VGG is ' - 'deprecated, it has been moved to SSDNeck.') - - def init_weights(self, pretrained=None): - super(VGG, self).init_weights() - - def forward(self, x): - """Forward function.""" - outs = [] - for i, layer in enumerate(self.features): - x = layer(x) - if i in self.out_feature_indices: - outs.append(x) - - if len(outs) == 1: - return outs[0] - else: - return tuple(outs) - - -class L2Norm(ssd_neck.L2Norm): - - def __init__(self, **kwargs): - super(L2Norm, self).__init__(**kwargs) - warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py ' - 'is deprecated, please use L2Norm in ' - 'mmdet/models/necks/ssd_neck.py instead') diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/swin.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/swin.py deleted file mode 100644 index 176a562a09fc26f0cda665e457d5eef97cdd1d08..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/swin.py +++ /dev/null @@ -1,772 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import warnings -from collections import OrderedDict -from copy import deepcopy - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as cp -from mmcv.cnn import build_norm_layer, constant_init, trunc_normal_init -from mmcv.cnn.bricks.transformer import FFN, build_dropout -from mmcv.cnn.utils.weight_init import trunc_normal_ -from mmcv.runner import BaseModule, ModuleList, _load_checkpoint -from mmcv.utils import to_2tuple - -from ...utils import get_root_logger -from ..builder import BACKBONES -from ..utils.ckpt_convert import swin_converter -from ..utils.transformer import PatchEmbed, PatchMerging - - -class WindowMSA(BaseModule): - """Window based multi-head self-attention (W-MSA) module with relative - position bias. - - Args: - embed_dims (int): Number of input channels. - num_heads (int): Number of attention heads. - window_size (tuple[int]): The height and width of the window. - qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. - Default: True. - qk_scale (float | None, optional): Override default qk scale of - head_dim ** -0.5 if set. Default: None. - attn_drop_rate (float, optional): Dropout ratio of attention weight. - Default: 0.0 - proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. - init_cfg (dict | None, optional): The Config for initialization. - Default: None. - """ - - def __init__(self, - embed_dims, - num_heads, - window_size, - qkv_bias=True, - qk_scale=None, - attn_drop_rate=0., - proj_drop_rate=0., - init_cfg=None): - - super().__init__() - self.embed_dims = embed_dims - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_embed_dims = embed_dims // num_heads - self.scale = qk_scale or head_embed_dims**-0.5 - self.init_cfg = init_cfg - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), - num_heads)) # 2*Wh-1 * 2*Ww-1, nH - - # About 2x faster than original impl - Wh, Ww = self.window_size - rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) - rel_position_index = rel_index_coords + rel_index_coords.T - rel_position_index = rel_position_index.flip(1).contiguous() - self.register_buffer('relative_position_index', rel_position_index) - - self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop_rate) - self.proj = nn.Linear(embed_dims, embed_dims) - self.proj_drop = nn.Dropout(proj_drop_rate) - - self.softmax = nn.Softmax(dim=-1) - - def init_weights(self): - trunc_normal_(self.relative_position_bias_table, std=0.02) - - def forward(self, x, mask=None): - """ - Args: - - x (tensor): input features with shape of (num_windows*B, N, C) - mask (tensor | None, Optional): mask with shape of (num_windows, - Wh*Ww, Wh*Ww), value should be between (-inf, 0]. 
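        Returns:
            tensor: Attention output with the same shape as the input
                features, i.e. (num_windows*B, N, C).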
- """ - B, N, C = x.shape - qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, - C // self.num_heads).permute(2, 0, 3, 1, 4) - # make torchscript happy (cannot use tensor as tuple) - q, k, v = qkv[0], qkv[1], qkv[2] - - q = q * self.scale - attn = (q @ k.transpose(-2, -1)) - - relative_position_bias = self.relative_position_bias_table[ - self.relative_position_index.view(-1)].view( - self.window_size[0] * self.window_size[1], - self.window_size[0] * self.window_size[1], - -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute( - 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B // nW, nW, self.num_heads, N, - N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - @staticmethod - def double_step_seq(step1, len1, step2, len2): - seq1 = torch.arange(0, step1 * len1, step1) - seq2 = torch.arange(0, step2 * len2, step2) - return (seq1[:, None] + seq2[None, :]).reshape(1, -1) - - -class ShiftWindowMSA(BaseModule): - """Shifted Window Multihead Self-Attention Module. - - Args: - embed_dims (int): Number of input channels. - num_heads (int): Number of attention heads. - window_size (int): The height and width of the window. - shift_size (int, optional): The shift step of each window towards - right-bottom. If zero, act as regular window-msa. Defaults to 0. - qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. - Default: True - qk_scale (float | None, optional): Override default qk scale of - head_dim ** -0.5 if set. Defaults: None. - attn_drop_rate (float, optional): Dropout ratio of attention weight. - Defaults: 0. - proj_drop_rate (float, optional): Dropout ratio of output. - Defaults: 0. - dropout_layer (dict, optional): The dropout_layer used before output. - Defaults: dict(type='DropPath', drop_prob=0.). - init_cfg (dict, optional): The extra config for initialization. - Default: None. 
- """ - - def __init__(self, - embed_dims, - num_heads, - window_size, - shift_size=0, - qkv_bias=True, - qk_scale=None, - attn_drop_rate=0, - proj_drop_rate=0, - dropout_layer=dict(type='DropPath', drop_prob=0.), - init_cfg=None): - super().__init__(init_cfg) - - self.window_size = window_size - self.shift_size = shift_size - assert 0 <= self.shift_size < self.window_size - - self.w_msa = WindowMSA( - embed_dims=embed_dims, - num_heads=num_heads, - window_size=to_2tuple(window_size), - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop_rate=attn_drop_rate, - proj_drop_rate=proj_drop_rate, - init_cfg=None) - - self.drop = build_dropout(dropout_layer) - - def forward(self, query, hw_shape): - B, L, C = query.shape - H, W = hw_shape - assert L == H * W, 'input feature has wrong size' - query = query.view(B, H, W, C) - - # pad feature maps to multiples of window size - pad_r = (self.window_size - W % self.window_size) % self.window_size - pad_b = (self.window_size - H % self.window_size) % self.window_size - query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b)) - H_pad, W_pad = query.shape[1], query.shape[2] - - # cyclic shift - if self.shift_size > 0: - shifted_query = torch.roll( - query, - shifts=(-self.shift_size, -self.shift_size), - dims=(1, 2)) - - # calculate attention mask for SW-MSA - img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device) - h_slices = (slice(0, -self.window_size), - slice(-self.window_size, - -self.shift_size), slice(-self.shift_size, None)) - w_slices = (slice(0, -self.window_size), - slice(-self.window_size, - -self.shift_size), slice(-self.shift_size, None)) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - # nW, window_size, window_size, 1 - mask_windows = self.window_partition(img_mask) - mask_windows = mask_windows.view( - -1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, - float(-100.0)).masked_fill( - attn_mask == 0, float(0.0)) - else: - shifted_query = query - attn_mask = None - - # nW*B, window_size, window_size, C - query_windows = self.window_partition(shifted_query) - # nW*B, window_size*window_size, C - query_windows = query_windows.view(-1, self.window_size**2, C) - - # W-MSA/SW-MSA (nW*B, window_size*window_size, C) - attn_windows = self.w_msa(query_windows, mask=attn_mask) - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, - self.window_size, C) - - # B H' W' C - shifted_x = self.window_reverse(attn_windows, H_pad, W_pad) - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll( - shifted_x, - shifts=(self.shift_size, self.shift_size), - dims=(1, 2)) - else: - x = shifted_x - - if pad_r > 0 or pad_b: - x = x[:, :H, :W, :].contiguous() - - x = x.view(B, H * W, C) - - x = self.drop(x) - return x - - def window_reverse(self, windows, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - H (int): Height of image - W (int): Width of image - Returns: - x: (B, H, W, C) - """ - window_size = self.window_size - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, - window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - def window_partition(self, x): - """ - Args: - x: (B, H, W, C) - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - window_size = self.window_size - x = 
x.view(B, H // window_size, window_size, W // window_size, - window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() - windows = windows.view(-1, window_size, window_size, C) - return windows - - -class SwinBlock(BaseModule): - """" - Args: - embed_dims (int): The feature dimension. - num_heads (int): Parallel attention heads. - feedforward_channels (int): The hidden dimension for FFNs. - window_size (int, optional): The local window scale. Default: 7. - shift (bool, optional): whether to shift window or not. Default False. - qkv_bias (bool, optional): enable bias for qkv if True. Default: True. - qk_scale (float | None, optional): Override default qk scale of - head_dim ** -0.5 if set. Default: None. - drop_rate (float, optional): Dropout rate. Default: 0. - attn_drop_rate (float, optional): Attention dropout rate. Default: 0. - drop_path_rate (float, optional): Stochastic depth rate. Default: 0. - act_cfg (dict, optional): The config dict of activation function. - Default: dict(type='GELU'). - norm_cfg (dict, optional): The config dict of normalization. - Default: dict(type='LN'). - with_cp (bool, optional): Use checkpoint or not. Using checkpoint - will save some memory while slowing down the training speed. - Default: False. - init_cfg (dict | list | None, optional): The init config. - Default: None. - """ - - def __init__(self, - embed_dims, - num_heads, - feedforward_channels, - window_size=7, - shift=False, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0., - act_cfg=dict(type='GELU'), - norm_cfg=dict(type='LN'), - with_cp=False, - init_cfg=None): - - super(SwinBlock, self).__init__() - - self.init_cfg = init_cfg - self.with_cp = with_cp - - self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] - self.attn = ShiftWindowMSA( - embed_dims=embed_dims, - num_heads=num_heads, - window_size=window_size, - shift_size=window_size // 2 if shift else 0, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop_rate=attn_drop_rate, - proj_drop_rate=drop_rate, - dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), - init_cfg=None) - - self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] - self.ffn = FFN( - embed_dims=embed_dims, - feedforward_channels=feedforward_channels, - num_fcs=2, - ffn_drop=drop_rate, - dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), - act_cfg=act_cfg, - add_identity=True, - init_cfg=None) - - def forward(self, x, hw_shape): - - def _inner_forward(x): - identity = x - x = self.norm1(x) - x = self.attn(x, hw_shape) - - x = x + identity - - identity = x - x = self.norm2(x) - x = self.ffn(x, identity=identity) - - return x - - if self.with_cp and x.requires_grad: - x = cp.checkpoint(_inner_forward, x) - else: - x = _inner_forward(x) - - return x - - -class SwinBlockSequence(BaseModule): - """Implements one stage in Swin Transformer. - - Args: - embed_dims (int): The feature dimension. - num_heads (int): Parallel attention heads. - feedforward_channels (int): The hidden dimension for FFNs. - depth (int): The number of blocks in this stage. - window_size (int, optional): The local window scale. Default: 7. - qkv_bias (bool, optional): enable bias for qkv if True. Default: True. - qk_scale (float | None, optional): Override default qk scale of - head_dim ** -0.5 if set. Default: None. - drop_rate (float, optional): Dropout rate. Default: 0. - attn_drop_rate (float, optional): Attention dropout rate. Default: 0. - drop_path_rate (float | list[float], optional): Stochastic depth - rate. Default: 0. 
- downsample (BaseModule | None, optional): The downsample operation - module. Default: None. - act_cfg (dict, optional): The config dict of activation function. - Default: dict(type='GELU'). - norm_cfg (dict, optional): The config dict of normalization. - Default: dict(type='LN'). - with_cp (bool, optional): Use checkpoint or not. Using checkpoint - will save some memory while slowing down the training speed. - Default: False. - init_cfg (dict | list | None, optional): The init config. - Default: None. - """ - - def __init__(self, - embed_dims, - num_heads, - feedforward_channels, - depth, - window_size=7, - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0., - downsample=None, - act_cfg=dict(type='GELU'), - norm_cfg=dict(type='LN'), - with_cp=False, - init_cfg=None): - super().__init__(init_cfg=init_cfg) - - if isinstance(drop_path_rate, list): - drop_path_rates = drop_path_rate - assert len(drop_path_rates) == depth - else: - drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)] - - self.blocks = ModuleList() - for i in range(depth): - block = SwinBlock( - embed_dims=embed_dims, - num_heads=num_heads, - feedforward_channels=feedforward_channels, - window_size=window_size, - shift=False if i % 2 == 0 else True, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop_rate=drop_rate, - attn_drop_rate=attn_drop_rate, - drop_path_rate=drop_path_rates[i], - act_cfg=act_cfg, - norm_cfg=norm_cfg, - with_cp=with_cp, - init_cfg=None) - self.blocks.append(block) - - self.downsample = downsample - - def forward(self, x, hw_shape): - for block in self.blocks: - x = block(x, hw_shape) - - if self.downsample: - x_down, down_hw_shape = self.downsample(x, hw_shape) - return x_down, down_hw_shape, x, hw_shape - else: - return x, hw_shape, x, hw_shape - - -@BACKBONES.register_module() -class SwinTransformer(BaseModule): - """ Swin Transformer - A PyTorch implement of : `Swin Transformer: - Hierarchical Vision Transformer using Shifted Windows` - - https://arxiv.org/abs/2103.14030 - - Inspiration from - https://github.com/microsoft/Swin-Transformer - - Args: - pretrain_img_size (int | tuple[int]): The size of input image when - pretrain. Defaults: 224. - in_channels (int): The num of input channels. - Defaults: 3. - embed_dims (int): The feature dimension. Default: 96. - patch_size (int | tuple[int]): Patch size. Default: 4. - window_size (int): Window size. Default: 7. - mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. - Default: 4. - depths (tuple[int]): Depths of each Swin Transformer stage. - Default: (2, 2, 6, 2). - num_heads (tuple[int]): Parallel attention heads of each Swin - Transformer stage. Default: (3, 6, 12, 24). - strides (tuple[int]): The patch merging or patch embedding stride of - each Swin Transformer stage. (In swin, we set kernel size equal to - stride.) Default: (4, 2, 2, 2). - out_indices (tuple[int]): Output from which stages. - Default: (0, 1, 2, 3). - qkv_bias (bool, optional): If True, add a learnable bias to query, key, - value. Default: True - qk_scale (float | None, optional): Override default qk scale of - head_dim ** -0.5 if set. Default: None. - patch_norm (bool): If add a norm layer for patch embed and patch - merging. Default: True. - drop_rate (float): Dropout rate. Defaults: 0. - attn_drop_rate (float): Attention dropout rate. Default: 0. - drop_path_rate (float): Stochastic depth rate. Defaults: 0.1. - use_abs_pos_embed (bool): If True, add absolute position embedding to - the patch embedding. Defaults: False. 
- act_cfg (dict): Config dict for activation layer. - Default: dict(type='GELU'). - norm_cfg (dict): Config dict for normalization layer at - output of backone. Defaults: dict(type='LN'). - with_cp (bool, optional): Use checkpoint or not. Using checkpoint - will save some memory while slowing down the training speed. - Default: False. - pretrained (str, optional): model pretrained path. Default: None. - convert_weights (bool): The flag indicates whether the - pre-trained model is from the original repo. We may need - to convert some keys to make it compatible. - Default: False. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - Default: -1 (-1 means not freezing any parameters). - init_cfg (dict, optional): The Config for initialization. - Defaults to None. - """ - - def __init__(self, - pretrain_img_size=224, - in_channels=3, - embed_dims=96, - patch_size=4, - window_size=7, - mlp_ratio=4, - depths=(2, 2, 6, 2), - num_heads=(3, 6, 12, 24), - strides=(4, 2, 2, 2), - out_indices=(0, 1, 2, 3), - qkv_bias=True, - qk_scale=None, - patch_norm=True, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.1, - use_abs_pos_embed=False, - act_cfg=dict(type='GELU'), - norm_cfg=dict(type='LN'), - with_cp=False, - pretrained=None, - convert_weights=False, - frozen_stages=-1, - init_cfg=None): - self.convert_weights = convert_weights - self.frozen_stages = frozen_stages - if isinstance(pretrain_img_size, int): - pretrain_img_size = to_2tuple(pretrain_img_size) - elif isinstance(pretrain_img_size, tuple): - if len(pretrain_img_size) == 1: - pretrain_img_size = to_2tuple(pretrain_img_size[0]) - assert len(pretrain_img_size) == 2, \ - f'The size of image should have length 1 or 2, ' \ - f'but got {len(pretrain_img_size)}' - - assert not (init_cfg and pretrained), \ - 'init_cfg and pretrained cannot be specified at the same time' - if isinstance(pretrained, str): - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - elif pretrained is None: - self.init_cfg = init_cfg - else: - raise TypeError('pretrained must be a str or None') - - super(SwinTransformer, self).__init__(init_cfg=init_cfg) - - num_layers = len(depths) - self.out_indices = out_indices - self.use_abs_pos_embed = use_abs_pos_embed - - assert strides[0] == patch_size, 'Use non-overlapping patch embed.' 
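        # Illustrative note (based on the defaults documented above): with
        # patch_size=4 and strides=(4, 2, 2, 2), the patch embedding below
        # turns a (B, 3, 224, 224) image into a 56x56 grid of embed_dims-dim
        # tokens, and each PatchMerging halves that grid, so the stages in
        # out_indices=(0, 1, 2, 3) emit features at strides 4, 8, 16 and 32
        # with embed_dims * (1, 2, 4, 8) channels.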
- - self.patch_embed = PatchEmbed( - in_channels=in_channels, - embed_dims=embed_dims, - conv_type='Conv2d', - kernel_size=patch_size, - stride=strides[0], - norm_cfg=norm_cfg if patch_norm else None, - init_cfg=None) - - if self.use_abs_pos_embed: - patch_row = pretrain_img_size[0] // patch_size - patch_col = pretrain_img_size[1] // patch_size - self.absolute_pos_embed = nn.Parameter( - torch.zeros((1, embed_dims, patch_row, patch_col))) - - self.drop_after_pos = nn.Dropout(p=drop_rate) - - # set stochastic depth decay rule - total_depth = sum(depths) - dpr = [ - x.item() for x in torch.linspace(0, drop_path_rate, total_depth) - ] - - self.stages = ModuleList() - in_channels = embed_dims - for i in range(num_layers): - if i < num_layers - 1: - downsample = PatchMerging( - in_channels=in_channels, - out_channels=2 * in_channels, - stride=strides[i + 1], - norm_cfg=norm_cfg if patch_norm else None, - init_cfg=None) - else: - downsample = None - - stage = SwinBlockSequence( - embed_dims=in_channels, - num_heads=num_heads[i], - feedforward_channels=mlp_ratio * in_channels, - depth=depths[i], - window_size=window_size, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop_rate=drop_rate, - attn_drop_rate=attn_drop_rate, - drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])], - downsample=downsample, - act_cfg=act_cfg, - norm_cfg=norm_cfg, - with_cp=with_cp, - init_cfg=None) - self.stages.append(stage) - if downsample: - in_channels = downsample.out_channels - - self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)] - # Add a norm layer for each output - for i in out_indices: - layer = build_norm_layer(norm_cfg, self.num_features[i])[1] - layer_name = f'norm{i}' - self.add_module(layer_name, layer) - - def train(self, mode=True): - """Convert the model into training mode while keep layers freezed.""" - super(SwinTransformer, self).train(mode) - self._freeze_stages() - - def _freeze_stages(self): - if self.frozen_stages >= 0: - self.patch_embed.eval() - for param in self.patch_embed.parameters(): - param.requires_grad = False - if self.use_abs_pos_embed: - self.absolute_pos_embed.requires_grad = False - self.drop_after_pos.eval() - - for i in range(1, self.frozen_stages + 1): - - if (i - 1) in self.out_indices: - norm_layer = getattr(self, f'norm{i-1}') - norm_layer.eval() - for param in norm_layer.parameters(): - param.requires_grad = False - - m = self.stages[i - 1] - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def init_weights(self): - logger = get_root_logger() - if self.init_cfg is None: - logger.warn(f'No pre-trained weights for ' - f'{self.__class__.__name__}, ' - f'training start from scratch') - if self.use_abs_pos_embed: - trunc_normal_(self.absolute_pos_embed, std=0.02) - for m in self.modules(): - if isinstance(m, nn.Linear): - trunc_normal_init(m, std=.02, bias=0.) 
- elif isinstance(m, nn.LayerNorm): - constant_init(m, 1.0) - else: - assert 'checkpoint' in self.init_cfg, f'Only support ' \ - f'specify `Pretrained` in ' \ - f'`init_cfg` in ' \ - f'{self.__class__.__name__} ' - ckpt = _load_checkpoint( - self.init_cfg.checkpoint, logger=logger, map_location='cpu') - if 'state_dict' in ckpt: - _state_dict = ckpt['state_dict'] - elif 'model' in ckpt: - _state_dict = ckpt['model'] - else: - _state_dict = ckpt - if self.convert_weights: - # supported loading weight from original repo, - _state_dict = swin_converter(_state_dict) - - state_dict = OrderedDict() - for k, v in _state_dict.items(): - if k.startswith('backbone.'): - state_dict[k[9:]] = v - - # strip prefix of state_dict - if list(state_dict.keys())[0].startswith('module.'): - state_dict = {k[7:]: v for k, v in state_dict.items()} - - # reshape absolute position embedding - if state_dict.get('absolute_pos_embed') is not None: - absolute_pos_embed = state_dict['absolute_pos_embed'] - N1, L, C1 = absolute_pos_embed.size() - N2, C2, H, W = self.absolute_pos_embed.size() - if N1 != N2 or C1 != C2 or L != H * W: - logger.warning('Error in loading absolute_pos_embed, pass') - else: - state_dict['absolute_pos_embed'] = absolute_pos_embed.view( - N2, H, W, C2).permute(0, 3, 1, 2).contiguous() - - # interpolate position bias table if needed - relative_position_bias_table_keys = [ - k for k in state_dict.keys() - if 'relative_position_bias_table' in k - ] - for table_key in relative_position_bias_table_keys: - table_pretrained = state_dict[table_key] - table_current = self.state_dict()[table_key] - L1, nH1 = table_pretrained.size() - L2, nH2 = table_current.size() - if nH1 != nH2: - logger.warning(f'Error in loading {table_key}, pass') - elif L1 != L2: - S1 = int(L1**0.5) - S2 = int(L2**0.5) - table_pretrained_resized = F.interpolate( - table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1), - size=(S2, S2), - mode='bicubic') - state_dict[table_key] = table_pretrained_resized.view( - nH2, L2).permute(1, 0).contiguous() - - # load state_dict - self.load_state_dict(state_dict, False) - - def forward(self, x): - x, hw_shape = self.patch_embed(x) - - if self.use_abs_pos_embed: - h, w = self.absolute_pos_embed.shape[1:3] - if hw_shape[0] != h or hw_shape[1] != w: - absolute_pos_embed = F.interpolate( - self.absolute_pos_embed, - size=hw_shape, - mode='bicubic', - align_corners=False).flatten(2).transpose(1, 2) - else: - absolute_pos_embed = self.absolute_pos_embed.flatten( - 2).transpose(1, 2) - x = x + absolute_pos_embed - x = self.drop_after_pos(x) - - outs = [] - for i, stage in enumerate(self.stages): - x, hw_shape, out, out_hw_shape = stage(x, hw_shape) - if i in self.out_indices: - norm_layer = getattr(self, f'norm{i}') - out = norm_layer(out) - out = out.view(-1, *out_hw_shape, - self.num_features[i]).permute(0, 3, 1, - 2).contiguous() - outs.append(out) - - return outs diff --git a/cv/detection/co-detr/pytorch/mmdet/models/backbones/trident_resnet.py b/cv/detection/co-detr/pytorch/mmdet/models/backbones/trident_resnet.py deleted file mode 100644 index 013ba64b59d81e5be3a3f00b65c6a76915247c9d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/backbones/trident_resnet.py +++ /dev/null @@ -1,298 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as cp -from mmcv.cnn import build_conv_layer, build_norm_layer -from mmcv.runner import BaseModule -from torch.nn.modules.utils import _pair - -from mmdet.models.backbones.resnet import Bottleneck, ResNet -from mmdet.models.builder import BACKBONES - - -class TridentConv(BaseModule): - """Trident Convolution Module. - - Args: - in_channels (int): Number of channels in input. - out_channels (int): Number of channels in output. - kernel_size (int): Size of convolution kernel. - stride (int, optional): Convolution stride. Default: 1. - trident_dilations (tuple[int, int, int], optional): Dilations of - different trident branch. Default: (1, 2, 3). - test_branch_idx (int, optional): In inference, all 3 branches will - be used if `test_branch_idx==-1`, otherwise only branch with - index `test_branch_idx` will be used. Default: 1. - bias (bool, optional): Whether to use bias in convolution or not. - Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - trident_dilations=(1, 2, 3), - test_branch_idx=1, - bias=False, - init_cfg=None): - super(TridentConv, self).__init__(init_cfg) - self.num_branch = len(trident_dilations) - self.with_bias = bias - self.test_branch_idx = test_branch_idx - self.stride = _pair(stride) - self.kernel_size = _pair(kernel_size) - self.paddings = _pair(trident_dilations) - self.dilations = trident_dilations - self.in_channels = in_channels - self.out_channels = out_channels - self.bias = bias - - self.weight = nn.Parameter( - torch.Tensor(out_channels, in_channels, *self.kernel_size)) - if bias: - self.bias = nn.Parameter(torch.Tensor(out_channels)) - else: - self.bias = None - - def extra_repr(self): - tmpstr = f'in_channels={self.in_channels}' - tmpstr += f', out_channels={self.out_channels}' - tmpstr += f', kernel_size={self.kernel_size}' - tmpstr += f', num_branch={self.num_branch}' - tmpstr += f', test_branch_idx={self.test_branch_idx}' - tmpstr += f', stride={self.stride}' - tmpstr += f', paddings={self.paddings}' - tmpstr += f', dilations={self.dilations}' - tmpstr += f', bias={self.bias}' - return tmpstr - - def forward(self, inputs): - if self.training or self.test_branch_idx == -1: - outputs = [ - F.conv2d(input, self.weight, self.bias, self.stride, padding, - dilation) for input, dilation, padding in zip( - inputs, self.dilations, self.paddings) - ] - else: - assert len(inputs) == 1 - outputs = [ - F.conv2d(inputs[0], self.weight, self.bias, self.stride, - self.paddings[self.test_branch_idx], - self.dilations[self.test_branch_idx]) - ] - - return outputs - - -# Since TridentNet is defined over ResNet50 and ResNet101, here we -# only support TridentBottleneckBlock. -class TridentBottleneck(Bottleneck): - """BottleBlock for TridentResNet. - - Args: - trident_dilations (tuple[int, int, int]): Dilations of different - trident branch. - test_branch_idx (int): In inference, all 3 branches will be used - if `test_branch_idx==-1`, otherwise only branch with index - `test_branch_idx` will be used. - concat_output (bool): Whether to concat the output list to a Tensor. - `True` only in the last Block. 
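    Note:
        During training (or when ``test_branch_idx == -1``) ``forward`` runs
        every branch and returns one output per branch; the branch outputs are
        concatenated along the batch dimension only when
        ``concat_output=True``, which is set for the last block of a stage.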
- """ - - def __init__(self, trident_dilations, test_branch_idx, concat_output, - **kwargs): - - super(TridentBottleneck, self).__init__(**kwargs) - self.trident_dilations = trident_dilations - self.num_branch = len(trident_dilations) - self.concat_output = concat_output - self.test_branch_idx = test_branch_idx - self.conv2 = TridentConv( - self.planes, - self.planes, - kernel_size=3, - stride=self.conv2_stride, - bias=False, - trident_dilations=self.trident_dilations, - test_branch_idx=test_branch_idx, - init_cfg=dict( - type='Kaiming', - distribution='uniform', - mode='fan_in', - override=dict(name='conv2'))) - - def forward(self, x): - - def _inner_forward(x): - num_branch = ( - self.num_branch - if self.training or self.test_branch_idx == -1 else 1) - identity = x - if not isinstance(x, list): - x = (x, ) * num_branch - identity = x - if self.downsample is not None: - identity = [self.downsample(b) for b in x] - - out = [self.conv1(b) for b in x] - out = [self.norm1(b) for b in out] - out = [self.relu(b) for b in out] - - if self.with_plugins: - for k in range(len(out)): - out[k] = self.forward_plugin(out[k], - self.after_conv1_plugin_names) - - out = self.conv2(out) - out = [self.norm2(b) for b in out] - out = [self.relu(b) for b in out] - if self.with_plugins: - for k in range(len(out)): - out[k] = self.forward_plugin(out[k], - self.after_conv2_plugin_names) - - out = [self.conv3(b) for b in out] - out = [self.norm3(b) for b in out] - - if self.with_plugins: - for k in range(len(out)): - out[k] = self.forward_plugin(out[k], - self.after_conv3_plugin_names) - - out = [ - out_b + identity_b for out_b, identity_b in zip(out, identity) - ] - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = [self.relu(b) for b in out] - if self.concat_output: - out = torch.cat(out, dim=0) - return out - - -def make_trident_res_layer(block, - inplanes, - planes, - num_blocks, - stride=1, - trident_dilations=(1, 2, 3), - style='pytorch', - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - dcn=None, - plugins=None, - test_branch_idx=-1): - """Build Trident Res Layers.""" - - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = [] - conv_stride = stride - downsample.extend([ - build_conv_layer( - conv_cfg, - inplanes, - planes * block.expansion, - kernel_size=1, - stride=conv_stride, - bias=False), - build_norm_layer(norm_cfg, planes * block.expansion)[1] - ]) - downsample = nn.Sequential(*downsample) - - layers = [] - for i in range(num_blocks): - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=stride if i == 0 else 1, - trident_dilations=trident_dilations, - downsample=downsample if i == 0 else None, - style=style, - with_cp=with_cp, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - dcn=dcn, - plugins=plugins, - test_branch_idx=test_branch_idx, - concat_output=True if i == num_blocks - 1 else False)) - inplanes = planes * block.expansion - return nn.Sequential(*layers) - - -@BACKBONES.register_module() -class TridentResNet(ResNet): - """The stem layer, stage 1 and stage 2 in Trident ResNet are identical to - ResNet, while in stage 3, Trident BottleBlock is utilized to replace the - normal BottleBlock to yield trident output. Different branch shares the - convolution weight but uses different dilations to achieve multi-scale - output. 
- - / stage3(b0) \ - x - stem - stage1 - stage2 - stage3(b1) - output - \ stage3(b2) / - - Args: - depth (int): Depth of resnet, from {50, 101, 152}. - num_branch (int): Number of branches in TridentNet. - test_branch_idx (int): In inference, all 3 branches will be used - if `test_branch_idx==-1`, otherwise only branch with index - `test_branch_idx` will be used. - trident_dilations (tuple[int]): Dilations of different trident branch. - len(trident_dilations) should be equal to num_branch. - """ # noqa - - def __init__(self, depth, num_branch, test_branch_idx, trident_dilations, - **kwargs): - - assert num_branch == len(trident_dilations) - assert depth in (50, 101, 152) - super(TridentResNet, self).__init__(depth, **kwargs) - assert self.num_stages == 3 - self.test_branch_idx = test_branch_idx - self.num_branch = num_branch - - last_stage_idx = self.num_stages - 1 - stride = self.strides[last_stage_idx] - dilation = trident_dilations - dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None - if self.plugins is not None: - stage_plugins = self.make_stage_plugins(self.plugins, - last_stage_idx) - else: - stage_plugins = None - planes = self.base_channels * 2**last_stage_idx - res_layer = make_trident_res_layer( - TridentBottleneck, - inplanes=(self.block.expansion * self.base_channels * - 2**(last_stage_idx - 1)), - planes=planes, - num_blocks=self.stage_blocks[last_stage_idx], - stride=stride, - trident_dilations=dilation, - style=self.style, - with_cp=self.with_cp, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - dcn=dcn, - plugins=stage_plugins, - test_branch_idx=self.test_branch_idx) - - layer_name = f'layer{last_stage_idx + 1}' - - self.__setattr__(layer_name, res_layer) - self.res_layers.pop(last_stage_idx) - self.res_layers.insert(last_stage_idx, layer_name) - - self._freeze_stages() diff --git a/cv/detection/co-detr/pytorch/mmdet/models/builder.py b/cv/detection/co-detr/pytorch/mmdet/models/builder.py deleted file mode 100644 index ace6209f71f96676b87a6c046a4fc77bed100062..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/builder.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
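# Illustrative note: every registry defined below (BACKBONES, NECKS,
# ROI_EXTRACTORS, SHARED_HEADS, HEADS, LOSSES, DETECTORS) is an alias of the
# single MODELS registry, so the ``type`` key of a config dict selects the
# registered class. A minimal usage sketch (the config values are only an
# example):
#
#     backbone = build_backbone(dict(type='ResNet', depth=50))
#     loss = build_loss(dict(type='FocalLoss', use_sigmoid=True, gamma=2.0,
#                            alpha=0.25, loss_weight=1.0))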
-import warnings - -from mmcv.cnn import MODELS as MMCV_MODELS -from mmcv.utils import Registry - -MODELS = Registry('models', parent=MMCV_MODELS) - -BACKBONES = MODELS -NECKS = MODELS -ROI_EXTRACTORS = MODELS -SHARED_HEADS = MODELS -HEADS = MODELS -LOSSES = MODELS -DETECTORS = MODELS - - -def build_backbone(cfg): - """Build backbone.""" - return BACKBONES.build(cfg) - - -def build_neck(cfg): - """Build neck.""" - return NECKS.build(cfg) - - -def build_roi_extractor(cfg): - """Build roi extractor.""" - return ROI_EXTRACTORS.build(cfg) - - -def build_shared_head(cfg): - """Build shared head.""" - return SHARED_HEADS.build(cfg) - - -def build_head(cfg): - """Build head.""" - return HEADS.build(cfg) - - -def build_loss(cfg): - """Build loss.""" - return LOSSES.build(cfg) - - -def build_detector(cfg, train_cfg=None, test_cfg=None): - """Build detector.""" - if train_cfg is not None or test_cfg is not None: - warnings.warn( - 'train_cfg and test_cfg is deprecated, ' - 'please specify them in model', UserWarning) - assert cfg.get('train_cfg') is None or train_cfg is None, \ - 'train_cfg specified in both outer field and model field ' - assert cfg.get('test_cfg') is None or test_cfg is None, \ - 'test_cfg specified in both outer field and model field ' - return DETECTORS.build( - cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/__init__.py deleted file mode 100644 index 1c2286996e712fb6d681b60b14a0cab802e84fd0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/__init__.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .anchor_free_head import AnchorFreeHead -from .anchor_head import AnchorHead -from .atss_head import ATSSHead -from .autoassign_head import AutoAssignHead -from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead -from .centernet_head import CenterNetHead -from .centripetal_head import CentripetalHead -from .corner_head import CornerHead -from .ddod_head import DDODHead -from .deformable_detr_head import DeformableDETRHead -from .detr_head import DETRHead -from .embedding_rpn_head import EmbeddingRPNHead -from .fcos_head import FCOSHead -from .fovea_head import FoveaHead -from .free_anchor_retina_head import FreeAnchorRetinaHead -from .fsaf_head import FSAFHead -from .ga_retina_head import GARetinaHead -from .ga_rpn_head import GARPNHead -from .gfl_head import GFLHead -from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead -from .lad_head import LADHead -from .ld_head import LDHead -from .mask2former_head import Mask2FormerHead -from .maskformer_head import MaskFormerHead -from .nasfcos_head import NASFCOSHead -from .paa_head import PAAHead -from .pisa_retinanet_head import PISARetinaHead -from .pisa_ssd_head import PISASSDHead -from .reppoints_head import RepPointsHead -from .retina_head import RetinaHead -from .retina_sepbn_head import RetinaSepBNHead -from .rpn_head import RPNHead -from .sabl_retina_head import SABLRetinaHead -from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead -from .solov2_head import SOLOV2Head -from .ssd_head import SSDHead -from .tood_head import TOODHead -from .vfnet_head import VFNetHead -from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead -from .yolo_head import YOLOV3Head -from .yolof_head import YOLOFHead -from .yolox_head import YOLOXHead - -__all__ = [ - 
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', - 'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead', - 'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead', - 'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead', - 'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead', - 'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', - 'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', - 'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', - 'DETRHead', 'YOLOFHead', 'DeformableDETRHead', 'SOLOHead', - 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead', - 'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead', - 'Mask2FormerHead', 'SOLOV2Head', 'DDODHead' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/anchor_free_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/anchor_free_head.py deleted file mode 100644 index b0460b945ca43b663553ab081d100edb76d8496a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/anchor_free_head.py +++ /dev/null @@ -1,350 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings -from abc import abstractmethod - -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.runner import force_fp32 - -from mmdet.core import build_bbox_coder, multi_apply -from mmdet.core.anchor.point_generator import MlvlPointGenerator -from ..builder import HEADS, build_loss -from .base_dense_head import BaseDenseHead -from .dense_test_mixins import BBoxTestMixin - - -@HEADS.register_module() -class AnchorFreeHead(BaseDenseHead, BBoxTestMixin): - """Anchor-free head (FCOS, Fovea, RepPoints, etc.). - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - feat_channels (int): Number of hidden channels. Used in child classes. - stacked_convs (int): Number of stacking convs of the head. - strides (tuple): Downsample factor of each feature map. - dcn_on_last_conv (bool): If true, use dcn in the last layer of - towers. Default: False. - conv_bias (bool | str): If specified as `auto`, it will be decided by - the norm_cfg. Bias of conv will be set as True if `norm_cfg` is - None, otherwise False. Default: "auto". - loss_cls (dict): Config of classification loss. - loss_bbox (dict): Config of localization loss. - bbox_coder (dict): Config of bbox coder. Defaults - 'DistancePointBBoxCoder'. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Config dict for normalization layer. Default: None. - train_cfg (dict): Training config of anchor head. - test_cfg (dict): Testing config of anchor head. - init_cfg (dict or list[dict], optional): Initialization config dict. 
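    Note:
        ``loss`` and ``get_targets`` are left abstract here, so this class is
        intended to be subclassed (e.g. by FCOS- or Fovea-style heads) rather
        than instantiated directly.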
- """ # noqa: W605 - - _version = 1 - - def __init__(self, - num_classes, - in_channels, - feat_channels=256, - stacked_convs=4, - strides=(4, 8, 16, 32, 64), - dcn_on_last_conv=False, - conv_bias='auto', - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='IoULoss', loss_weight=1.0), - bbox_coder=dict(type='DistancePointBBoxCoder'), - conv_cfg=None, - norm_cfg=None, - train_cfg=None, - test_cfg=None, - init_cfg=dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', - name='conv_cls', - std=0.01, - bias_prob=0.01))): - super(AnchorFreeHead, self).__init__(init_cfg) - self.num_classes = num_classes - self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) - if self.use_sigmoid_cls: - self.cls_out_channels = num_classes - else: - self.cls_out_channels = num_classes + 1 - self.in_channels = in_channels - self.feat_channels = feat_channels - self.stacked_convs = stacked_convs - self.strides = strides - self.dcn_on_last_conv = dcn_on_last_conv - assert conv_bias == 'auto' or isinstance(conv_bias, bool) - self.conv_bias = conv_bias - self.loss_cls = build_loss(loss_cls) - self.loss_bbox = build_loss(loss_bbox) - self.bbox_coder = build_bbox_coder(bbox_coder) - - self.prior_generator = MlvlPointGenerator(strides) - - # In order to keep a more general interface and be consistent with - # anchor_head. We can think of point like one anchor - self.num_base_priors = self.prior_generator.num_base_priors[0] - - self.train_cfg = train_cfg - self.test_cfg = test_cfg - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.fp16_enabled = False - - self._init_layers() - - def _init_layers(self): - """Initialize layers of the head.""" - self._init_cls_convs() - self._init_reg_convs() - self._init_predictor() - - def _init_cls_convs(self): - """Initialize classification conv layers of the head.""" - self.cls_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - if self.dcn_on_last_conv and i == self.stacked_convs - 1: - conv_cfg = dict(type='DCNv2') - else: - conv_cfg = self.conv_cfg - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=self.norm_cfg, - bias=self.conv_bias)) - - def _init_reg_convs(self): - """Initialize bbox regression conv layers of the head.""" - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - if self.dcn_on_last_conv and i == self.stacked_convs - 1: - conv_cfg = dict(type='DCNv2') - else: - conv_cfg = self.conv_cfg - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=self.norm_cfg, - bias=self.conv_bias)) - - def _init_predictor(self): - """Initialize predictor layers of the head.""" - self.conv_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - """Hack some keys of the model state dict so that can load checkpoints - of previous version.""" - version = local_metadata.get('version', None) - if version is None: - # the key is different in early versions - # for example, 'fcos_cls' become 'conv_cls' now - bbox_head_keys = [ - k for k in state_dict.keys() if 
k.startswith(prefix) - ] - ori_predictor_keys = [] - new_predictor_keys = [] - # e.g. 'fcos_cls' or 'fcos_reg' - for key in bbox_head_keys: - ori_predictor_keys.append(key) - key = key.split('.') - conv_name = None - if key[1].endswith('cls'): - conv_name = 'conv_cls' - elif key[1].endswith('reg'): - conv_name = 'conv_reg' - elif key[1].endswith('centerness'): - conv_name = 'conv_centerness' - else: - assert NotImplementedError - if conv_name is not None: - key[1] = conv_name - new_predictor_keys.append('.'.join(key)) - else: - ori_predictor_keys.pop(-1) - for i in range(len(new_predictor_keys)): - state_dict[new_predictor_keys[i]] = state_dict.pop( - ori_predictor_keys[i]) - super()._load_from_state_dict(state_dict, prefix, local_metadata, - strict, missing_keys, unexpected_keys, - error_msgs) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: Usually contain classification scores and bbox predictions. - cls_scores (list[Tensor]): Box scores for each scale level, - each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level, each is a 4D-tensor, the channel number is - num_points * 4. - """ - return multi_apply(self.forward_single, feats)[:2] - - def forward_single(self, x): - """Forward features of a single scale level. - - Args: - x (Tensor): FPN feature maps of the specified stride. - - Returns: - tuple: Scores for each class, bbox predictions, features - after classification and regression conv layers, some - models needs these features like FCOS. - """ - cls_feat = x - reg_feat = x - - for cls_layer in self.cls_convs: - cls_feat = cls_layer(cls_feat) - cls_score = self.conv_cls(cls_feat) - - for reg_layer in self.reg_convs: - reg_feat = reg_layer(reg_feat) - bbox_pred = self.conv_reg(reg_feat) - return cls_score, bbox_pred, cls_feat, reg_feat - - @abstractmethod - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute loss of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level, - each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level, each is a 4D-tensor, the channel number is - num_points * 4. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - """ - - raise NotImplementedError - - @abstractmethod - def get_targets(self, points, gt_bboxes_list, gt_labels_list): - """Compute regression, classification and centerness targets for points - in multiple images. - - Args: - points (list[Tensor]): Points of each fpn level, each has shape - (num_points, 2). - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, - each has shape (num_gt, 4). - gt_labels_list (list[Tensor]): Ground truth labels of each box, - each has shape (num_gt,). 
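        A minimal sketch of the expected inputs (an added illustration, not
        from the original file; concrete subclasses such as FCOSHead turn
        these into per-point classification and regression targets):

            >>> import torch
            >>> # two FPN levels with 4 and 2 points respectively
            >>> points = [torch.rand(4, 2), torch.rand(2, 2)]
            >>> # one image with a single ground-truth box and its label
            >>> gt_bboxes_list = [torch.tensor([[10., 10., 50., 50.]])]
            >>> gt_labels_list = [torch.tensor([2])]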
- """ - raise NotImplementedError - - def _get_points_single(self, - featmap_size, - stride, - dtype, - device, - flatten=False): - """Get points of a single scale level. - - This function will be deprecated soon. - """ - - warnings.warn( - '`_get_points_single` in `AnchorFreeHead` will be ' - 'deprecated soon, we support a multi level point generator now' - 'you can get points of a single level feature map ' - 'with `self.prior_generator.single_level_grid_priors` ') - - h, w = featmap_size - # First create Range with the default dtype, than convert to - # target `dtype` for onnx exporting. - x_range = torch.arange(w, device=device).to(dtype) - y_range = torch.arange(h, device=device).to(dtype) - y, x = torch.meshgrid(y_range, x_range) - if flatten: - y = y.flatten() - x = x.flatten() - return y, x - - def get_points(self, featmap_sizes, dtype, device, flatten=False): - """Get points according to feature map sizes. - - Args: - featmap_sizes (list[tuple]): Multi-level feature map sizes. - dtype (torch.dtype): Type of points. - device (torch.device): Device of points. - - Returns: - tuple: points of each image. - """ - warnings.warn( - '`get_points` in `AnchorFreeHead` will be ' - 'deprecated soon, we support a multi level point generator now' - 'you can get points of all levels ' - 'with `self.prior_generator.grid_priors` ') - - mlvl_points = [] - for i in range(len(featmap_sizes)): - mlvl_points.append( - self._get_points_single(featmap_sizes[i], self.strides[i], - dtype, device, flatten)) - return mlvl_points - - def aug_test(self, feats, img_metas, rescale=False): - """Test function with test time augmentation. - - Args: - feats (list[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains features for all images in the batch. - img_metas (list[list[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. each dict has image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[ndarray]: bbox results of each class - """ - return self.aug_test_bboxes(feats, img_metas, rescale=rescale) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/anchor_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/anchor_head.py deleted file mode 100644 index d1bfab62de230feaccc83b935573b87d1d8061df..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/anchor_head.py +++ /dev/null @@ -1,542 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch -import torch.nn as nn -from mmcv.runner import force_fp32 - -from mmdet.core import (anchor_inside_flags, build_assigner, build_bbox_coder, - build_prior_generator, build_sampler, images_to_levels, - multi_apply, unmap) -from ..builder import HEADS, build_loss -from .base_dense_head import BaseDenseHead -from .dense_test_mixins import BBoxTestMixin - - -@HEADS.register_module() -class AnchorHead(BaseDenseHead, BBoxTestMixin): - """Anchor-based head (RPN, RetinaNet, SSD, etc.). - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - feat_channels (int): Number of hidden channels. Used in child classes. - anchor_generator (dict): Config dict for anchor generator - bbox_coder (dict): Config of bounding box coder. 
- reg_decoded_bbox (bool): If true, the regression loss would be - applied directly on decoded bounding boxes, converting both - the predicted boxes and regression targets to absolute - coordinates format. Default False. It should be `True` when - using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. - loss_cls (dict): Config of classification loss. - loss_bbox (dict): Config of localization loss. - train_cfg (dict): Training config of anchor head. - test_cfg (dict): Testing config of anchor head. - init_cfg (dict or list[dict], optional): Initialization config dict. - """ # noqa: W605 - - def __init__(self, - num_classes, - in_channels, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8, 16, 32], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - clip_border=True, - target_means=(.0, .0, .0, .0), - target_stds=(1.0, 1.0, 1.0, 1.0)), - reg_decoded_bbox=False, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - loss_bbox=dict( - type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), - train_cfg=None, - test_cfg=None, - init_cfg=dict(type='Normal', layer='Conv2d', std=0.01)): - super(AnchorHead, self).__init__(init_cfg) - self.in_channels = in_channels - self.num_classes = num_classes - self.feat_channels = feat_channels - self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) - if self.use_sigmoid_cls: - self.cls_out_channels = num_classes - else: - self.cls_out_channels = num_classes + 1 - - if self.cls_out_channels <= 0: - raise ValueError(f'num_classes={num_classes} is too small') - self.reg_decoded_bbox = reg_decoded_bbox - - self.bbox_coder = build_bbox_coder(bbox_coder) - self.loss_cls = build_loss(loss_cls) - self.loss_bbox = build_loss(loss_bbox) - self.train_cfg = train_cfg - self.test_cfg = test_cfg - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - if hasattr(self.train_cfg, - 'sampler') and self.train_cfg.sampler.type.split( - '.')[-1] != 'PseudoSampler': - self.sampling = True - sampler_cfg = self.train_cfg.sampler - # avoid BC-breaking - if loss_cls['type'] in [ - 'FocalLoss', 'GHMC', 'QualityFocalLoss' - ]: - warnings.warn( - 'DeprecationWarning: Determining whether to sampling' - 'by loss type is deprecated, please delete sampler in' - 'your config when using `FocalLoss`, `GHMC`, ' - '`QualityFocalLoss` or other FocalLoss variant.') - self.sampling = False - sampler_cfg = dict(type='PseudoSampler') - else: - self.sampling = False - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - self.fp16_enabled = False - - self.prior_generator = build_prior_generator(anchor_generator) - - # Usually the numbers of anchors for each level are the same - # except SSD detectors. 
So it is an int in the most dense - # heads but a list of int in SSDHead - self.num_base_priors = self.prior_generator.num_base_priors[0] - self._init_layers() - - @property - def num_anchors(self): - warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' - 'for consistency or also use ' - '`num_base_priors` instead') - return self.prior_generator.num_base_priors[0] - - @property - def anchor_generator(self): - warnings.warn('DeprecationWarning: anchor_generator is deprecated, ' - 'please use "prior_generator" instead') - return self.prior_generator - - def _init_layers(self): - """Initialize layers of the head.""" - self.conv_cls = nn.Conv2d(self.in_channels, - self.num_base_priors * self.cls_out_channels, - 1) - self.conv_reg = nn.Conv2d(self.in_channels, self.num_base_priors * 4, - 1) - - def forward_single(self, x): - """Forward feature of a single scale level. - - Args: - x (Tensor): Features of a single scale level. - - Returns: - tuple: - cls_score (Tensor): Cls scores for a single scale level \ - the channels number is num_base_priors * num_classes. - bbox_pred (Tensor): Box energies / deltas for a single scale \ - level, the channels number is num_base_priors * 4. - """ - cls_score = self.conv_cls(x) - bbox_pred = self.conv_reg(x) - return cls_score, bbox_pred - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: A tuple of classification scores and bbox prediction. - - - cls_scores (list[Tensor]): Classification scores for all \ - scale levels, each is a 4D-tensor, the channels number \ - is num_base_priors * num_classes. - - bbox_preds (list[Tensor]): Box energies / deltas for all \ - scale levels, each is a 4D-tensor, the channels number \ - is num_base_priors * 4. - """ - return multi_apply(self.forward_single, feats) - - def get_anchors(self, featmap_sizes, img_metas, device='cuda'): - """Get anchors according to feature map sizes. - - Args: - featmap_sizes (list[tuple]): Multi-level feature map sizes. - img_metas (list[dict]): Image meta info. - device (torch.device | str): Device for returned tensors - - Returns: - tuple: - anchor_list (list[Tensor]): Anchors of each image. - valid_flag_list (list[Tensor]): Valid flags of each image. - """ - num_imgs = len(img_metas) - - # since feature map sizes of all images are the same, we only compute - # anchors for one time - multi_level_anchors = self.prior_generator.grid_priors( - featmap_sizes, device=device) - anchor_list = [multi_level_anchors for _ in range(num_imgs)] - - # for each image, we compute valid flags of multi level anchors - valid_flag_list = [] - for img_id, img_meta in enumerate(img_metas): - multi_level_flags = self.prior_generator.valid_flags( - featmap_sizes, img_meta['pad_shape'], device) - valid_flag_list.append(multi_level_flags) - - return anchor_list, valid_flag_list - - def _get_targets_single(self, - flat_anchors, - valid_flags, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=1, - unmap_outputs=True): - """Compute regression and classification targets for anchors in a - single image. - - Args: - flat_anchors (Tensor): Multi-level anchors of the image, which are - concatenated into a single tensor of shape (num_anchors ,4) - valid_flags (Tensor): Multi level valid flags of the image, - which are concatenated into a single tensor of - shape (num_anchors,). 
- gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - img_meta (dict): Meta info of the image. - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: - labels_list (list[Tensor]): Labels of each level - label_weights_list (list[Tensor]): Label weights of each level - bbox_targets_list (list[Tensor]): BBox targets of each level - bbox_weights_list (list[Tensor]): BBox weights of each level - num_total_pos (int): Number of positive samples in all images - num_total_neg (int): Number of negative samples in all images - """ - inside_flags = anchor_inside_flags(flat_anchors, valid_flags, - img_meta['img_shape'][:2], - self.train_cfg.allowed_border) - if not inside_flags.any(): - return (None, ) * 7 - # assign gt and sample anchors - anchors = flat_anchors[inside_flags, :] - - assign_result = self.assigner.assign( - anchors, gt_bboxes, gt_bboxes_ignore, - None if self.sampling else gt_labels) - sampling_result = self.sampler.sample(assign_result, anchors, - gt_bboxes) - - num_valid_anchors = anchors.shape[0] - bbox_targets = torch.zeros_like(anchors) - bbox_weights = torch.zeros_like(anchors) - labels = anchors.new_full((num_valid_anchors, ), - self.num_classes, - dtype=torch.long) - label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - if not self.reg_decoded_bbox: - pos_bbox_targets = self.bbox_coder.encode( - sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) - else: - pos_bbox_targets = sampling_result.pos_gt_bboxes - bbox_targets[pos_inds, :] = pos_bbox_targets - bbox_weights[pos_inds, :] = 1.0 - if gt_labels is None: - # Only rpn gives gt_labels as None - # Foreground is the first class since v2.5.0 - labels[pos_inds] = 0 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if self.train_cfg.pos_weight <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg.pos_weight - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_anchors.size(0) - labels = unmap( - labels, num_total_anchors, inside_flags, - fill=self.num_classes) # fill bg label - label_weights = unmap(label_weights, num_total_anchors, - inside_flags) - bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) - bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) - - return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, - neg_inds, sampling_result) - - def get_targets(self, - anchor_list, - valid_flag_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=1, - unmap_outputs=True, - return_sampling_results=False): - """Compute regression and classification targets for anchors in - multiple images. - - Args: - anchor_list (list[list[Tensor]]): Multi level anchors of each - image. The outer list indicates images, and the inner list - corresponds to feature levels of the image. Each element of - the inner list is a tensor of shape (num_anchors, 4). - valid_flag_list (list[list[Tensor]]): Multi level valid flags of - each image. 
The outer list indicates images, and the inner list - corresponds to feature levels of the image. Each element of - the inner list is a tensor of shape (num_anchors, ) - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. - img_metas (list[dict]): Meta info of each image. - gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be - ignored. - gt_labels_list (list[Tensor]): Ground truth labels of each box. - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: Usually returns a tuple containing learning targets. - - - labels_list (list[Tensor]): Labels of each level. - - label_weights_list (list[Tensor]): Label weights of each - level. - - bbox_targets_list (list[Tensor]): BBox targets of each level. - - bbox_weights_list (list[Tensor]): BBox weights of each level. - - num_total_pos (int): Number of positive samples in all - images. - - num_total_neg (int): Number of negative samples in all - images. - - additional_returns: This function enables user-defined returns from - `self._get_targets_single`. These returns are currently refined - to properties at each feature map (i.e. having HxW dimension). - The results will be concatenated after the end - """ - num_imgs = len(img_metas) - assert len(anchor_list) == len(valid_flag_list) == num_imgs - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - # concat all level anchors to a single tensor - concat_anchor_list = [] - concat_valid_flag_list = [] - for i in range(num_imgs): - assert len(anchor_list[i]) == len(valid_flag_list[i]) - concat_anchor_list.append(torch.cat(anchor_list[i])) - concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - results = multi_apply( - self._get_targets_single, - concat_anchor_list, - concat_valid_flag_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - unmap_outputs=unmap_outputs) - (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, - pos_inds_list, neg_inds_list, sampling_results_list) = results[:7] - rest_results = list(results[7:]) # user-added return values - # no valid anchors - if any([labels is None for labels in all_labels]): - return None - # sampled anchors of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - # split targets to a list w.r.t. 
multiple levels - labels_list = images_to_levels(all_labels, num_level_anchors) - label_weights_list = images_to_levels(all_label_weights, - num_level_anchors) - bbox_targets_list = images_to_levels(all_bbox_targets, - num_level_anchors) - bbox_weights_list = images_to_levels(all_bbox_weights, - num_level_anchors) - res = (labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) - if return_sampling_results: - res = res + (sampling_results_list, ) - for i, r in enumerate(rest_results): # user-added return values - rest_results[i] = images_to_levels(r, num_level_anchors) - - return res + tuple(rest_results) - - def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights, - bbox_targets, bbox_weights, num_total_samples): - """Compute loss of a single scale level. - - Args: - cls_score (Tensor): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W). - bbox_pred (Tensor): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W). - anchors (Tensor): Box reference for each scale level with shape - (N, num_total_anchors, 4). - labels (Tensor): Labels of each anchors with shape - (N, num_total_anchors). - label_weights (Tensor): Label weights of each anchor with shape - (N, num_total_anchors) - bbox_targets (Tensor): BBox regression targets of each anchor - weight shape (N, num_total_anchors, 4). - bbox_weights (Tensor): BBox regression loss weights of each anchor - with shape (N, num_total_anchors, 4). - num_total_samples (int): If sampling, num total samples equal to - the number of total anchors; Otherwise, it is the number of - positive anchors. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - # classification loss - labels = labels.reshape(-1) - label_weights = label_weights.reshape(-1) - cls_score = cls_score.permute(0, 2, 3, - 1).reshape(-1, self.cls_out_channels) - loss_cls = self.loss_cls( - cls_score, labels, label_weights, avg_factor=num_total_samples) - # regression loss - bbox_targets = bbox_targets.reshape(-1, 4) - bbox_weights = bbox_weights.reshape(-1, 4) - bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) - if self.reg_decoded_bbox: - # When the regression loss (e.g. `IouLoss`, `GIouLoss`) - # is applied directly on the decoded bounding boxes, it - # decodes the already encoded coordinates to absolute format. - anchors = anchors.reshape(-1, 4) - bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) - loss_bbox = self.loss_bbox( - bbox_pred, - bbox_targets, - bbox_weights, - avg_factor=num_total_samples) - return loss_cls, loss_bbox - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. Default: None - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
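        Example of the expected input shapes (an illustrative sketch added
        here, not part of the original file; the sizes are made up):

            >>> import torch
            >>> N, A, C = 2, 9, 80        # images, anchors per location, classes
            >>> featmap_sizes = [(32, 32), (16, 16)]
            >>> cls_scores = [torch.rand(N, A * C, h, w) for h, w in featmap_sizes]
            >>> bbox_preds = [torch.rand(N, A * 4, h, w) for h, w in featmap_sizes]
            >>> gt_bboxes = [torch.tensor([[0., 0., 10., 10.]]) for _ in range(N)]
            >>> gt_labels = [torch.tensor([1]) for _ in range(N)]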
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - num_total_samples = ( - num_total_pos + num_total_neg if self.sampling else num_total_pos) - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - # concat all level anchors and flags to a single tensor - concat_anchor_list = [] - for i in range(len(anchor_list)): - concat_anchor_list.append(torch.cat(anchor_list[i])) - all_anchor_list = images_to_levels(concat_anchor_list, - num_level_anchors) - - losses_cls, losses_bbox = multi_apply( - self.loss_single, - cls_scores, - bbox_preds, - all_anchor_list, - labels_list, - label_weights_list, - bbox_targets_list, - bbox_weights_list, - num_total_samples=num_total_samples) - return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) - - def aug_test(self, feats, img_metas, rescale=False): - """Test function with test time augmentation. - - Args: - feats (list[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains features for all images in the batch. - img_metas (list[list[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. each dict has image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is ``bboxes`` with shape (n, 5), where - 5 represent (tl_x, tl_y, br_x, br_y, score). - The shape of the second tensor in the tuple is ``labels`` - with shape (n,), The length of list should always be 1. - """ - return self.aug_test_bboxes(feats, img_metas, rescale=rescale) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/atss_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/atss_head.py deleted file mode 100644 index e8f401caa1a83cf6f6b62a642fb1d42c379a4e11..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/atss_head.py +++ /dev/null @@ -1,501 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, Scale -from mmcv.runner import force_fp32 - -from mmdet.core import (anchor_inside_flags, build_assigner, build_sampler, - images_to_levels, multi_apply, reduce_mean, unmap) -from ..builder import HEADS, build_loss -from .anchor_head import AnchorHead - - -@HEADS.register_module() -class ATSSHead(AnchorHead): - """Bridging the Gap Between Anchor-based and Anchor-free Detection via - Adaptive Training Sample Selection. - - ATSS head structure is similar with FCOS, however ATSS use anchor boxes - and assign label by Adaptive Training Sample Selection instead max-iou. 
- - https://arxiv.org/abs/1912.02424 - """ - - def __init__(self, - num_classes, - in_channels, - pred_kernel_size=3, - stacked_convs=4, - conv_cfg=None, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), - reg_decoded_bbox=True, - loss_centerness=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - init_cfg=dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', - name='atss_cls', - std=0.01, - bias_prob=0.01)), - **kwargs): - self.pred_kernel_size = pred_kernel_size - self.stacked_convs = stacked_convs - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - super(ATSSHead, self).__init__( - num_classes, - in_channels, - reg_decoded_bbox=reg_decoded_bbox, - init_cfg=init_cfg, - **kwargs) - - self.sampling = False - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - # SSD sampling=False so use PseudoSampler - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - self.loss_centerness = build_loss(loss_centerness) - - def _init_layers(self): - """Initialize layers of the head.""" - self.relu = nn.ReLU(inplace=True) - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - pred_pad_size = self.pred_kernel_size // 2 - self.atss_cls = nn.Conv2d( - self.feat_channels, - self.num_anchors * self.cls_out_channels, - self.pred_kernel_size, - padding=pred_pad_size) - self.atss_reg = nn.Conv2d( - self.feat_channels, - self.num_base_priors * 4, - self.pred_kernel_size, - padding=pred_pad_size) - self.atss_centerness = nn.Conv2d( - self.feat_channels, - self.num_base_priors * 1, - self.pred_kernel_size, - padding=pred_pad_size) - self.scales = nn.ModuleList( - [Scale(1.0) for _ in self.prior_generator.strides]) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: Usually a tuple of classification scores and bbox prediction - cls_scores (list[Tensor]): Classification scores for all scale - levels, each is a 4D-tensor, the channels number is - num_anchors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale - levels, each is a 4D-tensor, the channels number is - num_anchors * 4. - """ - return multi_apply(self.forward_single, feats, self.scales) - - def forward_single(self, x, scale): - """Forward feature of a single scale level. - - Args: - x (Tensor): Features of a single scale level. - scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize - the bbox prediction. - - Returns: - tuple: - cls_score (Tensor): Cls scores for a single scale level - the channels number is num_anchors * num_classes. - bbox_pred (Tensor): Box energies / deltas for a single scale - level, the channels number is num_anchors * 4. - centerness (Tensor): Centerness for a single scale level, the - channel number is (N, num_anchors * 1, H, W). 
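        Example (an added illustration, not from the original file; it relies
        on the default anchor and loss settings inherited from AnchorHead):

            >>> import torch
            >>> from mmdet.models.dense_heads import ATSSHead
            >>> head = ATSSHead(num_classes=80, in_channels=256)
            >>> x = torch.rand(1, 256, 32, 32)
            >>> cls_score, bbox_pred, centerness = head.forward_single(
            ...     x, head.scales[0])
            >>> assert centerness.shape[1] == head.num_base_priors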
- """ - cls_feat = x - reg_feat = x - for cls_conv in self.cls_convs: - cls_feat = cls_conv(cls_feat) - for reg_conv in self.reg_convs: - reg_feat = reg_conv(reg_feat) - cls_score = self.atss_cls(cls_feat) - # we just follow atss, not apply exp in bbox_pred - bbox_pred = scale(self.atss_reg(reg_feat)).float() - centerness = self.atss_centerness(reg_feat) - return cls_score, bbox_pred, centerness - - def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels, - label_weights, bbox_targets, num_total_samples): - """Compute loss of a single scale level. - - Args: - cls_score (Tensor): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W). - bbox_pred (Tensor): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W). - anchors (Tensor): Box reference for each scale level with shape - (N, num_total_anchors, 4). - labels (Tensor): Labels of each anchors with shape - (N, num_total_anchors). - label_weights (Tensor): Label weights of each anchor with shape - (N, num_total_anchors) - bbox_targets (Tensor): BBox regression targets of each anchor - weight shape (N, num_total_anchors, 4). - num_total_samples (int): Number os positive samples that is - reduced over all GPUs. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - - anchors = anchors.reshape(-1, 4) - cls_score = cls_score.permute(0, 2, 3, 1).reshape( - -1, self.cls_out_channels).contiguous() - bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) - centerness = centerness.permute(0, 2, 3, 1).reshape(-1) - bbox_targets = bbox_targets.reshape(-1, 4) - labels = labels.reshape(-1) - label_weights = label_weights.reshape(-1) - - # classification loss - loss_cls = self.loss_cls( - cls_score, labels, label_weights, avg_factor=num_total_samples) - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - bg_class_ind = self.num_classes - pos_inds = ((labels >= 0) - & (labels < bg_class_ind)).nonzero().squeeze(1) - - if len(pos_inds) > 0: - pos_bbox_targets = bbox_targets[pos_inds] - pos_bbox_pred = bbox_pred[pos_inds] - pos_anchors = anchors[pos_inds] - pos_centerness = centerness[pos_inds] - - centerness_targets = self.centerness_target( - pos_anchors, pos_bbox_targets) - pos_decode_bbox_pred = self.bbox_coder.decode( - pos_anchors, pos_bbox_pred) - - # regression loss - loss_bbox = self.loss_bbox( - pos_decode_bbox_pred, - pos_bbox_targets, - weight=centerness_targets, - avg_factor=1.0) - - # centerness loss - loss_centerness = self.loss_centerness( - pos_centerness, - centerness_targets, - avg_factor=num_total_samples) - - else: - loss_bbox = bbox_pred.sum() * 0 - loss_centerness = centerness.sum() * 0 - centerness_targets = bbox_targets.new_tensor(0.) - - return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum() - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) - def loss(self, - cls_scores, - bbox_preds, - centernesses, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - centernesses (list[Tensor]): Centerness for each scale - level with shape (N, num_anchors * 1, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
- gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor] | None): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels) - if cls_reg_targets is None: - return None - - (anchor_list, labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets - - num_total_samples = reduce_mean( - torch.tensor(num_total_pos, dtype=torch.float, - device=device)).item() - num_total_samples = max(num_total_samples, 1.0) - - losses_cls, losses_bbox, loss_centerness,\ - bbox_avg_factor = multi_apply( - self.loss_single, - anchor_list, - cls_scores, - bbox_preds, - centernesses, - labels_list, - label_weights_list, - bbox_targets_list, - num_total_samples=num_total_samples) - - bbox_avg_factor = sum(bbox_avg_factor) - bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item() - losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) - return dict( - loss_cls=losses_cls, - loss_bbox=losses_bbox, - loss_centerness=loss_centerness) - - def centerness_target(self, anchors, gts): - # only calculate pos centerness targets, otherwise there may be nan - anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 - anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 - l_ = anchors_cx - gts[:, 0] - t_ = anchors_cy - gts[:, 1] - r_ = gts[:, 2] - anchors_cx - b_ = gts[:, 3] - anchors_cy - - left_right = torch.stack([l_, r_], dim=1) - top_bottom = torch.stack([t_, b_], dim=1) - centerness = torch.sqrt( - (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * - (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])) - assert not torch.isnan(centerness).any() - return centerness - - def get_targets(self, - anchor_list, - valid_flag_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=1, - unmap_outputs=True): - """Get targets for ATSS head. - - This method is almost the same as `AnchorHead.get_targets()`. Besides - returning the targets as the parent method does, it also returns the - anchors as the first element of the returned tuple. 
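        An added note (not from the original file): because the anchors come
        first, a typical unpacking inside ``loss`` looks like

            >>> (anchors_list, labels_list, label_weights_list,
            ...  bbox_targets_list, bbox_weights_list, num_total_pos,
            ...  num_total_neg) = head.get_targets(
            ...     anchor_list, valid_flag_list, gt_bboxes, img_metas,
            ...     gt_bboxes_ignore_list=gt_bboxes_ignore,
            ...     gt_labels_list=gt_labels, label_channels=label_channels)

        where ``head`` and the argument tensors are assumed to come from
        ``get_anchors`` and the data pipeline, as in ``loss`` above.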
- """ - num_imgs = len(img_metas) - assert len(anchor_list) == len(valid_flag_list) == num_imgs - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - num_level_anchors_list = [num_level_anchors] * num_imgs - - # concat all level anchors and flags to a single tensor - for i in range(num_imgs): - assert len(anchor_list[i]) == len(valid_flag_list[i]) - anchor_list[i] = torch.cat(anchor_list[i]) - valid_flag_list[i] = torch.cat(valid_flag_list[i]) - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - (all_anchors, all_labels, all_label_weights, all_bbox_targets, - all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( - self._get_target_single, - anchor_list, - valid_flag_list, - num_level_anchors_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - unmap_outputs=unmap_outputs) - # no valid anchors - if any([labels is None for labels in all_labels]): - return None - # sampled anchors of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - # split targets to a list w.r.t. multiple levels - anchors_list = images_to_levels(all_anchors, num_level_anchors) - labels_list = images_to_levels(all_labels, num_level_anchors) - label_weights_list = images_to_levels(all_label_weights, - num_level_anchors) - bbox_targets_list = images_to_levels(all_bbox_targets, - num_level_anchors) - bbox_weights_list = images_to_levels(all_bbox_weights, - num_level_anchors) - return (anchors_list, labels_list, label_weights_list, - bbox_targets_list, bbox_weights_list, num_total_pos, - num_total_neg) - - def _get_target_single(self, - flat_anchors, - valid_flags, - num_level_anchors, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=1, - unmap_outputs=True): - """Compute regression, classification targets for anchors in a single - image. - - Args: - flat_anchors (Tensor): Multi-level anchors of the image, which are - concatenated into a single tensor of shape (num_anchors ,4) - valid_flags (Tensor): Multi level valid flags of the image, - which are concatenated into a single tensor of - shape (num_anchors,). - num_level_anchors Tensor): Number of anchors of each scale level. - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - img_meta (dict): Meta info of the image. - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: N is the number of total anchors in the image. - labels (Tensor): Labels of all anchors in the image with shape - (N,). - label_weights (Tensor): Label weights of all anchor in the - image with shape (N,). - bbox_targets (Tensor): BBox targets of all anchors in the - image with shape (N, 4). - bbox_weights (Tensor): BBox weights of all anchors in the - image with shape (N, 4) - pos_inds (Tensor): Indices of positive anchor with shape - (num_pos,). - neg_inds (Tensor): Indices of negative anchor with shape - (num_neg,). 
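        An added illustration (not part of the original file) of how the
        per-level counts of anchors that survive the border check are
        obtained (see ``get_num_level_anchors_inside`` below):

            >>> import torch
            >>> num_level_anchors = [4, 2]
            >>> inside_flags = torch.tensor([True, True, False, True, True, False])
            >>> [int(f.sum()) for f in torch.split(inside_flags, num_level_anchors)]
            [3, 1]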
- """ - inside_flags = anchor_inside_flags(flat_anchors, valid_flags, - img_meta['img_shape'][:2], - self.train_cfg.allowed_border) - if not inside_flags.any(): - return (None, ) * 7 - # assign gt and sample anchors - anchors = flat_anchors[inside_flags, :] - - num_level_anchors_inside = self.get_num_level_anchors_inside( - num_level_anchors, inside_flags) - assign_result = self.assigner.assign(anchors, num_level_anchors_inside, - gt_bboxes, gt_bboxes_ignore, - gt_labels) - - sampling_result = self.sampler.sample(assign_result, anchors, - gt_bboxes) - - num_valid_anchors = anchors.shape[0] - bbox_targets = torch.zeros_like(anchors) - bbox_weights = torch.zeros_like(anchors) - labels = anchors.new_full((num_valid_anchors, ), - self.num_classes, - dtype=torch.long) - label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - if self.reg_decoded_bbox: - pos_bbox_targets = sampling_result.pos_gt_bboxes - else: - pos_bbox_targets = self.bbox_coder.encode( - sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) - - bbox_targets[pos_inds, :] = pos_bbox_targets - bbox_weights[pos_inds, :] = 1.0 - if gt_labels is None: - # Only rpn gives gt_labels as None - # Foreground is the first class since v2.5.0 - labels[pos_inds] = 0 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if self.train_cfg.pos_weight <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg.pos_weight - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_anchors.size(0) - anchors = unmap(anchors, num_total_anchors, inside_flags) - labels = unmap( - labels, num_total_anchors, inside_flags, fill=self.num_classes) - label_weights = unmap(label_weights, num_total_anchors, - inside_flags) - bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) - bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) - - return (anchors, labels, label_weights, bbox_targets, bbox_weights, - pos_inds, neg_inds) - - def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): - split_inside_flags = torch.split(inside_flags, num_level_anchors) - num_level_anchors_inside = [ - int(flags.sum()) for flags in split_inside_flags - ] - return num_level_anchors_inside diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/autoassign_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/autoassign_head.py deleted file mode 100644 index 446da244b9e78a4e64d8633477600ad6d732e327..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/autoassign_head.py +++ /dev/null @@ -1,527 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import bias_init_with_prob, normal_init -from mmcv.runner import force_fp32 - -from mmdet.core import multi_apply -from mmdet.core.anchor.point_generator import MlvlPointGenerator -from mmdet.core.bbox import bbox_overlaps -from mmdet.models import HEADS -from mmdet.models.dense_heads.atss_head import reduce_mean -from mmdet.models.dense_heads.fcos_head import FCOSHead -from mmdet.models.dense_heads.paa_head import levels_to_images - -EPS = 1e-12 - - -class CenterPrior(nn.Module): - """Center Weighting module to adjust the category-specific prior - distributions. 
- - Args: - force_topk (bool): When no point falls into gt_bbox, forcibly - select the k points closest to the center to calculate - the center prior. Defaults to False. - topk (int): The number of points used to calculate the - center prior when no point falls in gt_bbox. Only work when - force_topk if True. Defaults to 9. - num_classes (int): The class number of dataset. Defaults to 80. - strides (tuple[int]): The stride of each input feature map. Defaults - to (8, 16, 32, 64, 128). - """ - - def __init__(self, - force_topk=False, - topk=9, - num_classes=80, - strides=(8, 16, 32, 64, 128)): - super(CenterPrior, self).__init__() - self.mean = nn.Parameter(torch.zeros(num_classes, 2)) - self.sigma = nn.Parameter(torch.ones(num_classes, 2)) - self.strides = strides - self.force_topk = force_topk - self.topk = topk - - def forward(self, anchor_points_list, gt_bboxes, labels, - inside_gt_bbox_mask): - """Get the center prior of each point on the feature map for each - instance. - - Args: - anchor_points_list (list[Tensor]): list of coordinate - of points on feature map. Each with shape - (num_points, 2). - gt_bboxes (Tensor): The gt_bboxes with shape of - (num_gt, 4). - labels (Tensor): The gt_labels with shape of (num_gt). - inside_gt_bbox_mask (Tensor): Tensor of bool type, - with shape of (num_points, num_gt), each - value is used to mark whether this point falls - within a certain gt. - - Returns: - tuple(Tensor): - - - center_prior_weights(Tensor): Float tensor with shape \ - of (num_points, num_gt). Each value represents \ - the center weighting coefficient. - - inside_gt_bbox_mask (Tensor): Tensor of bool type, \ - with shape of (num_points, num_gt), each \ - value is used to mark whether this point falls \ - within a certain gt or is the topk nearest points for \ - a specific gt_bbox. 
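        An added numeric sketch (not from the original file): with the initial
        parameters (mean 0, sigma 1), a point one stride away from the gt
        center along x and aligned along y receives a weight of
        exp(-0.5) before masking:

            >>> import torch
            >>> mean, sigma = torch.zeros(2), torch.ones(2)
            >>> offset = torch.tensor([1.0, 0.0])   # (point - gt_center) / stride
            >>> w = torch.exp(-(offset - mean) ** 2 / (2 * sigma ** 2)).prod()
            >>> round(float(w), 3)
            0.607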
- """ - inside_gt_bbox_mask = inside_gt_bbox_mask.clone() - num_gts = len(labels) - num_points = sum([len(item) for item in anchor_points_list]) - if num_gts == 0: - return gt_bboxes.new_zeros(num_points, - num_gts), inside_gt_bbox_mask - center_prior_list = [] - for slvl_points, stride in zip(anchor_points_list, self.strides): - # slvl_points: points from single level in FPN, has shape (h*w, 2) - # single_level_points has shape (h*w, num_gt, 2) - single_level_points = slvl_points[:, None, :].expand( - (slvl_points.size(0), len(gt_bboxes), 2)) - gt_center_x = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2) - gt_center_y = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2) - gt_center = torch.stack((gt_center_x, gt_center_y), dim=1) - gt_center = gt_center[None] - # instance_center has shape (1, num_gt, 2) - instance_center = self.mean[labels][None] - # instance_sigma has shape (1, num_gt, 2) - instance_sigma = self.sigma[labels][None] - # distance has shape (num_points, num_gt, 2) - distance = (((single_level_points - gt_center) / float(stride) - - instance_center)**2) - center_prior = torch.exp(-distance / - (2 * instance_sigma**2)).prod(dim=-1) - center_prior_list.append(center_prior) - center_prior_weights = torch.cat(center_prior_list, dim=0) - - if self.force_topk: - gt_inds_no_points_inside = torch.nonzero( - inside_gt_bbox_mask.sum(0) == 0).reshape(-1) - if gt_inds_no_points_inside.numel(): - topk_center_index = \ - center_prior_weights[:, gt_inds_no_points_inside].topk( - self.topk, - dim=0)[1] - temp_mask = inside_gt_bbox_mask[:, gt_inds_no_points_inside] - inside_gt_bbox_mask[:, gt_inds_no_points_inside] = \ - torch.scatter(temp_mask, - dim=0, - index=topk_center_index, - src=torch.ones_like( - topk_center_index, - dtype=torch.bool)) - - center_prior_weights[~inside_gt_bbox_mask] = 0 - return center_prior_weights, inside_gt_bbox_mask - - -@HEADS.register_module() -class AutoAssignHead(FCOSHead): - """AutoAssignHead head used in AutoAssign. - - More details can be found in the `paper - `_ . - - Args: - force_topk (bool): Used in center prior initialization to - handle extremely small gt. Default is False. - topk (int): The number of points used to calculate the - center prior when no point falls in gt_bbox. Only work when - force_topk if True. Defaults to 9. - pos_loss_weight (float): The loss weight of positive loss - and with default value 0.25. - neg_loss_weight (float): The loss weight of negative loss - and with default value 0.75. - center_loss_weight (float): The loss weight of center prior - loss and with default value 0.75. - """ - - def __init__(self, - *args, - force_topk=False, - topk=9, - pos_loss_weight=0.25, - neg_loss_weight=0.75, - center_loss_weight=0.75, - **kwargs): - super().__init__(*args, conv_bias=True, **kwargs) - self.center_prior = CenterPrior( - force_topk=force_topk, - topk=topk, - num_classes=self.num_classes, - strides=self.strides) - self.pos_loss_weight = pos_loss_weight - self.neg_loss_weight = neg_loss_weight - self.center_loss_weight = center_loss_weight - self.prior_generator = MlvlPointGenerator(self.strides, offset=0) - - def init_weights(self): - """Initialize weights of the head. 
- - In particular, we have special initialization for classified conv's and - regression conv's bias - """ - - super(AutoAssignHead, self).init_weights() - bias_cls = bias_init_with_prob(0.02) - normal_init(self.conv_cls, std=0.01, bias=bias_cls) - normal_init(self.conv_reg, std=0.01, bias=4.0) - - def forward_single(self, x, scale, stride): - """Forward features of a single scale level. - - Args: - x (Tensor): FPN feature maps of the specified stride. - scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize - the bbox prediction. - stride (int): The corresponding stride for feature maps, only - used to normalize the bbox prediction when self.norm_on_bbox - is True. - - Returns: - tuple: scores for each class, bbox predictions and centerness \ - predictions of input feature maps. - """ - cls_score, bbox_pred, cls_feat, reg_feat = super( - FCOSHead, self).forward_single(x) - centerness = self.conv_centerness(reg_feat) - # scale the bbox_pred of different level - # float to avoid overflow when enabling FP16 - bbox_pred = scale(bbox_pred).float() - # bbox_pred needed for gradient computation has been modified - # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace - # F.relu(bbox_pred) with bbox_pred.clamp(min=0) - bbox_pred = bbox_pred.clamp(min=0) - bbox_pred *= stride - return cls_score, bbox_pred, centerness - - def get_pos_loss_single(self, cls_score, objectness, reg_loss, gt_labels, - center_prior_weights): - """Calculate the positive loss of all points in gt_bboxes. - - Args: - cls_score (Tensor): All category scores for each point on - the feature map. The shape is (num_points, num_class). - objectness (Tensor): Foreground probability of all points, - has shape (num_points, 1). - reg_loss (Tensor): The regression loss of each gt_bbox and each - prediction box, has shape of (num_points, num_gt). - gt_labels (Tensor): The zeros based gt_labels of all gt - with shape of (num_gt,). - center_prior_weights (Tensor): Float tensor with shape - of (num_points, num_gt). Each value represents - the center weighting coefficient. - - Returns: - tuple[Tensor]: - - - pos_loss (Tensor): The positive loss of all points - in the gt_bboxes. - """ - # p_loc: localization confidence - p_loc = torch.exp(-reg_loss) - # p_cls: classification confidence - p_cls = (cls_score * objectness)[:, gt_labels] - # p_pos: joint confidence indicator - p_pos = p_cls * p_loc - - # 3 is a hyper-parameter to control the contributions of high and - # low confidence locations towards positive losses. - confidence_weight = torch.exp(p_pos * 3) - p_pos_weight = (confidence_weight * center_prior_weights) / ( - (confidence_weight * center_prior_weights).sum( - 0, keepdim=True)).clamp(min=EPS) - reweighted_p_pos = (p_pos * p_pos_weight).sum(0) - pos_loss = F.binary_cross_entropy( - reweighted_p_pos, - torch.ones_like(reweighted_p_pos), - reduction='none') - pos_loss = pos_loss.sum() * self.pos_loss_weight - return pos_loss, - - def get_neg_loss_single(self, cls_score, objectness, gt_labels, ious, - inside_gt_bbox_mask): - """Calculate the negative loss of all points in feature map. - - Args: - cls_score (Tensor): All category scores for each point on - the feature map. The shape is (num_points, num_class). - objectness (Tensor): Foreground probability of all points - and is shape of (num_points, 1). - gt_labels (Tensor): The zeros based label of all gt with shape of - (num_gt). - ious (Tensor): Float tensor with shape of (num_points, num_gt). - Each value represent the iou of pred_bbox and gt_bboxes. 
- inside_gt_bbox_mask (Tensor): Tensor of bool type, - with shape of (num_points, num_gt), each - value is used to mark whether this point falls - within a certain gt. - - Returns: - tuple[Tensor]: - - - neg_loss (Tensor): The negative loss of all points - in the feature map. - """ - num_gts = len(gt_labels) - joint_conf = (cls_score * objectness) - p_neg_weight = torch.ones_like(joint_conf) - if num_gts > 0: - # the order of dinmension would affect the value of - # p_neg_weight, we strictly follow the original - # implementation. - inside_gt_bbox_mask = inside_gt_bbox_mask.permute(1, 0) - ious = ious.permute(1, 0) - - foreground_idxs = torch.nonzero(inside_gt_bbox_mask, as_tuple=True) - temp_weight = (1 / (1 - ious[foreground_idxs]).clamp_(EPS)) - - def normalize(x): - return (x - x.min() + EPS) / (x.max() - x.min() + EPS) - - for instance_idx in range(num_gts): - idxs = foreground_idxs[0] == instance_idx - if idxs.any(): - temp_weight[idxs] = normalize(temp_weight[idxs]) - - p_neg_weight[foreground_idxs[1], - gt_labels[foreground_idxs[0]]] = 1 - temp_weight - - logits = (joint_conf * p_neg_weight) - neg_loss = ( - logits**2 * F.binary_cross_entropy( - logits, torch.zeros_like(logits), reduction='none')) - neg_loss = neg_loss.sum() * self.neg_loss_weight - return neg_loss, - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses')) - def loss(self, - cls_scores, - bbox_preds, - objectnesses, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute loss of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level, - each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level, each is a 4D-tensor, the channel number is - num_points * 4. - objectnesses (list[Tensor]): objectness for each scale level, each - is a 4D-tensor, the channel number is num_points * 1. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
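        An added note (not from the original file): unlike most dense heads,
        the returned dictionary is keyed by the AutoAssign-specific losses,
        so for a built head and a prepared batch one would expect

            >>> losses = head.loss(cls_scores, bbox_preds, objectnesses,
            ...                    gt_bboxes, gt_labels, img_metas)
            >>> sorted(losses.keys())
            ['loss_center', 'loss_neg', 'loss_pos']

        where ``head`` and the inputs are assumed to come from an AutoAssign
        model and its data pipeline.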
- """ - - assert len(cls_scores) == len(bbox_preds) == len(objectnesses) - all_num_gt = sum([len(item) for item in gt_bboxes]) - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - all_level_points = self.prior_generator.grid_priors( - featmap_sizes, - dtype=bbox_preds[0].dtype, - device=bbox_preds[0].device) - inside_gt_bbox_mask_list, bbox_targets_list = self.get_targets( - all_level_points, gt_bboxes) - - center_prior_weight_list = [] - temp_inside_gt_bbox_mask_list = [] - for gt_bboxe, gt_label, inside_gt_bbox_mask in zip( - gt_bboxes, gt_labels, inside_gt_bbox_mask_list): - center_prior_weight, inside_gt_bbox_mask = \ - self.center_prior(all_level_points, gt_bboxe, gt_label, - inside_gt_bbox_mask) - center_prior_weight_list.append(center_prior_weight) - temp_inside_gt_bbox_mask_list.append(inside_gt_bbox_mask) - inside_gt_bbox_mask_list = temp_inside_gt_bbox_mask_list - mlvl_points = torch.cat(all_level_points, dim=0) - bbox_preds = levels_to_images(bbox_preds) - cls_scores = levels_to_images(cls_scores) - objectnesses = levels_to_images(objectnesses) - - reg_loss_list = [] - ious_list = [] - num_points = len(mlvl_points) - - for bbox_pred, encoded_targets, inside_gt_bbox_mask in zip( - bbox_preds, bbox_targets_list, inside_gt_bbox_mask_list): - temp_num_gt = encoded_targets.size(1) - expand_mlvl_points = mlvl_points[:, None, :].expand( - num_points, temp_num_gt, 2).reshape(-1, 2) - encoded_targets = encoded_targets.reshape(-1, 4) - expand_bbox_pred = bbox_pred[:, None, :].expand( - num_points, temp_num_gt, 4).reshape(-1, 4) - decoded_bbox_preds = self.bbox_coder.decode( - expand_mlvl_points, expand_bbox_pred) - decoded_target_preds = self.bbox_coder.decode( - expand_mlvl_points, encoded_targets) - with torch.no_grad(): - ious = bbox_overlaps( - decoded_bbox_preds, decoded_target_preds, is_aligned=True) - ious = ious.reshape(num_points, temp_num_gt) - if temp_num_gt: - ious = ious.max( - dim=-1, keepdim=True).values.repeat(1, temp_num_gt) - else: - ious = ious.new_zeros(num_points, temp_num_gt) - ious[~inside_gt_bbox_mask] = 0 - ious_list.append(ious) - loss_bbox = self.loss_bbox( - decoded_bbox_preds, - decoded_target_preds, - weight=None, - reduction_override='none') - reg_loss_list.append(loss_bbox.reshape(num_points, temp_num_gt)) - - cls_scores = [item.sigmoid() for item in cls_scores] - objectnesses = [item.sigmoid() for item in objectnesses] - pos_loss_list, = multi_apply(self.get_pos_loss_single, cls_scores, - objectnesses, reg_loss_list, gt_labels, - center_prior_weight_list) - pos_avg_factor = reduce_mean( - bbox_pred.new_tensor(all_num_gt)).clamp_(min=1) - pos_loss = sum(pos_loss_list) / pos_avg_factor - - neg_loss_list, = multi_apply(self.get_neg_loss_single, cls_scores, - objectnesses, gt_labels, ious_list, - inside_gt_bbox_mask_list) - neg_avg_factor = sum(item.data.sum() - for item in center_prior_weight_list) - neg_avg_factor = reduce_mean(neg_avg_factor).clamp_(min=1) - neg_loss = sum(neg_loss_list) / neg_avg_factor - - center_loss = [] - for i in range(len(img_metas)): - - if inside_gt_bbox_mask_list[i].any(): - center_loss.append( - len(gt_bboxes[i]) / - center_prior_weight_list[i].sum().clamp_(min=EPS)) - # when width or height of gt_bbox is smaller than stride of p3 - else: - center_loss.append(center_prior_weight_list[i].sum() * 0) - - center_loss = torch.stack(center_loss).mean() * self.center_loss_weight - - # avoid dead lock in DDP - if all_num_gt == 0: - pos_loss = bbox_preds[0].sum() * 0 - dummy_center_prior_loss = self.center_prior.mean.sum( - 
) * 0 + self.center_prior.sigma.sum() * 0 - center_loss = objectnesses[0].sum() * 0 + dummy_center_prior_loss - - loss = dict( - loss_pos=pos_loss, loss_neg=neg_loss, loss_center=center_loss) - - return loss - - def get_targets(self, points, gt_bboxes_list): - """Compute regression targets and each point inside or outside gt_bbox - in multiple images. - - Args: - points (list[Tensor]): Points of all fpn level, each has shape - (num_points, 2). - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, - each has shape (num_gt, 4). - - Returns: - tuple(list[Tensor]): - - - inside_gt_bbox_mask_list (list[Tensor]): Each - Tensor is with bool type and shape of - (num_points, num_gt), each value - is used to mark whether this point falls - within a certain gt. - - concat_lvl_bbox_targets (list[Tensor]): BBox - targets of each level. Each tensor has shape - (num_points, num_gt, 4). - """ - - concat_points = torch.cat(points, dim=0) - # the number of points per img, per lvl - inside_gt_bbox_mask_list, bbox_targets_list = multi_apply( - self._get_target_single, gt_bboxes_list, points=concat_points) - return inside_gt_bbox_mask_list, bbox_targets_list - - def _get_target_single(self, gt_bboxes, points): - """Compute regression targets and each point inside or outside gt_bbox - for a single image. - - Args: - gt_bboxes (Tensor): gt_bbox of single image, has shape - (num_gt, 4). - points (Tensor): Points of all fpn level, has shape - (num_points, 2). - - Returns: - tuple[Tensor]: Containing the following Tensors: - - - inside_gt_bbox_mask (Tensor): Bool tensor with shape - (num_points, num_gt), each value is used to mark - whether this point falls within a certain gt. - - bbox_targets (Tensor): BBox targets of each points with - each gt_bboxes, has shape (num_points, num_gt, 4). - """ - num_points = points.size(0) - num_gts = gt_bboxes.size(0) - gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) - xs, ys = points[:, 0], points[:, 1] - xs = xs[:, None] - ys = ys[:, None] - left = xs - gt_bboxes[..., 0] - right = gt_bboxes[..., 2] - xs - top = ys - gt_bboxes[..., 1] - bottom = gt_bboxes[..., 3] - ys - bbox_targets = torch.stack((left, top, right, bottom), -1) - if num_gts: - inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 - else: - inside_gt_bbox_mask = bbox_targets.new_zeros((num_points, num_gts), - dtype=torch.bool) - - return inside_gt_bbox_mask, bbox_targets - - def _get_points_single(self, - featmap_size, - stride, - dtype, - device, - flatten=False): - """Almost the same as the implementation in fcos, we remove half stride - offset to align with the original implementation. - - This function will be deprecated soon. - """ - warnings.warn( - '`_get_points_single` in `AutoAssignHead` will be ' - 'deprecated soon, we support a multi level point generator now' - 'you can get points of a single level feature map ' - 'with `self.prior_generator.single_level_grid_priors` ') - y, x = super(FCOSHead, - self)._get_points_single(featmap_size, stride, dtype, - device) - points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride), - dim=-1) - return points diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/base_dense_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/base_dense_head.py deleted file mode 100644 index 0c7abb7b9b83f034afe06482a659b39ac1d63139..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/base_dense_head.py +++ /dev/null @@ -1,526 +0,0 @@ -# Copyright (c) OpenMMLab. 
All rights reserved. -from abc import ABCMeta, abstractmethod - -import torch -from mmcv.cnn.utils.weight_init import constant_init -from mmcv.ops import batched_nms -from mmcv.runner import BaseModule, force_fp32 - -from mmdet.core.utils import filter_scores_and_topk, select_single_mlvl - - -class BaseDenseHead(BaseModule, metaclass=ABCMeta): - """Base class for DenseHeads.""" - - def __init__(self, init_cfg=None): - super(BaseDenseHead, self).__init__(init_cfg) - - def init_weights(self): - super(BaseDenseHead, self).init_weights() - # avoid init_cfg overwrite the initialization of `conv_offset` - for m in self.modules(): - # DeformConv2dPack, ModulatedDeformConv2dPack - if hasattr(m, 'conv_offset'): - constant_init(m.conv_offset, 0) - - @abstractmethod - def loss(self, **kwargs): - """Compute losses of the head.""" - pass - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def get_bboxes(self, - cls_scores, - bbox_preds, - score_factors=None, - img_metas=None, - cfg=None, - rescale=False, - with_nms=True, - **kwargs): - """Transform network outputs of a batch into bbox results. - - Note: When score_factors is not None, the cls_scores are - usually multiplied by it then obtain the real score used in NMS, - such as CenterNess in FCOS, IoU branch in ATSS. - - Args: - cls_scores (list[Tensor]): Classification scores for all - scale levels, each is a 4D-tensor, has shape - (batch_size, num_priors * num_classes, H, W). - bbox_preds (list[Tensor]): Box energies / deltas for all - scale levels, each is a 4D-tensor, has shape - (batch_size, num_priors * 4, H, W). - score_factors (list[Tensor], Optional): Score factor for - all scale level, each is a 4D-tensor, has shape - (batch_size, num_priors * 1, H, W). Default None. - img_metas (list[dict], Optional): Image meta info. Default None. - cfg (mmcv.Config, Optional): Test / postprocessing configuration, - if None, test_cfg would be used. Default None. - rescale (bool): If True, return boxes in original image space. - Default False. - with_nms (bool): If True, do nms before return boxes. - Default True. - - Returns: - list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is an (n, 5) tensor, where the first 4 columns - are bounding box positions (tl_x, tl_y, br_x, br_y) and the - 5-th column is a score between 0 and 1. The second item is a - (n,) tensor where each item is the predicted class label of - the corresponding box. - """ - assert len(cls_scores) == len(bbox_preds) - - if score_factors is None: - # e.g. Retina, FreeAnchor, Foveabox, etc. - with_score_factors = False - else: - # e.g. FCOS, PAA, ATSS, AutoAssign, etc. 
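As the `get_bboxes` docstring above notes, `score_factors` (FCOS centerness, the ATSS IoU branch, etc.) are multiplied into the classification scores to form the value that NMS actually ranks on. A minimal, self-contained sketch of that combination follows; the tensor names and shapes are illustrative only and are not part of `BaseDenseHead`.

```
import torch

def combine_scores(cls_logits, score_factor_logits):
    """Fold a per-prior score factor (e.g. centerness) into the class scores."""
    cls_scores = cls_logits.sigmoid()              # (num_priors, num_classes)
    score_factor = score_factor_logits.sigmoid()   # (num_priors, 1)
    return cls_scores * score_factor               # what NMS ranks on

scores = combine_scores(torch.randn(100, 80), torch.randn(100, 1))
print(scores.shape)  # torch.Size([100, 80])
```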
- with_score_factors = True - assert len(cls_scores) == len(score_factors) - - num_levels = len(cls_scores) - - featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] - mlvl_priors = self.prior_generator.grid_priors( - featmap_sizes, - dtype=cls_scores[0].dtype, - device=cls_scores[0].device) - - result_list = [] - - for img_id in range(len(img_metas)): - img_meta = img_metas[img_id] - cls_score_list = select_single_mlvl(cls_scores, img_id) - bbox_pred_list = select_single_mlvl(bbox_preds, img_id) - if with_score_factors: - score_factor_list = select_single_mlvl(score_factors, img_id) - else: - score_factor_list = [None for _ in range(num_levels)] - - results = self._get_bboxes_single(cls_score_list, bbox_pred_list, - score_factor_list, mlvl_priors, - img_meta, cfg, rescale, with_nms, - **kwargs) - result_list.append(results) - return result_list - - def _get_bboxes_single(self, - cls_score_list, - bbox_pred_list, - score_factor_list, - mlvl_priors, - img_meta, - cfg, - rescale=False, - with_nms=True, - **kwargs): - """Transform outputs of a single image into bbox predictions. - - Args: - cls_score_list (list[Tensor]): Box scores from all scale - levels of a single image, each item has shape - (num_priors * num_classes, H, W). - bbox_pred_list (list[Tensor]): Box energies / deltas from - all scale levels of a single image, each item has shape - (num_priors * 4, H, W). - score_factor_list (list[Tensor]): Score factor from all scale - levels of a single image, each item has shape - (num_priors * 1, H, W). - mlvl_priors (list[Tensor]): Each element in the list is - the priors of a single level in feature pyramid. In all - anchor-based methods, it has shape (num_priors, 4). In - all anchor-free methods, it has shape (num_priors, 2) - when `with_stride=True`, otherwise it still has shape - (num_priors, 4). - img_meta (dict): Image meta info. - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - tuple[Tensor]: Results of detected bboxes and labels. If with_nms - is False and mlvl_score_factor is None, return mlvl_bboxes and - mlvl_scores, else return mlvl_bboxes, mlvl_scores and - mlvl_score_factor. Usually with_nms is False is used for aug - test. If with_nms is True, then return the following format - - - det_bboxes (Tensor): Predicted bboxes with shape \ - [num_bboxes, 5], where the first 4 columns are bounding \ - box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ - column are scores between 0 and 1. - - det_labels (Tensor): Predicted labels of the corresponding \ - box with shape [num_bboxes]. - """ - if score_factor_list[0] is None: - # e.g. Retina, FreeAnchor, etc. - with_score_factors = False - else: - # e.g. FCOS, PAA, ATSS, etc. 
- with_score_factors = True - - cfg = self.test_cfg if cfg is None else cfg - img_shape = img_meta['img_shape'] - nms_pre = cfg.get('nms_pre', -1) - - mlvl_bboxes = [] - mlvl_scores = [] - mlvl_labels = [] - if with_score_factors: - mlvl_score_factors = [] - else: - mlvl_score_factors = None - for level_idx, (cls_score, bbox_pred, score_factor, priors) in \ - enumerate(zip(cls_score_list, bbox_pred_list, - score_factor_list, mlvl_priors)): - - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - - bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) - if with_score_factors: - score_factor = score_factor.permute(1, 2, - 0).reshape(-1).sigmoid() - cls_score = cls_score.permute(1, 2, - 0).reshape(-1, self.cls_out_channels) - if self.use_sigmoid_cls: - scores = cls_score.sigmoid() - else: - # remind that we set FG labels to [0, num_class-1] - # since mmdet v2.0 - # BG cat_id: num_class - scores = cls_score.softmax(-1)[:, :-1] - - # After https://github.com/open-mmlab/mmdetection/pull/6268/, - # this operation keeps fewer bboxes under the same `nms_pre`. - # There is no difference in performance for most models. If you - # find a slight drop in performance, you can set a larger - # `nms_pre` than before. - results = filter_scores_and_topk( - scores, cfg.score_thr, nms_pre, - dict(bbox_pred=bbox_pred, priors=priors)) - scores, labels, keep_idxs, filtered_results = results - - bbox_pred = filtered_results['bbox_pred'] - priors = filtered_results['priors'] - - if with_score_factors: - score_factor = score_factor[keep_idxs] - - bboxes = self.bbox_coder.decode( - priors, bbox_pred, max_shape=img_shape) - - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_labels.append(labels) - if with_score_factors: - mlvl_score_factors.append(score_factor) - - return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, - img_meta['scale_factor'], cfg, rescale, - with_nms, mlvl_score_factors, **kwargs) - - def _bbox_post_process(self, - mlvl_scores, - mlvl_labels, - mlvl_bboxes, - scale_factor, - cfg, - rescale=False, - with_nms=True, - mlvl_score_factors=None, - **kwargs): - """bbox post-processing method. - - The boxes would be rescaled to the original image scale and do - the nms operation. Usually `with_nms` is False is used for aug test. - - Args: - mlvl_scores (list[Tensor]): Box scores from all scale - levels of a single image, each item has shape - (num_bboxes, ). - mlvl_labels (list[Tensor]): Box class labels from all scale - levels of a single image, each item has shape - (num_bboxes, ). - mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale - levels of a single image, each item has shape (num_bboxes, 4). - scale_factor (ndarray, optional): Scale factor of the image arange - as (w_scale, h_scale, w_scale, h_scale). - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - mlvl_score_factors (list[Tensor], optional): Score factor from - all scale levels of a single image, each item has shape - (num_bboxes, ). Default: None. - - Returns: - tuple[Tensor]: Results of detected bboxes and labels. If with_nms - is False and mlvl_score_factor is None, return mlvl_bboxes and - mlvl_scores, else return mlvl_bboxes, mlvl_scores and - mlvl_score_factor. Usually with_nms is False is used for aug - test. 
If with_nms is True, then return the following format - - - det_bboxes (Tensor): Predicted bboxes with shape \ - [num_bboxes, 5], where the first 4 columns are bounding \ - box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ - column are scores between 0 and 1. - - det_labels (Tensor): Predicted labels of the corresponding \ - box with shape [num_bboxes]. - """ - assert len(mlvl_scores) == len(mlvl_bboxes) == len(mlvl_labels) - - mlvl_bboxes = torch.cat(mlvl_bboxes) - if rescale: - mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) - mlvl_scores = torch.cat(mlvl_scores) - mlvl_labels = torch.cat(mlvl_labels) - - if mlvl_score_factors is not None: - # TODO: Add sqrt operation in order to be consistent with - # the paper. - mlvl_score_factors = torch.cat(mlvl_score_factors) - mlvl_scores = mlvl_scores * mlvl_score_factors - - if with_nms: - if mlvl_bboxes.numel() == 0: - det_bboxes = torch.cat([mlvl_bboxes, mlvl_scores[:, None]], -1) - return det_bboxes, mlvl_labels - - det_bboxes, keep_idxs = batched_nms(mlvl_bboxes, mlvl_scores, - mlvl_labels, cfg.nms) - det_bboxes = det_bboxes[:cfg.max_per_img] - det_labels = mlvl_labels[keep_idxs][:cfg.max_per_img] - return det_bboxes, det_labels - else: - return mlvl_bboxes, mlvl_scores, mlvl_labels - - def forward_train(self, - x, - img_metas, - gt_bboxes, - gt_labels=None, - gt_bboxes_ignore=None, - proposal_cfg=None, - **kwargs): - """ - Args: - x (list[Tensor]): Features from FPN. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - proposal_cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used - - Returns: - tuple: - losses: (dict[str, Tensor]): A dictionary of loss components. - proposal_list (list[Tensor]): Proposals of each image. - """ - outs = self(x) - if gt_labels is None: - loss_inputs = outs + (gt_bboxes, img_metas) - else: - loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) - losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) - if proposal_cfg is None: - return losses - else: - proposal_list = self.get_bboxes( - *outs, img_metas=img_metas, cfg=proposal_cfg) - return losses, proposal_list - - def simple_test(self, feats, img_metas, rescale=False): - """Test function without test-time augmentation. - - Args: - feats (tuple[torch.Tensor]): Multi-level features from the - upstream network, each is a 4D-tensor. - img_metas (list[dict]): List of image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is ``bboxes`` with shape (n, 5), - where 5 represent (tl_x, tl_y, br_x, br_y, score). - The shape of the second tensor in the tuple is ``labels`` - with shape (n, ). - """ - return self.simple_test_bboxes(feats, img_metas, rescale=rescale) - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def onnx_export(self, - cls_scores, - bbox_preds, - score_factors=None, - img_metas=None, - with_nms=True): - """Transform network output for a batch into bbox predictions. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - with shape (N, num_points * num_classes, H, W). 
- bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_points * 4, H, W). - score_factors (list[Tensor]): score_factors for each s - cale level with shape (N, num_points * 1, H, W). - Default: None. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. Default: None. - with_nms (bool): Whether apply nms to the bboxes. Default: True. - - Returns: - tuple[Tensor, Tensor] | list[tuple]: When `with_nms` is True, - it is tuple[Tensor, Tensor], first tensor bboxes with shape - [N, num_det, 5], 5 arrange as (x1, y1, x2, y2, score) - and second element is class labels of shape [N, num_det]. - When `with_nms` is False, first tensor is bboxes with - shape [N, num_det, 4], second tensor is raw score has - shape [N, num_det, num_classes]. - """ - assert len(cls_scores) == len(bbox_preds) - - num_levels = len(cls_scores) - - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - mlvl_priors = self.prior_generator.grid_priors( - featmap_sizes, - dtype=bbox_preds[0].dtype, - device=bbox_preds[0].device) - - mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)] - mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)] - - assert len( - img_metas - ) == 1, 'Only support one input image while in exporting to ONNX' - img_shape = img_metas[0]['img_shape_for_onnx'] - - cfg = self.test_cfg - assert len(cls_scores) == len(bbox_preds) == len(mlvl_priors) - device = cls_scores[0].device - batch_size = cls_scores[0].shape[0] - # convert to tensor to keep tracing - nms_pre_tensor = torch.tensor( - cfg.get('nms_pre', -1), device=device, dtype=torch.long) - - # e.g. Retina, FreeAnchor, etc. - if score_factors is None: - with_score_factors = False - mlvl_score_factor = [None for _ in range(num_levels)] - else: - # e.g. FCOS, PAA, ATSS, etc. - with_score_factors = True - mlvl_score_factor = [ - score_factors[i].detach() for i in range(num_levels) - ] - mlvl_score_factors = [] - - mlvl_batch_bboxes = [] - mlvl_scores = [] - - for cls_score, bbox_pred, score_factors, priors in zip( - mlvl_cls_scores, mlvl_bbox_preds, mlvl_score_factor, - mlvl_priors): - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - - scores = cls_score.permute(0, 2, 3, - 1).reshape(batch_size, -1, - self.cls_out_channels) - if self.use_sigmoid_cls: - scores = scores.sigmoid() - nms_pre_score = scores - else: - scores = scores.softmax(-1) - nms_pre_score = scores - - if with_score_factors: - score_factors = score_factors.permute(0, 2, 3, 1).reshape( - batch_size, -1).sigmoid() - bbox_pred = bbox_pred.permute(0, 2, 3, - 1).reshape(batch_size, -1, 4) - priors = priors.expand(batch_size, -1, priors.size(-1)) - # Get top-k predictions - from mmdet.core.export import get_k_for_topk - nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1]) - if nms_pre > 0: - - if with_score_factors: - nms_pre_score = (nms_pre_score * score_factors[..., None]) - else: - nms_pre_score = nms_pre_score - - # Get maximum scores for foreground classes. 
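For reference, a simplified single-image version of this pre-NMS top-k step might look like the sketch below: take the best class score per prior, keep the `nms_pre` highest, and gather the matching predictions. The helper is hypothetical and omits the batching and index tricks the ONNX-tracing code above needs.

```
import torch

def topk_before_nms(scores, bbox_pred, priors, nms_pre):
    # scores: (num_priors, num_classes) after sigmoid/softmax
    max_scores, _ = scores.max(dim=-1)            # best class score per prior
    k = min(nms_pre, max_scores.size(0))
    _, topk_inds = max_scores.topk(k)
    return scores[topk_inds], bbox_pred[topk_inds], priors[topk_inds]

s, b, p = topk_before_nms(torch.rand(1000, 80), torch.rand(1000, 4),
                          torch.rand(1000, 4), nms_pre=100)
print(s.shape, b.shape, p.shape)  # (100, 80) (100, 4) (100, 4)
```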
- if self.use_sigmoid_cls: - max_scores, _ = nms_pre_score.max(-1) - else: - # remind that we set FG labels to [0, num_class-1] - # since mmdet v2.0 - # BG cat_id: num_class - max_scores, _ = nms_pre_score[..., :-1].max(-1) - _, topk_inds = max_scores.topk(nms_pre) - - batch_inds = torch.arange( - batch_size, device=bbox_pred.device).view( - -1, 1).expand_as(topk_inds).long() - # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 - transformed_inds = bbox_pred.shape[1] * batch_inds + topk_inds - priors = priors.reshape( - -1, priors.size(-1))[transformed_inds, :].reshape( - batch_size, -1, priors.size(-1)) - bbox_pred = bbox_pred.reshape(-1, - 4)[transformed_inds, :].reshape( - batch_size, -1, 4) - scores = scores.reshape( - -1, self.cls_out_channels)[transformed_inds, :].reshape( - batch_size, -1, self.cls_out_channels) - if with_score_factors: - score_factors = score_factors.reshape( - -1, 1)[transformed_inds].reshape(batch_size, -1) - - bboxes = self.bbox_coder.decode( - priors, bbox_pred, max_shape=img_shape) - - mlvl_batch_bboxes.append(bboxes) - mlvl_scores.append(scores) - if with_score_factors: - mlvl_score_factors.append(score_factors) - - batch_bboxes = torch.cat(mlvl_batch_bboxes, dim=1) - batch_scores = torch.cat(mlvl_scores, dim=1) - if with_score_factors: - batch_score_factors = torch.cat(mlvl_score_factors, dim=1) - - # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment - - from mmdet.core.export import add_dummy_nms_for_onnx - - if not self.use_sigmoid_cls: - batch_scores = batch_scores[..., :self.num_classes] - - if with_score_factors: - batch_scores = batch_scores * (batch_score_factors.unsqueeze(2)) - - if with_nms: - max_output_boxes_per_class = cfg.nms.get( - 'max_output_boxes_per_class', 200) - iou_threshold = cfg.nms.get('iou_threshold', 0.5) - score_threshold = cfg.score_thr - nms_pre = cfg.get('deploy_nms_pre', -1) - return add_dummy_nms_for_onnx(batch_bboxes, batch_scores, - max_output_boxes_per_class, - iou_threshold, score_threshold, - nms_pre, cfg.max_per_img) - else: - return batch_bboxes, batch_scores diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/base_mask_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/base_mask_head.py deleted file mode 100644 index 5eb94fb287e223888c0181f1debae0d84b306bf2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/base_mask_head.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta, abstractmethod - -from mmcv.runner import BaseModule - - -class BaseMaskHead(BaseModule, metaclass=ABCMeta): - """Base class for mask heads used in One-Stage Instance Segmentation.""" - - def __init__(self, init_cfg): - super(BaseMaskHead, self).__init__(init_cfg) - - @abstractmethod - def loss(self, **kwargs): - pass - - @abstractmethod - def get_results(self, **kwargs): - """Get precessed :obj:`InstanceData` of multiple images.""" - pass - - def forward_train(self, - x, - gt_labels, - gt_masks, - img_metas, - gt_bboxes=None, - gt_bboxes_ignore=None, - positive_infos=None, - **kwargs): - """ - Args: - x (list[Tensor] | tuple[Tensor]): Features from FPN. - Each has a shape (B, C, H, W). - gt_labels (list[Tensor]): Ground truth labels of all images. - each has a shape (num_gts,). - gt_masks (list[Tensor]) : Masks for each bbox, has a shape - (num_gts, h , w). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. 
- gt_bboxes (list[Tensor]): Ground truth bboxes of the image, - each item has a shape (num_gts, 4). - gt_bboxes_ignore (list[Tensor], None): Ground truth bboxes to be - ignored, each item has a shape (num_ignored_gts, 4). - positive_infos (list[:obj:`InstanceData`], optional): Information - of positive samples. Used when the label assignment is - done outside the MaskHead, e.g., in BboxHead in - YOLACT or CondInst, etc. When the label assignment is done in - MaskHead, it would be None, like SOLO. All values - in it should have shape (num_positive_samples, *). - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - if positive_infos is None: - outs = self(x) - else: - outs = self(x, positive_infos) - - assert isinstance(outs, tuple), 'Forward results should be a tuple, ' \ - 'even if only one item is returned' - loss = self.loss( - *outs, - gt_labels=gt_labels, - gt_masks=gt_masks, - img_metas=img_metas, - gt_bboxes=gt_bboxes, - gt_bboxes_ignore=gt_bboxes_ignore, - positive_infos=positive_infos, - **kwargs) - return loss - - def simple_test(self, - feats, - img_metas, - rescale=False, - instances_list=None, - **kwargs): - """Test function without test-time augmentation. - - Args: - feats (tuple[torch.Tensor]): Multi-level features from the - upstream network, each is a 4D-tensor. - img_metas (list[dict]): List of image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - instances_list (list[obj:`InstanceData`], optional): Detection - results of each image after the post process. Only exist - if there is a `bbox_head`, like `YOLACT`, `CondInst`, etc. - - Returns: - list[obj:`InstanceData`]: Instance segmentation \ - results of each image after the post process. \ - Each item usually contains following keys. \ - - - scores (Tensor): Classification scores, has a shape - (num_instance,) - - labels (Tensor): Has a shape (num_instances,). - - masks (Tensor): Processed mask results, has a - shape (num_instances, h, w). - """ - if instances_list is None: - outs = self(feats) - else: - outs = self(feats, instances_list=instances_list) - mask_inputs = outs + (img_metas, ) - results_list = self.get_results( - *mask_inputs, - rescale=rescale, - instances_list=instances_list, - **kwargs) - return results_list - - def onnx_export(self, img, img_metas): - raise NotImplementedError(f'{self.__class__.__name__} does ' - f'not support ONNX EXPORT') diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/cascade_rpn_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/cascade_rpn_head.py deleted file mode 100644 index 69347e00c436430b57413a81cb5cb49bb52f1841..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/cascade_rpn_head.py +++ /dev/null @@ -1,801 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from __future__ import division -import copy -import warnings - -import torch -import torch.nn as nn -from mmcv import ConfigDict -from mmcv.ops import DeformConv2d, batched_nms -from mmcv.runner import BaseModule, ModuleList - -from mmdet.core import (RegionAssigner, build_assigner, build_sampler, - images_to_levels, multi_apply) -from mmdet.core.utils import select_single_mlvl -from ..builder import HEADS, build_head -from .base_dense_head import BaseDenseHead -from .rpn_head import RPNHead - - -class AdaptiveConv(BaseModule): - """AdaptiveConv used to adapt the sampling location with the anchors. 
- - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the convolution - kernel_size (int or tuple): Size of the conv kernel. Default: 3 - stride (int or tuple, optional): Stride of the convolution. Default: 1 - padding (int or tuple, optional): Zero-padding added to both sides of - the input. Default: 1 - dilation (int or tuple, optional): Spacing between kernel elements. - Default: 3 - groups (int, optional): Number of blocked connections from input - channels to output channels. Default: 1 - bias (bool, optional): If set True, adds a learnable bias to the - output. Default: False. - type (str, optional): Type of adaptive conv, can be either 'offset' - (arbitrary anchors) or 'dilation' (uniform anchor). - Default: 'dilation'. - init_cfg (dict or list[dict], optional): Initialization config dict. - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1, - dilation=3, - groups=1, - bias=False, - type='dilation', - init_cfg=dict( - type='Normal', std=0.01, override=dict(name='conv'))): - super(AdaptiveConv, self).__init__(init_cfg) - assert type in ['offset', 'dilation'] - self.adapt_type = type - - assert kernel_size == 3, 'Adaptive conv only supports kernels 3' - if self.adapt_type == 'offset': - assert stride == 1 and padding == 1 and groups == 1, \ - 'Adaptive conv offset mode only supports padding: {1}, ' \ - f'stride: {1}, groups: {1}' - self.conv = DeformConv2d( - in_channels, - out_channels, - kernel_size, - padding=padding, - stride=stride, - groups=groups, - bias=bias) - else: - self.conv = nn.Conv2d( - in_channels, - out_channels, - kernel_size, - padding=dilation, - dilation=dilation) - - def forward(self, x, offset): - """Forward function.""" - if self.adapt_type == 'offset': - N, _, H, W = x.shape - assert offset is not None - assert H * W == offset.shape[1] - # reshape [N, NA, 18] to (N, 18, H, W) - offset = offset.permute(0, 2, 1).reshape(N, -1, H, W) - offset = offset.contiguous() - x = self.conv(x, offset) - else: - assert offset is None - x = self.conv(x) - return x - - -@HEADS.register_module() -class StageCascadeRPNHead(RPNHead): - """Stage of CascadeRPNHead. - - Args: - in_channels (int): Number of channels in the input feature map. - anchor_generator (dict): anchor generator config. - adapt_cfg (dict): adaptation config. - bridged_feature (bool, optional): whether update rpn feature. - Default: False. - with_cls (bool, optional): whether use classification branch. - Default: True. - sampling (bool, optional): whether use sampling. Default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - in_channels, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[1.0], - strides=[4, 8, 16, 32, 64]), - adapt_cfg=dict(type='dilation', dilation=3), - bridged_feature=False, - with_cls=True, - sampling=True, - init_cfg=None, - **kwargs): - self.with_cls = with_cls - self.anchor_strides = anchor_generator['strides'] - self.anchor_scales = anchor_generator['scales'] - self.bridged_feature = bridged_feature - self.adapt_cfg = adapt_cfg - super(StageCascadeRPNHead, self).__init__( - in_channels, - anchor_generator=anchor_generator, - init_cfg=init_cfg, - **kwargs) - - # override sampling and sampler - self.sampling = sampling - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - # use PseudoSampler when sampling is False - if self.sampling and hasattr(self.train_cfg, 'sampler'): - sampler_cfg = self.train_cfg.sampler - else: - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - - if init_cfg is None: - self.init_cfg = dict( - type='Normal', std=0.01, override=[dict(name='rpn_reg')]) - if self.with_cls: - self.init_cfg['override'].append(dict(name='rpn_cls')) - - def _init_layers(self): - """Init layers of a CascadeRPN stage.""" - self.rpn_conv = AdaptiveConv(self.in_channels, self.feat_channels, - **self.adapt_cfg) - if self.with_cls: - self.rpn_cls = nn.Conv2d(self.feat_channels, - self.num_anchors * self.cls_out_channels, - 1) - self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) - self.relu = nn.ReLU(inplace=True) - - def forward_single(self, x, offset): - """Forward function of single scale.""" - bridged_x = x - x = self.relu(self.rpn_conv(x, offset)) - if self.bridged_feature: - bridged_x = x # update feature - cls_score = self.rpn_cls(x) if self.with_cls else None - bbox_pred = self.rpn_reg(x) - return bridged_x, cls_score, bbox_pred - - def forward(self, feats, offset_list=None): - """Forward function.""" - if offset_list is None: - offset_list = [None for _ in range(len(feats))] - return multi_apply(self.forward_single, feats, offset_list) - - def _region_targets_single(self, - anchors, - valid_flags, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - featmap_sizes, - label_channels=1): - """Get anchor targets based on region for single level.""" - assign_result = self.assigner.assign( - anchors, - valid_flags, - gt_bboxes, - img_meta, - featmap_sizes, - self.anchor_scales[0], - self.anchor_strides, - gt_bboxes_ignore=gt_bboxes_ignore, - gt_labels=None, - allowed_border=self.train_cfg.allowed_border) - flat_anchors = torch.cat(anchors) - sampling_result = self.sampler.sample(assign_result, flat_anchors, - gt_bboxes) - - num_anchors = flat_anchors.shape[0] - bbox_targets = torch.zeros_like(flat_anchors) - bbox_weights = torch.zeros_like(flat_anchors) - labels = flat_anchors.new_zeros(num_anchors, dtype=torch.long) - label_weights = flat_anchors.new_zeros(num_anchors, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - if not self.reg_decoded_bbox: - pos_bbox_targets = self.bbox_coder.encode( - sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) - else: - pos_bbox_targets = sampling_result.pos_gt_bboxes - bbox_targets[pos_inds, :] = pos_bbox_targets - bbox_weights[pos_inds, :] = 1.0 - if gt_labels is None: - labels[pos_inds] = 1 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if self.train_cfg.pos_weight <= 
0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg.pos_weight - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, - neg_inds) - - def region_targets(self, - anchor_list, - valid_flag_list, - gt_bboxes_list, - img_metas, - featmap_sizes, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=1, - unmap_outputs=True): - """See :func:`StageCascadeRPNHead.get_targets`.""" - num_imgs = len(img_metas) - assert len(anchor_list) == len(valid_flag_list) == num_imgs - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, - pos_inds_list, neg_inds_list) = multi_apply( - self._region_targets_single, - anchor_list, - valid_flag_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - featmap_sizes=featmap_sizes, - label_channels=label_channels) - # no valid anchors - if any([labels is None for labels in all_labels]): - return None - # sampled anchors of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - # split targets to a list w.r.t. multiple levels - labels_list = images_to_levels(all_labels, num_level_anchors) - label_weights_list = images_to_levels(all_label_weights, - num_level_anchors) - bbox_targets_list = images_to_levels(all_bbox_targets, - num_level_anchors) - bbox_weights_list = images_to_levels(all_bbox_weights, - num_level_anchors) - return (labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) - - def get_targets(self, - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - featmap_sizes, - gt_bboxes_ignore=None, - label_channels=1): - """Compute regression and classification targets for anchors. - - Args: - anchor_list (list[list]): Multi level anchors of each image. - valid_flag_list (list[list]): Multi level valid flags of each - image. - gt_bboxes (list[Tensor]): Ground truth bboxes of each image. - img_metas (list[dict]): Meta info of each image. - featmap_sizes (list[Tensor]): Feature mapsize each level - gt_bboxes_ignore (list[Tensor]): Ignore bboxes of each images - label_channels (int): Channel of label. 
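`region_targets` above computes flat per-image targets and then regroups them per FPN level with `images_to_levels`. A rough, hypothetical re-implementation of that regrouping idea is sketched below (illustration only, not the mmdet utility itself):

```
import torch

def images_to_levels_sketch(per_image_targets, num_level_anchors):
    # per_image_targets: list of (num_total_anchors, ...) tensors, one per image
    # num_level_anchors: anchors per level, e.g. [H1*W1*A, H2*W2*A, ...]
    stacked = torch.stack(per_image_targets, dim=0)  # (num_imgs, num_total, ...)
    level_targets, start = [], 0
    for n in num_level_anchors:
        level_targets.append(stacked[:, start:start + n])
        start += n
    return level_targets

targets = [torch.rand(30, 4), torch.rand(30, 4)]      # two images
levels = images_to_levels_sketch(targets, [20, 10])   # two levels
print([t.shape for t in levels])  # [(2, 20, 4), (2, 10, 4)]
```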
- - Returns: - cls_reg_targets (tuple) - """ - if isinstance(self.assigner, RegionAssigner): - cls_reg_targets = self.region_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - featmap_sizes, - gt_bboxes_ignore_list=gt_bboxes_ignore, - label_channels=label_channels) - else: - cls_reg_targets = super(StageCascadeRPNHead, self).get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - label_channels=label_channels) - return cls_reg_targets - - def anchor_offset(self, anchor_list, anchor_strides, featmap_sizes): - """ Get offset for deformable conv based on anchor shape - NOTE: currently support deformable kernel_size=3 and dilation=1 - - Args: - anchor_list (list[list[tensor])): [NI, NLVL, NA, 4] list of - multi-level anchors - anchor_strides (list[int]): anchor stride of each level - - Returns: - offset_list (list[tensor]): [NLVL, NA, 2, 18]: offset of DeformConv - kernel. - """ - - def _shape_offset(anchors, stride, ks=3, dilation=1): - # currently support kernel_size=3 and dilation=1 - assert ks == 3 and dilation == 1 - pad = (ks - 1) // 2 - idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device) - yy, xx = torch.meshgrid(idx, idx) # return order matters - xx = xx.reshape(-1) - yy = yy.reshape(-1) - w = (anchors[:, 2] - anchors[:, 0]) / stride - h = (anchors[:, 3] - anchors[:, 1]) / stride - w = w / (ks - 1) - dilation - h = h / (ks - 1) - dilation - offset_x = w[:, None] * xx # (NA, ks**2) - offset_y = h[:, None] * yy # (NA, ks**2) - return offset_x, offset_y - - def _ctr_offset(anchors, stride, featmap_size): - feat_h, feat_w = featmap_size - assert len(anchors) == feat_h * feat_w - - x = (anchors[:, 0] + anchors[:, 2]) * 0.5 - y = (anchors[:, 1] + anchors[:, 3]) * 0.5 - # compute centers on feature map - x = x / stride - y = y / stride - # compute predefine centers - xx = torch.arange(0, feat_w, device=anchors.device) - yy = torch.arange(0, feat_h, device=anchors.device) - yy, xx = torch.meshgrid(yy, xx) - xx = xx.reshape(-1).type_as(x) - yy = yy.reshape(-1).type_as(y) - - offset_x = x - xx # (NA, ) - offset_y = y - yy # (NA, ) - return offset_x, offset_y - - num_imgs = len(anchor_list) - num_lvls = len(anchor_list[0]) - dtype = anchor_list[0][0].dtype - device = anchor_list[0][0].device - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - - offset_list = [] - for i in range(num_imgs): - mlvl_offset = [] - for lvl in range(num_lvls): - c_offset_x, c_offset_y = _ctr_offset(anchor_list[i][lvl], - anchor_strides[lvl], - featmap_sizes[lvl]) - s_offset_x, s_offset_y = _shape_offset(anchor_list[i][lvl], - anchor_strides[lvl]) - - # offset = ctr_offset + shape_offset - offset_x = s_offset_x + c_offset_x[:, None] - offset_y = s_offset_y + c_offset_y[:, None] - - # offset order (y0, x0, y1, x2, .., y8, x8, y9, x9) - offset = torch.stack([offset_y, offset_x], dim=-1) - offset = offset.reshape(offset.size(0), -1) # [NA, 2*ks**2] - mlvl_offset.append(offset) - offset_list.append(torch.cat(mlvl_offset)) # [totalNA, 2*ks**2] - offset_list = images_to_levels(offset_list, num_level_anchors) - return offset_list - - def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights, - bbox_targets, bbox_weights, num_total_samples): - """Loss function on single scale.""" - # classification loss - if self.with_cls: - labels = labels.reshape(-1) - label_weights = label_weights.reshape(-1) - cls_score = cls_score.permute(0, 2, 3, - 1).reshape(-1, self.cls_out_channels) - loss_cls = self.loss_cls( 
- cls_score, labels, label_weights, avg_factor=num_total_samples) - # regression loss - bbox_targets = bbox_targets.reshape(-1, 4) - bbox_weights = bbox_weights.reshape(-1, 4) - bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) - if self.reg_decoded_bbox: - # When the regression loss (e.g. `IouLoss`, `GIouLoss`) - # is applied directly on the decoded bounding boxes, it - # decodes the already encoded coordinates to absolute format. - anchors = anchors.reshape(-1, 4) - bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) - loss_reg = self.loss_bbox( - bbox_pred, - bbox_targets, - bbox_weights, - avg_factor=num_total_samples) - if self.with_cls: - return loss_cls, loss_reg - return None, loss_reg - - def loss(self, - anchor_list, - valid_flag_list, - cls_scores, - bbox_preds, - gt_bboxes, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - anchor_list (list[list]): Multi level anchors of each image. - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. Default: None - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds] - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - featmap_sizes, - gt_bboxes_ignore=gt_bboxes_ignore, - label_channels=label_channels) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - if self.sampling: - num_total_samples = num_total_pos + num_total_neg - else: - # 200 is hard-coded average factor, - # which follows guided anchoring. - num_total_samples = sum([label.numel() - for label in labels_list]) / 200.0 - - # change per image, per level anchor_list to per_level, per_image - mlvl_anchor_list = list(zip(*anchor_list)) - # concat mlvl_anchor_list - mlvl_anchor_list = [ - torch.cat(anchors, dim=0) for anchors in mlvl_anchor_list - ] - - losses = multi_apply( - self.loss_single, - cls_scores, - bbox_preds, - mlvl_anchor_list, - labels_list, - label_weights_list, - bbox_targets_list, - bbox_weights_list, - num_total_samples=num_total_samples) - if self.with_cls: - return dict(loss_rpn_cls=losses[0], loss_rpn_reg=losses[1]) - return dict(loss_rpn_reg=losses[1]) - - def get_bboxes(self, - anchor_list, - cls_scores, - bbox_preds, - img_metas, - cfg, - rescale=False): - """Get proposal predict. - - Args: - anchor_list (list[list]): Multi level anchors of each image. - cls_scores (list[Tensor]): Classification scores for all - scale levels, each is a 4D-tensor, has shape - (batch_size, num_priors * num_classes, H, W). - bbox_preds (list[Tensor]): Box energies / deltas for all - scale levels, each is a 4D-tensor, has shape - (batch_size, num_priors * 4, H, W). - img_metas (list[dict], Optional): Image meta info. Default None. 
- cfg (mmcv.Config, Optional): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. - - Returns: - Tensor: Labeled boxes in shape (n, 5), where the first 4 columns - are bounding box positions (tl_x, tl_y, br_x, br_y) and the - 5-th column is a score between 0 and 1. - """ - assert len(cls_scores) == len(bbox_preds) - - result_list = [] - for img_id in range(len(img_metas)): - cls_score_list = select_single_mlvl(cls_scores, img_id) - bbox_pred_list = select_single_mlvl(bbox_preds, img_id) - img_shape = img_metas[img_id]['img_shape'] - scale_factor = img_metas[img_id]['scale_factor'] - proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list, - anchor_list[img_id], img_shape, - scale_factor, cfg, rescale) - result_list.append(proposals) - return result_list - - def _get_bboxes_single(self, - cls_scores, - bbox_preds, - mlvl_anchors, - img_shape, - scale_factor, - cfg, - rescale=False): - """Transform outputs of a single image into bbox predictions. - - Args: - cls_scores (list[Tensor]): Box scores from all scale - levels of a single image, each item has shape - (num_anchors * num_classes, H, W). - bbox_preds (list[Tensor]): Box energies / deltas from - all scale levels of a single image, each item has - shape (num_anchors * 4, H, W). - mlvl_anchors (list[Tensor]): Box reference from all scale - levels of a single image, each item has shape - (num_total_anchors, 4). - img_shape (tuple[int]): Shape of the input image, - (height, width, 3). - scale_factor (ndarray): Scale factor of the image arange as - (w_scale, h_scale, w_scale, h_scale). - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default False. - - Returns: - Tensor: Labeled boxes in shape (n, 5), where the first 4 columns - are bounding box positions (tl_x, tl_y, br_x, br_y) and the - 5-th column is a score between 0 and 1. - """ - cfg = self.test_cfg if cfg is None else cfg - cfg = copy.deepcopy(cfg) - # bboxes from different level should be independent during NMS, - # level_ids are used as labels for batched NMS to separate them - level_ids = [] - mlvl_scores = [] - mlvl_bbox_preds = [] - mlvl_valid_anchors = [] - nms_pre = cfg.get('nms_pre', -1) - for idx in range(len(cls_scores)): - rpn_cls_score = cls_scores[idx] - rpn_bbox_pred = bbox_preds[idx] - assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] - rpn_cls_score = rpn_cls_score.permute(1, 2, 0) - if self.use_sigmoid_cls: - rpn_cls_score = rpn_cls_score.reshape(-1) - scores = rpn_cls_score.sigmoid() - else: - rpn_cls_score = rpn_cls_score.reshape(-1, 2) - # We set FG labels to [0, num_class-1] and BG label to - # num_class in RPN head since mmdet v2.5, which is unified to - # be consistent with other head since mmdet v2.0. In mmdet v2.0 - # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. 
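The comment above concerns the label convention only; numerically the two RPN scoring branches reduce to the small sketch below (toy logits, not real RPN output):

```
import torch

rpn_logits = torch.randn(6, 2)                 # 2-class head: column 0 = FG
fg_scores_softmax = rpn_logits.softmax(dim=1)[:, 0]

rpn_logits_single = torch.randn(6)             # single-channel sigmoid head
fg_scores_sigmoid = rpn_logits_single.sigmoid()

print(fg_scores_softmax.shape, fg_scores_sigmoid.shape)  # both torch.Size([6])
```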
- scores = rpn_cls_score.softmax(dim=1)[:, 0] - rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) - anchors = mlvl_anchors[idx] - - if 0 < nms_pre < scores.shape[0]: - # sort is faster than topk - # _, topk_inds = scores.topk(cfg.nms_pre) - ranked_scores, rank_inds = scores.sort(descending=True) - topk_inds = rank_inds[:nms_pre] - scores = ranked_scores[:nms_pre] - rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] - anchors = anchors[topk_inds, :] - mlvl_scores.append(scores) - mlvl_bbox_preds.append(rpn_bbox_pred) - mlvl_valid_anchors.append(anchors) - level_ids.append( - scores.new_full((scores.size(0), ), idx, dtype=torch.long)) - - scores = torch.cat(mlvl_scores) - anchors = torch.cat(mlvl_valid_anchors) - rpn_bbox_pred = torch.cat(mlvl_bbox_preds) - proposals = self.bbox_coder.decode( - anchors, rpn_bbox_pred, max_shape=img_shape) - ids = torch.cat(level_ids) - - if cfg.min_bbox_size >= 0: - w = proposals[:, 2] - proposals[:, 0] - h = proposals[:, 3] - proposals[:, 1] - valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) - if not valid_mask.all(): - proposals = proposals[valid_mask] - scores = scores[valid_mask] - ids = ids[valid_mask] - - # deprecate arguments warning - if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: - warnings.warn( - 'In rpn_proposal or test_cfg, ' - 'nms_thr has been moved to a dict named nms as ' - 'iou_threshold, max_num has been renamed as max_per_img, ' - 'name of original arguments and the way to specify ' - 'iou_threshold of NMS will be deprecated.') - if 'nms' not in cfg: - cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) - if 'max_num' in cfg: - if 'max_per_img' in cfg: - assert cfg.max_num == cfg.max_per_img, f'You ' \ - f'set max_num and ' \ - f'max_per_img at the same time, but get {cfg.max_num} ' \ - f'and {cfg.max_per_img} respectively' \ - 'Please delete max_num which will be deprecated.' - else: - cfg.max_per_img = cfg.max_num - if 'nms_thr' in cfg: - assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \ - f' iou_threshold in nms and ' \ - f'nms_thr at the same time, but get' \ - f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \ - f' respectively. Please delete the nms_thr ' \ - f'which will be deprecated.' - - if proposals.numel() > 0: - dets, _ = batched_nms(proposals, scores, ids, cfg.nms) - else: - return proposals.new_zeros(0, 5) - - return dets[:cfg.max_per_img] - - def refine_bboxes(self, anchor_list, bbox_preds, img_metas): - """Refine bboxes through stages.""" - num_levels = len(bbox_preds) - new_anchor_list = [] - for img_id in range(len(img_metas)): - mlvl_anchors = [] - for i in range(num_levels): - bbox_pred = bbox_preds[i][img_id].detach() - bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) - img_shape = img_metas[img_id]['img_shape'] - bboxes = self.bbox_coder.decode(anchor_list[img_id][i], - bbox_pred, img_shape) - mlvl_anchors.append(bboxes) - new_anchor_list.append(mlvl_anchors) - return new_anchor_list - - -@HEADS.register_module() -class CascadeRPNHead(BaseDenseHead): - """The CascadeRPNHead will predict more accurate region proposals, which is - required for two-stage detectors (such as Fast/Faster R-CNN). CascadeRPN - consists of a sequence of RPNStage to progressively improve the accuracy of - the detected proposals. - - More details can be found in ``https://arxiv.org/abs/1909.06720``. - - Args: - num_stages (int): number of CascadeRPN stages. - stages (list[dict]): list of configs to build the stages. - train_cfg (list[dict]): list of configs at training time each stage. 
- test_cfg (dict): config at testing time. - """ - - def __init__(self, num_stages, stages, train_cfg, test_cfg, init_cfg=None): - super(CascadeRPNHead, self).__init__(init_cfg) - assert num_stages == len(stages) - self.num_stages = num_stages - # Be careful! Pretrained weights cannot be loaded when use - # nn.ModuleList - self.stages = ModuleList() - for i in range(len(stages)): - train_cfg_i = train_cfg[i] if train_cfg is not None else None - stages[i].update(train_cfg=train_cfg_i) - stages[i].update(test_cfg=test_cfg) - self.stages.append(build_head(stages[i])) - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - def loss(self): - """loss() is implemented in StageCascadeRPNHead.""" - pass - - def get_bboxes(self): - """get_bboxes() is implemented in StageCascadeRPNHead.""" - pass - - def forward_train(self, - x, - img_metas, - gt_bboxes, - gt_labels=None, - gt_bboxes_ignore=None, - proposal_cfg=None): - """Forward train function.""" - assert gt_labels is None, 'RPN does not require gt_labels' - - featmap_sizes = [featmap.size()[-2:] for featmap in x] - device = x[0].device - anchor_list, valid_flag_list = self.stages[0].get_anchors( - featmap_sizes, img_metas, device=device) - - losses = dict() - - for i in range(self.num_stages): - stage = self.stages[i] - - if stage.adapt_cfg['type'] == 'offset': - offset_list = stage.anchor_offset(anchor_list, - stage.anchor_strides, - featmap_sizes) - else: - offset_list = None - x, cls_score, bbox_pred = stage(x, offset_list) - rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score, - bbox_pred, gt_bboxes, img_metas) - stage_loss = stage.loss(*rpn_loss_inputs) - for name, value in stage_loss.items(): - losses['s{}.{}'.format(i, name)] = value - - # refine boxes - if i < self.num_stages - 1: - anchor_list = stage.refine_bboxes(anchor_list, bbox_pred, - img_metas) - if proposal_cfg is None: - return losses - else: - proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score, - bbox_pred, img_metas, - self.test_cfg) - return losses, proposal_list - - def simple_test_rpn(self, x, img_metas): - """Simple forward test function.""" - featmap_sizes = [featmap.size()[-2:] for featmap in x] - device = x[0].device - anchor_list, _ = self.stages[0].get_anchors( - featmap_sizes, img_metas, device=device) - - for i in range(self.num_stages): - stage = self.stages[i] - if stage.adapt_cfg['type'] == 'offset': - offset_list = stage.anchor_offset(anchor_list, - stage.anchor_strides, - featmap_sizes) - else: - offset_list = None - x, cls_score, bbox_pred = stage(x, offset_list) - if i < self.num_stages - 1: - anchor_list = stage.refine_bboxes(anchor_list, bbox_pred, - img_metas) - - proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score, - bbox_pred, img_metas, - self.test_cfg) - return proposal_list - - def aug_test_rpn(self, x, img_metas): - """Augmented forward test function.""" - raise NotImplementedError( - 'CascadeRPNHead does not support test-time augmentation') diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/centernet_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/centernet_head.py deleted file mode 100644 index b9d5d2f01fb1cc2494739262517082f6a52b7297..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/centernet_head.py +++ /dev/null @@ -1,412 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
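The `forward_train` loop of `CascadeRPNHead` above feeds the anchors refined by one stage into the next. Stripped of the mmdet machinery, the control flow reduces to roughly the toy sketch below; `ToyStage` and the additive "decode" are stand-ins for `StageCascadeRPNHead` and the bbox coder, not the real implementations.

```
import torch
import torch.nn as nn

class ToyStage(nn.Module):
    """Stand-in for StageCascadeRPNHead: predicts a per-anchor box delta."""

    def __init__(self, channels=4):
        super().__init__()
        self.reg = nn.Linear(channels, 4)

    def forward(self, feat):
        return self.reg(feat)

def cascade_refine(feat, anchors, stages):
    # each stage refines the anchors that the next stage starts from
    for stage in stages:
        delta = stage(feat)
        anchors = anchors + delta      # stand-in for self.bbox_coder.decode
    return anchors

feat = torch.randn(10, 4)
anchors = torch.rand(10, 4)
refined = cascade_refine(feat, anchors, [ToyStage(), ToyStage()])
print(refined.shape)  # torch.Size([10, 4])
```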
-import torch -import torch.nn as nn -from mmcv.cnn import bias_init_with_prob, normal_init -from mmcv.ops import batched_nms -from mmcv.runner import force_fp32 - -from mmdet.core import multi_apply -from mmdet.models import HEADS, build_loss -from mmdet.models.utils import gaussian_radius, gen_gaussian_target -from ..utils.gaussian_target import (get_local_maximum, get_topk_from_heatmap, - transpose_and_gather_feat) -from .base_dense_head import BaseDenseHead -from .dense_test_mixins import BBoxTestMixin - - -@HEADS.register_module() -class CenterNetHead(BaseDenseHead, BBoxTestMixin): - """Objects as Points Head. CenterHead use center_point to indicate object's - position. Paper link - - Args: - in_channel (int): Number of channel in the input feature map. - feat_channel (int): Number of channel in the intermediate feature map. - num_classes (int): Number of categories excluding the background - category. - loss_center_heatmap (dict | None): Config of center heatmap loss. - Default: GaussianFocalLoss. - loss_wh (dict | None): Config of wh loss. Default: L1Loss. - loss_offset (dict | None): Config of offset loss. Default: L1Loss. - train_cfg (dict | None): Training config. Useless in CenterNet, - but we keep this variable for SingleStageDetector. Default: None. - test_cfg (dict | None): Testing config of CenterNet. Default: None. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channel, - feat_channel, - num_classes, - loss_center_heatmap=dict( - type='GaussianFocalLoss', loss_weight=1.0), - loss_wh=dict(type='L1Loss', loss_weight=0.1), - loss_offset=dict(type='L1Loss', loss_weight=1.0), - train_cfg=None, - test_cfg=None, - init_cfg=None): - super(CenterNetHead, self).__init__(init_cfg) - self.num_classes = num_classes - self.heatmap_head = self._build_head(in_channel, feat_channel, - num_classes) - self.wh_head = self._build_head(in_channel, feat_channel, 2) - self.offset_head = self._build_head(in_channel, feat_channel, 2) - - self.loss_center_heatmap = build_loss(loss_center_heatmap) - self.loss_wh = build_loss(loss_wh) - self.loss_offset = build_loss(loss_offset) - - self.train_cfg = train_cfg - self.test_cfg = test_cfg - self.fp16_enabled = False - - def _build_head(self, in_channel, feat_channel, out_channel): - """Build head for each branch.""" - layer = nn.Sequential( - nn.Conv2d(in_channel, feat_channel, kernel_size=3, padding=1), - nn.ReLU(inplace=True), - nn.Conv2d(feat_channel, out_channel, kernel_size=1)) - return layer - - def init_weights(self): - """Initialize weights of the head.""" - bias_init = bias_init_with_prob(0.1) - self.heatmap_head[-1].bias.data.fill_(bias_init) - for head in [self.wh_head, self.offset_head]: - for m in head.modules(): - if isinstance(m, nn.Conv2d): - normal_init(m, std=0.001) - - def forward(self, feats): - """Forward features. Notice CenterNet head does not use FPN. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - center_heatmap_preds (List[Tensor]): center predict heatmaps for - all levels, the channels number is num_classes. - wh_preds (List[Tensor]): wh predicts for all levels, the channels - number is 2. - offset_preds (List[Tensor]): offset predicts for all levels, the - channels number is 2. - """ - return multi_apply(self.forward_single, feats) - - def forward_single(self, feat): - """Forward feature of a single level. - - Args: - feat (Tensor): Feature of a single level. 
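`_build_head` above assembles the same Conv-ReLU-Conv block for the heatmap, wh and offset branches; only the output channel count differs. A stand-alone equivalent is shown below (the channel numbers are arbitrary examples):

```
import torch
import torch.nn as nn

def build_branch(in_channel, feat_channel, out_channel):
    # mirrors CenterNetHead._build_head: 3x3 conv -> ReLU -> 1x1 conv
    return nn.Sequential(
        nn.Conv2d(in_channel, feat_channel, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(feat_channel, out_channel, kernel_size=1))

heatmap_branch = build_branch(64, 64, 80)   # 80-class center heatmap
wh_branch = build_branch(64, 64, 2)         # width/height
offset_branch = build_branch(64, 64, 2)     # sub-pixel center offset
print(heatmap_branch(torch.randn(1, 64, 32, 32)).shape)  # (1, 80, 32, 32)
```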
- - Returns: - center_heatmap_pred (Tensor): center predict heatmaps, the - channels number is num_classes. - wh_pred (Tensor): wh predicts, the channels number is 2. - offset_pred (Tensor): offset predicts, the channels number is 2. - """ - center_heatmap_pred = self.heatmap_head(feat).sigmoid() - wh_pred = self.wh_head(feat) - offset_pred = self.offset_head(feat) - return center_heatmap_pred, wh_pred, offset_pred - - @force_fp32(apply_to=('center_heatmap_preds', 'wh_preds', 'offset_preds')) - def loss(self, - center_heatmap_preds, - wh_preds, - offset_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - center_heatmap_preds (list[Tensor]): center predict heatmaps for - all levels with shape (B, num_classes, H, W). - wh_preds (list[Tensor]): wh predicts for all levels with - shape (B, 2, H, W). - offset_preds (list[Tensor]): offset predicts for all levels - with shape (B, 2, H, W). - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. Default: None - - Returns: - dict[str, Tensor]: which has components below: - - loss_center_heatmap (Tensor): loss of center heatmap. - - loss_wh (Tensor): loss of hw heatmap - - loss_offset (Tensor): loss of offset heatmap. - """ - assert len(center_heatmap_preds) == len(wh_preds) == len( - offset_preds) == 1 - center_heatmap_pred = center_heatmap_preds[0] - wh_pred = wh_preds[0] - offset_pred = offset_preds[0] - - target_result, avg_factor = self.get_targets(gt_bboxes, gt_labels, - center_heatmap_pred.shape, - img_metas[0]['pad_shape']) - - center_heatmap_target = target_result['center_heatmap_target'] - wh_target = target_result['wh_target'] - offset_target = target_result['offset_target'] - wh_offset_target_weight = target_result['wh_offset_target_weight'] - - # Since the channel of wh_target and offset_target is 2, the avg_factor - # of loss_center_heatmap is always 1/2 of loss_wh and loss_offset. - loss_center_heatmap = self.loss_center_heatmap( - center_heatmap_pred, center_heatmap_target, avg_factor=avg_factor) - loss_wh = self.loss_wh( - wh_pred, - wh_target, - wh_offset_target_weight, - avg_factor=avg_factor * 2) - loss_offset = self.loss_offset( - offset_pred, - offset_target, - wh_offset_target_weight, - avg_factor=avg_factor * 2) - return dict( - loss_center_heatmap=loss_center_heatmap, - loss_wh=loss_wh, - loss_offset=loss_offset) - - def get_targets(self, gt_bboxes, gt_labels, feat_shape, img_shape): - """Compute regression and classification targets in multiple images. - - Args: - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box. - feat_shape (list[int]): feature map shape with value [B, _, H, W] - img_shape (list[int]): image shape in [h, w] format. - - Returns: - tuple[dict,float]: The float value is mean avg_factor, the dict has - components below: - - center_heatmap_target (Tensor): targets of center heatmap, \ - shape (B, num_classes, H, W). - - wh_target (Tensor): targets of wh predict, shape \ - (B, 2, H, W). - - offset_target (Tensor): targets of offset predict, shape \ - (B, 2, H, W). 
- - wh_offset_target_weight (Tensor): weights of wh and offset \ - predict, shape (B, 2, H, W). - """ - img_h, img_w = img_shape[:2] - bs, _, feat_h, feat_w = feat_shape - - width_ratio = float(feat_w / img_w) - height_ratio = float(feat_h / img_h) - - center_heatmap_target = gt_bboxes[-1].new_zeros( - [bs, self.num_classes, feat_h, feat_w]) - wh_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w]) - offset_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w]) - wh_offset_target_weight = gt_bboxes[-1].new_zeros( - [bs, 2, feat_h, feat_w]) - - for batch_id in range(bs): - gt_bbox = gt_bboxes[batch_id] - gt_label = gt_labels[batch_id] - center_x = (gt_bbox[:, [0]] + gt_bbox[:, [2]]) * width_ratio / 2 - center_y = (gt_bbox[:, [1]] + gt_bbox[:, [3]]) * height_ratio / 2 - gt_centers = torch.cat((center_x, center_y), dim=1) - - for j, ct in enumerate(gt_centers): - ctx_int, cty_int = ct.int() - ctx, cty = ct - scale_box_h = (gt_bbox[j][3] - gt_bbox[j][1]) * height_ratio - scale_box_w = (gt_bbox[j][2] - gt_bbox[j][0]) * width_ratio - radius = gaussian_radius([scale_box_h, scale_box_w], - min_overlap=0.3) - radius = max(0, int(radius)) - ind = gt_label[j] - gen_gaussian_target(center_heatmap_target[batch_id, ind], - [ctx_int, cty_int], radius) - - wh_target[batch_id, 0, cty_int, ctx_int] = scale_box_w - wh_target[batch_id, 1, cty_int, ctx_int] = scale_box_h - - offset_target[batch_id, 0, cty_int, ctx_int] = ctx - ctx_int - offset_target[batch_id, 1, cty_int, ctx_int] = cty - cty_int - - wh_offset_target_weight[batch_id, :, cty_int, ctx_int] = 1 - - avg_factor = max(1, center_heatmap_target.eq(1).sum()) - target_result = dict( - center_heatmap_target=center_heatmap_target, - wh_target=wh_target, - offset_target=offset_target, - wh_offset_target_weight=wh_offset_target_weight) - return target_result, avg_factor - - @force_fp32(apply_to=('center_heatmap_preds', 'wh_preds', 'offset_preds')) - def get_bboxes(self, - center_heatmap_preds, - wh_preds, - offset_preds, - img_metas, - rescale=True, - with_nms=False): - """Transform network output for a batch into bbox predictions. - - Args: - center_heatmap_preds (list[Tensor]): Center predict heatmaps for - all levels with shape (B, num_classes, H, W). - wh_preds (list[Tensor]): WH predicts for all levels with - shape (B, 2, H, W). - offset_preds (list[Tensor]): Offset predicts for all levels - with shape (B, 2, H, W). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - rescale (bool): If True, return boxes in original image space. - Default: True. - with_nms (bool): If True, do nms before return boxes. - Default: False. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is an (n, 5) tensor, where 5 represent - (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. - The shape of the second tensor in the tuple is (n,), and - each element represents the class label of the corresponding - box. 
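`get_targets` relies on `gaussian_radius` and `gen_gaussian_target` from `mmdet.models.utils`. A simplified, self-contained sketch of the splatting step (not the library implementation, and without the radius heuristic) looks roughly like this:

```
import torch

def draw_gaussian(heatmap, center, radius):
    # Simplified splat in the spirit of gen_gaussian_target: place an
    # unnormalised 2D Gaussian (peak value 1) at an integer centre and keep the
    # element-wise maximum so overlapping objects do not erase each other.
    diameter = 2 * radius + 1
    sigma = diameter / 6.0
    ys = torch.arange(-radius, radius + 1, dtype=torch.float32).view(-1, 1)
    xs = torch.arange(-radius, radius + 1, dtype=torch.float32).view(1, -1)
    gaussian = torch.exp(-(xs * xs + ys * ys) / (2 * sigma * sigma))

    cx, cy = center
    h, w = heatmap.shape
    left, right = min(cx, radius), min(w - cx, radius + 1)
    top, bottom = min(cy, radius), min(h - cy, radius + 1)

    patch = heatmap[cy - top:cy + bottom, cx - left:cx + right]
    patch[:] = torch.max(
        patch, gaussian[radius - top:radius + bottom, radius - left:radius + right])
    return heatmap

heatmap = torch.zeros(16, 16)
draw_gaussian(heatmap, center=(5, 8), radius=2)   # heatmap[8, 5] is now 1.0
```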
- """ - assert len(center_heatmap_preds) == len(wh_preds) == len( - offset_preds) == 1 - result_list = [] - for img_id in range(len(img_metas)): - result_list.append( - self._get_bboxes_single( - center_heatmap_preds[0][img_id:img_id + 1, ...], - wh_preds[0][img_id:img_id + 1, ...], - offset_preds[0][img_id:img_id + 1, ...], - img_metas[img_id], - rescale=rescale, - with_nms=with_nms)) - return result_list - - def _get_bboxes_single(self, - center_heatmap_pred, - wh_pred, - offset_pred, - img_meta, - rescale=False, - with_nms=True): - """Transform outputs of a single image into bbox results. - - Args: - center_heatmap_pred (Tensor): Center heatmap for current level with - shape (1, num_classes, H, W). - wh_pred (Tensor): WH heatmap for current level with shape - (1, num_classes, H, W). - offset_pred (Tensor): Offset for current level with shape - (1, corner_offset_channels, H, W). - img_meta (dict): Meta information of current image, e.g., - image size, scaling factor, etc. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - tuple[Tensor, Tensor]: The first item is an (n, 5) tensor, where - 5 represent (tl_x, tl_y, br_x, br_y, score) and the score - between 0 and 1. The shape of the second tensor in the tuple - is (n,), and each element represents the class label of the - corresponding box. - """ - batch_det_bboxes, batch_labels = self.decode_heatmap( - center_heatmap_pred, - wh_pred, - offset_pred, - img_meta['batch_input_shape'], - k=self.test_cfg.topk, - kernel=self.test_cfg.local_maximum_kernel) - - det_bboxes = batch_det_bboxes.view([-1, 5]) - det_labels = batch_labels.view(-1) - - batch_border = det_bboxes.new_tensor(img_meta['border'])[..., - [2, 0, 2, 0]] - det_bboxes[..., :4] -= batch_border - - if rescale: - det_bboxes[..., :4] /= det_bboxes.new_tensor( - img_meta['scale_factor']) - - if with_nms: - det_bboxes, det_labels = self._bboxes_nms(det_bboxes, det_labels, - self.test_cfg) - return det_bboxes, det_labels - - def decode_heatmap(self, - center_heatmap_pred, - wh_pred, - offset_pred, - img_shape, - k=100, - kernel=3): - """Transform outputs into detections raw bbox prediction. - - Args: - center_heatmap_pred (Tensor): center predict heatmap, - shape (B, num_classes, H, W). - wh_pred (Tensor): wh predict, shape (B, 2, H, W). - offset_pred (Tensor): offset predict, shape (B, 2, H, W). - img_shape (list[int]): image shape in [h, w] format. - k (int): Get top k center keypoints from heatmap. Default 100. - kernel (int): Max pooling kernel for extract local maximum pixels. - Default 3. 
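The `k` and `kernel` arguments of `decode_heatmap` correspond to the two helpers imported from `..utils.gaussian_target`. A rough stand-alone sketch of what they do, with hypothetical helper names, is:

```
import torch
import torch.nn.functional as F

def local_maximum(heat, kernel=3):
    # Keep only pixels that equal the maximum of their kernel x kernel
    # neighbourhood: a cheap NMS on the heatmap (cf. get_local_maximum).
    pad = (kernel - 1) // 2
    hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
    return heat * (hmax == heat).float()

def topk_centers(heat, k=100):
    # Flatten (class, y, x), take the k highest scores and recover the class id
    # and integer coordinates of each peak (cf. get_topk_from_heatmap).
    batch, _, height, width = heat.shape
    scores, inds = torch.topk(heat.view(batch, -1), k)
    clses = inds // (height * width)
    inds = inds % (height * width)
    ys = (inds // width).float()
    xs = (inds % width).float()
    return scores, clses, xs, ys

heat = torch.rand(1, 80, 128, 128)
scores, clses, xs, ys = topk_centers(local_maximum(heat), k=100)
```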
- - Returns: - tuple[torch.Tensor]: Decoded output of CenterNetHead, containing - the following Tensors: - - - batch_bboxes (Tensor): Coords of each box with shape (B, k, 5) - - batch_topk_labels (Tensor): Categories of each box with \ - shape (B, k) - """ - height, width = center_heatmap_pred.shape[2:] - inp_h, inp_w = img_shape - - center_heatmap_pred = get_local_maximum( - center_heatmap_pred, kernel=kernel) - - *batch_dets, topk_ys, topk_xs = get_topk_from_heatmap( - center_heatmap_pred, k=k) - batch_scores, batch_index, batch_topk_labels = batch_dets - - wh = transpose_and_gather_feat(wh_pred, batch_index) - offset = transpose_and_gather_feat(offset_pred, batch_index) - topk_xs = topk_xs + offset[..., 0] - topk_ys = topk_ys + offset[..., 1] - tl_x = (topk_xs - wh[..., 0] / 2) * (inp_w / width) - tl_y = (topk_ys - wh[..., 1] / 2) * (inp_h / height) - br_x = (topk_xs + wh[..., 0] / 2) * (inp_w / width) - br_y = (topk_ys + wh[..., 1] / 2) * (inp_h / height) - - batch_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=2) - batch_bboxes = torch.cat((batch_bboxes, batch_scores[..., None]), - dim=-1) - return batch_bboxes, batch_topk_labels - - def _bboxes_nms(self, bboxes, labels, cfg): - if labels.numel() > 0: - max_num = cfg.max_per_img - bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, - -1].contiguous(), - labels, cfg.nms) - if max_num > 0: - bboxes = bboxes[:max_num] - labels = labels[keep][:max_num] - - return bboxes, labels diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/centripetal_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/centripetal_head.py deleted file mode 100644 index ebc721b7623236c0b95679c762725574687ee56f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/centripetal_head.py +++ /dev/null @@ -1,430 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -from mmcv.cnn import ConvModule, normal_init -from mmcv.ops import DeformConv2d -from mmcv.runner import force_fp32 - -from mmdet.core import multi_apply -from ..builder import HEADS, build_loss -from .corner_head import CornerHead - - -@HEADS.register_module() -class CentripetalHead(CornerHead): - """Head of CentripetalNet: Pursuing High-quality Keypoint Pairs for Object - Detection. - - CentripetalHead inherits from :class:`CornerHead`. It removes the - embedding branch and adds guiding shift and centripetal shift branches. - More details can be found in the `paper - `_ . - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - num_feat_levels (int): Levels of feature from the previous module. 2 - for HourglassNet-104 and 1 for HourglassNet-52. HourglassNet-104 - outputs the final feature and intermediate supervision feature and - HourglassNet-52 only outputs the final feature. Default: 2. - corner_emb_channels (int): Channel of embedding vector. Default: 1. - train_cfg (dict | None): Training config. Useless in CornerHead, - but we keep this variable for SingleStageDetector. Default: None. - test_cfg (dict | None): Testing config of CornerHead. Default: None. - loss_heatmap (dict | None): Config of corner heatmap loss. Default: - GaussianFocalLoss. - loss_embedding (dict | None): Config of corner embedding loss. Default: - AssociativeEmbeddingLoss. - loss_offset (dict | None): Config of corner offset loss. Default: - SmoothL1Loss. - loss_guiding_shift (dict): Config of guiding shift loss. 
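Putting the pieces together, the arithmetic in `CenterNetHead.decode_heatmap` that turns top-k centres plus the regressed `wh`/`offset` values into boxes in input-image coordinates reduces to the following (shapes and numbers purely illustrative):

```
import torch

inp_h, inp_w = 512, 512                  # padded input size
feat_h, feat_w = 128, 128                # heatmap size

topk_xs = torch.tensor([[40.0]])         # (B, k) integer centre x on the feature map
topk_ys = torch.tensor([[25.0]])
offset = torch.tensor([[[0.3, 0.7]]])    # (B, k, 2) sub-pixel centre offset
wh = torch.tensor([[[20.0, 12.0]]])      # (B, k, 2) width/height on the feature map

xs = topk_xs + offset[..., 0]
ys = topk_ys + offset[..., 1]
tl_x = (xs - wh[..., 0] / 2) * (inp_w / feat_w)
tl_y = (ys - wh[..., 1] / 2) * (inp_h / feat_h)
br_x = (xs + wh[..., 0] / 2) * (inp_w / feat_w)
br_y = (ys + wh[..., 1] / 2) * (inp_h / feat_h)
boxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=-1)   # (B, k, 4)
```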
Default: - SmoothL1Loss. - loss_centripetal_shift (dict): Config of centripetal shift loss. - Default: SmoothL1Loss. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - *args, - centripetal_shift_channels=2, - guiding_shift_channels=2, - feat_adaption_conv_kernel=3, - loss_guiding_shift=dict( - type='SmoothL1Loss', beta=1.0, loss_weight=0.05), - loss_centripetal_shift=dict( - type='SmoothL1Loss', beta=1.0, loss_weight=1), - init_cfg=None, - **kwargs): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - assert centripetal_shift_channels == 2, ( - 'CentripetalHead only support centripetal_shift_channels == 2') - self.centripetal_shift_channels = centripetal_shift_channels - assert guiding_shift_channels == 2, ( - 'CentripetalHead only support guiding_shift_channels == 2') - self.guiding_shift_channels = guiding_shift_channels - self.feat_adaption_conv_kernel = feat_adaption_conv_kernel - super(CentripetalHead, self).__init__( - *args, init_cfg=init_cfg, **kwargs) - self.loss_guiding_shift = build_loss(loss_guiding_shift) - self.loss_centripetal_shift = build_loss(loss_centripetal_shift) - - def _init_centripetal_layers(self): - """Initialize centripetal layers. - - Including feature adaption deform convs (feat_adaption), deform offset - prediction convs (dcn_off), guiding shift (guiding_shift) and - centripetal shift ( centripetal_shift). Each branch has two parts: - prefix `tl_` for top-left and `br_` for bottom-right. - """ - self.tl_feat_adaption = nn.ModuleList() - self.br_feat_adaption = nn.ModuleList() - self.tl_dcn_offset = nn.ModuleList() - self.br_dcn_offset = nn.ModuleList() - self.tl_guiding_shift = nn.ModuleList() - self.br_guiding_shift = nn.ModuleList() - self.tl_centripetal_shift = nn.ModuleList() - self.br_centripetal_shift = nn.ModuleList() - - for _ in range(self.num_feat_levels): - self.tl_feat_adaption.append( - DeformConv2d(self.in_channels, self.in_channels, - self.feat_adaption_conv_kernel, 1, 1)) - self.br_feat_adaption.append( - DeformConv2d(self.in_channels, self.in_channels, - self.feat_adaption_conv_kernel, 1, 1)) - - self.tl_guiding_shift.append( - self._make_layers( - out_channels=self.guiding_shift_channels, - in_channels=self.in_channels)) - self.br_guiding_shift.append( - self._make_layers( - out_channels=self.guiding_shift_channels, - in_channels=self.in_channels)) - - self.tl_dcn_offset.append( - ConvModule( - self.guiding_shift_channels, - self.feat_adaption_conv_kernel**2 * - self.guiding_shift_channels, - 1, - bias=False, - act_cfg=None)) - self.br_dcn_offset.append( - ConvModule( - self.guiding_shift_channels, - self.feat_adaption_conv_kernel**2 * - self.guiding_shift_channels, - 1, - bias=False, - act_cfg=None)) - - self.tl_centripetal_shift.append( - self._make_layers( - out_channels=self.centripetal_shift_channels, - in_channels=self.in_channels)) - self.br_centripetal_shift.append( - self._make_layers( - out_channels=self.centripetal_shift_channels, - in_channels=self.in_channels)) - - def _init_layers(self): - """Initialize layers for CentripetalHead. 
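The guiding-shift branch exists mainly to drive the feature adaption step: a 1x1 conv turns the 2-channel guiding shift into per-kernel-position offsets for a deformable conv. Below is a hedged sketch using `torchvision.ops.deform_conv2d` as a stand-in for `mmcv.ops.DeformConv2d`; all sizes and module names are illustrative, not the deleted head's exact configuration:

```
import torch
import torch.nn as nn
from torchvision.ops import deform_conv2d   # stand-in for mmcv.ops.DeformConv2d

in_channels, k = 64, 3                                       # illustrative sizes
guiding_branch = nn.Sequential(                              # predicts a 2-channel shift
    nn.Conv2d(in_channels, 64, 3, padding=1), nn.ReLU(inplace=True),
    nn.Conv2d(64, 2, 1))
dcn_offset_conv = nn.Conv2d(2, 2 * k * k, 1, bias=False)     # shift -> per-sample offsets
adaption_weight = torch.randn(in_channels, in_channels, k, k) * 0.01

pool_feat = torch.randn(1, in_channels, 32, 32)              # e.g. a corner-pooled feature
guiding_shift = guiding_branch(pool_feat)
dcn_offset = dcn_offset_conv(guiding_shift.detach())         # detached, as in forward_single
adapted = deform_conv2d(pool_feat, dcn_offset, adaption_weight, padding=1)
```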
- - Including two parts: CornerHead layers and CentripetalHead layers - """ - super()._init_layers() # using _init_layers in CornerHead - self._init_centripetal_layers() - - def init_weights(self): - super(CentripetalHead, self).init_weights() - for i in range(self.num_feat_levels): - normal_init(self.tl_feat_adaption[i], std=0.01) - normal_init(self.br_feat_adaption[i], std=0.01) - normal_init(self.tl_dcn_offset[i].conv, std=0.1) - normal_init(self.br_dcn_offset[i].conv, std=0.1) - _ = [x.conv.reset_parameters() for x in self.tl_guiding_shift[i]] - _ = [x.conv.reset_parameters() for x in self.br_guiding_shift[i]] - _ = [ - x.conv.reset_parameters() for x in self.tl_centripetal_shift[i] - ] - _ = [ - x.conv.reset_parameters() for x in self.br_centripetal_shift[i] - ] - - def forward_single(self, x, lvl_ind): - """Forward feature of a single level. - - Args: - x (Tensor): Feature of a single level. - lvl_ind (int): Level index of current feature. - - Returns: - tuple[Tensor]: A tuple of CentripetalHead's output for current - feature level. Containing the following Tensors: - - - tl_heat (Tensor): Predicted top-left corner heatmap. - - br_heat (Tensor): Predicted bottom-right corner heatmap. - - tl_off (Tensor): Predicted top-left offset heatmap. - - br_off (Tensor): Predicted bottom-right offset heatmap. - - tl_guiding_shift (Tensor): Predicted top-left guiding shift - heatmap. - - br_guiding_shift (Tensor): Predicted bottom-right guiding - shift heatmap. - - tl_centripetal_shift (Tensor): Predicted top-left centripetal - shift heatmap. - - br_centripetal_shift (Tensor): Predicted bottom-right - centripetal shift heatmap. - """ - tl_heat, br_heat, _, _, tl_off, br_off, tl_pool, br_pool = super( - ).forward_single( - x, lvl_ind, return_pool=True) - - tl_guiding_shift = self.tl_guiding_shift[lvl_ind](tl_pool) - br_guiding_shift = self.br_guiding_shift[lvl_ind](br_pool) - - tl_dcn_offset = self.tl_dcn_offset[lvl_ind](tl_guiding_shift.detach()) - br_dcn_offset = self.br_dcn_offset[lvl_ind](br_guiding_shift.detach()) - - tl_feat_adaption = self.tl_feat_adaption[lvl_ind](tl_pool, - tl_dcn_offset) - br_feat_adaption = self.br_feat_adaption[lvl_ind](br_pool, - br_dcn_offset) - - tl_centripetal_shift = self.tl_centripetal_shift[lvl_ind]( - tl_feat_adaption) - br_centripetal_shift = self.br_centripetal_shift[lvl_ind]( - br_feat_adaption) - - result_list = [ - tl_heat, br_heat, tl_off, br_off, tl_guiding_shift, - br_guiding_shift, tl_centripetal_shift, br_centripetal_shift - ] - return result_list - - @force_fp32() - def loss(self, - tl_heats, - br_heats, - tl_offs, - br_offs, - tl_guiding_shifts, - br_guiding_shifts, - tl_centripetal_shifts, - br_centripetal_shifts, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - tl_heats (list[Tensor]): Top-left corner heatmaps for each level - with shape (N, num_classes, H, W). - br_heats (list[Tensor]): Bottom-right corner heatmaps for each - level with shape (N, num_classes, H, W). - tl_offs (list[Tensor]): Top-left corner offsets for each level - with shape (N, corner_offset_channels, H, W). - br_offs (list[Tensor]): Bottom-right corner offsets for each level - with shape (N, corner_offset_channels, H, W). - tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each - level with shape (N, guiding_shift_channels, H, W). - br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for - each level with shape (N, guiding_shift_channels, H, W). 
- tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts - for each level with shape (N, centripetal_shift_channels, H, - W). - br_centripetal_shifts (list[Tensor]): Bottom-right centripetal - shifts for each level with shape (N, - centripetal_shift_channels, H, W). - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [left, top, right, bottom] format. - gt_labels (list[Tensor]): Class indices corresponding to each box. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor] | None): Specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. Containing the - following losses: - - - det_loss (list[Tensor]): Corner keypoint losses of all - feature levels. - - off_loss (list[Tensor]): Corner offset losses of all feature - levels. - - guiding_loss (list[Tensor]): Guiding shift losses of all - feature levels. - - centripetal_loss (list[Tensor]): Centripetal shift losses of - all feature levels. - """ - targets = self.get_targets( - gt_bboxes, - gt_labels, - tl_heats[-1].shape, - img_metas[0]['pad_shape'], - with_corner_emb=self.with_corner_emb, - with_guiding_shift=True, - with_centripetal_shift=True) - mlvl_targets = [targets for _ in range(self.num_feat_levels)] - [det_losses, off_losses, guiding_losses, centripetal_losses - ] = multi_apply(self.loss_single, tl_heats, br_heats, tl_offs, - br_offs, tl_guiding_shifts, br_guiding_shifts, - tl_centripetal_shifts, br_centripetal_shifts, - mlvl_targets) - loss_dict = dict( - det_loss=det_losses, - off_loss=off_losses, - guiding_loss=guiding_losses, - centripetal_loss=centripetal_losses) - return loss_dict - - def loss_single(self, tl_hmp, br_hmp, tl_off, br_off, tl_guiding_shift, - br_guiding_shift, tl_centripetal_shift, - br_centripetal_shift, targets): - """Compute losses for single level. - - Args: - tl_hmp (Tensor): Top-left corner heatmap for current level with - shape (N, num_classes, H, W). - br_hmp (Tensor): Bottom-right corner heatmap for current level with - shape (N, num_classes, H, W). - tl_off (Tensor): Top-left corner offset for current level with - shape (N, corner_offset_channels, H, W). - br_off (Tensor): Bottom-right corner offset for current level with - shape (N, corner_offset_channels, H, W). - tl_guiding_shift (Tensor): Top-left guiding shift for current level - with shape (N, guiding_shift_channels, H, W). - br_guiding_shift (Tensor): Bottom-right guiding shift for current - level with shape (N, guiding_shift_channels, H, W). - tl_centripetal_shift (Tensor): Top-left centripetal shift for - current level with shape (N, centripetal_shift_channels, H, W). - br_centripetal_shift (Tensor): Bottom-right centripetal shift for - current level with shape (N, centripetal_shift_channels, H, W). - targets (dict): Corner target generated by `get_targets`. - - Returns: - tuple[torch.Tensor]: Losses of the head's different branches - containing the following losses: - - - det_loss (Tensor): Corner keypoint loss. - - off_loss (Tensor): Corner offset loss. - - guiding_loss (Tensor): Guiding shift loss. - - centripetal_loss (Tensor): Centripetal shift loss. 
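`loss` fans `loss_single` out over the feature levels with `multi_apply` from `mmdet.core`; conceptually that is a `map` followed by a transpose of the per-level result tuples. A minimal sketch of the pattern (not the library code itself):

```
from functools import partial

def multi_apply_sketch(func, *args, **kwargs):
    # Conceptual equivalent of mmdet.core.multi_apply: call `func` once per
    # feature level and regroup the per-level tuples into tuples of lists.
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results)))

def per_level(a, b):
    return a + b, a * b

sums, prods = multi_apply_sketch(per_level, [1, 2, 3], [10, 20, 30])
# sums == [11, 22, 33], prods == [10, 40, 90]
```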
- """ - targets['corner_embedding'] = None - - det_loss, _, _, off_loss = super().loss_single(tl_hmp, br_hmp, None, - None, tl_off, br_off, - targets) - - gt_tl_guiding_shift = targets['topleft_guiding_shift'] - gt_br_guiding_shift = targets['bottomright_guiding_shift'] - gt_tl_centripetal_shift = targets['topleft_centripetal_shift'] - gt_br_centripetal_shift = targets['bottomright_centripetal_shift'] - - gt_tl_heatmap = targets['topleft_heatmap'] - gt_br_heatmap = targets['bottomright_heatmap'] - # We only compute the offset loss at the real corner position. - # The value of real corner would be 1 in heatmap ground truth. - # The mask is computed in class agnostic mode and its shape is - # batch * 1 * width * height. - tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as( - gt_tl_heatmap) - br_mask = gt_br_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as( - gt_br_heatmap) - - # Guiding shift loss - tl_guiding_loss = self.loss_guiding_shift( - tl_guiding_shift, - gt_tl_guiding_shift, - tl_mask, - avg_factor=tl_mask.sum()) - br_guiding_loss = self.loss_guiding_shift( - br_guiding_shift, - gt_br_guiding_shift, - br_mask, - avg_factor=br_mask.sum()) - guiding_loss = (tl_guiding_loss + br_guiding_loss) / 2.0 - # Centripetal shift loss - tl_centripetal_loss = self.loss_centripetal_shift( - tl_centripetal_shift, - gt_tl_centripetal_shift, - tl_mask, - avg_factor=tl_mask.sum()) - br_centripetal_loss = self.loss_centripetal_shift( - br_centripetal_shift, - gt_br_centripetal_shift, - br_mask, - avg_factor=br_mask.sum()) - centripetal_loss = (tl_centripetal_loss + br_centripetal_loss) / 2.0 - - return det_loss, off_loss, guiding_loss, centripetal_loss - - @force_fp32() - def get_bboxes(self, - tl_heats, - br_heats, - tl_offs, - br_offs, - tl_guiding_shifts, - br_guiding_shifts, - tl_centripetal_shifts, - br_centripetal_shifts, - img_metas, - rescale=False, - with_nms=True): - """Transform network output for a batch into bbox predictions. - - Args: - tl_heats (list[Tensor]): Top-left corner heatmaps for each level - with shape (N, num_classes, H, W). - br_heats (list[Tensor]): Bottom-right corner heatmaps for each - level with shape (N, num_classes, H, W). - tl_offs (list[Tensor]): Top-left corner offsets for each level - with shape (N, corner_offset_channels, H, W). - br_offs (list[Tensor]): Bottom-right corner offsets for each level - with shape (N, corner_offset_channels, H, W). - tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each - level with shape (N, guiding_shift_channels, H, W). Useless in - this function, we keep this arg because it's the raw output - from CentripetalHead. - br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for - each level with shape (N, guiding_shift_channels, H, W). - Useless in this function, we keep this arg because it's the - raw output from CentripetalHead. - tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts - for each level with shape (N, centripetal_shift_channels, H, - W). - br_centripetal_shifts (list[Tensor]): Bottom-right centripetal - shifts for each level with shape (N, - centripetal_shift_channels, H, W). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. 
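The class-agnostic corner mask used above is worth seeing on a toy tensor: a pixel counts as a real corner if any class channel of the ground-truth heatmap is exactly 1 there, and the mask's sum doubles as the `avg_factor` for the shift losses. A small illustrative example:

```
import torch

# A pixel is a real corner if any class channel of the ground-truth heatmap is
# exactly 1 there; the class-agnostic mask gates the offset/shift losses.
gt_tl_heatmap = torch.zeros(2, 80, 32, 32)     # (batch, num_classes, H, W), illustrative
gt_tl_heatmap[0, 3, 10, 12] = 1.0
gt_tl_heatmap[1, 7, 5, 5] = 1.0

tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(gt_tl_heatmap)
print(tl_mask.shape, tl_mask.sum())            # torch.Size([2, 1, 32, 32]) tensor(2.)
```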
- """ - assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas) - result_list = [] - for img_id in range(len(img_metas)): - result_list.append( - self._get_bboxes_single( - tl_heats[-1][img_id:img_id + 1, :], - br_heats[-1][img_id:img_id + 1, :], - tl_offs[-1][img_id:img_id + 1, :], - br_offs[-1][img_id:img_id + 1, :], - img_metas[img_id], - tl_emb=None, - br_emb=None, - tl_centripetal_shift=tl_centripetal_shifts[-1][ - img_id:img_id + 1, :], - br_centripetal_shift=br_centripetal_shifts[-1][ - img_id:img_id + 1, :], - rescale=rescale, - with_nms=with_nms)) - - return result_list diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/corner_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/corner_head.py deleted file mode 100644 index c6a2866f94ab25922bc47db0ef0df530f93f6f79..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/corner_head.py +++ /dev/null @@ -1,1086 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from logging import warning -from math import ceil, log - -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, bias_init_with_prob -from mmcv.ops import CornerPool, batched_nms -from mmcv.runner import BaseModule, force_fp32 - -from mmdet.core import multi_apply -from ..builder import HEADS, build_loss -from ..utils import gaussian_radius, gen_gaussian_target -from ..utils.gaussian_target import (gather_feat, get_local_maximum, - get_topk_from_heatmap, - transpose_and_gather_feat) -from .base_dense_head import BaseDenseHead -from .dense_test_mixins import BBoxTestMixin - - -class BiCornerPool(BaseModule): - """Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.) - - Args: - in_channels (int): Input channels of module. - out_channels (int): Output channels of module. - feat_channels (int): Feature channels of module. - directions (list[str]): Directions of two CornerPools. - norm_cfg (dict): Dictionary to construct and config norm layer. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - directions, - feat_channels=128, - out_channels=128, - norm_cfg=dict(type='BN', requires_grad=True), - init_cfg=None): - super(BiCornerPool, self).__init__(init_cfg) - self.direction1_conv = ConvModule( - in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg) - self.direction2_conv = ConvModule( - in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg) - - self.aftpool_conv = ConvModule( - feat_channels, - out_channels, - 3, - padding=1, - norm_cfg=norm_cfg, - act_cfg=None) - - self.conv1 = ConvModule( - in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) - self.conv2 = ConvModule( - in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg) - - self.direction1_pool = CornerPool(directions[0]) - self.direction2_pool = CornerPool(directions[1]) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - """Forward features from the upstream network. - - Args: - x (tensor): Input feature of BiCornerPool. - - Returns: - conv2 (tensor): Output feature of BiCornerPool. 
- """ - direction1_conv = self.direction1_conv(x) - direction2_conv = self.direction2_conv(x) - direction1_feat = self.direction1_pool(direction1_conv) - direction2_feat = self.direction2_pool(direction2_conv) - aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat) - conv1 = self.conv1(x) - relu = self.relu(aftpool_conv + conv1) - conv2 = self.conv2(relu) - return conv2 - - -@HEADS.register_module() -class CornerHead(BaseDenseHead, BBoxTestMixin): - """Head of CornerNet: Detecting Objects as Paired Keypoints. - - Code is modified from the `official github repo - `_ . - - More details can be found in the `paper - `_ . - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - num_feat_levels (int): Levels of feature from the previous module. 2 - for HourglassNet-104 and 1 for HourglassNet-52. Because - HourglassNet-104 outputs the final feature and intermediate - supervision feature and HourglassNet-52 only outputs the final - feature. Default: 2. - corner_emb_channels (int): Channel of embedding vector. Default: 1. - train_cfg (dict | None): Training config. Useless in CornerHead, - but we keep this variable for SingleStageDetector. Default: None. - test_cfg (dict | None): Testing config of CornerHead. Default: None. - loss_heatmap (dict | None): Config of corner heatmap loss. Default: - GaussianFocalLoss. - loss_embedding (dict | None): Config of corner embedding loss. Default: - AssociativeEmbeddingLoss. - loss_offset (dict | None): Config of corner offset loss. Default: - SmoothL1Loss. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - num_classes, - in_channels, - num_feat_levels=2, - corner_emb_channels=1, - train_cfg=None, - test_cfg=None, - loss_heatmap=dict( - type='GaussianFocalLoss', - alpha=2.0, - gamma=4.0, - loss_weight=1), - loss_embedding=dict( - type='AssociativeEmbeddingLoss', - pull_weight=0.25, - push_weight=0.25), - loss_offset=dict( - type='SmoothL1Loss', beta=1.0, loss_weight=1), - init_cfg=None): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - super(CornerHead, self).__init__(init_cfg) - self.num_classes = num_classes - self.in_channels = in_channels - self.corner_emb_channels = corner_emb_channels - self.with_corner_emb = self.corner_emb_channels > 0 - self.corner_offset_channels = 2 - self.num_feat_levels = num_feat_levels - self.loss_heatmap = build_loss( - loss_heatmap) if loss_heatmap is not None else None - self.loss_embedding = build_loss( - loss_embedding) if loss_embedding is not None else None - self.loss_offset = build_loss( - loss_offset) if loss_offset is not None else None - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - self.fp16_enabled = False - self._init_layers() - - def _make_layers(self, out_channels, in_channels=256, feat_channels=256): - """Initialize conv sequential for CornerHead.""" - return nn.Sequential( - ConvModule(in_channels, feat_channels, 3, padding=1), - ConvModule( - feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None)) - - def _init_corner_kpt_layers(self): - """Initialize corner keypoint layers. - - Including corner heatmap branch and corner offset branch. Each branch - has two parts: prefix `tl_` for top-left and `br_` for bottom-right. 
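`BiCornerPool` delegates the directional max-scans to `mmcv.ops.CornerPool`. For intuition, the 'top' and 'left' directions used by the top-left branch can be sketched with cumulative maxima in plain PyTorch; note that the real module also applies convolutions before and after the pooling, so this is only the pooling step:

```
import torch

def top_pool(x):
    # Max over each pixel and everything below it in its column: scan the map
    # bottom-to-top via a cumulative max on the vertically flipped tensor.
    return torch.flip(torch.cummax(torch.flip(x, [2]), dim=2).values, [2])

def left_pool(x):
    # Max over each pixel and everything to its right in its row.
    return torch.flip(torch.cummax(torch.flip(x, [3]), dim=3).values, [3])

x = torch.randn(1, 8, 16, 16)
top_left = top_pool(x) + left_pool(x)   # fused as in the top-left BiCornerPool branch
```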
- """ - self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList() - self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList() - self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList() - - for _ in range(self.num_feat_levels): - self.tl_pool.append( - BiCornerPool( - self.in_channels, ['top', 'left'], - out_channels=self.in_channels)) - self.br_pool.append( - BiCornerPool( - self.in_channels, ['bottom', 'right'], - out_channels=self.in_channels)) - - self.tl_heat.append( - self._make_layers( - out_channels=self.num_classes, - in_channels=self.in_channels)) - self.br_heat.append( - self._make_layers( - out_channels=self.num_classes, - in_channels=self.in_channels)) - - self.tl_off.append( - self._make_layers( - out_channels=self.corner_offset_channels, - in_channels=self.in_channels)) - self.br_off.append( - self._make_layers( - out_channels=self.corner_offset_channels, - in_channels=self.in_channels)) - - def _init_corner_emb_layers(self): - """Initialize corner embedding layers. - - Only include corner embedding branch with two parts: prefix `tl_` for - top-left and `br_` for bottom-right. - """ - self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList() - - for _ in range(self.num_feat_levels): - self.tl_emb.append( - self._make_layers( - out_channels=self.corner_emb_channels, - in_channels=self.in_channels)) - self.br_emb.append( - self._make_layers( - out_channels=self.corner_emb_channels, - in_channels=self.in_channels)) - - def _init_layers(self): - """Initialize layers for CornerHead. - - Including two parts: corner keypoint layers and corner embedding layers - """ - self._init_corner_kpt_layers() - if self.with_corner_emb: - self._init_corner_emb_layers() - - def init_weights(self): - super(CornerHead, self).init_weights() - bias_init = bias_init_with_prob(0.1) - for i in range(self.num_feat_levels): - # The initialization of parameters are different between - # nn.Conv2d and ConvModule. Our experiments show that - # using the original initialization of nn.Conv2d increases - # the final mAP by about 0.2% - self.tl_heat[i][-1].conv.reset_parameters() - self.tl_heat[i][-1].conv.bias.data.fill_(bias_init) - self.br_heat[i][-1].conv.reset_parameters() - self.br_heat[i][-1].conv.bias.data.fill_(bias_init) - self.tl_off[i][-1].conv.reset_parameters() - self.br_off[i][-1].conv.reset_parameters() - if self.with_corner_emb: - self.tl_emb[i][-1].conv.reset_parameters() - self.br_emb[i][-1].conv.reset_parameters() - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: Usually a tuple of corner heatmaps, offset heatmaps and - embedding heatmaps. - - tl_heats (list[Tensor]): Top-left corner heatmaps for all - levels, each is a 4D-tensor, the channels number is - num_classes. - - br_heats (list[Tensor]): Bottom-right corner heatmaps for all - levels, each is a 4D-tensor, the channels number is - num_classes. - - tl_embs (list[Tensor] | list[None]): Top-left embedding - heatmaps for all levels, each is a 4D-tensor or None. - If not None, the channels number is corner_emb_channels. - - br_embs (list[Tensor] | list[None]): Bottom-right embedding - heatmaps for all levels, each is a 4D-tensor or None. - If not None, the channels number is corner_emb_channels. - - tl_offs (list[Tensor]): Top-left offset heatmaps for all - levels, each is a 4D-tensor. The channels number is - corner_offset_channels. 
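`bias_init_with_prob(0.1)` simply inverts the sigmoid so that the freshly initialised heatmap branches predict a prior probability of about 0.1 everywhere, which keeps the Gaussian-focal heatmap loss stable at the start of training. A sketch of the computation:

```
import math

def bias_init_with_prob_sketch(prior_prob):
    # Inverse sigmoid of the prior probability: with this bias the heatmap
    # branch starts out predicting roughly prior_prob everywhere.
    return -math.log((1 - prior_prob) / prior_prob)

bias = bias_init_with_prob_sketch(0.1)
print(bias)                              # about -2.197
print(1 / (1 + math.exp(-bias)))         # sigmoid(bias) == 0.1
```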
- - br_offs (list[Tensor]): Bottom-right offset heatmaps for all - levels, each is a 4D-tensor. The channels number is - corner_offset_channels. - """ - lvl_ind = list(range(self.num_feat_levels)) - return multi_apply(self.forward_single, feats, lvl_ind) - - def forward_single(self, x, lvl_ind, return_pool=False): - """Forward feature of a single level. - - Args: - x (Tensor): Feature of a single level. - lvl_ind (int): Level index of current feature. - return_pool (bool): Return corner pool feature or not. - - Returns: - tuple[Tensor]: A tuple of CornerHead's output for current feature - level. Containing the following Tensors: - - - tl_heat (Tensor): Predicted top-left corner heatmap. - - br_heat (Tensor): Predicted bottom-right corner heatmap. - - tl_emb (Tensor | None): Predicted top-left embedding heatmap. - None for `self.with_corner_emb == False`. - - br_emb (Tensor | None): Predicted bottom-right embedding - heatmap. None for `self.with_corner_emb == False`. - - tl_off (Tensor): Predicted top-left offset heatmap. - - br_off (Tensor): Predicted bottom-right offset heatmap. - - tl_pool (Tensor): Top-left corner pool feature. Not must - have. - - br_pool (Tensor): Bottom-right corner pool feature. Not must - have. - """ - tl_pool = self.tl_pool[lvl_ind](x) - tl_heat = self.tl_heat[lvl_ind](tl_pool) - br_pool = self.br_pool[lvl_ind](x) - br_heat = self.br_heat[lvl_ind](br_pool) - - tl_emb, br_emb = None, None - if self.with_corner_emb: - tl_emb = self.tl_emb[lvl_ind](tl_pool) - br_emb = self.br_emb[lvl_ind](br_pool) - - tl_off = self.tl_off[lvl_ind](tl_pool) - br_off = self.br_off[lvl_ind](br_pool) - - result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off] - if return_pool: - result_list.append(tl_pool) - result_list.append(br_pool) - - return result_list - - def get_targets(self, - gt_bboxes, - gt_labels, - feat_shape, - img_shape, - with_corner_emb=False, - with_guiding_shift=False, - with_centripetal_shift=False): - """Generate corner targets. - - Including corner heatmap, corner offset. - - Optional: corner embedding, corner guiding shift, centripetal shift. - - For CornerNet, we generate corner heatmap, corner offset and corner - embedding from this function. - - For CentripetalNet, we generate corner heatmap, corner offset, guiding - shift and centripetal shift from this function. - - Args: - gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each - has shape (num_gt, 4). - gt_labels (list[Tensor]): Ground truth labels of each box, each has - shape (num_gt,). - feat_shape (list[int]): Shape of output feature, - [batch, channel, height, width]. - img_shape (list[int]): Shape of input image, - [height, width, channel]. - with_corner_emb (bool): Generate corner embedding target or not. - Default: False. - with_guiding_shift (bool): Generate guiding shift target or not. - Default: False. - with_centripetal_shift (bool): Generate centripetal shift target or - not. Default: False. - - Returns: - dict: Ground truth of corner heatmap, corner offset, corner - embedding, guiding shift and centripetal shift. Containing the - following keys: - - - topleft_heatmap (Tensor): Ground truth top-left corner - heatmap. - - bottomright_heatmap (Tensor): Ground truth bottom-right - corner heatmap. - - topleft_offset (Tensor): Ground truth top-left corner offset. - - bottomright_offset (Tensor): Ground truth bottom-right corner - offset. - - corner_embedding (list[list[list[int]]]): Ground truth corner - embedding. Not must have. 
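Before the target-generation code that follows, it helps to see how a single ground-truth box is mapped onto the feature map: the corners are scaled by the width/height ratios, truncated to integer indices, and the fractional remainders become the offset targets. A worked example with illustrative numbers:

```
# Map one ground-truth box from image space onto a 128x128 feature map and
# derive the integer corner indices plus the sub-pixel offset targets
# (all numbers illustrative).
img_h, img_w = 512, 512
feat_h, feat_w = 128, 128
width_ratio, height_ratio = feat_w / img_w, feat_h / img_h             # 0.25, 0.25

left, top, right, bottom = 101.0, 83.0, 261.0, 301.0                   # [left, top, right, bottom]

scale_left, scale_top = left * width_ratio, top * height_ratio         # 25.25, 20.75
left_idx = int(min(scale_left, feat_w - 1))                            # 25
top_idx = int(min(scale_top, feat_h - 1))                              # 20
tl_offset = (scale_left - left_idx, scale_top - top_idx)               # (0.25, 0.75)
```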
- - topleft_guiding_shift (Tensor): Ground truth top-left corner - guiding shift. Not must have. - - bottomright_guiding_shift (Tensor): Ground truth bottom-right - corner guiding shift. Not must have. - - topleft_centripetal_shift (Tensor): Ground truth top-left - corner centripetal shift. Not must have. - - bottomright_centripetal_shift (Tensor): Ground truth - bottom-right corner centripetal shift. Not must have. - """ - batch_size, _, height, width = feat_shape - img_h, img_w = img_shape[:2] - - width_ratio = float(width / img_w) - height_ratio = float(height / img_h) - - gt_tl_heatmap = gt_bboxes[-1].new_zeros( - [batch_size, self.num_classes, height, width]) - gt_br_heatmap = gt_bboxes[-1].new_zeros( - [batch_size, self.num_classes, height, width]) - gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) - gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) - - if with_corner_emb: - match = [] - - # Guiding shift is a kind of offset, from center to corner - if with_guiding_shift: - gt_tl_guiding_shift = gt_bboxes[-1].new_zeros( - [batch_size, 2, height, width]) - gt_br_guiding_shift = gt_bboxes[-1].new_zeros( - [batch_size, 2, height, width]) - # Centripetal shift is also a kind of offset, from center to corner - # and normalized by log. - if with_centripetal_shift: - gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros( - [batch_size, 2, height, width]) - gt_br_centripetal_shift = gt_bboxes[-1].new_zeros( - [batch_size, 2, height, width]) - - for batch_id in range(batch_size): - # Ground truth of corner embedding per image is a list of coord set - corner_match = [] - for box_id in range(len(gt_labels[batch_id])): - left, top, right, bottom = gt_bboxes[batch_id][box_id] - center_x = (left + right) / 2.0 - center_y = (top + bottom) / 2.0 - label = gt_labels[batch_id][box_id] - - # Use coords in the feature level to generate ground truth - scale_left = left * width_ratio - scale_right = right * width_ratio - scale_top = top * height_ratio - scale_bottom = bottom * height_ratio - scale_center_x = center_x * width_ratio - scale_center_y = center_y * height_ratio - - # Int coords on feature map/ground truth tensor - left_idx = int(min(scale_left, width - 1)) - right_idx = int(min(scale_right, width - 1)) - top_idx = int(min(scale_top, height - 1)) - bottom_idx = int(min(scale_bottom, height - 1)) - - # Generate gaussian heatmap - scale_box_width = ceil(scale_right - scale_left) - scale_box_height = ceil(scale_bottom - scale_top) - radius = gaussian_radius((scale_box_height, scale_box_width), - min_overlap=0.3) - radius = max(0, int(radius)) - gt_tl_heatmap[batch_id, label] = gen_gaussian_target( - gt_tl_heatmap[batch_id, label], [left_idx, top_idx], - radius) - gt_br_heatmap[batch_id, label] = gen_gaussian_target( - gt_br_heatmap[batch_id, label], [right_idx, bottom_idx], - radius) - - # Generate corner offset - left_offset = scale_left - left_idx - top_offset = scale_top - top_idx - right_offset = scale_right - right_idx - bottom_offset = scale_bottom - bottom_idx - gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset - gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset - gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset - gt_br_offset[batch_id, 1, bottom_idx, - right_idx] = bottom_offset - - # Generate corner embedding - if with_corner_emb: - corner_match.append([[top_idx, left_idx], - [bottom_idx, right_idx]]) - # Generate guiding shift - if with_guiding_shift: - gt_tl_guiding_shift[batch_id, 0, top_idx, - left_idx] = 
scale_center_x - left_idx - gt_tl_guiding_shift[batch_id, 1, top_idx, - left_idx] = scale_center_y - top_idx - gt_br_guiding_shift[batch_id, 0, bottom_idx, - right_idx] = right_idx - scale_center_x - gt_br_guiding_shift[ - batch_id, 1, bottom_idx, - right_idx] = bottom_idx - scale_center_y - # Generate centripetal shift - if with_centripetal_shift: - gt_tl_centripetal_shift[batch_id, 0, top_idx, - left_idx] = log(scale_center_x - - scale_left) - gt_tl_centripetal_shift[batch_id, 1, top_idx, - left_idx] = log(scale_center_y - - scale_top) - gt_br_centripetal_shift[batch_id, 0, bottom_idx, - right_idx] = log(scale_right - - scale_center_x) - gt_br_centripetal_shift[batch_id, 1, bottom_idx, - right_idx] = log(scale_bottom - - scale_center_y) - - if with_corner_emb: - match.append(corner_match) - - target_result = dict( - topleft_heatmap=gt_tl_heatmap, - topleft_offset=gt_tl_offset, - bottomright_heatmap=gt_br_heatmap, - bottomright_offset=gt_br_offset) - - if with_corner_emb: - target_result.update(corner_embedding=match) - if with_guiding_shift: - target_result.update( - topleft_guiding_shift=gt_tl_guiding_shift, - bottomright_guiding_shift=gt_br_guiding_shift) - if with_centripetal_shift: - target_result.update( - topleft_centripetal_shift=gt_tl_centripetal_shift, - bottomright_centripetal_shift=gt_br_centripetal_shift) - - return target_result - - @force_fp32() - def loss(self, - tl_heats, - br_heats, - tl_embs, - br_embs, - tl_offs, - br_offs, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - tl_heats (list[Tensor]): Top-left corner heatmaps for each level - with shape (N, num_classes, H, W). - br_heats (list[Tensor]): Bottom-right corner heatmaps for each - level with shape (N, num_classes, H, W). - tl_embs (list[Tensor]): Top-left corner embeddings for each level - with shape (N, corner_emb_channels, H, W). - br_embs (list[Tensor]): Bottom-right corner embeddings for each - level with shape (N, corner_emb_channels, H, W). - tl_offs (list[Tensor]): Top-left corner offsets for each level - with shape (N, corner_offset_channels, H, W). - br_offs (list[Tensor]): Bottom-right corner offsets for each level - with shape (N, corner_offset_channels, H, W). - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [left, top, right, bottom] format. - gt_labels (list[Tensor]): Class indices corresponding to each box. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor] | None): Specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. Containing the - following losses: - - - det_loss (list[Tensor]): Corner keypoint losses of all - feature levels. - - pull_loss (list[Tensor]): Part one of AssociativeEmbedding - losses of all feature levels. - - push_loss (list[Tensor]): Part two of AssociativeEmbedding - losses of all feature levels. - - off_loss (list[Tensor]): Corner offset losses of all feature - levels. 
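The centripetal shift targets above are written in log space, and `decode_heatmap` later applies `.exp()` to undo it, so the network regresses the log of the distance from each corner to the box centre on the feature map. A tiny round trip:

```
import math

# The centripetal shift target stored at the top-left corner pixel is the log
# of the distance (on the feature map) from that corner to the box centre.
scale_left, scale_center_x = 25.25, 45.25

encoded = math.log(scale_center_x - scale_left)   # value written into the target map
decoded = math.exp(encoded)                       # 20.0, recovered at decode time
center_x_from_corner = scale_left + decoded       # back to the centre x-coordinate
```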
- """ - targets = self.get_targets( - gt_bboxes, - gt_labels, - tl_heats[-1].shape, - img_metas[0]['pad_shape'], - with_corner_emb=self.with_corner_emb) - mlvl_targets = [targets for _ in range(self.num_feat_levels)] - det_losses, pull_losses, push_losses, off_losses = multi_apply( - self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs, - br_offs, mlvl_targets) - loss_dict = dict(det_loss=det_losses, off_loss=off_losses) - if self.with_corner_emb: - loss_dict.update(pull_loss=pull_losses, push_loss=push_losses) - return loss_dict - - def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off, - targets): - """Compute losses for single level. - - Args: - tl_hmp (Tensor): Top-left corner heatmap for current level with - shape (N, num_classes, H, W). - br_hmp (Tensor): Bottom-right corner heatmap for current level with - shape (N, num_classes, H, W). - tl_emb (Tensor): Top-left corner embedding for current level with - shape (N, corner_emb_channels, H, W). - br_emb (Tensor): Bottom-right corner embedding for current level - with shape (N, corner_emb_channels, H, W). - tl_off (Tensor): Top-left corner offset for current level with - shape (N, corner_offset_channels, H, W). - br_off (Tensor): Bottom-right corner offset for current level with - shape (N, corner_offset_channels, H, W). - targets (dict): Corner target generated by `get_targets`. - - Returns: - tuple[torch.Tensor]: Losses of the head's different branches - containing the following losses: - - - det_loss (Tensor): Corner keypoint loss. - - pull_loss (Tensor): Part one of AssociativeEmbedding loss. - - push_loss (Tensor): Part two of AssociativeEmbedding loss. - - off_loss (Tensor): Corner offset loss. - """ - gt_tl_hmp = targets['topleft_heatmap'] - gt_br_hmp = targets['bottomright_heatmap'] - gt_tl_off = targets['topleft_offset'] - gt_br_off = targets['bottomright_offset'] - gt_embedding = targets['corner_embedding'] - - # Detection loss - tl_det_loss = self.loss_heatmap( - tl_hmp.sigmoid(), - gt_tl_hmp, - avg_factor=max(1, - gt_tl_hmp.eq(1).sum())) - br_det_loss = self.loss_heatmap( - br_hmp.sigmoid(), - gt_br_hmp, - avg_factor=max(1, - gt_br_hmp.eq(1).sum())) - det_loss = (tl_det_loss + br_det_loss) / 2.0 - - # AssociativeEmbedding loss - if self.with_corner_emb and self.loss_embedding is not None: - pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb, - gt_embedding) - else: - pull_loss, push_loss = None, None - - # Offset loss - # We only compute the offset loss at the real corner position. - # The value of real corner would be 1 in heatmap ground truth. - # The mask is computed in class agnostic mode and its shape is - # batch * 1 * width * height. - tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as( - gt_tl_hmp) - br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as( - gt_br_hmp) - tl_off_loss = self.loss_offset( - tl_off, - gt_tl_off, - tl_off_mask, - avg_factor=max(1, tl_off_mask.sum())) - br_off_loss = self.loss_offset( - br_off, - gt_br_off, - br_off_mask, - avg_factor=max(1, br_off_mask.sum())) - - off_loss = (tl_off_loss + br_off_loss) / 2.0 - - return det_loss, pull_loss, push_loss, off_loss - - @force_fp32() - def get_bboxes(self, - tl_heats, - br_heats, - tl_embs, - br_embs, - tl_offs, - br_offs, - img_metas, - rescale=False, - with_nms=True): - """Transform network output for a batch into bbox predictions. - - Args: - tl_heats (list[Tensor]): Top-left corner heatmaps for each level - with shape (N, num_classes, H, W). 
- br_heats (list[Tensor]): Bottom-right corner heatmaps for each - level with shape (N, num_classes, H, W). - tl_embs (list[Tensor]): Top-left corner embeddings for each level - with shape (N, corner_emb_channels, H, W). - br_embs (list[Tensor]): Bottom-right corner embeddings for each - level with shape (N, corner_emb_channels, H, W). - tl_offs (list[Tensor]): Top-left corner offsets for each level - with shape (N, corner_offset_channels, H, W). - br_offs (list[Tensor]): Bottom-right corner offsets for each level - with shape (N, corner_offset_channels, H, W). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - """ - assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas) - result_list = [] - for img_id in range(len(img_metas)): - result_list.append( - self._get_bboxes_single( - tl_heats[-1][img_id:img_id + 1, :], - br_heats[-1][img_id:img_id + 1, :], - tl_offs[-1][img_id:img_id + 1, :], - br_offs[-1][img_id:img_id + 1, :], - img_metas[img_id], - tl_emb=tl_embs[-1][img_id:img_id + 1, :], - br_emb=br_embs[-1][img_id:img_id + 1, :], - rescale=rescale, - with_nms=with_nms)) - - return result_list - - def _get_bboxes_single(self, - tl_heat, - br_heat, - tl_off, - br_off, - img_meta, - tl_emb=None, - br_emb=None, - tl_centripetal_shift=None, - br_centripetal_shift=None, - rescale=False, - with_nms=True): - """Transform outputs for a single batch item into bbox predictions. - - Args: - tl_heat (Tensor): Top-left corner heatmap for current level with - shape (N, num_classes, H, W). - br_heat (Tensor): Bottom-right corner heatmap for current level - with shape (N, num_classes, H, W). - tl_off (Tensor): Top-left corner offset for current level with - shape (N, corner_offset_channels, H, W). - br_off (Tensor): Bottom-right corner offset for current level with - shape (N, corner_offset_channels, H, W). - img_meta (dict): Meta information of current image, e.g., - image size, scaling factor, etc. - tl_emb (Tensor): Top-left corner embedding for current level with - shape (N, corner_emb_channels, H, W). - br_emb (Tensor): Bottom-right corner embedding for current level - with shape (N, corner_emb_channels, H, W). - tl_centripetal_shift: Top-left corner's centripetal shift for - current level with shape (N, 2, H, W). - br_centripetal_shift: Bottom-right corner's centripetal shift for - current level with shape (N, 2, H, W). - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. 
- """ - if isinstance(img_meta, (list, tuple)): - img_meta = img_meta[0] - - batch_bboxes, batch_scores, batch_clses = self.decode_heatmap( - tl_heat=tl_heat.sigmoid(), - br_heat=br_heat.sigmoid(), - tl_off=tl_off, - br_off=br_off, - tl_emb=tl_emb, - br_emb=br_emb, - tl_centripetal_shift=tl_centripetal_shift, - br_centripetal_shift=br_centripetal_shift, - img_meta=img_meta, - k=self.test_cfg.corner_topk, - kernel=self.test_cfg.local_maximum_kernel, - distance_threshold=self.test_cfg.distance_threshold) - - if rescale: - batch_bboxes /= batch_bboxes.new_tensor(img_meta['scale_factor']) - - bboxes = batch_bboxes.view([-1, 4]) - scores = batch_scores.view(-1) - clses = batch_clses.view(-1) - - detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1) - keepinds = (detections[:, -1] > -0.1) - detections = detections[keepinds] - labels = clses[keepinds] - - if with_nms: - detections, labels = self._bboxes_nms(detections, labels, - self.test_cfg) - - return detections, labels - - def _bboxes_nms(self, bboxes, labels, cfg): - if 'nms_cfg' in cfg: - warning.warn('nms_cfg in test_cfg will be deprecated. ' - 'Please rename it as nms') - if 'nms' not in cfg: - cfg.nms = cfg.nms_cfg - - if labels.numel() > 0: - max_num = cfg.max_per_img - bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, - -1].contiguous(), - labels, cfg.nms) - if max_num > 0: - bboxes = bboxes[:max_num] - labels = labels[keep][:max_num] - - return bboxes, labels - - def decode_heatmap(self, - tl_heat, - br_heat, - tl_off, - br_off, - tl_emb=None, - br_emb=None, - tl_centripetal_shift=None, - br_centripetal_shift=None, - img_meta=None, - k=100, - kernel=3, - distance_threshold=0.5, - num_dets=1000): - """Transform outputs for a single batch item into raw bbox predictions. - - Args: - tl_heat (Tensor): Top-left corner heatmap for current level with - shape (N, num_classes, H, W). - br_heat (Tensor): Bottom-right corner heatmap for current level - with shape (N, num_classes, H, W). - tl_off (Tensor): Top-left corner offset for current level with - shape (N, corner_offset_channels, H, W). - br_off (Tensor): Bottom-right corner offset for current level with - shape (N, corner_offset_channels, H, W). - tl_emb (Tensor | None): Top-left corner embedding for current - level with shape (N, corner_emb_channels, H, W). - br_emb (Tensor | None): Bottom-right corner embedding for current - level with shape (N, corner_emb_channels, H, W). - tl_centripetal_shift (Tensor | None): Top-left centripetal shift - for current level with shape (N, 2, H, W). - br_centripetal_shift (Tensor | None): Bottom-right centripetal - shift for current level with shape (N, 2, H, W). - img_meta (dict): Meta information of current image, e.g., - image size, scaling factor, etc. - k (int): Get top k corner keypoints from heatmap. - kernel (int): Max pooling kernel for extract local maximum pixels. - distance_threshold (float): Distance threshold. Top-left and - bottom-right corner keypoints with feature distance less than - the threshold will be regarded as keypoints from same object. - num_dets (int): Num of raw boxes before doing nms. - - Returns: - tuple[torch.Tensor]: Decoded output of CornerHead, containing the - following Tensors: - - - bboxes (Tensor): Coords of each box. - - scores (Tensor): Scores of each box. - - clses (Tensor): Categories of each box. 
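`_bboxes_nms` relies on `mmcv.ops.batched_nms`, which performs class-aware NMS and returns the kept detections together with the keep indices. The sketch below uses `torchvision.ops.batched_nms` (which returns only the keep indices) to show the same idea; the data is made up for the example:

```
import torch
from torchvision.ops import batched_nms   # class-aware NMS, analogous to mmcv's

# Boxes with different labels never suppress each other.
boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],
                      [0., 0., 10., 10.]])
scores = torch.tensor([0.9, 0.8, 0.7])
labels = torch.tensor([0, 0, 1])          # the third box survives despite the overlap

keep = batched_nms(boxes, scores, labels, iou_threshold=0.5)   # tensor([0, 2])
dets = torch.cat([boxes[keep], scores[keep, None]], dim=1)     # (n, 5), as in _bboxes_nms
kept_labels = labels[keep]
```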
- """ - with_embedding = tl_emb is not None and br_emb is not None - with_centripetal_shift = ( - tl_centripetal_shift is not None - and br_centripetal_shift is not None) - assert with_embedding + with_centripetal_shift == 1 - batch, _, height, width = tl_heat.size() - if torch.onnx.is_in_onnx_export(): - inp_h, inp_w = img_meta['pad_shape_for_onnx'][:2] - else: - inp_h, inp_w, _ = img_meta['pad_shape'] - - # perform nms on heatmaps - tl_heat = get_local_maximum(tl_heat, kernel=kernel) - br_heat = get_local_maximum(br_heat, kernel=kernel) - - tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = get_topk_from_heatmap( - tl_heat, k=k) - br_scores, br_inds, br_clses, br_ys, br_xs = get_topk_from_heatmap( - br_heat, k=k) - - # We use repeat instead of expand here because expand is a - # shallow-copy function. Thus it could cause unexpected testing result - # sometimes. Using expand will decrease about 10% mAP during testing - # compared to repeat. - tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k) - tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k) - br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1) - br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1) - - tl_off = transpose_and_gather_feat(tl_off, tl_inds) - tl_off = tl_off.view(batch, k, 1, 2) - br_off = transpose_and_gather_feat(br_off, br_inds) - br_off = br_off.view(batch, 1, k, 2) - - tl_xs = tl_xs + tl_off[..., 0] - tl_ys = tl_ys + tl_off[..., 1] - br_xs = br_xs + br_off[..., 0] - br_ys = br_ys + br_off[..., 1] - - if with_centripetal_shift: - tl_centripetal_shift = transpose_and_gather_feat( - tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp() - br_centripetal_shift = transpose_and_gather_feat( - br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp() - - tl_ctxs = tl_xs + tl_centripetal_shift[..., 0] - tl_ctys = tl_ys + tl_centripetal_shift[..., 1] - br_ctxs = br_xs - br_centripetal_shift[..., 0] - br_ctys = br_ys - br_centripetal_shift[..., 1] - - # all possible boxes based on top k corners (ignoring class) - tl_xs *= (inp_w / width) - tl_ys *= (inp_h / height) - br_xs *= (inp_w / width) - br_ys *= (inp_h / height) - - if with_centripetal_shift: - tl_ctxs *= (inp_w / width) - tl_ctys *= (inp_h / height) - br_ctxs *= (inp_w / width) - br_ctys *= (inp_h / height) - - x_off, y_off = 0, 0 # no crop - if not torch.onnx.is_in_onnx_export(): - # since `RandomCenterCropPad` is done on CPU with numpy and it's - # not dynamic traceable when exporting to ONNX, thus 'border' - # does not appears as key in 'img_meta'. As a tmp solution, - # we move this 'border' handle part to the postprocess after - # finished exporting to ONNX, which is handle in - # `mmdet/core/export/model_wrappers.py`. Though difference between - # pytorch and exported onnx model, it might be ignored since - # comparable performance is achieved between them (e.g. 
40.4 vs - # 40.6 on COCO val2017, for CornerNet without test-time flip) - if 'border' in img_meta: - x_off = img_meta['border'][2] - y_off = img_meta['border'][0] - - tl_xs -= x_off - tl_ys -= y_off - br_xs -= x_off - br_ys -= y_off - - zeros = tl_xs.new_zeros(*tl_xs.size()) - tl_xs = torch.where(tl_xs > 0.0, tl_xs, zeros) - tl_ys = torch.where(tl_ys > 0.0, tl_ys, zeros) - br_xs = torch.where(br_xs > 0.0, br_xs, zeros) - br_ys = torch.where(br_ys > 0.0, br_ys, zeros) - - bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3) - area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs() - - if with_centripetal_shift: - tl_ctxs -= x_off - tl_ctys -= y_off - br_ctxs -= x_off - br_ctys -= y_off - - tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs) - tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys) - br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs) - br_ctys *= br_ctys.gt(0.0).type_as(br_ctys) - - ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys), - dim=3) - area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs() - - rcentral = torch.zeros_like(ct_bboxes) - # magic nums from paper section 4.1 - mu = torch.ones_like(area_bboxes) / 2.4 - mu[area_bboxes > 3500] = 1 / 2.1 # large bbox have smaller mu - - bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2 - bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2 - rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] - - bboxes[..., 0]) / 2 - rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] - - bboxes[..., 1]) / 2 - rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] - - bboxes[..., 0]) / 2 - rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] - - bboxes[..., 1]) / 2 - area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) * - (rcentral[..., 3] - rcentral[..., 1])).abs() - dists = area_ct_bboxes / area_rcentral - - tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | ( - ct_bboxes[..., 0] >= rcentral[..., 2]) - tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | ( - ct_bboxes[..., 1] >= rcentral[..., 3]) - br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | ( - ct_bboxes[..., 2] >= rcentral[..., 2]) - br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | ( - ct_bboxes[..., 3] >= rcentral[..., 3]) - - if with_embedding: - tl_emb = transpose_and_gather_feat(tl_emb, tl_inds) - tl_emb = tl_emb.view(batch, k, 1) - br_emb = transpose_and_gather_feat(br_emb, br_inds) - br_emb = br_emb.view(batch, 1, k) - dists = torch.abs(tl_emb - br_emb) - - tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k) - br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1) - - scores = (tl_scores + br_scores) / 2 # scores for all possible boxes - - # tl and br should have same class - tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k) - br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1) - cls_inds = (tl_clses != br_clses) - - # reject boxes based on distances - dist_inds = dists > distance_threshold - - # reject boxes based on widths and heights - width_inds = (br_xs <= tl_xs) - height_inds = (br_ys <= tl_ys) - - # No use `scores[cls_inds]`, instead we use `torch.where` here. - # Since only 1-D indices with type 'tensor(bool)' are supported - # when exporting to ONNX, any other bool indices with more dimensions - # (e.g. 
2-D bool tensor) as input parameter in node is invalid - negative_scores = -1 * torch.ones_like(scores) - scores = torch.where(cls_inds, negative_scores, scores) - scores = torch.where(width_inds, negative_scores, scores) - scores = torch.where(height_inds, negative_scores, scores) - scores = torch.where(dist_inds, negative_scores, scores) - - if with_centripetal_shift: - scores[tl_ctx_inds] = -1 - scores[tl_cty_inds] = -1 - scores[br_ctx_inds] = -1 - scores[br_cty_inds] = -1 - - scores = scores.view(batch, -1) - scores, inds = torch.topk(scores, num_dets) - scores = scores.unsqueeze(2) - - bboxes = bboxes.view(batch, -1, 4) - bboxes = gather_feat(bboxes, inds) - - clses = tl_clses.contiguous().view(batch, -1, 1) - clses = gather_feat(clses, inds).float() - - return bboxes, scores, clses - - def onnx_export(self, - tl_heats, - br_heats, - tl_embs, - br_embs, - tl_offs, - br_offs, - img_metas, - rescale=False, - with_nms=True): - """Transform network output for a batch into bbox predictions. - - Args: - tl_heats (list[Tensor]): Top-left corner heatmaps for each level - with shape (N, num_classes, H, W). - br_heats (list[Tensor]): Bottom-right corner heatmaps for each - level with shape (N, num_classes, H, W). - tl_embs (list[Tensor]): Top-left corner embeddings for each level - with shape (N, corner_emb_channels, H, W). - br_embs (list[Tensor]): Bottom-right corner embeddings for each - level with shape (N, corner_emb_channels, H, W). - tl_offs (list[Tensor]): Top-left corner offsets for each level - with shape (N, corner_offset_channels, H, W). - br_offs (list[Tensor]): Bottom-right corner offsets for each level - with shape (N, corner_offset_channels, H, W). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - tuple[Tensor, Tensor]: First tensor bboxes with shape - [N, num_det, 5], 5 arrange as (x1, y1, x2, y2, score) - and second element is class labels of shape [N, num_det]. - """ - assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len( - img_metas) == 1 - result_list = [] - for img_id in range(len(img_metas)): - result_list.append( - self._get_bboxes_single( - tl_heats[-1][img_id:img_id + 1, :], - br_heats[-1][img_id:img_id + 1, :], - tl_offs[-1][img_id:img_id + 1, :], - br_offs[-1][img_id:img_id + 1, :], - img_metas[img_id], - tl_emb=tl_embs[-1][img_id:img_id + 1, :], - br_emb=br_embs[-1][img_id:img_id + 1, :], - rescale=rescale, - with_nms=with_nms)) - - detections, labels = result_list[0] - # batch_size 1 here, [1, num_det, 5], [1, num_det] - return detections.unsqueeze(0), labels.unsqueeze(0) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ddod_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ddod_head.py deleted file mode 100644 index b2ff223348753b1338cccfefefd370dba0f38672..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ddod_head.py +++ /dev/null @@ -1,778 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
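The decode path above compresses several steps into one routine: max-pool NMS on the corner heatmaps, top-k peak selection, pairing every top-left corner with every bottom-right corner, and rejection of pairs whose class, geometry, embedding distance, or centripetal-shifted centers do not agree. A minimal sketch of just the peak-selection and pairing idea, using plain PyTorch on toy heatmaps (the helper below is illustrative, not an mmdet API):

```
import torch
import torch.nn.functional as F

def topk_corners(heat, k=4, kernel=3):
    # max-pool NMS: keep only local maxima of the heatmap, then take the k best peaks
    pad = (kernel - 1) // 2
    hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
    heat = heat * (hmax == heat).float()
    n, _, h, w = heat.shape
    scores, inds = torch.topk(heat.view(n, -1), k)
    ys = torch.div(inds, w, rounding_mode='floor') % h  # flat index -> 2-D coordinates
    xs = inds % w
    return scores, xs.float(), ys.float()

k = 4
tl = torch.rand(1, 1, 16, 16)  # toy top-left heatmap: batch 1, one class
br = torch.rand(1, 1, 16, 16)  # toy bottom-right heatmap
tl_s, tl_x, tl_y = topk_corners(tl, k)
br_s, br_x, br_y = topk_corners(br, k)

# all k x k candidate boxes, scored by the mean corner score; a pair is only
# plausible when the bottom-right corner lies below and to the right of the top-left
boxes = torch.stack((tl_x.view(1, k, 1).repeat(1, 1, k),
                     tl_y.view(1, k, 1).repeat(1, 1, k),
                     br_x.view(1, 1, k).repeat(1, k, 1),
                     br_y.view(1, 1, k).repeat(1, k, 1)), dim=-1)
scores = (tl_s.view(1, k, 1) + br_s.view(1, 1, k)) / 2
invalid = (boxes[..., 2] <= boxes[..., 0]) | (boxes[..., 3] <= boxes[..., 1])
scores = torch.where(invalid, torch.full_like(scores, -1.), scores)
print(boxes.shape, scores.shape)  # torch.Size([1, 4, 4, 4]) torch.Size([1, 4, 4])
```

In the full head the surviving pairs are additionally filtered by associative-embedding distance (CornerNet) or by checking the centripetal-shifted centers against the shrunken central region built with `mu` (CentripetalNet) before the final top-`num_dets` selection.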
-import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init -from mmcv.runner import force_fp32 - -from mmdet.core import (anchor_inside_flags, build_assigner, build_sampler, - images_to_levels, multi_apply, reduce_mean, unmap) -from mmdet.core.bbox import bbox_overlaps -from ..builder import HEADS, build_loss -from .anchor_head import AnchorHead - -EPS = 1e-12 - - -@HEADS.register_module() -class DDODHead(AnchorHead): - """DDOD head decomposes conjunctions lying in most current one-stage - detectors via label assignment disentanglement, spatial feature - disentanglement, and pyramid supervision disentanglement. - - https://arxiv.org/abs/2107.02963 - - Args: - num_classes (int): Number of categories excluding the - background category. - in_channels (int): Number of channels in the input feature map. - stacked_convs (int): The number of stacked Conv. Default: 4. - conv_cfg (dict): Conv config of ddod head. Default: None. - use_dcn (bool): Use dcn, Same as ATSS when False. Default: True. - norm_cfg (dict): Normal config of ddod head. Default: - dict(type='GN', num_groups=32, requires_grad=True). - loss_iou (dict): Config of IoU loss. Default: - dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0). - """ - - def __init__(self, - num_classes, - in_channels, - stacked_convs=4, - conv_cfg=None, - use_dcn=True, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), - loss_iou=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - **kwargs): - self.stacked_convs = stacked_convs - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.use_dcn = use_dcn - super(DDODHead, self).__init__(num_classes, in_channels, **kwargs) - - self.sampling = False - if self.train_cfg: - self.cls_assigner = build_assigner(self.train_cfg.assigner) - self.reg_assigner = build_assigner(self.train_cfg.reg_assigner) - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - self.loss_iou = build_loss(loss_iou) - - def _init_layers(self): - """Initialize layers of the head.""" - self.relu = nn.ReLU(inplace=True) - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=dict(type='DCN', deform_groups=1) - if i == 0 and self.use_dcn else self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=dict(type='DCN', deform_groups=1) - if i == 0 and self.use_dcn else self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.atss_cls = nn.Conv2d( - self.feat_channels, - self.num_base_priors * self.cls_out_channels, - 3, - padding=1) - self.atss_reg = nn.Conv2d( - self.feat_channels, self.num_base_priors * 4, 3, padding=1) - self.atss_iou = nn.Conv2d( - self.feat_channels, self.num_base_priors * 1, 3, padding=1) - self.scales = nn.ModuleList( - [Scale(1.0) for _ in self.prior_generator.strides]) - - # we use the global list in loss - self.cls_num_pos_samples_per_level = [ - 0. for _ in range(len(self.prior_generator.strides)) - ] - self.reg_num_pos_samples_per_level = [ - 0. 
for _ in range(len(self.prior_generator.strides)) - ] - - def init_weights(self): - """Initialize weights of the head.""" - for m in self.cls_convs: - normal_init(m.conv, std=0.01) - for m in self.reg_convs: - normal_init(m.conv, std=0.01) - normal_init(self.atss_reg, std=0.01) - normal_init(self.atss_iou, std=0.01) - bias_cls = bias_init_with_prob(0.01) - normal_init(self.atss_cls, std=0.01, bias=bias_cls) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: Usually a tuple of classification scores and bbox prediction - cls_scores (list[Tensor]): Classification scores for all scale - levels, each is a 4D-tensor, the channels number is - num_base_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale - levels, each is a 4D-tensor, the channels number is - num_base_priors * 4. - iou_preds (list[Tensor]): IoU scores for all scale levels, - each is a 4D-tensor, the channels number is - num_base_priors * 1. - """ - return multi_apply(self.forward_single, feats, self.scales) - - def forward_single(self, x, scale): - """Forward feature of a single scale level. - - Args: - x (Tensor): Features of a single scale level. - scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize - the bbox prediction. - - Returns: - tuple: - - cls_score (Tensor): Cls scores for a single scale level \ - the channels number is num_base_priors * num_classes. - - bbox_pred (Tensor): Box energies / deltas for a single \ - scale level, the channels number is num_base_priors * 4. - - iou_pred (Tensor): Iou for a single scale level, the \ - channel number is (N, num_base_priors * 1, H, W). - """ - cls_feat = x - reg_feat = x - for cls_conv in self.cls_convs: - cls_feat = cls_conv(cls_feat) - for reg_conv in self.reg_convs: - reg_feat = reg_conv(reg_feat) - cls_score = self.atss_cls(cls_feat) - # we just follow atss, not apply exp in bbox_pred - bbox_pred = scale(self.atss_reg(reg_feat)).float() - iou_pred = self.atss_iou(reg_feat) - return cls_score, bbox_pred, iou_pred - - def loss_cls_single(self, cls_score, labels, label_weights, - reweight_factor, num_total_samples): - """Compute cls loss of a single scale level. - - Args: - cls_score (Tensor): Box scores for each scale level - Has shape (N, num_base_priors * num_classes, H, W). - labels (Tensor): Labels of each anchors with shape - (N, num_total_anchors). - label_weights (Tensor): Label weights of each anchor with shape - (N, num_total_anchors) - reweight_factor (list[int]): Reweight factor for cls and reg - loss. - num_total_samples (int): Number of positive samples that is - reduced over all GPUs. - - Returns: - tuple[Tensor]: A tuple of loss components. - """ - cls_score = cls_score.permute(0, 2, 3, 1).reshape( - -1, self.cls_out_channels).contiguous() - labels = labels.reshape(-1) - label_weights = label_weights.reshape(-1) - loss_cls = self.loss_cls( - cls_score, labels, label_weights, avg_factor=num_total_samples) - return reweight_factor * loss_cls, - - def loss_reg_single(self, anchors, bbox_pred, iou_pred, labels, - label_weights, bbox_targets, bbox_weights, - reweight_factor, num_total_samples): - """Compute reg loss of a single scale level. - - Args: - anchors (Tensor): Box reference for each scale level with shape - (N, num_total_anchors, 4). - bbox_pred (Tensor): Box energies / deltas for each scale - level with shape (N, num_base_priors * 4, H, W). 
- iou_pred (Tensor): Iou for a single scale level, the - channel number is (N, num_base_priors * 1, H, W). - labels (Tensor): Labels of each anchors with shape - (N, num_total_anchors). - label_weights (Tensor): Label weights of each anchor with shape - (N, num_total_anchors) - bbox_targets (Tensor): BBox regression targets of each anchor - weight shape (N, num_total_anchors, 4). - bbox_weights (Tensor): BBox weights of all anchors in the - image with shape (N, 4) - reweight_factor (list[int]): Reweight factor for cls and reg - loss. - num_total_samples (int): Number of positive samples that is - reduced over all GPUs. - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - anchors = anchors.reshape(-1, 4) - bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) - iou_pred = iou_pred.permute(0, 2, 3, 1).reshape(-1, ) - bbox_targets = bbox_targets.reshape(-1, 4) - bbox_weights = bbox_weights.reshape(-1, 4) - labels = labels.reshape(-1) - label_weights = label_weights.reshape(-1) - - iou_targets = label_weights.new_zeros(labels.shape) - iou_weights = label_weights.new_zeros(labels.shape) - iou_weights[(bbox_weights.sum(axis=1) > 0).nonzero( - as_tuple=False)] = 1. - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - bg_class_ind = self.num_classes - pos_inds = ((labels >= 0) - & - (labels < bg_class_ind)).nonzero(as_tuple=False).squeeze(1) - - if len(pos_inds) > 0: - pos_bbox_targets = bbox_targets[pos_inds] - pos_bbox_pred = bbox_pred[pos_inds] - pos_anchors = anchors[pos_inds] - - pos_decode_bbox_pred = self.bbox_coder.decode( - pos_anchors, pos_bbox_pred) - pos_decode_bbox_targets = self.bbox_coder.decode( - pos_anchors, pos_bbox_targets) - - # regression loss - loss_bbox = self.loss_bbox( - pos_decode_bbox_pred, - pos_decode_bbox_targets, - avg_factor=num_total_samples) - - iou_targets[pos_inds] = bbox_overlaps( - pos_decode_bbox_pred.detach(), - pos_decode_bbox_targets, - is_aligned=True) - loss_iou = self.loss_iou( - iou_pred, - iou_targets, - iou_weights, - avg_factor=num_total_samples) - else: - loss_bbox = bbox_pred.sum() * 0 - loss_iou = iou_pred.sum() * 0 - - return reweight_factor * loss_bbox, reweight_factor * loss_iou - - def calc_reweight_factor(self, labels_list): - """Compute reweight_factor for regression and classification loss.""" - # get pos samples for each level - bg_class_ind = self.num_classes - for ii, each_level_label in enumerate(labels_list): - pos_inds = ((each_level_label >= 0) & - (each_level_label < bg_class_ind)).nonzero( - as_tuple=False).squeeze(1) - self.cls_num_pos_samples_per_level[ii] += len(pos_inds) - # get reweight factor from 1 ~ 2 with bilinear interpolation - min_pos_samples = min(self.cls_num_pos_samples_per_level) - max_pos_samples = max(self.cls_num_pos_samples_per_level) - interval = 1. / (max_pos_samples - min_pos_samples + 1e-10) - reweight_factor_per_level = [] - for pos_samples in self.cls_num_pos_samples_per_level: - factor = 2. - (pos_samples - min_pos_samples) * interval - reweight_factor_per_level.append(factor) - return reweight_factor_per_level - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds')) - def loss(self, - cls_scores, - bbox_preds, - iou_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. 
- - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_base_priors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_base_priors * 4, H, W) - iou_preds (list[Tensor]): Score factor for all scale level, - each is a 4D-tensor, has shape (batch_size, 1, H, W). - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor] | None): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - - # calculate common vars for cls and reg assigners at once - targets_com = self.process_predictions_and_anchors( - anchor_list, valid_flag_list, cls_scores, bbox_preds, img_metas, - gt_bboxes_ignore) - (anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list, - bbox_pred_list, gt_bboxes_ignore_list) = targets_com - - # classification branch assigner - cls_targets = self.get_cls_targets( - anchor_list, - valid_flag_list, - num_level_anchors_list, - cls_score_list, - bbox_pred_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore_list, - gt_labels_list=gt_labels, - label_channels=label_channels) - if cls_targets is None: - return None - - (cls_anchor_list, labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) = cls_targets - - num_total_samples = reduce_mean( - torch.tensor(num_total_pos, dtype=torch.float, - device=device)).item() - num_total_samples = max(num_total_samples, 1.0) - - reweight_factor_per_level = self.calc_reweight_factor(labels_list) - - cls_losses_cls, = multi_apply( - self.loss_cls_single, - cls_scores, - labels_list, - label_weights_list, - reweight_factor_per_level, - num_total_samples=num_total_samples) - - # regression branch assigner - reg_targets = self.get_reg_targets( - anchor_list, - valid_flag_list, - num_level_anchors_list, - cls_score_list, - bbox_pred_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore_list, - gt_labels_list=gt_labels, - label_channels=label_channels) - if reg_targets is None: - return None - - (reg_anchor_list, labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) = reg_targets - - num_total_samples = reduce_mean( - torch.tensor(num_total_pos, dtype=torch.float, - device=device)).item() - num_total_samples = max(num_total_samples, 1.0) - - reweight_factor_per_level = self.calc_reweight_factor(labels_list) - - reg_losses_bbox, reg_losses_iou = multi_apply( - self.loss_reg_single, - reg_anchor_list, - bbox_preds, - iou_preds, - labels_list, - label_weights_list, - bbox_targets_list, - bbox_weights_list, - reweight_factor_per_level, - num_total_samples=num_total_samples) - - return dict( - loss_cls=cls_losses_cls, - loss_bbox=reg_losses_bbox, - loss_iou=reg_losses_iou) - - def process_predictions_and_anchors(self, anchor_list, valid_flag_list, - 
cls_scores, bbox_preds, img_metas, - gt_bboxes_ignore_list): - """Compute common vars for regression and classification targets. - - Args: - anchor_list (list[Tensor]): anchors of each image. - valid_flag_list (list[Tensor]): Valid flags of each image. - cls_scores (list[Tensor]): Classification scores for all scale - levels, each is a 4D-tensor, the channels number is - num_base_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale - levels, each is a 4D-tensor, the channels number is - num_base_priors * 4. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore_list (list[Tensor] | None): specify which bounding - boxes can be ignored when computing the loss. - - Return: - tuple[Tensor]: A tuple of common loss vars. - """ - num_imgs = len(img_metas) - assert len(anchor_list) == len(valid_flag_list) == num_imgs - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - num_level_anchors_list = [num_level_anchors] * num_imgs - - anchor_list_ = [] - valid_flag_list_ = [] - # concat all level anchors and flags to a single tensor - for i in range(num_imgs): - assert len(anchor_list[i]) == len(valid_flag_list[i]) - anchor_list_.append(torch.cat(anchor_list[i])) - valid_flag_list_.append(torch.cat(valid_flag_list[i])) - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - - num_levels = len(cls_scores) - cls_score_list = [] - bbox_pred_list = [] - - mlvl_cls_score_list = [ - cls_score.permute(0, 2, 3, 1).reshape( - num_imgs, -1, self.num_base_priors * self.cls_out_channels) - for cls_score in cls_scores - ] - mlvl_bbox_pred_list = [ - bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, - self.num_base_priors * 4) - for bbox_pred in bbox_preds - ] - - for i in range(num_imgs): - mlvl_cls_tensor_list = [ - mlvl_cls_score_list[j][i] for j in range(num_levels) - ] - mlvl_bbox_tensor_list = [ - mlvl_bbox_pred_list[j][i] for j in range(num_levels) - ] - cat_mlvl_cls_score = torch.cat(mlvl_cls_tensor_list, dim=0) - cat_mlvl_bbox_pred = torch.cat(mlvl_bbox_tensor_list, dim=0) - cls_score_list.append(cat_mlvl_cls_score) - bbox_pred_list.append(cat_mlvl_bbox_pred) - return (anchor_list_, valid_flag_list_, num_level_anchors_list, - cls_score_list, bbox_pred_list, gt_bboxes_ignore_list) - - def get_cls_targets(self, - anchor_list, - valid_flag_list, - num_level_anchors_list, - cls_score_list, - bbox_pred_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=1, - unmap_outputs=True): - """Get cls targets for DDOD head. - - This method is almost the same as `AnchorHead.get_targets()`. - Besides returning the targets as the parent method does, - it also returns the anchors as the first element of the - returned tuple. - - Args: - anchor_list (list[Tensor]): anchors of each image. - valid_flag_list (list[Tensor]): Valid flags of each image. - num_level_anchors_list (list[Tensor]): Number of anchors of each - scale level of all image. - cls_score_list (list[Tensor]): Classification scores for all scale - levels, each is a 4D-tensor, the channels number is - num_base_priors * num_classes. - bbox_pred_list (list[Tensor]): Box energies / deltas for all scale - levels, each is a 4D-tensor, the channels number is - num_base_priors * 4. - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. 
- img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore_list (list[Tensor] | None): specify which bounding - boxes can be ignored when computing the loss. - gt_labels_list (list[Tensor]): class indices corresponding to - each box. - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Return: - tuple[Tensor]: A tuple of cls targets components. - """ - (all_anchors, all_labels, all_label_weights, all_bbox_targets, - all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( - self._get_target_single, - anchor_list, - valid_flag_list, - cls_score_list, - bbox_pred_list, - num_level_anchors_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - unmap_outputs=unmap_outputs, - is_cls_assigner=True) - # no valid anchors - if any([labels is None for labels in all_labels]): - return None - # sampled anchors of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - # split targets to a list w.r.t. multiple levels - anchors_list = images_to_levels(all_anchors, num_level_anchors_list[0]) - labels_list = images_to_levels(all_labels, num_level_anchors_list[0]) - label_weights_list = images_to_levels(all_label_weights, - num_level_anchors_list[0]) - bbox_targets_list = images_to_levels(all_bbox_targets, - num_level_anchors_list[0]) - bbox_weights_list = images_to_levels(all_bbox_weights, - num_level_anchors_list[0]) - return (anchors_list, labels_list, label_weights_list, - bbox_targets_list, bbox_weights_list, num_total_pos, - num_total_neg) - - def get_reg_targets(self, - anchor_list, - valid_flag_list, - num_level_anchors_list, - cls_score_list, - bbox_pred_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=1, - unmap_outputs=True): - """Get reg targets for DDOD head. - - This method is almost the same as `AnchorHead.get_targets()` when - is_cls_assigner is False. Besides returning the targets as the parent - method does, it also returns the anchors as the first element of the - returned tuple. - - Args: - anchor_list (list[Tensor]): anchors of each image. - valid_flag_list (list[Tensor]): Valid flags of each image. - num_level_anchors (int): Number of anchors of each scale level. - cls_scores (list[Tensor]): Classification scores for all scale - levels, each is a 4D-tensor, the channels number is - num_base_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale - levels, each is a 4D-tensor, the channels number is - num_base_priors * 4. - gt_labels_list (list[Tensor]): class indices corresponding to - each box. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore_list (list[Tensor] | None): specify which bounding - boxes can be ignored when computing the loss. - - Return: - tuple[Tensor]: A tuple of reg targets components. 
- """ - (all_anchors, all_labels, all_label_weights, all_bbox_targets, - all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( - self._get_target_single, - anchor_list, - valid_flag_list, - cls_score_list, - bbox_pred_list, - num_level_anchors_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - unmap_outputs=unmap_outputs, - is_cls_assigner=False) - # no valid anchors - if any([labels is None for labels in all_labels]): - return None - # sampled anchors of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - # split targets to a list w.r.t. multiple levels - anchors_list = images_to_levels(all_anchors, num_level_anchors_list[0]) - labels_list = images_to_levels(all_labels, num_level_anchors_list[0]) - label_weights_list = images_to_levels(all_label_weights, - num_level_anchors_list[0]) - bbox_targets_list = images_to_levels(all_bbox_targets, - num_level_anchors_list[0]) - bbox_weights_list = images_to_levels(all_bbox_weights, - num_level_anchors_list[0]) - return (anchors_list, labels_list, label_weights_list, - bbox_targets_list, bbox_weights_list, num_total_pos, - num_total_neg) - - def _get_target_single(self, - flat_anchors, - valid_flags, - cls_scores, - bbox_preds, - num_level_anchors, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=1, - unmap_outputs=True, - is_cls_assigner=True): - """Compute regression, classification targets for anchors in a single - image. - - Args: - flat_anchors (Tensor): Multi-level anchors of the image, - which are concatenated into a single tensor of shape - (num_base_priors, 4). - valid_flags (Tensor): Multi level valid flags of the image, - which are concatenated into a single tensor of - shape (num_base_priors,). - cls_scores (Tensor): Classification scores for all scale - levels of the image. - bbox_preds (Tensor): Box energies / deltas for all scale - levels of the image. - num_level_anchors (list[int]): Number of anchors of each - scale level. - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, ). - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts, ). - img_meta (dict): Meta info of the image. - label_channels (int): Channel of label. Default: 1. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. Default: True. - is_cls_assigner (bool): Classification or regression. - Default: True. - - Returns: - tuple: N is the number of total anchors in the image. - - labels (Tensor): Labels of all anchors in the image with \ - shape (N, ). - - label_weights (Tensor): Label weights of all anchor in the \ - image with shape (N, ). - - bbox_targets (Tensor): BBox targets of all anchors in the \ - image with shape (N, 4). - - bbox_weights (Tensor): BBox weights of all anchors in the \ - image with shape (N, 4) - - pos_inds (Tensor): Indices of positive anchor with shape \ - (num_pos, ). - - neg_inds (Tensor): Indices of negative anchor with shape \ - (num_neg, ). 
- """ - inside_flags = anchor_inside_flags(flat_anchors, valid_flags, - img_meta['img_shape'][:2], - self.train_cfg.allowed_border) - if not inside_flags.any(): - return (None, ) * 7 - # assign gt and sample anchors - anchors = flat_anchors[inside_flags, :] - - num_level_anchors_inside = self.get_num_level_anchors_inside( - num_level_anchors, inside_flags) - bbox_preds_valid = bbox_preds[inside_flags, :] - cls_scores_valid = cls_scores[inside_flags, :] - - assigner = self.cls_assigner if is_cls_assigner else self.reg_assigner - - # decode prediction out of assigner - bbox_preds_valid = self.bbox_coder.decode(anchors, bbox_preds_valid) - assign_result = assigner.assign(anchors, num_level_anchors_inside, - gt_bboxes, gt_bboxes_ignore, gt_labels, - cls_scores_valid, bbox_preds_valid) - sampling_result = self.sampler.sample(assign_result, anchors, - gt_bboxes) - - num_valid_anchors = anchors.shape[0] - bbox_targets = torch.zeros_like(anchors) - bbox_weights = torch.zeros_like(anchors) - labels = anchors.new_full((num_valid_anchors, ), - self.num_classes, - dtype=torch.long) - label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - if hasattr(self, 'bbox_coder'): - pos_bbox_targets = self.bbox_coder.encode( - sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) - else: - # used in VFNetHead - pos_bbox_targets = sampling_result.pos_gt_bboxes - bbox_targets[pos_inds, :] = pos_bbox_targets - bbox_weights[pos_inds, :] = 1.0 - if gt_labels is None: - # Only rpn gives gt_labels as None - # Foreground is the first class since v2.5.0 - labels[pos_inds] = 0 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if self.train_cfg.pos_weight <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg.pos_weight - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_anchors.size(0) - anchors = unmap(anchors, num_total_anchors, inside_flags) - labels = unmap( - labels, num_total_anchors, inside_flags, fill=self.num_classes) - label_weights = unmap(label_weights, num_total_anchors, - inside_flags) - bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) - bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) - - return (anchors, labels, label_weights, bbox_targets, bbox_weights, - pos_inds, neg_inds) - - def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): - """Get the anchors of each scale level inside. - - Args: - num_level_anchors (list[int]): Number of anchors of each - scale level. - inside_flags (Tensor): Multi level inside flags of the image, - which are concatenated into a single tensor of - shape (num_base_priors,). - - Returns: - list[int]: Number of anchors of each scale level inside. 
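The per-level reweighting applied by both loss branches comes from `calc_reweight_factor` above: the running count of positive samples per pyramid level is mapped linearly onto [1, 2], so sparsely supervised levels are up-weighted while the densest level keeps weight 1. A small worked example with made-up counts:

```
# pyramid-supervision reweighting, mirroring calc_reweight_factor above
pos_per_level = [12., 40., 90., 25., 5.]       # hypothetical positives per FPN level
lo, hi = min(pos_per_level), max(pos_per_level)
interval = 1. / (hi - lo + 1e-10)
factors = [2. - (p - lo) * interval for p in pos_per_level]
print([round(f, 3) for f in factors])          # [1.918, 1.588, 1.0, 1.765, 2.0]
```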
- """ - split_inside_flags = torch.split(inside_flags, num_level_anchors) - num_level_anchors_inside = [ - int(flags.sum()) for flags in split_inside_flags - ] - return num_level_anchors_inside diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/deformable_detr_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/deformable_detr_head.py deleted file mode 100644 index 71c278523f6e26a1f88219a66173a82af6c6d0db..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/deformable_detr_head.py +++ /dev/null @@ -1,318 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import Linear, bias_init_with_prob, constant_init -from mmcv.runner import force_fp32 - -from mmdet.core import multi_apply -from mmdet.models.utils.transformer import inverse_sigmoid -from ..builder import HEADS -from .detr_head import DETRHead - - -@HEADS.register_module() -class DeformableDETRHead(DETRHead): - """Head of DeformDETR: Deformable DETR: Deformable Transformers for End-to- - End Object Detection. - - Code is modified from the `official github repo - `_. - - More details can be found in the `paper - `_ . - - Args: - with_box_refine (bool): Whether to refine the reference points - in the decoder. Defaults to False. - as_two_stage (bool) : Whether to generate the proposal from - the outputs of encoder. - transformer (obj:`ConfigDict`): ConfigDict is used for building - the Encoder and Decoder. - """ - - def __init__(self, - *args, - with_box_refine=False, - as_two_stage=False, - transformer=None, - **kwargs): - self.with_box_refine = with_box_refine - self.as_two_stage = as_two_stage - if self.as_two_stage: - transformer['as_two_stage'] = self.as_two_stage - - super(DeformableDETRHead, self).__init__( - *args, transformer=transformer, **kwargs) - - def _init_layers(self): - """Initialize classification branch and regression branch of head.""" - - fc_cls = Linear(self.embed_dims, self.cls_out_channels) - reg_branch = [] - for _ in range(self.num_reg_fcs): - reg_branch.append(Linear(self.embed_dims, self.embed_dims)) - reg_branch.append(nn.ReLU()) - reg_branch.append(Linear(self.embed_dims, 4)) - reg_branch = nn.Sequential(*reg_branch) - - def _get_clones(module, N): - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - - # last reg_branch is used to generate proposal from - # encode feature map when as_two_stage is True. 
- num_pred = (self.transformer.decoder.num_layers + 1) if \ - self.as_two_stage else self.transformer.decoder.num_layers - - if self.with_box_refine: - self.cls_branches = _get_clones(fc_cls, num_pred) - self.reg_branches = _get_clones(reg_branch, num_pred) - else: - - self.cls_branches = nn.ModuleList( - [fc_cls for _ in range(num_pred)]) - self.reg_branches = nn.ModuleList( - [reg_branch for _ in range(num_pred)]) - - if not self.as_two_stage: - self.query_embedding = nn.Embedding(self.num_query, - self.embed_dims * 2) - - def init_weights(self): - """Initialize weights of the DeformDETR head.""" - self.transformer.init_weights() - if self.loss_cls.use_sigmoid: - bias_init = bias_init_with_prob(0.01) - for m in self.cls_branches: - nn.init.constant_(m.bias, bias_init) - for m in self.reg_branches: - constant_init(m[-1], 0, bias=0) - nn.init.constant_(self.reg_branches[0][-1].bias.data[2:], -2.0) - if self.as_two_stage: - for m in self.reg_branches: - nn.init.constant_(m[-1].bias.data[2:], 0.0) - - def forward(self, mlvl_feats, img_metas): - """Forward function. - - Args: - mlvl_feats (tuple[Tensor]): Features from the upstream - network, each is a 4D-tensor with shape - (N, C, H, W). - img_metas (list[dict]): List of image information. - - Returns: - all_cls_scores (Tensor): Outputs from the classification head, \ - shape [nb_dec, bs, num_query, cls_out_channels]. Note \ - cls_out_channels should includes background. - all_bbox_preds (Tensor): Sigmoid outputs from the regression \ - head with normalized coordinate format (cx, cy, w, h). \ - Shape [nb_dec, bs, num_query, 4]. - enc_outputs_class (Tensor): The score of each point on encode \ - feature map, has shape (N, h*w, num_class). Only when \ - as_two_stage is True it would be returned, otherwise \ - `None` would be returned. - enc_outputs_coord (Tensor): The proposal generate from the \ - encode feature map, has shape (N, h*w, 4). Only when \ - as_two_stage is True it would be returned, otherwise \ - `None` would be returned. 
- """ - - batch_size = mlvl_feats[0].size(0) - input_img_h, input_img_w = img_metas[0]['batch_input_shape'] - img_masks = mlvl_feats[0].new_ones( - (batch_size, input_img_h, input_img_w)) - for img_id in range(batch_size): - img_h, img_w, _ = img_metas[img_id]['img_shape'] - img_masks[img_id, :img_h, :img_w] = 0 - - mlvl_masks = [] - mlvl_positional_encodings = [] - for feat in mlvl_feats: - mlvl_masks.append( - F.interpolate(img_masks[None], - size=feat.shape[-2:]).to(torch.bool).squeeze(0)) - mlvl_positional_encodings.append( - self.positional_encoding(mlvl_masks[-1])) - - query_embeds = None - if not self.as_two_stage: - query_embeds = self.query_embedding.weight - hs, init_reference, inter_references, \ - enc_outputs_class, enc_outputs_coord = self.transformer( - mlvl_feats, - mlvl_masks, - query_embeds, - mlvl_positional_encodings, - reg_branches=self.reg_branches if self.with_box_refine else None, # noqa:E501 - cls_branches=self.cls_branches if self.as_two_stage else None # noqa:E501 - ) - hs = hs.permute(0, 2, 1, 3) - outputs_classes = [] - outputs_coords = [] - - for lvl in range(hs.shape[0]): - if lvl == 0: - reference = init_reference - else: - reference = inter_references[lvl - 1] - reference = inverse_sigmoid(reference) - outputs_class = self.cls_branches[lvl](hs[lvl]) - tmp = self.reg_branches[lvl](hs[lvl]) - if reference.shape[-1] == 4: - tmp += reference - else: - assert reference.shape[-1] == 2 - tmp[..., :2] += reference - outputs_coord = tmp.sigmoid() - outputs_classes.append(outputs_class) - outputs_coords.append(outputs_coord) - - outputs_classes = torch.stack(outputs_classes) - outputs_coords = torch.stack(outputs_coords) - if self.as_two_stage: - return outputs_classes, outputs_coords, \ - enc_outputs_class, \ - enc_outputs_coord.sigmoid() - else: - return outputs_classes, outputs_coords, \ - None, None - - @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) - def loss(self, - all_cls_scores, - all_bbox_preds, - enc_cls_scores, - enc_bbox_preds, - gt_bboxes_list, - gt_labels_list, - img_metas, - gt_bboxes_ignore=None): - """"Loss function. - - Args: - all_cls_scores (Tensor): Classification score of all - decoder layers, has shape - [nb_dec, bs, num_query, cls_out_channels]. - all_bbox_preds (Tensor): Sigmoid regression - outputs of all decode layers. Each is a 4D-tensor with - normalized coordinate format (cx, cy, w, h) and shape - [nb_dec, bs, num_query, 4]. - enc_cls_scores (Tensor): Classification scores of - points on encode feature map , has shape - (N, h*w, num_classes). Only be passed when as_two_stage is - True, otherwise is None. - enc_bbox_preds (Tensor): Regression results of each points - on the encode feature map, has shape (N, h*w, 4). Only be - passed when as_two_stage is True, otherwise is None. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - img_metas (list[dict]): List of image meta information. - gt_bboxes_ignore (list[Tensor], optional): Bounding boxes - which can be ignored for each image. Default None. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert gt_bboxes_ignore is None, \ - f'{self.__class__.__name__} only supports ' \ - f'for gt_bboxes_ignore setting to None.' 
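With `with_box_refine` enabled, each decoder layer nudges the previous reference boxes rather than predicting coordinates from scratch: the raw regression output is added to the reference in logit space via `inverse_sigmoid` and squashed back to (0, 1), as in the layer loop above. A standalone sketch of one such refinement step (the `inverse_sigmoid` here is a simplified stand-in for `mmdet.models.utils.transformer.inverse_sigmoid`):

```
import torch

def inverse_sigmoid(x, eps=1e-5):
    # map (0, 1) coordinates back to logit space so unbounded deltas can be added
    x = x.clamp(min=eps, max=1 - eps)
    return torch.log(x / (1 - x))

reference = torch.tensor([[0.30, 0.40, 0.20, 0.10]])  # normalized (cx, cy, w, h)
delta = torch.tensor([[0.50, -0.25, 0.00, 0.10]])     # raw output of one reg branch

refined = (delta + inverse_sigmoid(reference)).sigmoid()
print(refined)  # still a valid normalized box, shifted by the predicted delta
```

In the refinement setting each decoder layer gets its own deep-copied `reg_branch` (see `_init_layers` above), so successive refinement steps are free to learn different corrections; without refinement the branches simply share parameters.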
- - num_dec_layers = len(all_cls_scores) - all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] - all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] - all_gt_bboxes_ignore_list = [ - gt_bboxes_ignore for _ in range(num_dec_layers) - ] - img_metas_list = [img_metas for _ in range(num_dec_layers)] - - losses_cls, losses_bbox, losses_iou = multi_apply( - self.loss_single, all_cls_scores, all_bbox_preds, - all_gt_bboxes_list, all_gt_labels_list, img_metas_list, - all_gt_bboxes_ignore_list) - - loss_dict = dict() - # loss of proposal generated from encode feature map. - if enc_cls_scores is not None: - binary_labels_list = [ - torch.zeros_like(gt_labels_list[i]) - for i in range(len(img_metas)) - ] - enc_loss_cls, enc_losses_bbox, enc_losses_iou = \ - self.loss_single(enc_cls_scores, enc_bbox_preds, - gt_bboxes_list, binary_labels_list, - img_metas, gt_bboxes_ignore) - loss_dict['enc_loss_cls'] = enc_loss_cls - loss_dict['enc_loss_bbox'] = enc_losses_bbox - loss_dict['enc_loss_iou'] = enc_losses_iou - - # loss from the last decoder layer - loss_dict['loss_cls'] = losses_cls[-1] - loss_dict['loss_bbox'] = losses_bbox[-1] - loss_dict['loss_iou'] = losses_iou[-1] - # loss from other decoder layers - num_dec_layer = 0 - for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], - losses_bbox[:-1], - losses_iou[:-1]): - loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i - loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i - loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i - num_dec_layer += 1 - return loss_dict - - @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) - def get_bboxes(self, - all_cls_scores, - all_bbox_preds, - enc_cls_scores, - enc_bbox_preds, - img_metas, - rescale=False): - """Transform network outputs for a batch into bbox predictions. - - Args: - all_cls_scores (Tensor): Classification score of all - decoder layers, has shape - [nb_dec, bs, num_query, cls_out_channels]. - all_bbox_preds (Tensor): Sigmoid regression - outputs of all decode layers. Each is a 4D-tensor with - normalized coordinate format (cx, cy, w, h) and shape - [nb_dec, bs, num_query, 4]. - enc_cls_scores (Tensor): Classification scores of - points on encode feature map , has shape - (N, h*w, num_classes). Only be passed when as_two_stage is - True, otherwise is None. - enc_bbox_preds (Tensor): Regression results of each points - on the encode feature map, has shape (N, h*w, 4). Only be - passed when as_two_stage is True, otherwise is None. - img_metas (list[dict]): Meta information of each image. - rescale (bool, optional): If True, return boxes in original - image space. Default False. - - Returns: - list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \ - The first item is an (n, 5) tensor, where the first 4 columns \ - are bounding box positions (tl_x, tl_y, br_x, br_y) and the \ - 5-th column is a score between 0 and 1. The second item is a \ - (n,) tensor where each item is the predicted class label of \ - the corresponding box. 
- """ - cls_scores = all_cls_scores[-1] - bbox_preds = all_bbox_preds[-1] - - result_list = [] - for img_id in range(len(img_metas)): - cls_score = cls_scores[img_id] - bbox_pred = bbox_preds[img_id] - img_shape = img_metas[img_id]['img_shape'] - scale_factor = img_metas[img_id]['scale_factor'] - proposals = self._get_bboxes_single(cls_score, bbox_pred, - img_shape, scale_factor, - rescale) - result_list.append(proposals) - return result_list diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/dense_test_mixins.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/dense_test_mixins.py deleted file mode 100644 index 3421548955d62652ea3d6e65dec71253d021615a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/dense_test_mixins.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import sys -from inspect import signature - -import torch -from mmcv.ops import batched_nms - -from mmdet.core import bbox_mapping_back, merge_aug_proposals - -if sys.version_info >= (3, 7): - from mmdet.utils.contextmanagers import completed - - -class BBoxTestMixin(object): - """Mixin class for testing det bboxes via DenseHead.""" - - def simple_test_bboxes(self, feats, img_metas, rescale=False): - """Test det bboxes without test-time augmentation, can be applied in - DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, - etc. - - Args: - feats (tuple[torch.Tensor]): Multi-level features from the - upstream network, each is a 4D-tensor. - img_metas (list[dict]): List of image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is ``bboxes`` with shape (n, 5), - where 5 represent (tl_x, tl_y, br_x, br_y, score). - The shape of the second tensor in the tuple is ``labels`` - with shape (n,) - """ - outs = self.forward(feats) - results_list = self.get_bboxes( - *outs, img_metas=img_metas, rescale=rescale) - return results_list - - def aug_test_bboxes(self, feats, img_metas, rescale=False): - """Test det bboxes with test time augmentation, can be applied in - DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, - etc. - - Args: - feats (list[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains features for all images in the batch. - img_metas (list[list[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. each dict has image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is ``bboxes`` with shape (n, 5), - where 5 represent (tl_x, tl_y, br_x, br_y, score). - The shape of the second tensor in the tuple is ``labels`` - with shape (n,). The length of list should always be 1. 
- """ - # check with_nms argument - gb_sig = signature(self.get_bboxes) - gb_args = [p.name for p in gb_sig.parameters.values()] - gbs_sig = signature(self._get_bboxes_single) - gbs_args = [p.name for p in gbs_sig.parameters.values()] - assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \ - f'{self.__class__.__name__}' \ - ' does not support test-time augmentation' - - aug_bboxes = [] - aug_scores = [] - aug_labels = [] - for x, img_meta in zip(feats, img_metas): - # only one image in the batch - outs = self.forward(x) - bbox_outputs = self.get_bboxes( - *outs, - img_metas=img_meta, - cfg=self.test_cfg, - rescale=False, - with_nms=False)[0] - aug_bboxes.append(bbox_outputs[0]) - aug_scores.append(bbox_outputs[1]) - if len(bbox_outputs) >= 3: - aug_labels.append(bbox_outputs[2]) - - # after merging, bboxes will be rescaled to the original image size - merged_bboxes, merged_scores = self.merge_aug_bboxes( - aug_bboxes, aug_scores, img_metas) - merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None - - if merged_bboxes.numel() == 0: - det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1) - return [ - (det_bboxes, merged_labels), - ] - - det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores, - merged_labels, self.test_cfg.nms) - det_bboxes = det_bboxes[:self.test_cfg.max_per_img] - det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img] - - if rescale: - _det_bboxes = det_bboxes - else: - _det_bboxes = det_bboxes.clone() - _det_bboxes[:, :4] *= det_bboxes.new_tensor( - img_metas[0][0]['scale_factor']) - - return [ - (_det_bboxes, det_labels), - ] - - def simple_test_rpn(self, x, img_metas): - """Test without augmentation, only for ``RPNHead`` and its variants, - e.g., ``GARPNHead``, etc. - - Args: - x (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - img_metas (list[dict]): Meta info of each image. - - Returns: - list[Tensor]: Proposals of each image, each item has shape (n, 5), - where 5 represent (tl_x, tl_y, br_x, br_y, score). - """ - rpn_outs = self(x) - proposal_list = self.get_bboxes(*rpn_outs, img_metas=img_metas) - return proposal_list - - def aug_test_rpn(self, feats, img_metas): - """Test with augmentation for only for ``RPNHead`` and its variants, - e.g., ``GARPNHead``, etc. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - img_metas (list[dict]): Meta info of each image. - - Returns: - list[Tensor]: Proposals of each image, each item has shape (n, 5), - where 5 represent (tl_x, tl_y, br_x, br_y, score). 
- """ - samples_per_gpu = len(img_metas[0]) - aug_proposals = [[] for _ in range(samples_per_gpu)] - for x, img_meta in zip(feats, img_metas): - proposal_list = self.simple_test_rpn(x, img_meta) - for i, proposals in enumerate(proposal_list): - aug_proposals[i].append(proposals) - # reorganize the order of 'img_metas' to match the dimensions - # of 'aug_proposals' - aug_img_metas = [] - for i in range(samples_per_gpu): - aug_img_meta = [] - for j in range(len(img_metas)): - aug_img_meta.append(img_metas[j][i]) - aug_img_metas.append(aug_img_meta) - # after merging, proposals will be rescaled to the original image size - merged_proposals = [ - merge_aug_proposals(proposals, aug_img_meta, self.test_cfg) - for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas) - ] - return merged_proposals - - if sys.version_info >= (3, 7): - - async def async_simple_test_rpn(self, x, img_metas): - sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025) - async with completed( - __name__, 'rpn_head_forward', - sleep_interval=sleep_interval): - rpn_outs = self(x) - - proposal_list = self.get_bboxes(*rpn_outs, img_metas=img_metas) - return proposal_list - - def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas): - """Merge augmented detection bboxes and scores. - - Args: - aug_bboxes (list[Tensor]): shape (n, 4*#class) - aug_scores (list[Tensor] or None): shape (n, #class) - img_shapes (list[Tensor]): shape (3, ). - - Returns: - tuple[Tensor]: ``bboxes`` with shape (n,4), where - 4 represent (tl_x, tl_y, br_x, br_y) - and ``scores`` with shape (n,). - """ - recovered_bboxes = [] - for bboxes, img_info in zip(aug_bboxes, img_metas): - img_shape = img_info[0]['img_shape'] - scale_factor = img_info[0]['scale_factor'] - flip = img_info[0]['flip'] - flip_direction = img_info[0]['flip_direction'] - bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, - flip_direction) - recovered_bboxes.append(bboxes) - bboxes = torch.cat(recovered_bboxes, dim=0) - if aug_scores is None: - return bboxes - else: - scores = torch.cat(aug_scores, dim=0) - return bboxes, scores diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/detr_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/detr_head.py deleted file mode 100644 index 6b7ee13cb6830dd4e1ff85de524f06dde09cbcec..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/detr_head.py +++ /dev/null @@ -1,835 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import Conv2d, Linear, build_activation_layer -from mmcv.cnn.bricks.transformer import FFN, build_positional_encoding -from mmcv.runner import force_fp32 - -from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh, - build_assigner, build_sampler, multi_apply, - reduce_mean) -from mmdet.models.utils import build_transformer -from ..builder import HEADS, build_loss -from .anchor_free_head import AnchorFreeHead - - -@HEADS.register_module() -class DETRHead(AnchorFreeHead): - """Implements the DETR transformer head. - - See `paper: End-to-End Object Detection with Transformers - `_ for details. - - Args: - num_classes (int): Number of categories excluding the background. - in_channels (int): Number of channels in the input feature map. - num_query (int): Number of query in Transformer. - num_reg_fcs (int, optional): Number of fully-connected layers used in - `FFN`, which is then used for the regression head. Default 2. 
- transformer (obj:`mmcv.ConfigDict`|dict): Config for transformer. - Default: None. - sync_cls_avg_factor (bool): Whether to sync the avg_factor of - all ranks. Default to False. - positional_encoding (obj:`mmcv.ConfigDict`|dict): - Config for position encoding. - loss_cls (obj:`mmcv.ConfigDict`|dict): Config of the - classification loss. Default `CrossEntropyLoss`. - loss_bbox (obj:`mmcv.ConfigDict`|dict): Config of the - regression loss. Default `L1Loss`. - loss_iou (obj:`mmcv.ConfigDict`|dict): Config of the - regression iou loss. Default `GIoULoss`. - tran_cfg (obj:`mmcv.ConfigDict`|dict): Training config of - transformer head. - test_cfg (obj:`mmcv.ConfigDict`|dict): Testing config of - transformer head. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - _version = 2 - - def __init__(self, - num_classes, - in_channels, - num_query=100, - num_reg_fcs=2, - transformer=None, - sync_cls_avg_factor=False, - positional_encoding=dict( - type='SinePositionalEncoding', - num_feats=128, - normalize=True), - loss_cls=dict( - type='CrossEntropyLoss', - bg_cls_weight=0.1, - use_sigmoid=False, - loss_weight=1.0, - class_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=5.0), - loss_iou=dict(type='GIoULoss', loss_weight=2.0), - train_cfg=dict( - assigner=dict( - type='HungarianAssigner', - cls_cost=dict(type='ClassificationCost', weight=1.), - reg_cost=dict(type='BBoxL1Cost', weight=5.0), - iou_cost=dict( - type='IoUCost', iou_mode='giou', weight=2.0))), - test_cfg=dict(max_per_img=100), - init_cfg=None, - **kwargs): - # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, - # since it brings inconvenience when the initialization of - # `AnchorFreeHead` is called. - super(AnchorFreeHead, self).__init__(init_cfg) - self.bg_cls_weight = 0 - self.sync_cls_avg_factor = sync_cls_avg_factor - class_weight = loss_cls.get('class_weight', None) - if class_weight is not None and (self.__class__ is DETRHead): - assert isinstance(class_weight, float), 'Expected ' \ - 'class_weight to have type float. Found ' \ - f'{type(class_weight)}.' - # NOTE following the official DETR rep0, bg_cls_weight means - # relative classification weight of the no-object class. - bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight) - assert isinstance(bg_cls_weight, float), 'Expected ' \ - 'bg_cls_weight to have type float. Found ' \ - f'{type(bg_cls_weight)}.' - class_weight = torch.ones(num_classes + 1) * class_weight - # set background class as the last indice - class_weight[num_classes] = bg_cls_weight - loss_cls.update({'class_weight': class_weight}) - if 'bg_cls_weight' in loss_cls: - loss_cls.pop('bg_cls_weight') - self.bg_cls_weight = bg_cls_weight - - if train_cfg: - assert 'assigner' in train_cfg, 'assigner should be provided '\ - 'when train_cfg is set.' 
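The `bg_cls_weight` handling above boils down to building a per-class weight vector in which the no-object (background) class, stored at the last index, is down-weighted relative to the foreground classes, and handing it to the classification loss. A toy version with plain `torch.nn.CrossEntropyLoss` (the head itself routes the vector through its configured mmdet loss instead):

```
import torch
import torch.nn as nn

num_classes = 80
class_weight = torch.ones(num_classes + 1)
class_weight[num_classes] = 0.1          # down-weight the no-object class (last index)

criterion = nn.CrossEntropyLoss(weight=class_weight)
logits = torch.randn(4, num_classes + 1)                    # 4 queries, background logit last
targets = torch.tensor([3, num_classes, num_classes, 17])   # two queries matched to no object
print(criterion(logits, targets))
```

Most queries end up matched to no object, so a smaller background weight keeps those easy negatives from dominating the classification term.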
- assigner = train_cfg['assigner'] - self.assigner = build_assigner(assigner) - # DETR sampling=False, so use PseudoSampler - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - self.num_query = num_query - self.num_classes = num_classes - self.in_channels = in_channels - self.num_reg_fcs = num_reg_fcs - self.train_cfg = train_cfg - self.test_cfg = test_cfg - self.fp16_enabled = False - self.loss_cls = build_loss(loss_cls) - self.loss_bbox = build_loss(loss_bbox) - self.loss_iou = build_loss(loss_iou) - - if self.loss_cls.use_sigmoid: - self.cls_out_channels = num_classes - else: - self.cls_out_channels = num_classes + 1 - self.act_cfg = transformer.get('act_cfg', - dict(type='ReLU', inplace=True)) - self.activate = build_activation_layer(self.act_cfg) - self.positional_encoding = build_positional_encoding( - positional_encoding) - self.transformer = build_transformer(transformer) - self.embed_dims = self.transformer.embed_dims - assert 'num_feats' in positional_encoding - num_feats = positional_encoding['num_feats'] - assert num_feats * 2 == self.embed_dims, 'embed_dims should' \ - f' be exactly 2 times of num_feats. Found {self.embed_dims}' \ - f' and {num_feats}.' - self._init_layers() - - def _init_layers(self): - """Initialize layers of the transformer head.""" - self.input_proj = Conv2d( - self.in_channels, self.embed_dims, kernel_size=1) - self.fc_cls = Linear(self.embed_dims, self.cls_out_channels) - self.reg_ffn = FFN( - self.embed_dims, - self.embed_dims, - self.num_reg_fcs, - self.act_cfg, - dropout=0.0, - add_residual=False) - self.fc_reg = Linear(self.embed_dims, 4) - self.query_embedding = nn.Embedding(self.num_query, self.embed_dims) - - def init_weights(self): - """Initialize weights of the transformer head.""" - # The initialization for transformer is important - self.transformer.init_weights() - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - """load checkpoints.""" - # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, - # since `AnchorFreeHead._load_from_state_dict` should not be - # called here. Invoking the default `Module._load_from_state_dict` - # is enough. - - # Names of some parameters in has been changed. - version = local_metadata.get('version', None) - if (version is None or version < 2) and self.__class__ is DETRHead: - convert_dict = { - '.self_attn.': '.attentions.0.', - '.ffn.': '.ffns.0.', - '.multihead_attn.': '.attentions.1.', - '.decoder.norm.': '.decoder.post_norm.' - } - state_dict_keys = list(state_dict.keys()) - for k in state_dict_keys: - for ori_key, convert_key in convert_dict.items(): - if ori_key in k: - convert_key = k.replace(ori_key, convert_key) - state_dict[convert_key] = state_dict[k] - del state_dict[k] - - super(AnchorFreeHead, - self)._load_from_state_dict(state_dict, prefix, local_metadata, - strict, missing_keys, - unexpected_keys, error_msgs) - - def forward(self, feats, img_metas): - """Forward function. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - img_metas (list[dict]): List of image information. - - Returns: - tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels. - - - all_cls_scores_list (list[Tensor]): Classification scores \ - for each scale level. Each is a 4D-tensor with shape \ - [nb_dec, bs, num_query, cls_out_channels]. Note \ - `cls_out_channels` should includes background. 
- - all_bbox_preds_list (list[Tensor]): Sigmoid regression \ - outputs for each scale level. Each is a 4D-tensor with \ - normalized coordinate format (cx, cy, w, h) and shape \ - [nb_dec, bs, num_query, 4]. - """ - num_levels = len(feats) - img_metas_list = [img_metas for _ in range(num_levels)] - return multi_apply(self.forward_single, feats, img_metas_list) - - def forward_single(self, x, img_metas): - """"Forward function for a single feature level. - - Args: - x (Tensor): Input feature from backbone's single stage, shape - [bs, c, h, w]. - img_metas (list[dict]): List of image information. - - Returns: - all_cls_scores (Tensor): Outputs from the classification head, - shape [nb_dec, bs, num_query, cls_out_channels]. Note - cls_out_channels should includes background. - all_bbox_preds (Tensor): Sigmoid outputs from the regression - head with normalized coordinate format (cx, cy, w, h). - Shape [nb_dec, bs, num_query, 4]. - """ - # construct binary masks which used for the transformer. - # NOTE following the official DETR repo, non-zero values representing - # ignored positions, while zero values means valid positions. - batch_size = x.size(0) - input_img_h, input_img_w = img_metas[0]['batch_input_shape'] - masks = x.new_ones((batch_size, input_img_h, input_img_w)) - for img_id in range(batch_size): - img_h, img_w, _ = img_metas[img_id]['img_shape'] - masks[img_id, :img_h, :img_w] = 0 - - x = self.input_proj(x) - # interpolate masks to have the same spatial shape with x - masks = F.interpolate( - masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1) - # position encoding - pos_embed = self.positional_encoding(masks) # [bs, embed_dim, h, w] - # outs_dec: [nb_dec, bs, num_query, embed_dim] - outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight, - pos_embed) - - all_cls_scores = self.fc_cls(outs_dec) - all_bbox_preds = self.fc_reg(self.activate( - self.reg_ffn(outs_dec))).sigmoid() - return all_cls_scores, all_bbox_preds - - @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) - def loss(self, - all_cls_scores_list, - all_bbox_preds_list, - gt_bboxes_list, - gt_labels_list, - img_metas, - gt_bboxes_ignore=None): - """"Loss function. - - Only outputs from the last feature level are used for computing - losses by default. - - Args: - all_cls_scores_list (list[Tensor]): Classification outputs - for each feature level. Each is a 4D-tensor with shape - [nb_dec, bs, num_query, cls_out_channels]. - all_bbox_preds_list (list[Tensor]): Sigmoid regression - outputs for each feature level. Each is a 4D-tensor with - normalized coordinate format (cx, cy, w, h) and shape - [nb_dec, bs, num_query, 4]. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - img_metas (list[dict]): List of image meta information. - gt_bboxes_ignore (list[Tensor], optional): Bounding boxes - which can be ignored for each image. Default None. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - # NOTE defaultly only the outputs from the last feature scale is used. - all_cls_scores = all_cls_scores_list[-1] - all_bbox_preds = all_bbox_preds_list[-1] - assert gt_bboxes_ignore is None, \ - 'Only supports for gt_bboxes_ignore setting to None.' 
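`forward_single` above builds the padding mask with the DETR convention that zeros mark valid pixels and non-zeros mark padding, then downsamples it to the feature resolution before the positional encoding and transformer consume it. A small sketch of that masking step on dummy shapes (sizes chosen arbitrarily):

```
import torch
import torch.nn.functional as F

# two images padded to 64x64; the second only occupies the top-left 48x32 pixels
masks = torch.ones(2, 64, 64)
masks[0, :64, :64] = 0            # first image fills the whole padded canvas
masks[1, :48, :32] = 0            # zeros = valid, ones = padding (DETR convention)

feat = torch.randn(2, 256, 8, 8)  # e.g. a stride-8 feature map from the backbone
feat_mask = F.interpolate(masks[None], size=feat.shape[-2:]).to(torch.bool).squeeze(0)
print(feat_mask.shape)               # torch.Size([2, 8, 8])
print(feat_mask[1].float().mean())   # fraction of feature cells that are padding
```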
- - num_dec_layers = len(all_cls_scores) - all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] - all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] - all_gt_bboxes_ignore_list = [ - gt_bboxes_ignore for _ in range(num_dec_layers) - ] - img_metas_list = [img_metas for _ in range(num_dec_layers)] - - losses_cls, losses_bbox, losses_iou = multi_apply( - self.loss_single, all_cls_scores, all_bbox_preds, - all_gt_bboxes_list, all_gt_labels_list, img_metas_list, - all_gt_bboxes_ignore_list) - - loss_dict = dict() - # loss from the last decoder layer - loss_dict['loss_cls'] = losses_cls[-1] - loss_dict['loss_bbox'] = losses_bbox[-1] - loss_dict['loss_iou'] = losses_iou[-1] - # loss from other decoder layers - num_dec_layer = 0 - for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], - losses_bbox[:-1], - losses_iou[:-1]): - loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i - loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i - loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i - num_dec_layer += 1 - return loss_dict - - def loss_single(self, - cls_scores, - bbox_preds, - gt_bboxes_list, - gt_labels_list, - img_metas, - gt_bboxes_ignore_list=None): - """"Loss function for outputs from a single decoder layer of a single - feature level. - - Args: - cls_scores (Tensor): Box score logits from a single decoder layer - for all images. Shape [bs, num_query, cls_out_channels]. - bbox_preds (Tensor): Sigmoid outputs from a single decoder layer - for all images, with normalized coordinate (cx, cy, w, h) and - shape [bs, num_query, 4]. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - img_metas (list[dict]): List of image meta information. - gt_bboxes_ignore_list (list[Tensor], optional): Bounding - boxes which can be ignored for each image. Default None. - - Returns: - dict[str, Tensor]: A dictionary of loss components for outputs from - a single decoder layer. 
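A toy illustration of how the per-decoder-layer losses computed above end up keyed in the returned dict: the last layer provides the main `loss_cls`, earlier layers become auxiliary `d{i}.loss_cls` entries. The numeric values are arbitrary placeholders:

```
import torch

# Pretend classification losses from 6 decoder layers.
losses_cls = [torch.tensor(0.9 - 0.1 * i) for i in range(6)]

loss_dict = {'loss_cls': losses_cls[-1]}
for num_dec_layer, loss_cls_i in enumerate(losses_cls[:-1]):
    loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i

print(sorted(loss_dict))  # ['d0.loss_cls', ..., 'd4.loss_cls', 'loss_cls']
```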
- """ - num_imgs = cls_scores.size(0) - cls_scores_list = [cls_scores[i] for i in range(num_imgs)] - bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] - cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, - gt_bboxes_list, gt_labels_list, - img_metas, gt_bboxes_ignore_list) - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - labels = torch.cat(labels_list, 0) - label_weights = torch.cat(label_weights_list, 0) - bbox_targets = torch.cat(bbox_targets_list, 0) - bbox_weights = torch.cat(bbox_weights_list, 0) - - # classification loss - cls_scores = cls_scores.reshape(-1, self.cls_out_channels) - # construct weighted avg_factor to match with the official DETR repo - cls_avg_factor = num_total_pos * 1.0 + \ - num_total_neg * self.bg_cls_weight - if self.sync_cls_avg_factor: - cls_avg_factor = reduce_mean( - cls_scores.new_tensor([cls_avg_factor])) - cls_avg_factor = max(cls_avg_factor, 1) - - loss_cls = self.loss_cls( - cls_scores, labels, label_weights, avg_factor=cls_avg_factor) - - # Compute the average number of gt boxes across all gpus, for - # normalization purposes - num_total_pos = loss_cls.new_tensor([num_total_pos]) - num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() - - # construct factors used for rescale bboxes - factors = [] - for img_meta, bbox_pred in zip(img_metas, bbox_preds): - img_h, img_w, _ = img_meta['img_shape'] - factor = bbox_pred.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0).repeat( - bbox_pred.size(0), 1) - factors.append(factor) - factors = torch.cat(factors, 0) - - # DETR regress the relative position of boxes (cxcywh) in the image, - # thus the learning target is normalized by the image size. So here - # we need to re-scale them for calculating IoU loss - bbox_preds = bbox_preds.reshape(-1, 4) - bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors - bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors - - # regression IoU loss, defaultly GIoU loss - loss_iou = self.loss_iou( - bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) - - # regression L1 loss - loss_bbox = self.loss_bbox( - bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) - return loss_cls, loss_bbox, loss_iou - - def get_targets(self, - cls_scores_list, - bbox_preds_list, - gt_bboxes_list, - gt_labels_list, - img_metas, - gt_bboxes_ignore_list=None): - """"Compute regression and classification targets for a batch image. - - Outputs from a single decoder layer of a single feature level are used. - - Args: - cls_scores_list (list[Tensor]): Box score logits from a single - decoder layer for each image with shape [num_query, - cls_out_channels]. - bbox_preds_list (list[Tensor]): Sigmoid outputs from a single - decoder layer for each image, with normalized coordinate - (cx, cy, w, h) and shape [num_query, 4]. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - img_metas (list[dict]): List of image meta information. - gt_bboxes_ignore_list (list[Tensor], optional): Bounding - boxes which can be ignored for each image. Default None. - - Returns: - tuple: a tuple containing the following targets. - - - labels_list (list[Tensor]): Labels for all images. - - label_weights_list (list[Tensor]): Label weights for all \ - images. 
- - bbox_targets_list (list[Tensor]): BBox targets for all \ - images. - - bbox_weights_list (list[Tensor]): BBox weights for all \ - images. - - num_total_pos (int): Number of positive samples in all \ - images. - - num_total_neg (int): Number of negative samples in all \ - images. - """ - assert gt_bboxes_ignore_list is None, \ - 'Only supports for gt_bboxes_ignore setting to None.' - num_imgs = len(cls_scores_list) - gt_bboxes_ignore_list = [ - gt_bboxes_ignore_list for _ in range(num_imgs) - ] - - (labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply( - self._get_target_single, cls_scores_list, bbox_preds_list, - gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list) - num_total_pos = sum((inds.numel() for inds in pos_inds_list)) - num_total_neg = sum((inds.numel() for inds in neg_inds_list)) - return (labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) - - def _get_target_single(self, - cls_score, - bbox_pred, - gt_bboxes, - gt_labels, - img_meta, - gt_bboxes_ignore=None): - """"Compute regression and classification targets for one image. - - Outputs from a single decoder layer of a single feature level are used. - - Args: - cls_score (Tensor): Box score logits from a single decoder layer - for one image. Shape [num_query, cls_out_channels]. - bbox_pred (Tensor): Sigmoid outputs from a single decoder layer - for one image, with normalized coordinate (cx, cy, w, h) and - shape [num_query, 4]. - gt_bboxes (Tensor): Ground truth bboxes for one image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (Tensor): Ground truth class indices for one image - with shape (num_gts, ). - img_meta (dict): Meta information for one image. - gt_bboxes_ignore (Tensor, optional): Bounding boxes - which can be ignored. Default None. - - Returns: - tuple[Tensor]: a tuple containing the following for one image. - - - labels (Tensor): Labels of each image. - - label_weights (Tensor]): Label weights of each image. - - bbox_targets (Tensor): BBox targets of each image. - - bbox_weights (Tensor): BBox weights of each image. - - pos_inds (Tensor): Sampled positive indices for each image. - - neg_inds (Tensor): Sampled negative indices for each image. - """ - - num_bboxes = bbox_pred.size(0) - # assigner and sampler - assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes, - gt_labels, img_meta, - gt_bboxes_ignore) - sampling_result = self.sampler.sample(assign_result, bbox_pred, - gt_bboxes) - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - - # label targets - labels = gt_bboxes.new_full((num_bboxes, ), - self.num_classes, - dtype=torch.long) - labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] - label_weights = gt_bboxes.new_ones(num_bboxes) - - # bbox targets - bbox_targets = torch.zeros_like(bbox_pred) - bbox_weights = torch.zeros_like(bbox_pred) - bbox_weights[pos_inds] = 1.0 - img_h, img_w, _ = img_meta['img_shape'] - - # DETR regress the relative position of boxes (cxcywh) in the image. - # Thus the learning target should be normalized by the image size, also - # the box format should be converted from defaultly x1y1x2y2 to cxcywh. 
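The normalization and coordinate conversion described in the comment above can be sketched with standalone helpers. The two conversion functions below are hand-rolled stand-ins for mmdet's `bbox_xyxy_to_cxcywh` / `bbox_cxcywh_to_xyxy`, and the box values are invented:

```
import torch

def xyxy_to_cxcywh(bbox):
    # (x1, y1, x2, y2) -> (cx, cy, w, h)
    x1, y1, x2, y2 = bbox.unbind(-1)
    return torch.stack([(x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1], dim=-1)

def cxcywh_to_xyxy(bbox):
    cx, cy, w, h = bbox.unbind(-1)
    return torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)

img_h, img_w = 480, 640
factor = torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)

gt_xyxy = torch.tensor([[100., 120., 300., 360.]])
# Learning target: normalized (cx, cy, w, h) in [0, 1].
target = xyxy_to_cxcywh(gt_xyxy / factor)

# At loss time the normalized prediction is mapped back to absolute xyxy
# before the (G)IoU loss is computed.
pred_cxcywh = torch.tensor([[0.31, 0.50, 0.30, 0.52]])
pred_xyxy = cxcywh_to_xyxy(pred_cxcywh) * factor
print(target, pred_xyxy)
```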
- factor = bbox_pred.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0) - pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor - pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized) - bbox_targets[pos_inds] = pos_gt_bboxes_targets - return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, - neg_inds) - - # over-write because img_metas are needed as inputs for bbox_head. - def forward_train(self, - x, - img_metas, - gt_bboxes, - gt_labels=None, - gt_bboxes_ignore=None, - proposal_cfg=None, - **kwargs): - """Forward function for training mode. - - Args: - x (list[Tensor]): Features from backbone. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - proposal_cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert proposal_cfg is None, '"proposal_cfg" must be None' - outs = self(x, img_metas) - if gt_labels is None: - loss_inputs = outs + (gt_bboxes, img_metas) - else: - loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) - losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) - return losses - - @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) - def get_bboxes(self, - all_cls_scores_list, - all_bbox_preds_list, - img_metas, - rescale=False): - """Transform network outputs for a batch into bbox predictions. - - Args: - all_cls_scores_list (list[Tensor]): Classification outputs - for each feature level. Each is a 4D-tensor with shape - [nb_dec, bs, num_query, cls_out_channels]. - all_bbox_preds_list (list[Tensor]): Sigmoid regression - outputs for each feature level. Each is a 4D-tensor with - normalized coordinate format (cx, cy, w, h) and shape - [nb_dec, bs, num_query, 4]. - img_metas (list[dict]): Meta information of each image. - rescale (bool, optional): If True, return boxes in original - image space. Default False. - - Returns: - list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \ - The first item is an (n, 5) tensor, where the first 4 columns \ - are bounding box positions (tl_x, tl_y, br_x, br_y) and the \ - 5-th column is a score between 0 and 1. The second item is a \ - (n,) tensor where each item is the predicted class label of \ - the corresponding box. - """ - # NOTE defaultly only using outputs from the last feature level, - # and only the outputs from the last decoder layer is used. - cls_scores = all_cls_scores_list[-1][-1] - bbox_preds = all_bbox_preds_list[-1][-1] - - result_list = [] - for img_id in range(len(img_metas)): - cls_score = cls_scores[img_id] - bbox_pred = bbox_preds[img_id] - img_shape = img_metas[img_id]['img_shape'] - scale_factor = img_metas[img_id]['scale_factor'] - proposals = self._get_bboxes_single(cls_score, bbox_pred, - img_shape, scale_factor, - rescale) - result_list.append(proposals) - - return result_list - - def _get_bboxes_single(self, - cls_score, - bbox_pred, - img_shape, - scale_factor, - rescale=False): - """Transform outputs from the last decoder layer into bbox predictions - for each image. - - Args: - cls_score (Tensor): Box score logits from the last decoder layer - for each image. Shape [num_query, cls_out_channels]. 
- bbox_pred (Tensor): Sigmoid outputs from the last decoder layer - for each image, with coordinate format (cx, cy, w, h) and - shape [num_query, 4]. - img_shape (tuple[int]): Shape of input image, (height, width, 3). - scale_factor (ndarray, optional): Scale factor of the image arange - as (w_scale, h_scale, w_scale, h_scale). - rescale (bool, optional): If True, return boxes in original image - space. Default False. - - Returns: - tuple[Tensor]: Results of detected bboxes and labels. - - - det_bboxes: Predicted bboxes with shape [num_query, 5], \ - where the first 4 columns are bounding box positions \ - (tl_x, tl_y, br_x, br_y) and the 5-th column are scores \ - between 0 and 1. - - det_labels: Predicted labels of the corresponding box with \ - shape [num_query]. - """ - assert len(cls_score) == len(bbox_pred) - max_per_img = self.test_cfg.get('max_per_img', self.num_query) - # exclude background - if self.loss_cls.use_sigmoid: - cls_score = cls_score.sigmoid() - scores, indexes = cls_score.view(-1).topk(max_per_img) - det_labels = indexes % self.num_classes - bbox_index = indexes // self.num_classes - bbox_pred = bbox_pred[bbox_index] - else: - scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1) - scores, bbox_index = scores.topk(max_per_img) - bbox_pred = bbox_pred[bbox_index] - det_labels = det_labels[bbox_index] - - det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred) - det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1] - det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0] - det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1]) - det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0]) - if rescale: - det_bboxes /= det_bboxes.new_tensor(scale_factor) - det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1) - - return det_bboxes, det_labels - - def simple_test_bboxes(self, feats, img_metas, rescale=False): - """Test det bboxes without test-time augmentation. - - Args: - feats (tuple[torch.Tensor]): Multi-level features from the - upstream network, each is a 4D-tensor. - img_metas (list[dict]): List of image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is ``bboxes`` with shape (n, 5), - where 5 represent (tl_x, tl_y, br_x, br_y, score). - The shape of the second tensor in the tuple is ``labels`` - with shape (n,) - """ - # forward of this head requires img_metas - outs = self.forward(feats, img_metas) - results_list = self.get_bboxes(*outs, img_metas, rescale=rescale) - return results_list - - def forward_onnx(self, feats, img_metas): - """Forward function for exporting to ONNX. - - Over-write `forward` because: `masks` is directly created with - zero (valid position tag) and has the same spatial size as `x`. - Thus the construction of `masks` is different from that in `forward`. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - img_metas (list[dict]): List of image information. - - Returns: - tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels. - - - all_cls_scores_list (list[Tensor]): Classification scores \ - for each scale level. Each is a 4D-tensor with shape \ - [nb_dec, bs, num_query, cls_out_channels]. Note \ - `cls_out_channels` should includes background. - - all_bbox_preds_list (list[Tensor]): Sigmoid regression \ - outputs for each scale level. 
Each is a 4D-tensor with \ - normalized coordinate format (cx, cy, w, h) and shape \ - [nb_dec, bs, num_query, 4]. - """ - num_levels = len(feats) - img_metas_list = [img_metas for _ in range(num_levels)] - return multi_apply(self.forward_single_onnx, feats, img_metas_list) - - def forward_single_onnx(self, x, img_metas): - """"Forward function for a single feature level with ONNX exportation. - - Args: - x (Tensor): Input feature from backbone's single stage, shape - [bs, c, h, w]. - img_metas (list[dict]): List of image information. - - Returns: - all_cls_scores (Tensor): Outputs from the classification head, - shape [nb_dec, bs, num_query, cls_out_channels]. Note - cls_out_channels should includes background. - all_bbox_preds (Tensor): Sigmoid outputs from the regression - head with normalized coordinate format (cx, cy, w, h). - Shape [nb_dec, bs, num_query, 4]. - """ - # Note `img_shape` is not dynamically traceable to ONNX, - # since the related augmentation was done with numpy under - # CPU. Thus `masks` is directly created with zeros (valid tag) - # and the same spatial shape as `x`. - # The difference between torch and exported ONNX model may be - # ignored, since the same performance is achieved (e.g. - # 40.1 vs 40.1 for DETR) - batch_size = x.size(0) - h, w = x.size()[-2:] - masks = x.new_zeros((batch_size, h, w)) # [B,h,w] - - x = self.input_proj(x) - # interpolate masks to have the same spatial shape with x - masks = F.interpolate( - masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1) - pos_embed = self.positional_encoding(masks) - outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight, - pos_embed) - - all_cls_scores = self.fc_cls(outs_dec) - all_bbox_preds = self.fc_reg(self.activate( - self.reg_ffn(outs_dec))).sigmoid() - return all_cls_scores, all_bbox_preds - - def onnx_export(self, all_cls_scores_list, all_bbox_preds_list, img_metas): - """Transform network outputs into bbox predictions, with ONNX - exportation. - - Args: - all_cls_scores_list (list[Tensor]): Classification outputs - for each feature level. Each is a 4D-tensor with shape - [nb_dec, bs, num_query, cls_out_channels]. - all_bbox_preds_list (list[Tensor]): Sigmoid regression - outputs for each feature level. Each is a 4D-tensor with - normalized coordinate format (cx, cy, w, h) and shape - [nb_dec, bs, num_query, 4]. - img_metas (list[dict]): Meta information of each image. - - Returns: - tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] - and class labels of shape [N, num_det]. - """ - assert len(img_metas) == 1, \ - 'Only support one input image while in exporting to ONNX' - - cls_scores = all_cls_scores_list[-1][-1] - bbox_preds = all_bbox_preds_list[-1][-1] - - # Note `img_shape` is not dynamically traceable to ONNX, - # here `img_shape_for_onnx` (padded shape of image tensor) - # is used. 
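The top-k decoding used in `_get_bboxes_single` above (and again, in batched form, in `onnx_export` below) boils down to splitting a flattened (query, class) index back into its two parts. A minimal sketch with made-up sizes:

```
import torch

num_query, num_classes, max_per_img = 5, 3, 4
cls_score = torch.randn(num_query, num_classes).sigmoid()

# Flatten the (query, class) scores, take the global top-k, then recover which
# query and which class each selected score came from.
scores, indexes = cls_score.view(-1).topk(max_per_img)
det_labels = indexes % num_classes    # class id of each kept prediction
bbox_index = indexes // num_classes   # query id, used to gather bbox_pred
print(det_labels, bbox_index)
```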
- img_shape = img_metas[0]['img_shape_for_onnx'] - max_per_img = self.test_cfg.get('max_per_img', self.num_query) - batch_size = cls_scores.size(0) - # `batch_index_offset` is used for the gather of concatenated tensor - batch_index_offset = torch.arange(batch_size).to( - cls_scores.device) * max_per_img - batch_index_offset = batch_index_offset.unsqueeze(1).expand( - batch_size, max_per_img) - - # supports dynamical batch inference - if self.loss_cls.use_sigmoid: - cls_scores = cls_scores.sigmoid() - scores, indexes = cls_scores.view(batch_size, -1).topk( - max_per_img, dim=1) - det_labels = indexes % self.num_classes - bbox_index = indexes // self.num_classes - bbox_index = (bbox_index + batch_index_offset).view(-1) - bbox_preds = bbox_preds.view(-1, 4)[bbox_index] - bbox_preds = bbox_preds.view(batch_size, -1, 4) - else: - scores, det_labels = F.softmax( - cls_scores, dim=-1)[..., :-1].max(-1) - scores, bbox_index = scores.topk(max_per_img, dim=1) - bbox_index = (bbox_index + batch_index_offset).view(-1) - bbox_preds = bbox_preds.view(-1, 4)[bbox_index] - det_labels = det_labels.view(-1)[bbox_index] - bbox_preds = bbox_preds.view(batch_size, -1, 4) - det_labels = det_labels.view(batch_size, -1) - - det_bboxes = bbox_cxcywh_to_xyxy(bbox_preds) - # use `img_shape_tensor` for dynamically exporting to ONNX - img_shape_tensor = img_shape.flip(0).repeat(2) # [w,h,w,h] - img_shape_tensor = img_shape_tensor.unsqueeze(0).unsqueeze(0).expand( - batch_size, det_bboxes.size(1), 4) - det_bboxes = det_bboxes * img_shape_tensor - # dynamically clip bboxes - x1, y1, x2, y2 = det_bboxes.split((1, 1, 1, 1), dim=-1) - from mmdet.core.export import dynamic_clip_for_onnx - x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, img_shape) - det_bboxes = torch.cat([x1, y1, x2, y2], dim=-1) - det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(-1)), -1) - - return det_bboxes, det_labels diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/embedding_rpn_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/embedding_rpn_head.py deleted file mode 100644 index 22060b964846298cae5a4625a0ffc32d9a139657..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/embedding_rpn_head.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -from mmcv.runner import BaseModule - -from mmdet.models.builder import HEADS -from ...core import bbox_cxcywh_to_xyxy - - -@HEADS.register_module() -class EmbeddingRPNHead(BaseModule): - """RPNHead in the `Sparse R-CNN `_ . - - Unlike traditional RPNHead, this module does not need FPN input, but just - decode `init_proposal_bboxes` and expand the first dimension of - `init_proposal_bboxes` and `init_proposal_features` to the batch_size. - - Args: - num_proposals (int): Number of init_proposals. Default 100. - proposal_feature_channel (int): Channel number of - init_proposal_feature. Defaults to 256. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - num_proposals=100, - proposal_feature_channel=256, - init_cfg=None, - **kwargs): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - super(EmbeddingRPNHead, self).__init__(init_cfg) - self.num_proposals = num_proposals - self.proposal_feature_channel = proposal_feature_channel - self._init_layers() - - def _init_layers(self): - """Initialize a sparse set of proposal boxes and proposal features.""" - self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4) - self.init_proposal_features = nn.Embedding( - self.num_proposals, self.proposal_feature_channel) - - def init_weights(self): - """Initialize the init_proposal_bboxes as normalized. - - [c_x, c_y, w, h], and we initialize it to the size of the entire - image. - """ - super(EmbeddingRPNHead, self).init_weights() - nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5) - nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1) - - def _decode_init_proposals(self, imgs, img_metas): - """Decode init_proposal_bboxes according to the size of images and - expand dimension of init_proposal_features to batch_size. - - Args: - imgs (list[Tensor]): List of FPN features. - img_metas (list[dict]): List of meta-information of - images. Need the img_shape to decode the init_proposals. - - Returns: - Tuple(Tensor): - - - proposals (Tensor): Decoded proposal bboxes, - has shape (batch_size, num_proposals, 4). - - init_proposal_features (Tensor): Expanded proposal - features, has shape - (batch_size, num_proposals, proposal_feature_channel). - - imgs_whwh (Tensor): Tensor with shape - (batch_size, 4), the dimension means - [img_width, img_height, img_width, img_height]. - """ - proposals = self.init_proposal_bboxes.weight.clone() - proposals = bbox_cxcywh_to_xyxy(proposals) - num_imgs = len(imgs[0]) - imgs_whwh = [] - for meta in img_metas: - h, w, _ = meta['img_shape'] - imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]])) - imgs_whwh = torch.cat(imgs_whwh, dim=0) - imgs_whwh = imgs_whwh[:, None, :] - - # imgs_whwh has shape (batch_size, 1, 4) - # The shape of proposals change from (num_proposals, 4) - # to (batch_size ,num_proposals, 4) - proposals = proposals * imgs_whwh - - init_proposal_features = self.init_proposal_features.weight.clone() - init_proposal_features = init_proposal_features[None].expand( - num_imgs, *init_proposal_features.size()) - return proposals, init_proposal_features, imgs_whwh - - def forward_dummy(self, img, img_metas): - """Dummy forward function. - - Used in flops calculation. 
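A minimal sketch of the whole-image proposal initialization and decoding described above, using a hand-rolled `cxcywh_to_xyxy` in place of mmdet's helper and an arbitrary image size:

```
import torch
import torch.nn as nn

num_proposals = 4
init_proposal_bboxes = nn.Embedding(num_proposals, 4)
# Whole-image initialization in normalized (cx, cy, w, h): center 0.5, size 1.
nn.init.constant_(init_proposal_bboxes.weight[:, :2], 0.5)
nn.init.constant_(init_proposal_bboxes.weight[:, 2:], 1)

def cxcywh_to_xyxy(b):
    cx, cy, w, h = b.unbind(-1)
    return torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)

h, w = 480, 640
imgs_whwh = torch.tensor([[w, h, w, h]], dtype=torch.float32)  # (1, 4), broadcast
proposals = cxcywh_to_xyxy(init_proposal_bboxes.weight.clone()) * imgs_whwh
# Every decoded proposal spans the whole image: [0, 0, 640, 480].
print(proposals)
```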
- """ - return self._decode_init_proposals(img, img_metas) - - def forward_train(self, img, img_metas): - """Forward function in training stage.""" - return self._decode_init_proposals(img, img_metas) - - def simple_test_rpn(self, img, img_metas): - """Forward function in testing stage.""" - return self._decode_init_proposals(img, img_metas) - - def simple_test(self, img, img_metas): - """Forward function in testing stage.""" - raise NotImplementedError - - def aug_test_rpn(self, feats, img_metas): - raise NotImplementedError( - 'EmbeddingRPNHead does not support test-time augmentation') diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/fcos_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/fcos_head.py deleted file mode 100644 index d72fb56caa1599414d67c32445a6f6def44fefdf..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/fcos_head.py +++ /dev/null @@ -1,455 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch -import torch.nn as nn -from mmcv.cnn import Scale -from mmcv.runner import force_fp32 - -from mmdet.core import multi_apply, reduce_mean -from ..builder import HEADS, build_loss -from .anchor_free_head import AnchorFreeHead - -INF = 1e8 - - -@HEADS.register_module() -class FCOSHead(AnchorFreeHead): - """Anchor-free head used in `FCOS `_. - - The FCOS head does not use anchor boxes. Instead bounding boxes are - predicted at each pixel and a centerness measure is used to suppress - low-quality predictions. - Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training - tricks used in official repo, which will bring remarkable mAP gains - of up to 4.9. Please see https://github.com/tianzhi0549/FCOS for - more detail. - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - strides (list[int] | list[tuple[int, int]]): Strides of points - in multiple feature levels. Default: (4, 8, 16, 32, 64). - regress_ranges (tuple[tuple[int, int]]): Regress range of multiple - level points. - center_sampling (bool): If true, use center sampling. Default: False. - center_sample_radius (float): Radius of center sampling. Default: 1.5. - norm_on_bbox (bool): If true, normalize the regression targets - with FPN strides. Default: False. - centerness_on_reg (bool): If true, position centerness on the - regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042. - Default: False. - conv_bias (bool | str): If specified as `auto`, it will be decided by the - norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise - False. Default: "auto". - loss_cls (dict): Config of classification loss. - loss_bbox (dict): Config of localization loss. - loss_centerness (dict): Config of centerness loss. - norm_cfg (dict): dictionary to construct and config norm layer. - Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). - init_cfg (dict or list[dict], optional): Initialization config dict. 
- - Example: - >>> self = FCOSHead(11, 7) - >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] - >>> cls_score, bbox_pred, centerness = self.forward(feats) - >>> assert len(cls_score) == len(self.scales) - """ # noqa: E501 - - def __init__(self, - num_classes, - in_channels, - regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), - (512, INF)), - center_sampling=False, - center_sample_radius=1.5, - norm_on_bbox=False, - centerness_on_reg=False, - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='IoULoss', loss_weight=1.0), - loss_centerness=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), - init_cfg=dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', - name='conv_cls', - std=0.01, - bias_prob=0.01)), - **kwargs): - self.regress_ranges = regress_ranges - self.center_sampling = center_sampling - self.center_sample_radius = center_sample_radius - self.norm_on_bbox = norm_on_bbox - self.centerness_on_reg = centerness_on_reg - super().__init__( - num_classes, - in_channels, - loss_cls=loss_cls, - loss_bbox=loss_bbox, - norm_cfg=norm_cfg, - init_cfg=init_cfg, - **kwargs) - self.loss_centerness = build_loss(loss_centerness) - - def _init_layers(self): - """Initialize layers of the head.""" - super()._init_layers() - self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) - self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: - cls_scores (list[Tensor]): Box scores for each scale level, \ - each is a 4D-tensor, the channel number is \ - num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each \ - scale level, each is a 4D-tensor, the channel number is \ - num_points * 4. - centernesses (list[Tensor]): centerness for each scale level, \ - each is a 4D-tensor, the channel number is num_points * 1. - """ - return multi_apply(self.forward_single, feats, self.scales, - self.strides) - - def forward_single(self, x, scale, stride): - """Forward features of a single scale level. - - Args: - x (Tensor): FPN feature maps of the specified stride. - scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize - the bbox prediction. - stride (int): The corresponding stride for feature maps, only - used to normalize the bbox prediction when self.norm_on_bbox - is True. - - Returns: - tuple: scores for each class, bbox predictions and centerness \ - predictions of input feature maps. - """ - cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x) - if self.centerness_on_reg: - centerness = self.conv_centerness(reg_feat) - else: - centerness = self.conv_centerness(cls_feat) - # scale the bbox_pred of different level - # float to avoid overflow when enabling FP16 - bbox_pred = scale(bbox_pred).float() - if self.norm_on_bbox: - # bbox_pred needed for gradient computation has been modified - # by F.relu(bbox_pred) when run with PyTorch 1.10. 
So replace - # F.relu(bbox_pred) with bbox_pred.clamp(min=0) - bbox_pred = bbox_pred.clamp(min=0) - if not self.training: - bbox_pred *= stride - else: - bbox_pred = bbox_pred.exp() - return cls_score, bbox_pred, centerness - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) - def loss(self, - cls_scores, - bbox_preds, - centernesses, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute loss of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level, - each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level, each is a 4D-tensor, the channel number is - num_points * 4. - centernesses (list[Tensor]): centerness for each scale level, each - is a 4D-tensor, the channel number is num_points * 1. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert len(cls_scores) == len(bbox_preds) == len(centernesses) - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - all_level_points = self.prior_generator.grid_priors( - featmap_sizes, - dtype=bbox_preds[0].dtype, - device=bbox_preds[0].device) - labels, bbox_targets = self.get_targets(all_level_points, gt_bboxes, - gt_labels) - - num_imgs = cls_scores[0].size(0) - # flatten cls_scores, bbox_preds and centerness - flatten_cls_scores = [ - cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) - for cls_score in cls_scores - ] - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) - for bbox_pred in bbox_preds - ] - flatten_centerness = [ - centerness.permute(0, 2, 3, 1).reshape(-1) - for centerness in centernesses - ] - flatten_cls_scores = torch.cat(flatten_cls_scores) - flatten_bbox_preds = torch.cat(flatten_bbox_preds) - flatten_centerness = torch.cat(flatten_centerness) - flatten_labels = torch.cat(labels) - flatten_bbox_targets = torch.cat(bbox_targets) - # repeat points to align with bbox_preds - flatten_points = torch.cat( - [points.repeat(num_imgs, 1) for points in all_level_points]) - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - bg_class_ind = self.num_classes - pos_inds = ((flatten_labels >= 0) - & (flatten_labels < bg_class_ind)).nonzero().reshape(-1) - num_pos = torch.tensor( - len(pos_inds), dtype=torch.float, device=bbox_preds[0].device) - num_pos = max(reduce_mean(num_pos), 1.0) - loss_cls = self.loss_cls( - flatten_cls_scores, flatten_labels, avg_factor=num_pos) - - pos_bbox_preds = flatten_bbox_preds[pos_inds] - pos_centerness = flatten_centerness[pos_inds] - pos_bbox_targets = flatten_bbox_targets[pos_inds] - pos_centerness_targets = self.centerness_target(pos_bbox_targets) - # centerness weighted iou loss - centerness_denorm = max( - reduce_mean(pos_centerness_targets.sum().detach()), 1e-6) - - if len(pos_inds) > 0: - pos_points = flatten_points[pos_inds] - pos_decoded_bbox_preds = self.bbox_coder.decode( - pos_points, pos_bbox_preds) - pos_decoded_target_preds = self.bbox_coder.decode( - pos_points, pos_bbox_targets) - loss_bbox = self.loss_bbox( - pos_decoded_bbox_preds, - pos_decoded_target_preds, 
- weight=pos_centerness_targets, - avg_factor=centerness_denorm) - loss_centerness = self.loss_centerness( - pos_centerness, pos_centerness_targets, avg_factor=num_pos) - else: - loss_bbox = pos_bbox_preds.sum() - loss_centerness = pos_centerness.sum() - - return dict( - loss_cls=loss_cls, - loss_bbox=loss_bbox, - loss_centerness=loss_centerness) - - def get_targets(self, points, gt_bboxes_list, gt_labels_list): - """Compute regression, classification and centerness targets for points - in multiple images. - - Args: - points (list[Tensor]): Points of each fpn level, each has shape - (num_points, 2). - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, - each has shape (num_gt, 4). - gt_labels_list (list[Tensor]): Ground truth labels of each box, - each has shape (num_gt,). - - Returns: - tuple: - concat_lvl_labels (list[Tensor]): Labels of each level. \ - concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \ - level. - """ - assert len(points) == len(self.regress_ranges) - num_levels = len(points) - # expand regress ranges to align with points - expanded_regress_ranges = [ - points[i].new_tensor(self.regress_ranges[i])[None].expand_as( - points[i]) for i in range(num_levels) - ] - # concat all levels points and regress ranges - concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) - concat_points = torch.cat(points, dim=0) - - # the number of points per img, per lvl - num_points = [center.size(0) for center in points] - - # get labels and bbox_targets of each image - labels_list, bbox_targets_list = multi_apply( - self._get_target_single, - gt_bboxes_list, - gt_labels_list, - points=concat_points, - regress_ranges=concat_regress_ranges, - num_points_per_lvl=num_points) - - # split to per img, per level - labels_list = [labels.split(num_points, 0) for labels in labels_list] - bbox_targets_list = [ - bbox_targets.split(num_points, 0) - for bbox_targets in bbox_targets_list - ] - - # concat per level image - concat_lvl_labels = [] - concat_lvl_bbox_targets = [] - for i in range(num_levels): - concat_lvl_labels.append( - torch.cat([labels[i] for labels in labels_list])) - bbox_targets = torch.cat( - [bbox_targets[i] for bbox_targets in bbox_targets_list]) - if self.norm_on_bbox: - bbox_targets = bbox_targets / self.strides[i] - concat_lvl_bbox_targets.append(bbox_targets) - return concat_lvl_labels, concat_lvl_bbox_targets - - def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ranges, - num_points_per_lvl): - """Compute regression and classification targets for a single image.""" - num_points = points.size(0) - num_gts = gt_labels.size(0) - if num_gts == 0: - return gt_labels.new_full((num_points,), self.num_classes), \ - gt_bboxes.new_zeros((num_points, 4)) - - areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( - gt_bboxes[:, 3] - gt_bboxes[:, 1]) - # TODO: figure out why these two are different - # areas = areas[None].expand(num_points, num_gts) - areas = areas[None].repeat(num_points, 1) - regress_ranges = regress_ranges[:, None, :].expand( - num_points, num_gts, 2) - gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) - xs, ys = points[:, 0], points[:, 1] - xs = xs[:, None].expand(num_points, num_gts) - ys = ys[:, None].expand(num_points, num_gts) - - left = xs - gt_bboxes[..., 0] - right = gt_bboxes[..., 2] - xs - top = ys - gt_bboxes[..., 1] - bottom = gt_bboxes[..., 3] - ys - bbox_targets = torch.stack((left, top, right, bottom), -1) - - if self.center_sampling: - # condition1: inside a `center bbox` - radius = 
self.center_sample_radius - center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2 - center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2 - center_gts = torch.zeros_like(gt_bboxes) - stride = center_xs.new_zeros(center_xs.shape) - - # project the points on current lvl back to the `original` sizes - lvl_begin = 0 - for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl): - lvl_end = lvl_begin + num_points_lvl - stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius - lvl_begin = lvl_end - - x_mins = center_xs - stride - y_mins = center_ys - stride - x_maxs = center_xs + stride - y_maxs = center_ys + stride - center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0], - x_mins, gt_bboxes[..., 0]) - center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1], - y_mins, gt_bboxes[..., 1]) - center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2], - gt_bboxes[..., 2], x_maxs) - center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3], - gt_bboxes[..., 3], y_maxs) - - cb_dist_left = xs - center_gts[..., 0] - cb_dist_right = center_gts[..., 2] - xs - cb_dist_top = ys - center_gts[..., 1] - cb_dist_bottom = center_gts[..., 3] - ys - center_bbox = torch.stack( - (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1) - inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0 - else: - # condition1: inside a gt bbox - inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 - - # condition2: limit the regression range for each location - max_regress_distance = bbox_targets.max(-1)[0] - inside_regress_range = ( - (max_regress_distance >= regress_ranges[..., 0]) - & (max_regress_distance <= regress_ranges[..., 1])) - - # if there are still more than one objects for a location, - # we choose the one with minimal area - areas[inside_gt_bbox_mask == 0] = INF - areas[inside_regress_range == 0] = INF - min_area, min_area_inds = areas.min(dim=1) - - labels = gt_labels[min_area_inds] - labels[min_area == INF] = self.num_classes # set as BG - bbox_targets = bbox_targets[range(num_points), min_area_inds] - - return labels, bbox_targets - - def centerness_target(self, pos_bbox_targets): - """Compute centerness targets. - - Args: - pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape - (num_pos, 4) - - Returns: - Tensor: Centerness target. - """ - # only calculate pos centerness targets, otherwise there may be nan - left_right = pos_bbox_targets[:, [0, 2]] - top_bottom = pos_bbox_targets[:, [1, 3]] - if len(left_right) == 0: - centerness_targets = left_right[..., 0] - else: - centerness_targets = ( - left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * ( - top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) - return torch.sqrt(centerness_targets) - - def _get_points_single(self, - featmap_size, - stride, - dtype, - device, - flatten=False): - """Get points according to feature map size. - - This function will be deprecated soon. 
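The `centerness_target` formula above can be checked on a few hand-picked ltrb targets (values are illustrative only):

```
import torch

# ltrb distances from three positive locations to their assigned gt boxes.
pos_bbox_targets = torch.tensor([[10., 10., 10., 10.],   # perfectly centered
                                 [ 2., 10., 18., 10.],   # off-center horizontally
                                 [ 1.,  1., 19., 19.]])  # near a corner

left_right = pos_bbox_targets[:, [0, 2]]
top_bottom = pos_bbox_targets[:, [1, 3]]
centerness = torch.sqrt(
    (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
    (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
print(centerness)  # ~[1.000, 0.333, 0.053]; lower for off-center locations
```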
- """ - warnings.warn( - '`_get_points_single` in `FCOSHead` will be ' - 'deprecated soon, we support a multi level point generator now' - 'you can get points of a single level feature map ' - 'with `self.prior_generator.single_level_grid_priors` ') - - y, x = super()._get_points_single(featmap_size, stride, dtype, device) - points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride), - dim=-1) + stride // 2 - return points diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/fovea_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/fovea_head.py deleted file mode 100644 index 8be7fc94c767005da5d31d201dcc55fb760b5c53..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/fovea_head.py +++ /dev/null @@ -1,385 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.ops import DeformConv2d -from mmcv.runner import BaseModule - -from mmdet.core import multi_apply -from mmdet.core.utils import filter_scores_and_topk -from ..builder import HEADS -from .anchor_free_head import AnchorFreeHead - -INF = 1e8 - - -class FeatureAlign(BaseModule): - - def __init__(self, - in_channels, - out_channels, - kernel_size=3, - deform_groups=4, - init_cfg=dict( - type='Normal', - layer='Conv2d', - std=0.1, - override=dict( - type='Normal', name='conv_adaption', std=0.01))): - super(FeatureAlign, self).__init__(init_cfg) - offset_channels = kernel_size * kernel_size * 2 - self.conv_offset = nn.Conv2d( - 4, deform_groups * offset_channels, 1, bias=False) - self.conv_adaption = DeformConv2d( - in_channels, - out_channels, - kernel_size=kernel_size, - padding=(kernel_size - 1) // 2, - deform_groups=deform_groups) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x, shape): - offset = self.conv_offset(shape) - x = self.relu(self.conv_adaption(x, offset)) - return x - - -@HEADS.register_module() -class FoveaHead(AnchorFreeHead): - """FoveaBox: Beyond Anchor-based Object Detector - https://arxiv.org/abs/1904.03797 - """ - - def __init__(self, - num_classes, - in_channels, - base_edge_list=(16, 32, 64, 128, 256), - scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128, - 512)), - sigma=0.4, - with_deform=False, - deform_groups=4, - init_cfg=dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', - name='conv_cls', - std=0.01, - bias_prob=0.01)), - **kwargs): - self.base_edge_list = base_edge_list - self.scale_ranges = scale_ranges - self.sigma = sigma - self.with_deform = with_deform - self.deform_groups = deform_groups - super().__init__(num_classes, in_channels, init_cfg=init_cfg, **kwargs) - - def _init_layers(self): - # box branch - super()._init_reg_convs() - self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) - - # cls branch - if not self.with_deform: - super()._init_cls_convs() - self.conv_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - else: - self.cls_convs = nn.ModuleList() - self.cls_convs.append( - ConvModule( - self.feat_channels, (self.feat_channels * 4), - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - bias=self.norm_cfg is None)) - self.cls_convs.append( - ConvModule((self.feat_channels * 4), (self.feat_channels * 4), - 1, - stride=1, - padding=0, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - bias=self.norm_cfg is None)) - self.feature_adaption = FeatureAlign( - self.feat_channels, - 
self.feat_channels, - kernel_size=3, - deform_groups=self.deform_groups) - self.conv_cls = nn.Conv2d( - int(self.feat_channels * 4), - self.cls_out_channels, - 3, - padding=1) - - def forward_single(self, x): - cls_feat = x - reg_feat = x - for reg_layer in self.reg_convs: - reg_feat = reg_layer(reg_feat) - bbox_pred = self.conv_reg(reg_feat) - if self.with_deform: - cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp()) - for cls_layer in self.cls_convs: - cls_feat = cls_layer(cls_feat) - cls_score = self.conv_cls(cls_feat) - return cls_score, bbox_pred - - def loss(self, - cls_scores, - bbox_preds, - gt_bbox_list, - gt_label_list, - img_metas, - gt_bboxes_ignore=None): - assert len(cls_scores) == len(bbox_preds) - - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - points = self.prior_generator.grid_priors( - featmap_sizes, - dtype=bbox_preds[0].dtype, - device=bbox_preds[0].device) - num_imgs = cls_scores[0].size(0) - flatten_cls_scores = [ - cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) - for cls_score in cls_scores - ] - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) - for bbox_pred in bbox_preds - ] - flatten_cls_scores = torch.cat(flatten_cls_scores) - flatten_bbox_preds = torch.cat(flatten_bbox_preds) - flatten_labels, flatten_bbox_targets = self.get_targets( - gt_bbox_list, gt_label_list, featmap_sizes, points) - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - pos_inds = ((flatten_labels >= 0) - & (flatten_labels < self.num_classes)).nonzero().view(-1) - num_pos = len(pos_inds) - - loss_cls = self.loss_cls( - flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs) - if num_pos > 0: - pos_bbox_preds = flatten_bbox_preds[pos_inds] - pos_bbox_targets = flatten_bbox_targets[pos_inds] - pos_weights = pos_bbox_targets.new_zeros( - pos_bbox_targets.size()) + 1.0 - loss_bbox = self.loss_bbox( - pos_bbox_preds, - pos_bbox_targets, - pos_weights, - avg_factor=num_pos) - else: - loss_bbox = torch.tensor( - 0, - dtype=flatten_bbox_preds.dtype, - device=flatten_bbox_preds.device) - return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) - - def get_targets(self, gt_bbox_list, gt_label_list, featmap_sizes, points): - label_list, bbox_target_list = multi_apply( - self._get_target_single, - gt_bbox_list, - gt_label_list, - featmap_size_list=featmap_sizes, - point_list=points) - flatten_labels = [ - torch.cat([ - labels_level_img.flatten() for labels_level_img in labels_level - ]) for labels_level in zip(*label_list) - ] - flatten_bbox_targets = [ - torch.cat([ - bbox_targets_level_img.reshape(-1, 4) - for bbox_targets_level_img in bbox_targets_level - ]) for bbox_targets_level in zip(*bbox_target_list) - ] - flatten_labels = torch.cat(flatten_labels) - flatten_bbox_targets = torch.cat(flatten_bbox_targets) - return flatten_labels, flatten_bbox_targets - - def _get_target_single(self, - gt_bboxes_raw, - gt_labels_raw, - featmap_size_list=None, - point_list=None): - - gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) * - (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1])) - label_list = [] - bbox_target_list = [] - # for each pyramid, find the cls and box target - for base_len, (lower_bound, upper_bound), stride, featmap_size, \ - points in zip(self.base_edge_list, self.scale_ranges, - self.strides, featmap_size_list, point_list): - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - points = points.view(*featmap_size, 2) - x, y = points[..., 0], points[..., 1] - labels = 
gt_labels_raw.new_zeros(featmap_size) + self.num_classes - bbox_targets = gt_bboxes_raw.new(featmap_size[0], featmap_size[1], - 4) + 1 - # scale assignment - hit_indices = ((gt_areas >= lower_bound) & - (gt_areas <= upper_bound)).nonzero().flatten() - if len(hit_indices) == 0: - label_list.append(labels) - bbox_target_list.append(torch.log(bbox_targets)) - continue - _, hit_index_order = torch.sort(-gt_areas[hit_indices]) - hit_indices = hit_indices[hit_index_order] - gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride - gt_labels = gt_labels_raw[hit_indices] - half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0]) - half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1]) - # valid fovea area: left, right, top, down - pos_left = torch.ceil( - gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long(). \ - clamp(0, featmap_size[1] - 1) - pos_right = torch.floor( - gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long(). \ - clamp(0, featmap_size[1] - 1) - pos_top = torch.ceil( - gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long(). \ - clamp(0, featmap_size[0] - 1) - pos_down = torch.floor( - gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long(). \ - clamp(0, featmap_size[0] - 1) - for px1, py1, px2, py2, label, (gt_x1, gt_y1, gt_x2, gt_y2) in \ - zip(pos_left, pos_top, pos_right, pos_down, gt_labels, - gt_bboxes_raw[hit_indices, :]): - labels[py1:py2 + 1, px1:px2 + 1] = label - bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = \ - (x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len - bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = \ - (y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len - bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = \ - (gt_x2 - x[py1:py2 + 1, px1:px2 + 1]) / base_len - bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = \ - (gt_y2 - y[py1:py2 + 1, px1:px2 + 1]) / base_len - bbox_targets = bbox_targets.clamp(min=1. / 16, max=16.) - label_list.append(labels) - bbox_target_list.append(torch.log(bbox_targets)) - return label_list, bbox_target_list - - # Same as base_dense_head/_get_bboxes_single except self._bbox_decode - def _get_bboxes_single(self, - cls_score_list, - bbox_pred_list, - score_factor_list, - mlvl_priors, - img_meta, - cfg, - rescale=False, - with_nms=True, - **kwargs): - """Transform outputs of a single image into bbox predictions. - - Args: - cls_score_list (list[Tensor]): Box scores from all scale - levels of a single image, each item has shape - (num_priors * num_classes, H, W). - bbox_pred_list (list[Tensor]): Box energies / deltas from - all scale levels of a single image, each item has shape - (num_priors * 4, H, W). - score_factor_list (list[Tensor]): Score factor from all scale - levels of a single image. Fovea head does not need this value. - mlvl_priors (list[Tensor]): Each element in the list is - the priors of a single level in feature pyramid, has shape - (num_priors, 2). - img_meta (dict): Image meta info. - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - tuple[Tensor]: Results of detected bboxes and labels. If with_nms - is False and mlvl_score_factor is None, return mlvl_bboxes and - mlvl_scores, else return mlvl_bboxes, mlvl_scores and - mlvl_score_factor. Usually with_nms is False is used for aug - test. 
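The sigma-shrunk positive ("fovea") region computed above can be reproduced for a single ground-truth box at one pyramid level; the stride, sigma and box values below are arbitrary examples:

```
import torch

sigma, stride = 0.4, 8
featmap_h, featmap_w = 100, 168

# One gt box in image coordinates (x1, y1, x2, y2), projected to this level.
gt = torch.tensor([64., 80., 320., 240.]) / stride
half_w = 0.5 * (gt[2] - gt[0])
half_h = 0.5 * (gt[3] - gt[1])

# Only the central, sigma-shrunk part of the box is treated as positive.
pos_left  = torch.ceil(gt[0] + (1 - sigma) * half_w - 0.5).long().clamp(0, featmap_w - 1)
pos_right = torch.floor(gt[0] + (1 + sigma) * half_w - 0.5).long().clamp(0, featmap_w - 1)
pos_top   = torch.ceil(gt[1] + (1 - sigma) * half_h - 0.5).long().clamp(0, featmap_h - 1)
pos_down  = torch.floor(gt[1] + (1 + sigma) * half_h - 0.5).long().clamp(0, featmap_h - 1)
print(pos_left.item(), pos_right.item(), pos_top.item(), pos_down.item())  # 18 29 16 23
```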
If with_nms is True, then return the following format - - - det_bboxes (Tensor): Predicted bboxes with shape \ - [num_bboxes, 5], where the first 4 columns are bounding \ - box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ - column are scores between 0 and 1. - - det_labels (Tensor): Predicted labels of the corresponding \ - box with shape [num_bboxes]. - """ - cfg = self.test_cfg if cfg is None else cfg - assert len(cls_score_list) == len(bbox_pred_list) - img_shape = img_meta['img_shape'] - nms_pre = cfg.get('nms_pre', -1) - - mlvl_bboxes = [] - mlvl_scores = [] - mlvl_labels = [] - for level_idx, (cls_score, bbox_pred, stride, base_len, priors) in \ - enumerate(zip(cls_score_list, bbox_pred_list, self.strides, - self.base_edge_list, mlvl_priors)): - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) - - scores = cls_score.permute(1, 2, 0).reshape( - -1, self.cls_out_channels).sigmoid() - - # After https://github.com/open-mmlab/mmdetection/pull/6268/, - # this operation keeps fewer bboxes under the same `nms_pre`. - # There is no difference in performance for most models. If you - # find a slight drop in performance, you can set a larger - # `nms_pre` than before. - results = filter_scores_and_topk( - scores, cfg.score_thr, nms_pre, - dict(bbox_pred=bbox_pred, priors=priors)) - scores, labels, _, filtered_results = results - - bbox_pred = filtered_results['bbox_pred'] - priors = filtered_results['priors'] - - bboxes = self._bbox_decode(priors, bbox_pred, base_len, img_shape) - - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_labels.append(labels) - - return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, - img_meta['scale_factor'], cfg, rescale, - with_nms) - - def _bbox_decode(self, priors, bbox_pred, base_len, max_shape): - bbox_pred = bbox_pred.exp() - - y = priors[:, 1] - x = priors[:, 0] - x1 = (x - base_len * bbox_pred[:, 0]). \ - clamp(min=0, max=max_shape[1] - 1) - y1 = (y - base_len * bbox_pred[:, 1]). \ - clamp(min=0, max=max_shape[0] - 1) - x2 = (x + base_len * bbox_pred[:, 2]). \ - clamp(min=0, max=max_shape[1] - 1) - y2 = (y + base_len * bbox_pred[:, 3]). \ - clamp(min=0, max=max_shape[0] - 1) - decoded_bboxes = torch.stack([x1, y1, x2, y2], -1) - return decoded_bboxes - - def _get_points_single(self, *args, **kwargs): - """Get points according to feature map size. - - This function will be deprecated soon. - """ - warnings.warn( - '`_get_points_single` in `FoveaHead` will be ' - 'deprecated soon, we support a multi level point generator now' - 'you can get points of a single level feature map ' - 'with `self.prior_generator.single_level_grid_priors` ') - y, x = super()._get_points_single(*args, **kwargs) - return y + 0.5, x + 0.5 diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/free_anchor_retina_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/free_anchor_retina_head.py deleted file mode 100644 index 3acd25ecba414b691b2b00a6bc30faa580dadebc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/free_anchor_retina_head.py +++ /dev/null @@ -1,272 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn.functional as F - -from mmdet.core import bbox_overlaps -from ..builder import HEADS -from .retina_head import RetinaHead - -EPS = 1e-12 - - -@HEADS.register_module() -class FreeAnchorRetinaHead(RetinaHead): - """FreeAnchor RetinaHead used in https://arxiv.org/abs/1909.02466. 
- - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - stacked_convs (int): Number of conv layers in cls and reg tower. - Default: 4. - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None. - norm_cfg (dict): dictionary to construct and config norm layer. - Default: norm_cfg=dict(type='GN', num_groups=32, - requires_grad=True). - pre_anchor_topk (int): Number of boxes that be token in each bag. - bbox_thr (float): The threshold of the saturated linear function. It is - usually the same with the IoU threshold used in NMS. - gamma (float): Gamma parameter in focal loss. - alpha (float): Alpha parameter in focal loss. - """ # noqa: W605 - - def __init__(self, - num_classes, - in_channels, - stacked_convs=4, - conv_cfg=None, - norm_cfg=None, - pre_anchor_topk=50, - bbox_thr=0.6, - gamma=2.0, - alpha=0.5, - **kwargs): - super(FreeAnchorRetinaHead, - self).__init__(num_classes, in_channels, stacked_convs, conv_cfg, - norm_cfg, **kwargs) - - self.pre_anchor_topk = pre_anchor_topk - self.bbox_thr = bbox_thr - self.gamma = gamma - self.alpha = alpha - - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): each item are the truth boxes for each - image in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
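The "saturated linear function" controlled by `bbox_thr` maps IoUs between ground-truth boxes and predicted boxes to matching probabilities: zero below the threshold, one at each object's best-matching anchor, linear in between. A small sketch with an invented IoU matrix:

```
import torch

bbox_thr = 0.6  # t1, the saturation threshold

# Toy IoU matrix between 2 gt boxes (rows) and 5 predicted boxes (columns).
object_box_iou = torch.tensor([[0.10, 0.55, 0.72, 0.90, 0.30],
                               [0.05, 0.65, 0.40, 0.20, 0.61]])

t1 = bbox_thr
t2 = object_box_iou.max(dim=1, keepdim=True).values.clamp(min=t1 + 1e-12)
object_box_prob = ((object_box_iou - t1) / (t2 - t1)).clamp(min=0, max=1)
print(object_box_prob)
# row 0 -> [0.0, 0.0, 0.4, 1.0, 0.0]; row 1 -> [0.0, 1.0, 0.0, 0.0, 0.2]
```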
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - device = cls_scores[0].device - anchor_list, _ = self.get_anchors( - featmap_sizes, img_metas, device=device) - anchors = [torch.cat(anchor) for anchor in anchor_list] - - # concatenate each level - cls_scores = [ - cls.permute(0, 2, 3, - 1).reshape(cls.size(0), -1, self.cls_out_channels) - for cls in cls_scores - ] - bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4) - for bbox_pred in bbox_preds - ] - cls_scores = torch.cat(cls_scores, dim=1) - bbox_preds = torch.cat(bbox_preds, dim=1) - - cls_prob = torch.sigmoid(cls_scores) - box_prob = [] - num_pos = 0 - positive_losses = [] - for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_, - bbox_preds_) in enumerate( - zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds)): - - with torch.no_grad(): - if len(gt_bboxes_) == 0: - image_box_prob = torch.zeros( - anchors_.size(0), - self.cls_out_channels).type_as(bbox_preds_) - else: - # box_localization: a_{j}^{loc}, shape: [j, 4] - pred_boxes = self.bbox_coder.decode(anchors_, bbox_preds_) - - # object_box_iou: IoU_{ij}^{loc}, shape: [i, j] - object_box_iou = bbox_overlaps(gt_bboxes_, pred_boxes) - - # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j] - t1 = self.bbox_thr - t2 = object_box_iou.max( - dim=1, keepdim=True).values.clamp(min=t1 + 1e-12) - object_box_prob = ((object_box_iou - t1) / - (t2 - t1)).clamp( - min=0, max=1) - - # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j] - num_obj = gt_labels_.size(0) - indices = torch.stack([ - torch.arange(num_obj).type_as(gt_labels_), gt_labels_ - ], - dim=0) - object_cls_box_prob = torch.sparse_coo_tensor( - indices, object_box_prob) - - # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j] - """ - from "start" to "end" implement: - image_box_iou = torch.sparse.max(object_cls_box_prob, - dim=0).t() - - """ - # start - box_cls_prob = torch.sparse.sum( - object_cls_box_prob, dim=0).to_dense() - - indices = torch.nonzero(box_cls_prob, as_tuple=False).t_() - if indices.numel() == 0: - image_box_prob = torch.zeros( - anchors_.size(0), - self.cls_out_channels).type_as(object_box_prob) - else: - nonzero_box_prob = torch.where( - (gt_labels_.unsqueeze(dim=-1) == indices[0]), - object_box_prob[:, indices[1]], - torch.tensor([ - 0 - ]).type_as(object_box_prob)).max(dim=0).values - - # upmap to shape [j, c] - image_box_prob = torch.sparse_coo_tensor( - indices.flip([0]), - nonzero_box_prob, - size=(anchors_.size(0), - self.cls_out_channels)).to_dense() - # end - - box_prob.append(image_box_prob) - - # construct bags for objects - match_quality_matrix = bbox_overlaps(gt_bboxes_, anchors_) - _, matched = torch.topk( - match_quality_matrix, - self.pre_anchor_topk, - dim=1, - sorted=False) - del match_quality_matrix - - # matched_cls_prob: P_{ij}^{cls} - matched_cls_prob = torch.gather( - cls_prob_[matched], 2, - gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk, - 1)).squeeze(2) - - # matched_box_prob: P_{ij}^{loc} - matched_anchors = anchors_[matched] - matched_object_targets = self.bbox_coder.encode( - matched_anchors, - gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors)) - loss_bbox = self.loss_bbox( - bbox_preds_[matched], - matched_object_targets, - reduction_override='none').sum(-1) - matched_box_prob = torch.exp(-loss_bbox) - - # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )} - num_pos += len(gt_bboxes_) - positive_losses.append( - 
self.positive_bag_loss(matched_cls_prob, matched_box_prob)) - positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos) - - # box_prob: P{a_{j} \in A_{+}} - box_prob = torch.stack(box_prob, dim=0) - - # negative_loss: - # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B|| - negative_loss = self.negative_bag_loss(cls_prob, box_prob).sum() / max( - 1, num_pos * self.pre_anchor_topk) - - # avoid the absence of gradients in regression subnet - # when no ground-truth in a batch - if num_pos == 0: - positive_loss = bbox_preds.sum() * 0 - - losses = { - 'positive_bag_loss': positive_loss, - 'negative_bag_loss': negative_loss - } - return losses - - def positive_bag_loss(self, matched_cls_prob, matched_box_prob): - """Compute positive bag loss. - - :math:`-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )`. - - :math:`P_{ij}^{cls}`: matched_cls_prob, classification probability of matched samples. - - :math:`P_{ij}^{loc}`: matched_box_prob, box probability of matched samples. - - Args: - matched_cls_prob (Tensor): Classification probability of matched - samples in shape (num_gt, pre_anchor_topk). - matched_box_prob (Tensor): BBox probability of matched samples, - in shape (num_gt, pre_anchor_topk). - - Returns: - Tensor: Positive bag loss in shape (num_gt,). - """ # noqa: E501, W605 - # bag_prob = Mean-max(matched_prob) - matched_prob = matched_cls_prob * matched_box_prob - weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None) - weight /= weight.sum(dim=1).unsqueeze(dim=-1) - bag_prob = (weight * matched_prob).sum(dim=1) - # positive_bag_loss = -self.alpha * log(bag_prob) - return self.alpha * F.binary_cross_entropy( - bag_prob, torch.ones_like(bag_prob), reduction='none') - - def negative_bag_loss(self, cls_prob, box_prob): - """Compute negative bag loss. - - :math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`. - - :math:`P_{a_{j} \in A_{+}}`: Box_probability of matched samples. - - :math:`P_{j}^{bg}`: Classification probability of negative samples. - - Args: - cls_prob (Tensor): Classification probability, in shape - (num_img, num_anchors, num_classes). - box_prob (Tensor): Box probability, in shape - (num_img, num_anchors, num_classes). - - Returns: - Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes). - """ # noqa: E501, W605 - prob = cls_prob * (1 - box_prob) - # There are some cases when neg_prob = 0. - # This will cause the neg_prob.log() to be inf without clamp. - prob = prob.clamp(min=EPS, max=1 - EPS) - negative_bag_loss = prob**self.gamma * F.binary_cross_entropy( - prob, torch.zeros_like(prob), reduction='none') - return (1 - self.alpha) * negative_bag_loss diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/fsaf_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/fsaf_head.py deleted file mode 100644 index 2d2b78796948bec17a44624106d9022ae2be3e6c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/fsaf_head.py +++ /dev/null @@ -1,433 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch -from mmcv.runner import force_fp32 - -from mmdet.core import (anchor_inside_flags, images_to_levels, multi_apply, - unmap) -from ..builder import HEADS -from ..losses.accuracy import accuracy -from ..losses.utils import weight_reduce_loss -from .retina_head import RetinaHead - - -@HEADS.register_module() -class FSAFHead(RetinaHead): - """Anchor-free head used in `FSAF `_. - - The head contains two subnetworks. 
The first classifies anchor boxes and - the second regresses deltas for the anchors (num_anchors is 1 for anchor- - free methods) - - Args: - *args: Same as its base class in :class:`RetinaHead` - score_threshold (float, optional): The score_threshold to calculate - positive recall. If given, prediction scores lower than this value - is counted as incorrect prediction. Default to None. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - **kwargs: Same as its base class in :class:`RetinaHead` - - Example: - >>> import torch - >>> self = FSAFHead(11, 7) - >>> x = torch.rand(1, 7, 32, 32) - >>> cls_score, bbox_pred = self.forward_single(x) - >>> # Each anchor predicts a score for each class except background - >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors - >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors - >>> assert cls_per_anchor == self.num_classes - >>> assert box_per_anchor == 4 - """ - - def __init__(self, *args, score_threshold=None, init_cfg=None, **kwargs): - # The positive bias in self.retina_reg conv is to prevent predicted \ - # bbox with 0 area - if init_cfg is None: - init_cfg = dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=[ - dict( - type='Normal', - name='retina_cls', - std=0.01, - bias_prob=0.01), - dict( - type='Normal', name='retina_reg', std=0.01, bias=0.25) - ]) - super().__init__(*args, init_cfg=init_cfg, **kwargs) - self.score_threshold = score_threshold - - def forward_single(self, x): - """Forward feature map of a single scale level. - - Args: - x (Tensor): Feature map of a single scale level. - - Returns: - tuple (Tensor): - cls_score (Tensor): Box scores for each scale level - Has shape (N, num_points * num_classes, H, W). - bbox_pred (Tensor): Box energies / deltas for each scale - level with shape (N, num_points * 4, H, W). - """ - cls_score, bbox_pred = super().forward_single(x) - # relu: TBLR encoder only accepts positive bbox_pred - return cls_score, self.relu(bbox_pred) - - def _get_targets_single(self, - flat_anchors, - valid_flags, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=1, - unmap_outputs=True): - """Compute regression and classification targets for anchors in a - single image. - - Most of the codes are the same with the base class - :obj: `AnchorHead`, except that it also collects and returns - the matched gt index in the image (from 0 to num_gt-1). If the - anchor bbox is not matched to any gt, the corresponding value in - pos_gt_inds is -1. 
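-
-        As a toy illustration (made-up values): with three valid anchors
-        where only the second one is assigned to the first ground-truth box,
-        ``pos_gt_inds`` would be ``tensor([-1, 0, -1])``.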
- """ - inside_flags = anchor_inside_flags(flat_anchors, valid_flags, - img_meta['img_shape'][:2], - self.train_cfg.allowed_border) - if not inside_flags.any(): - return (None, ) * 7 - # Assign gt and sample anchors - anchors = flat_anchors[inside_flags.type(torch.bool), :] - assign_result = self.assigner.assign( - anchors, gt_bboxes, gt_bboxes_ignore, - None if self.sampling else gt_labels) - - sampling_result = self.sampler.sample(assign_result, anchors, - gt_bboxes) - - num_valid_anchors = anchors.shape[0] - bbox_targets = torch.zeros_like(anchors) - bbox_weights = torch.zeros_like(anchors) - labels = anchors.new_full((num_valid_anchors, ), - self.num_classes, - dtype=torch.long) - label_weights = anchors.new_zeros((num_valid_anchors, label_channels), - dtype=torch.float) - pos_gt_inds = anchors.new_full((num_valid_anchors, ), - -1, - dtype=torch.long) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - - if len(pos_inds) > 0: - if not self.reg_decoded_bbox: - pos_bbox_targets = self.bbox_coder.encode( - sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) - else: - # When the regression loss (e.g. `IouLoss`, `GIouLoss`) - # is applied directly on the decoded bounding boxes, both - # the predicted boxes and regression targets should be with - # absolute coordinate format. - pos_bbox_targets = sampling_result.pos_gt_bboxes - bbox_targets[pos_inds, :] = pos_bbox_targets - bbox_weights[pos_inds, :] = 1.0 - # The assigned gt_index for each anchor. (0-based) - pos_gt_inds[pos_inds] = sampling_result.pos_assigned_gt_inds - if gt_labels is None: - # Only rpn gives gt_labels as None - # Foreground is the first class - labels[pos_inds] = 0 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if self.train_cfg.pos_weight <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg.pos_weight - - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - # shadowed_labels is a tensor composed of tuples - # (anchor_inds, class_label) that indicate those anchors lying in the - # outer region of a gt or overlapped by another gt with a smaller - # area. - # - # Therefore, only the shadowed labels are ignored for loss calculation. - # the key `shadowed_labels` is defined in :obj:`CenterRegionAssigner` - shadowed_labels = assign_result.get_extra_property('shadowed_labels') - if shadowed_labels is not None and shadowed_labels.numel(): - if len(shadowed_labels.shape) == 2: - idx_, label_ = shadowed_labels[:, 0], shadowed_labels[:, 1] - assert (labels[idx_] != label_).all(), \ - 'One label cannot be both positive and ignored' - label_weights[idx_, label_] = 0 - else: - label_weights[shadowed_labels] = 0 - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_anchors.size(0) - labels = unmap(labels, num_total_anchors, inside_flags) - label_weights = unmap(label_weights, num_total_anchors, - inside_flags) - bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) - bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) - pos_gt_inds = unmap( - pos_gt_inds, num_total_anchors, inside_flags, fill=-1) - - return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, - neg_inds, sampling_result, pos_gt_inds) - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute loss of the head. 
- - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_points * num_classes, H, W). - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_points * 4, H, W). - gt_bboxes (list[Tensor]): each item are the truth boxes for each - image in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - for i in range(len(bbox_preds)): # loop over fpn level - # avoid 0 area of the predicted bbox - bbox_preds[i] = bbox_preds[i].clamp(min=1e-4) - # TODO: It may directly use the base-class loss function. - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - batch_size = len(gt_bboxes) - device = cls_scores[0].device - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg, - pos_assigned_gt_inds_list) = cls_reg_targets - - num_gts = np.array(list(map(len, gt_labels))) - num_total_samples = ( - num_total_pos + num_total_neg if self.sampling else num_total_pos) - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - # concat all level anchors and flags to a single tensor - concat_anchor_list = [] - for i in range(len(anchor_list)): - concat_anchor_list.append(torch.cat(anchor_list[i])) - all_anchor_list = images_to_levels(concat_anchor_list, - num_level_anchors) - losses_cls, losses_bbox = multi_apply( - self.loss_single, - cls_scores, - bbox_preds, - all_anchor_list, - labels_list, - label_weights_list, - bbox_targets_list, - bbox_weights_list, - num_total_samples=num_total_samples) - - # `pos_assigned_gt_inds_list` (length: fpn_levels) stores the assigned - # gt index of each anchor bbox in each fpn level. - cum_num_gts = list(np.cumsum(num_gts)) # length of batch_size - for i, assign in enumerate(pos_assigned_gt_inds_list): - # loop over fpn levels - for j in range(1, batch_size): - # loop over batch size - # Convert gt indices in each img to those in the batch - assign[j][assign[j] >= 0] += int(cum_num_gts[j - 1]) - pos_assigned_gt_inds_list[i] = assign.flatten() - labels_list[i] = labels_list[i].flatten() - num_gts = sum(map(len, gt_labels)) # total number of gt in the batch - # The unique label index of each gt in the batch - label_sequence = torch.arange(num_gts, device=device) - # Collect the average loss of each gt in each level - with torch.no_grad(): - loss_levels, = multi_apply( - self.collect_loss_level_single, - losses_cls, - losses_bbox, - pos_assigned_gt_inds_list, - labels_seq=label_sequence) - # Shape: (fpn_levels, num_gts). 
Loss of each gt at each fpn level - loss_levels = torch.stack(loss_levels, dim=0) - # Locate the best fpn level for loss back-propagation - if loss_levels.numel() == 0: # zero gt - argmin = loss_levels.new_empty((num_gts, ), dtype=torch.long) - else: - _, argmin = loss_levels.min(dim=0) - - # Reweight the loss of each (anchor, label) pair, so that only those - # at the best gt level are back-propagated. - losses_cls, losses_bbox, pos_inds = multi_apply( - self.reweight_loss_single, - losses_cls, - losses_bbox, - pos_assigned_gt_inds_list, - labels_list, - list(range(len(losses_cls))), - min_levels=argmin) - num_pos = torch.cat(pos_inds, 0).sum().float() - pos_recall = self.calculate_pos_recall(cls_scores, labels_list, - pos_inds) - - if num_pos == 0: # No gt - avg_factor = num_pos + float(num_total_neg) - else: - avg_factor = num_pos - for i in range(len(losses_cls)): - losses_cls[i] /= avg_factor - losses_bbox[i] /= avg_factor - return dict( - loss_cls=losses_cls, - loss_bbox=losses_bbox, - num_pos=num_pos / batch_size, - pos_recall=pos_recall) - - def calculate_pos_recall(self, cls_scores, labels_list, pos_inds): - """Calculate positive recall with score threshold. - - Args: - cls_scores (list[Tensor]): Classification scores at all fpn levels. - Each tensor is in shape (N, num_classes * num_anchors, H, W) - labels_list (list[Tensor]): The label that each anchor is assigned - to. Shape (N * H * W * num_anchors, ) - pos_inds (list[Tensor]): List of bool tensors indicating whether - the anchor is assigned to a positive label. - Shape (N * H * W * num_anchors, ) - - Returns: - Tensor: A single float number indicating the positive recall. - """ - with torch.no_grad(): - num_class = self.num_classes - scores = [ - cls.permute(0, 2, 3, 1).reshape(-1, num_class)[pos] - for cls, pos in zip(cls_scores, pos_inds) - ] - labels = [ - label.reshape(-1)[pos] - for label, pos in zip(labels_list, pos_inds) - ] - scores = torch.cat(scores, dim=0) - labels = torch.cat(labels, dim=0) - if self.use_sigmoid_cls: - scores = scores.sigmoid() - else: - scores = scores.softmax(dim=1) - - return accuracy(scores, labels, thresh=self.score_threshold) - - def collect_loss_level_single(self, cls_loss, reg_loss, assigned_gt_inds, - labels_seq): - """Get the average loss in each FPN level w.r.t. each gt label. - - Args: - cls_loss (Tensor): Classification loss of each feature map pixel, - shape (num_anchor, num_class) - reg_loss (Tensor): Regression loss of each feature map pixel, - shape (num_anchor, 4) - assigned_gt_inds (Tensor): It indicates which gt the prior is - assigned to (0-based, -1: no assignment). shape (num_anchor), - labels_seq: The rank of labels. shape (num_gt) - - Returns: - shape: (num_gt), average loss of each gt in this level - """ - if len(reg_loss.shape) == 2: # iou loss has shape (num_prior, 4) - reg_loss = reg_loss.sum(dim=-1) # sum loss in tblr dims - if len(cls_loss.shape) == 2: - cls_loss = cls_loss.sum(dim=-1) # sum loss in class dims - loss = cls_loss + reg_loss - assert loss.size(0) == assigned_gt_inds.size(0) - # Default loss value is 1e6 for a layer where no anchor is positive - # to ensure it will not be chosen to back-propagate gradient - losses_ = loss.new_full(labels_seq.shape, 1e6) - for i, l in enumerate(labels_seq): - match = assigned_gt_inds == l - if match.any(): - losses_[i] = loss[match].mean() - return losses_, - - def reweight_loss_single(self, cls_loss, reg_loss, assigned_gt_inds, - labels, level, min_levels): - """Reweight loss values at each level. 
- - Reassign loss values at each level by masking those where the - pre-calculated loss is too large. Then return the reduced losses. - - Args: - cls_loss (Tensor): Element-wise classification loss. - Shape: (num_anchors, num_classes) - reg_loss (Tensor): Element-wise regression loss. - Shape: (num_anchors, 4) - assigned_gt_inds (Tensor): The gt indices that each anchor bbox - is assigned to. -1 denotes a negative anchor, otherwise it is the - gt index (0-based). Shape: (num_anchors, ), - labels (Tensor): Label assigned to anchors. Shape: (num_anchors, ). - level (int): The current level index in the pyramid - (0-4 for RetinaNet) - min_levels (Tensor): The best-matching level for each gt. - Shape: (num_gts, ), - - Returns: - tuple: - - cls_loss: Reduced corrected classification loss. Scalar. - - reg_loss: Reduced corrected regression loss. Scalar. - - pos_flags (Tensor): Corrected bool tensor indicating the - final positive anchors. Shape: (num_anchors, ). - """ - loc_weight = torch.ones_like(reg_loss) - cls_weight = torch.ones_like(cls_loss) - pos_flags = assigned_gt_inds >= 0 # positive pixel flag - pos_indices = torch.nonzero(pos_flags, as_tuple=False).flatten() - - if pos_flags.any(): # pos pixels exist - pos_assigned_gt_inds = assigned_gt_inds[pos_flags] - zeroing_indices = (min_levels[pos_assigned_gt_inds] != level) - neg_indices = pos_indices[zeroing_indices] - - if neg_indices.numel(): - pos_flags[neg_indices] = 0 - loc_weight[neg_indices] = 0 - # Only the weight corresponding to the label is - # zeroed out if not selected - zeroing_labels = labels[neg_indices] - assert (zeroing_labels >= 0).all() - cls_weight[neg_indices, zeroing_labels] = 0 - - # Weighted loss for both cls and reg loss - cls_loss = weight_reduce_loss(cls_loss, cls_weight, reduction='sum') - reg_loss = weight_reduce_loss(reg_loss, loc_weight, reduction='sum') - - return cls_loss, reg_loss, pos_flags diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ga_retina_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ga_retina_head.py deleted file mode 100644 index 6d9e874c2bfdd07b408d148110eb4dd85c3a9069..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ga_retina_head.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.ops import MaskedConv2d - -from ..builder import HEADS -from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead - - -@HEADS.register_module() -class GARetinaHead(GuidedAnchorHead): - """Guided-Anchor-based RetinaNet head.""" - - def __init__(self, - num_classes, - in_channels, - stacked_convs=4, - conv_cfg=None, - norm_cfg=None, - init_cfg=None, - **kwargs): - if init_cfg is None: - init_cfg = dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=[ - dict( - type='Normal', - name='conv_loc', - std=0.01, - bias_prob=0.01), - dict( - type='Normal', - name='retina_cls', - std=0.01, - bias_prob=0.01) - ]) - self.stacked_convs = stacked_convs - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - super(GARetinaHead, self).__init__( - num_classes, in_channels, init_cfg=init_cfg, **kwargs) - - def _init_layers(self): - """Initialize layers of the head.""" - self.relu = nn.ReLU(inplace=True) - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - - self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1) - self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2, - 1) - self.feature_adaption_cls = FeatureAdaption( - self.feat_channels, - self.feat_channels, - kernel_size=3, - deform_groups=self.deform_groups) - self.feature_adaption_reg = FeatureAdaption( - self.feat_channels, - self.feat_channels, - kernel_size=3, - deform_groups=self.deform_groups) - self.retina_cls = MaskedConv2d( - self.feat_channels, - self.num_base_priors * self.cls_out_channels, - 3, - padding=1) - self.retina_reg = MaskedConv2d( - self.feat_channels, self.num_base_priors * 4, 3, padding=1) - - def forward_single(self, x): - """Forward feature map of a single scale level.""" - cls_feat = x - reg_feat = x - for cls_conv in self.cls_convs: - cls_feat = cls_conv(cls_feat) - for reg_conv in self.reg_convs: - reg_feat = reg_conv(reg_feat) - - loc_pred = self.conv_loc(cls_feat) - shape_pred = self.conv_shape(reg_feat) - - cls_feat = self.feature_adaption_cls(cls_feat, shape_pred) - reg_feat = self.feature_adaption_reg(reg_feat, shape_pred) - - if not self.training: - mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr - else: - mask = None - cls_score = self.retina_cls(cls_feat, mask) - bbox_pred = self.retina_reg(reg_feat, mask) - return cls_score, bbox_pred, shape_pred, loc_pred diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ga_rpn_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ga_rpn_head.py deleted file mode 100644 index 4123c8b3f56f29f94668920d77b7db75ae78d8a2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ga_rpn_head.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import copy -import warnings - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv import ConfigDict -from mmcv.ops import nms - -from ..builder import HEADS -from .guided_anchor_head import GuidedAnchorHead - - -@HEADS.register_module() -class GARPNHead(GuidedAnchorHead): - """Guided-Anchor-based RPN head.""" - - def __init__(self, - in_channels, - init_cfg=dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', - name='conv_loc', - std=0.01, - bias_prob=0.01)), - **kwargs): - super(GARPNHead, self).__init__( - 1, in_channels, init_cfg=init_cfg, **kwargs) - - def _init_layers(self): - """Initialize layers of the head.""" - self.rpn_conv = nn.Conv2d( - self.in_channels, self.feat_channels, 3, padding=1) - super(GARPNHead, self)._init_layers() - - def forward_single(self, x): - """Forward feature of a single scale level.""" - - x = self.rpn_conv(x) - x = F.relu(x, inplace=True) - (cls_score, bbox_pred, shape_pred, - loc_pred) = super(GARPNHead, self).forward_single(x) - return cls_score, bbox_pred, shape_pred, loc_pred - - def loss(self, - cls_scores, - bbox_preds, - shape_preds, - loc_preds, - gt_bboxes, - img_metas, - gt_bboxes_ignore=None): - losses = super(GARPNHead, self).loss( - cls_scores, - bbox_preds, - shape_preds, - loc_preds, - gt_bboxes, - None, - img_metas, - gt_bboxes_ignore=gt_bboxes_ignore) - return dict( - loss_rpn_cls=losses['loss_cls'], - loss_rpn_bbox=losses['loss_bbox'], - loss_anchor_shape=losses['loss_shape'], - loss_anchor_loc=losses['loss_loc']) - - def _get_bboxes_single(self, - cls_scores, - bbox_preds, - mlvl_anchors, - mlvl_masks, - img_shape, - scale_factor, - cfg, - rescale=False): - cfg = self.test_cfg if cfg is None else cfg - - cfg = copy.deepcopy(cfg) - - # deprecate arguments warning - if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: - warnings.warn( - 'In rpn_proposal or test_cfg, ' - 'nms_thr has been moved to a dict named nms as ' - 'iou_threshold, max_num has been renamed as max_per_img, ' - 'name of original arguments and the way to specify ' - 'iou_threshold of NMS will be deprecated.') - if 'nms' not in cfg: - cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) - if 'max_num' in cfg: - if 'max_per_img' in cfg: - assert cfg.max_num == cfg.max_per_img, f'You ' \ - f'set max_num and max_per_img at the same time, ' \ - f'but get {cfg.max_num} ' \ - f'and {cfg.max_per_img} respectively' \ - 'Please delete max_num which will be deprecated.' - else: - cfg.max_per_img = cfg.max_num - if 'nms_thr' in cfg: - assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \ - f'iou_threshold in nms and ' \ - f'nms_thr at the same time, but get ' \ - f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \ - f' respectively. Please delete the ' \ - f'nms_thr which will be deprecated.' - - assert cfg.nms.get('type', 'nms') == 'nms', 'GARPNHead only support ' \ - 'naive nms.' - - mlvl_proposals = [] - for idx in range(len(cls_scores)): - rpn_cls_score = cls_scores[idx] - rpn_bbox_pred = bbox_preds[idx] - anchors = mlvl_anchors[idx] - mask = mlvl_masks[idx] - assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] - # if no location is kept, end. 
- if mask.sum() == 0: - continue - rpn_cls_score = rpn_cls_score.permute(1, 2, 0) - if self.use_sigmoid_cls: - rpn_cls_score = rpn_cls_score.reshape(-1) - scores = rpn_cls_score.sigmoid() - else: - rpn_cls_score = rpn_cls_score.reshape(-1, 2) - # remind that we set FG labels to [0, num_class-1] - # since mmdet v2.0 - # BG cat_id: num_class - scores = rpn_cls_score.softmax(dim=1)[:, :-1] - # filter scores, bbox_pred w.r.t. mask. - # anchors are filtered in get_anchors() beforehand. - scores = scores[mask] - rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, - 4)[mask, :] - if scores.dim() == 0: - rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0) - anchors = anchors.unsqueeze(0) - scores = scores.unsqueeze(0) - # filter anchors, bbox_pred, scores w.r.t. scores - if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: - _, topk_inds = scores.topk(cfg.nms_pre) - rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] - anchors = anchors[topk_inds, :] - scores = scores[topk_inds] - # get proposals w.r.t. anchors and rpn_bbox_pred - proposals = self.bbox_coder.decode( - anchors, rpn_bbox_pred, max_shape=img_shape) - # filter out too small bboxes - if cfg.min_bbox_size >= 0: - w = proposals[:, 2] - proposals[:, 0] - h = proposals[:, 3] - proposals[:, 1] - valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) - if not valid_mask.all(): - proposals = proposals[valid_mask] - scores = scores[valid_mask] - - # NMS in current level - proposals, _ = nms(proposals, scores, cfg.nms.iou_threshold) - proposals = proposals[:cfg.nms_post, :] - mlvl_proposals.append(proposals) - proposals = torch.cat(mlvl_proposals, 0) - if cfg.get('nms_across_levels', False): - # NMS across multi levels - proposals, _ = nms(proposals[:, :4], proposals[:, -1], - cfg.nms.iou_threshold) - proposals = proposals[:cfg.max_per_img, :] - else: - scores = proposals[:, 4] - num = min(cfg.max_per_img, proposals.shape[0]) - _, topk_inds = scores.topk(num) - proposals = proposals[topk_inds, :] - return proposals diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/gfl_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/gfl_head.py deleted file mode 100644 index 12eb89db8c9c9336955d7ef40d6636d122537908..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/gfl_head.py +++ /dev/null @@ -1,648 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, Scale -from mmcv.runner import force_fp32 - -from mmdet.core import (anchor_inside_flags, bbox_overlaps, build_assigner, - build_sampler, images_to_levels, multi_apply, - reduce_mean, unmap) -from mmdet.core.utils import filter_scores_and_topk -from ..builder import HEADS, build_loss -from .anchor_head import AnchorHead - - -class Integral(nn.Module): - """A fixed layer for calculating integral result from distribution. - - This layer calculates the target location by :math: `sum{P(y_i) * y_i}`, - P(y_i) denotes the softmax vector that represents the discrete distribution - y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max} - - Args: - reg_max (int): The maximal value of the discrete set. Default: 16. You - may want to reset it according to your new dataset or related - settings. 
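-
-    A minimal sketch of the expectation this layer computes for one side of
-    one box (tiny ``reg_max`` and values chosen only for illustration):
-
-    Example:
-        >>> import torch
-        >>> import torch.nn.functional as F
-        >>> logits = torch.tensor([0.1, 2.0, 0.3])        # reg_max = 2
-        >>> probs = F.softmax(logits, dim=0)              # P(y_i)
-        >>> offsets = torch.arange(3, dtype=torch.float)  # y_i in {0, 1, 2}
-        >>> (probs * offsets).sum()                       # ~1.03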
- """ - - def __init__(self, reg_max=16): - super(Integral, self).__init__() - self.reg_max = reg_max - self.register_buffer('project', - torch.linspace(0, self.reg_max, self.reg_max + 1)) - - def forward(self, x): - """Forward feature from the regression head to get integral result of - bounding box location. - - Args: - x (Tensor): Features of the regression head, shape (N, 4*(n+1)), - n is self.reg_max. - - Returns: - x (Tensor): Integral result of box locations, i.e., distance - offsets from the box center in four directions, shape (N, 4). - """ - x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1) - x = F.linear(x, self.project.type_as(x)).reshape(-1, 4) - return x - - -@HEADS.register_module() -class GFLHead(AnchorHead): - """Generalized Focal Loss: Learning Qualified and Distributed Bounding - Boxes for Dense Object Detection. - - GFL head structure is similar with ATSS, however GFL uses - 1) joint representation for classification and localization quality, and - 2) flexible General distribution for bounding box locations, - which are supervised by - Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively - - https://arxiv.org/abs/2006.04388 - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - stacked_convs (int): Number of conv layers in cls and reg tower. - Default: 4. - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None. - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='GN', num_groups=32, requires_grad=True). - loss_qfl (dict): Config of Quality Focal Loss (QFL). - bbox_coder (dict): Config of bbox coder. Defaults - 'DistancePointBBoxCoder'. - reg_max (int): Max value of integral set :math: `{0, ..., reg_max}` - in QFL setting. Default: 16. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Example: - >>> self = GFLHead(11, 7) - >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] - >>> cls_quality_score, bbox_pred = self.forward(feats) - >>> assert len(cls_quality_score) == len(self.scales) - """ - - def __init__(self, - num_classes, - in_channels, - stacked_convs=4, - conv_cfg=None, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), - loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), - bbox_coder=dict(type='DistancePointBBoxCoder'), - reg_max=16, - init_cfg=dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', - name='gfl_cls', - std=0.01, - bias_prob=0.01)), - **kwargs): - self.stacked_convs = stacked_convs - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.reg_max = reg_max - super(GFLHead, self).__init__( - num_classes, - in_channels, - bbox_coder=bbox_coder, - init_cfg=init_cfg, - **kwargs) - - self.sampling = False - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - # SSD sampling=False so use PseudoSampler - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - - self.integral = Integral(self.reg_max) - self.loss_dfl = build_loss(loss_dfl) - - def _init_layers(self): - """Initialize layers of the head.""" - self.relu = nn.ReLU(inplace=True) - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - assert self.num_anchors == 1, 'anchor free version' - self.gfl_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - self.gfl_reg = nn.Conv2d( - self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1) - self.scales = nn.ModuleList( - [Scale(1.0) for _ in self.prior_generator.strides]) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: Usually a tuple of classification scores and bbox prediction - cls_scores (list[Tensor]): Classification and quality (IoU) - joint scores for all scale levels, each is a 4D-tensor, - the channel number is num_classes. - bbox_preds (list[Tensor]): Box distribution logits for all - scale levels, each is a 4D-tensor, the channel number is - 4*(n+1), n is max value of integral set. - """ - return multi_apply(self.forward_single, feats, self.scales) - - def forward_single(self, x, scale): - """Forward feature of a single scale level. - - Args: - x (Tensor): Features of a single scale level. - scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize - the bbox prediction. - - Returns: - tuple: - cls_score (Tensor): Cls and quality joint scores for a single - scale level the channel number is num_classes. - bbox_pred (Tensor): Box distribution logits for a single scale - level, the channel number is 4*(n+1), n is max value of - integral set. 
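-
-        As a concrete shape sketch (illustrative sizes; assuming
-        ``num_classes=80``, ``feat_channels=256`` and ``reg_max=16``): for an
-        input feature map of shape (2, 256, 8, 8), ``cls_score`` has shape
-        (2, 80, 8, 8) and ``bbox_pred`` has shape (2, 68, 8, 8), since
-        4 * (reg_max + 1) = 68.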
- """ - cls_feat = x - reg_feat = x - for cls_conv in self.cls_convs: - cls_feat = cls_conv(cls_feat) - for reg_conv in self.reg_convs: - reg_feat = reg_conv(reg_feat) - cls_score = self.gfl_cls(cls_feat) - bbox_pred = scale(self.gfl_reg(reg_feat)).float() - return cls_score, bbox_pred - - def anchor_center(self, anchors): - """Get anchor centers from anchors. - - Args: - anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format. - - Returns: - Tensor: Anchor centers with shape (N, 2), "xy" format. - """ - anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2 - anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2 - return torch.stack([anchors_cx, anchors_cy], dim=-1) - - def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights, - bbox_targets, stride, num_total_samples): - """Compute loss of a single scale level. - - Args: - anchors (Tensor): Box reference for each scale level with shape - (N, num_total_anchors, 4). - cls_score (Tensor): Cls and quality joint scores for each scale - level has shape (N, num_classes, H, W). - bbox_pred (Tensor): Box distribution logits for each scale - level with shape (N, 4*(n+1), H, W), n is max value of integral - set. - labels (Tensor): Labels of each anchors with shape - (N, num_total_anchors). - label_weights (Tensor): Label weights of each anchor with shape - (N, num_total_anchors) - bbox_targets (Tensor): BBox regression targets of each anchor - weight shape (N, num_total_anchors, 4). - stride (tuple): Stride in this scale level. - num_total_samples (int): Number of positive samples that is - reduced over all GPUs. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert stride[0] == stride[1], 'h stride is not equal to w stride!' - anchors = anchors.reshape(-1, 4) - cls_score = cls_score.permute(0, 2, 3, - 1).reshape(-1, self.cls_out_channels) - bbox_pred = bbox_pred.permute(0, 2, 3, - 1).reshape(-1, 4 * (self.reg_max + 1)) - bbox_targets = bbox_targets.reshape(-1, 4) - labels = labels.reshape(-1) - label_weights = label_weights.reshape(-1) - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - bg_class_ind = self.num_classes - pos_inds = ((labels >= 0) - & (labels < bg_class_ind)).nonzero().squeeze(1) - score = label_weights.new_zeros(labels.shape) - - if len(pos_inds) > 0: - pos_bbox_targets = bbox_targets[pos_inds] - pos_bbox_pred = bbox_pred[pos_inds] - pos_anchors = anchors[pos_inds] - pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] - - weight_targets = cls_score.detach().sigmoid() - weight_targets = weight_targets.max(dim=1)[0][pos_inds] - pos_bbox_pred_corners = self.integral(pos_bbox_pred) - pos_decode_bbox_pred = self.bbox_coder.decode( - pos_anchor_centers, pos_bbox_pred_corners) - pos_decode_bbox_targets = pos_bbox_targets / stride[0] - score[pos_inds] = bbox_overlaps( - pos_decode_bbox_pred.detach(), - pos_decode_bbox_targets, - is_aligned=True) - pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) - target_corners = self.bbox_coder.encode(pos_anchor_centers, - pos_decode_bbox_targets, - self.reg_max).reshape(-1) - - # regression loss - loss_bbox = self.loss_bbox( - pos_decode_bbox_pred, - pos_decode_bbox_targets, - weight=weight_targets, - avg_factor=1.0) - - # dfl loss - loss_dfl = self.loss_dfl( - pred_corners, - target_corners, - weight=weight_targets[:, None].expand(-1, 4).reshape(-1), - avg_factor=4.0) - else: - loss_bbox = bbox_pred.sum() * 0 - loss_dfl = bbox_pred.sum() * 0 - weight_targets = bbox_pred.new_tensor(0) - - # cls (qfl) loss - loss_cls = 
self.loss_cls( - cls_score, (labels, score), - weight=label_weights, - avg_factor=num_total_samples) - - return loss_cls, loss_bbox, loss_dfl, weight_targets.sum() - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Cls and quality scores for each scale - level has shape (N, num_classes, H, W). - bbox_preds (list[Tensor]): Box distribution logits for each scale - level with shape (N, 4*(n+1), H, W), n is max value of integral - set. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor] | None): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels) - if cls_reg_targets is None: - return None - - (anchor_list, labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets - - num_total_samples = reduce_mean( - torch.tensor(num_total_pos, dtype=torch.float, - device=device)).item() - num_total_samples = max(num_total_samples, 1.0) - - losses_cls, losses_bbox, losses_dfl,\ - avg_factor = multi_apply( - self.loss_single, - anchor_list, - cls_scores, - bbox_preds, - labels_list, - label_weights_list, - bbox_targets_list, - self.prior_generator.strides, - num_total_samples=num_total_samples) - - avg_factor = sum(avg_factor) - avg_factor = reduce_mean(avg_factor).clamp_(min=1).item() - losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox)) - losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl)) - return dict( - loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl) - - def _get_bboxes_single(self, - cls_score_list, - bbox_pred_list, - score_factor_list, - mlvl_priors, - img_meta, - cfg, - rescale=False, - with_nms=True, - **kwargs): - """Transform outputs of a single image into bbox predictions. - - Args: - cls_score_list (list[Tensor]): Box scores from all scale - levels of a single image, each item has shape - (num_priors * num_classes, H, W). - bbox_pred_list (list[Tensor]): Box energies / deltas from - all scale levels of a single image, each item has shape - (num_priors * 4, H, W). - score_factor_list (list[Tensor]): Score factor from all scale - levels of a single image. GFL head does not need this value. - mlvl_priors (list[Tensor]): Each element in the list is - the priors of a single level in feature pyramid, has shape - (num_priors, 4). - img_meta (dict): Image meta info. - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. 
- with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - tuple[Tensor]: Results of detected bboxes and labels. If with_nms - is False and mlvl_score_factor is None, return mlvl_bboxes and - mlvl_scores, else return mlvl_bboxes, mlvl_scores and - mlvl_score_factor. Usually with_nms is False is used for aug - test. If with_nms is True, then return the following format - - - det_bboxes (Tensor): Predicted bboxes with shape \ - [num_bboxes, 5], where the first 4 columns are bounding \ - box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ - column are scores between 0 and 1. - - det_labels (Tensor): Predicted labels of the corresponding \ - box with shape [num_bboxes]. - """ - cfg = self.test_cfg if cfg is None else cfg - img_shape = img_meta['img_shape'] - nms_pre = cfg.get('nms_pre', -1) - - mlvl_bboxes = [] - mlvl_scores = [] - mlvl_labels = [] - for level_idx, (cls_score, bbox_pred, stride, priors) in enumerate( - zip(cls_score_list, bbox_pred_list, - self.prior_generator.strides, mlvl_priors)): - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - assert stride[0] == stride[1] - - bbox_pred = bbox_pred.permute(1, 2, 0) - bbox_pred = self.integral(bbox_pred) * stride[0] - - scores = cls_score.permute(1, 2, 0).reshape( - -1, self.cls_out_channels).sigmoid() - - # After https://github.com/open-mmlab/mmdetection/pull/6268/, - # this operation keeps fewer bboxes under the same `nms_pre`. - # There is no difference in performance for most models. If you - # find a slight drop in performance, you can set a larger - # `nms_pre` than before. - results = filter_scores_and_topk( - scores, cfg.score_thr, nms_pre, - dict(bbox_pred=bbox_pred, priors=priors)) - scores, labels, _, filtered_results = results - - bbox_pred = filtered_results['bbox_pred'] - priors = filtered_results['priors'] - - bboxes = self.bbox_coder.decode( - self.anchor_center(priors), bbox_pred, max_shape=img_shape) - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_labels.append(labels) - - return self._bbox_post_process( - mlvl_scores, - mlvl_labels, - mlvl_bboxes, - img_meta['scale_factor'], - cfg, - rescale=rescale, - with_nms=with_nms) - - def get_targets(self, - anchor_list, - valid_flag_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=1, - unmap_outputs=True): - """Get targets for GFL head. - - This method is almost the same as `AnchorHead.get_targets()`. Besides - returning the targets as the parent method does, it also returns the - anchors as the first element of the returned tuple. 
- """ - num_imgs = len(img_metas) - assert len(anchor_list) == len(valid_flag_list) == num_imgs - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - num_level_anchors_list = [num_level_anchors] * num_imgs - - # concat all level anchors and flags to a single tensor - for i in range(num_imgs): - assert len(anchor_list[i]) == len(valid_flag_list[i]) - anchor_list[i] = torch.cat(anchor_list[i]) - valid_flag_list[i] = torch.cat(valid_flag_list[i]) - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - (all_anchors, all_labels, all_label_weights, all_bbox_targets, - all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( - self._get_target_single, - anchor_list, - valid_flag_list, - num_level_anchors_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - unmap_outputs=unmap_outputs) - # no valid anchors - if any([labels is None for labels in all_labels]): - return None - # sampled anchors of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - # split targets to a list w.r.t. multiple levels - anchors_list = images_to_levels(all_anchors, num_level_anchors) - labels_list = images_to_levels(all_labels, num_level_anchors) - label_weights_list = images_to_levels(all_label_weights, - num_level_anchors) - bbox_targets_list = images_to_levels(all_bbox_targets, - num_level_anchors) - bbox_weights_list = images_to_levels(all_bbox_weights, - num_level_anchors) - return (anchors_list, labels_list, label_weights_list, - bbox_targets_list, bbox_weights_list, num_total_pos, - num_total_neg) - - def _get_target_single(self, - flat_anchors, - valid_flags, - num_level_anchors, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=1, - unmap_outputs=True): - """Compute regression, classification targets for anchors in a single - image. - - Args: - flat_anchors (Tensor): Multi-level anchors of the image, which are - concatenated into a single tensor of shape (num_anchors, 4) - valid_flags (Tensor): Multi level valid flags of the image, - which are concatenated into a single tensor of - shape (num_anchors,). - num_level_anchors Tensor): Number of anchors of each scale level. - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - img_meta (dict): Meta info of the image. - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: N is the number of total anchors in the image. - anchors (Tensor): All anchors in the image with shape (N, 4). - labels (Tensor): Labels of all anchors in the image with shape - (N,). - label_weights (Tensor): Label weights of all anchor in the - image with shape (N,). - bbox_targets (Tensor): BBox targets of all anchors in the - image with shape (N, 4). - bbox_weights (Tensor): BBox weights of all anchors in the - image with shape (N, 4). - pos_inds (Tensor): Indices of positive anchor with shape - (num_pos,). - neg_inds (Tensor): Indices of negative anchor with shape - (num_neg,). 
- """ - inside_flags = anchor_inside_flags(flat_anchors, valid_flags, - img_meta['img_shape'][:2], - self.train_cfg.allowed_border) - if not inside_flags.any(): - return (None, ) * 7 - # assign gt and sample anchors - anchors = flat_anchors[inside_flags, :] - - num_level_anchors_inside = self.get_num_level_anchors_inside( - num_level_anchors, inside_flags) - assign_result = self.assigner.assign(anchors, num_level_anchors_inside, - gt_bboxes, gt_bboxes_ignore, - gt_labels) - - sampling_result = self.sampler.sample(assign_result, anchors, - gt_bboxes) - - num_valid_anchors = anchors.shape[0] - bbox_targets = torch.zeros_like(anchors) - bbox_weights = torch.zeros_like(anchors) - labels = anchors.new_full((num_valid_anchors, ), - self.num_classes, - dtype=torch.long) - label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - pos_bbox_targets = sampling_result.pos_gt_bboxes - bbox_targets[pos_inds, :] = pos_bbox_targets - bbox_weights[pos_inds, :] = 1.0 - if gt_labels is None: - # Only rpn gives gt_labels as None - # Foreground is the first class - labels[pos_inds] = 0 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if self.train_cfg.pos_weight <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg.pos_weight - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_anchors.size(0) - anchors = unmap(anchors, num_total_anchors, inside_flags) - labels = unmap( - labels, num_total_anchors, inside_flags, fill=self.num_classes) - label_weights = unmap(label_weights, num_total_anchors, - inside_flags) - bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) - bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) - - return (anchors, labels, label_weights, bbox_targets, bbox_weights, - pos_inds, neg_inds) - - def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): - split_inside_flags = torch.split(inside_flags, num_level_anchors) - num_level_anchors_inside = [ - int(flags.sum()) for flags in split_inside_flags - ] - return num_level_anchors_inside diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/guided_anchor_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/guided_anchor_head.py deleted file mode 100644 index 53e8cd8a750287ca60b33a5cdcb9ce2b02e4c2e3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/guided_anchor_head.py +++ /dev/null @@ -1,868 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch -import torch.nn as nn -from mmcv.ops import DeformConv2d, MaskedConv2d -from mmcv.runner import BaseModule, force_fp32 - -from mmdet.core import (anchor_inside_flags, build_assigner, build_bbox_coder, - build_prior_generator, build_sampler, calc_region, - images_to_levels, multi_apply, multiclass_nms, unmap) -from ..builder import HEADS, build_loss -from .anchor_head import AnchorHead - - -class FeatureAdaption(BaseModule): - """Feature Adaption Module. - - Feature Adaption Module is implemented based on DCN v1. - It uses anchor shape prediction rather than feature map to - predict offsets of deform conv layer. - - Args: - in_channels (int): Number of channels in the input feature map. - out_channels (int): Number of channels in the output feature map. 
- kernel_size (int): Deformable conv kernel size. - deform_groups (int): Deformable conv group size. - init_cfg (dict or list[dict], optional): Initialization config dict. - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size=3, - deform_groups=4, - init_cfg=dict( - type='Normal', - layer='Conv2d', - std=0.1, - override=dict( - type='Normal', name='conv_adaption', std=0.01))): - super(FeatureAdaption, self).__init__(init_cfg) - offset_channels = kernel_size * kernel_size * 2 - self.conv_offset = nn.Conv2d( - 2, deform_groups * offset_channels, 1, bias=False) - self.conv_adaption = DeformConv2d( - in_channels, - out_channels, - kernel_size=kernel_size, - padding=(kernel_size - 1) // 2, - deform_groups=deform_groups) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x, shape): - offset = self.conv_offset(shape.detach()) - x = self.relu(self.conv_adaption(x, offset)) - return x - - -@HEADS.register_module() -class GuidedAnchorHead(AnchorHead): - """Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.). - - This GuidedAnchorHead will predict high-quality feature guided - anchors and locations where anchors will be kept in inference. - There are mainly 3 categories of bounding-boxes. - - - Sampled 9 pairs for target assignment. (approxes) - - The square boxes where the predicted anchors are based on. (squares) - - Guided anchors. - - Please refer to https://arxiv.org/abs/1901.03278 for more details. - - Args: - num_classes (int): Number of classes. - in_channels (int): Number of channels in the input feature map. - feat_channels (int): Number of hidden channels. - approx_anchor_generator (dict): Config dict for approx generator - square_anchor_generator (dict): Config dict for square generator - anchor_coder (dict): Config dict for anchor coder - bbox_coder (dict): Config dict for bbox coder - reg_decoded_bbox (bool): If true, the regression loss would be - applied directly on decoded bounding boxes, converting both - the predicted boxes and regression targets to absolute - coordinates format. Default False. It should be `True` when - using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. - deform_groups: (int): Group number of DCN in - FeatureAdaption module. - loc_filter_thr (float): Threshold to filter out unconcerned regions. - loss_loc (dict): Config of location loss. - loss_shape (dict): Config of anchor shape loss. - loss_cls (dict): Config of classification loss. - loss_bbox (dict): Config of bbox regression loss. - init_cfg (dict or list[dict], optional): Initialization config dict. 
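-
-    A minimal sketch of how one guided anchor is obtained from a square box
-    and a predicted (dw, dh) pair, mirroring what
-    ``_get_guided_anchors_single`` does with the default anchor coder
-    (values are illustrative only):
-
-    Example:
-        >>> import torch
-        >>> from mmdet.core import build_bbox_coder
-        >>> coder = build_bbox_coder(dict(type='DeltaXYWHBBoxCoder'))
-        >>> square = torch.tensor([[0., 0., 32., 32.]])
-        >>> deltas = torch.tensor([[0., 0., 0.5, -0.5]])  # only w/h adapted
-        >>> coder.decode(square, deltas, wh_ratio_clip=1e-6)
-        >>> # -> roughly [[-10.4, 6.3, 42.4, 25.7]]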
- """ - - def __init__( - self, - num_classes, - in_channels, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=8, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[8], - strides=[4, 8, 16, 32, 64]), - anchor_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0] - ), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0] - ), - reg_decoded_bbox=False, - deform_groups=4, - loc_filter_thr=0.01, - train_cfg=None, - test_cfg=None, - loss_loc=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0), - init_cfg=dict(type='Normal', layer='Conv2d', std=0.01, - override=dict(type='Normal', - name='conv_loc', - std=0.01, - bias_prob=0.01))): # yapf: disable - super(AnchorHead, self).__init__(init_cfg) - self.in_channels = in_channels - self.num_classes = num_classes - self.feat_channels = feat_channels - self.deform_groups = deform_groups - self.loc_filter_thr = loc_filter_thr - - # build approx_anchor_generator and square_anchor_generator - assert (approx_anchor_generator['octave_base_scale'] == - square_anchor_generator['scales'][0]) - assert (approx_anchor_generator['strides'] == - square_anchor_generator['strides']) - self.approx_anchor_generator = build_prior_generator( - approx_anchor_generator) - self.square_anchor_generator = build_prior_generator( - square_anchor_generator) - self.approxs_per_octave = self.approx_anchor_generator \ - .num_base_priors[0] - - self.reg_decoded_bbox = reg_decoded_bbox - - # one anchor per location - self.num_base_priors = self.square_anchor_generator.num_base_priors[0] - - self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) - self.loc_focal_loss = loss_loc['type'] in ['FocalLoss'] - self.sampling = loss_cls['type'] not in ['FocalLoss'] - self.ga_sampling = train_cfg is not None and hasattr( - train_cfg, 'ga_sampler') - if self.use_sigmoid_cls: - self.cls_out_channels = self.num_classes - else: - self.cls_out_channels = self.num_classes + 1 - - # build bbox_coder - self.anchor_coder = build_bbox_coder(anchor_coder) - self.bbox_coder = build_bbox_coder(bbox_coder) - - # build losses - self.loss_loc = build_loss(loss_loc) - self.loss_shape = build_loss(loss_shape) - self.loss_cls = build_loss(loss_cls) - self.loss_bbox = build_loss(loss_bbox) - - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - # use PseudoSampler when sampling is False - if self.sampling and hasattr(self.train_cfg, 'sampler'): - sampler_cfg = self.train_cfg.sampler - else: - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - - self.ga_assigner = build_assigner(self.train_cfg.ga_assigner) - if self.ga_sampling: - ga_sampler_cfg = self.train_cfg.ga_sampler - else: - ga_sampler_cfg = dict(type='PseudoSampler') - self.ga_sampler = build_sampler(ga_sampler_cfg, context=self) - - self.fp16_enabled = False - - self._init_layers() - - @property - def num_anchors(self): - warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' - 
'please use "num_base_priors" instead') - return self.square_anchor_generator.num_base_priors[0] - - def _init_layers(self): - self.relu = nn.ReLU(inplace=True) - self.conv_loc = nn.Conv2d(self.in_channels, 1, 1) - self.conv_shape = nn.Conv2d(self.in_channels, self.num_base_priors * 2, - 1) - self.feature_adaption = FeatureAdaption( - self.in_channels, - self.feat_channels, - kernel_size=3, - deform_groups=self.deform_groups) - self.conv_cls = MaskedConv2d( - self.feat_channels, self.num_base_priors * self.cls_out_channels, - 1) - self.conv_reg = MaskedConv2d(self.feat_channels, - self.num_base_priors * 4, 1) - - def forward_single(self, x): - loc_pred = self.conv_loc(x) - shape_pred = self.conv_shape(x) - x = self.feature_adaption(x, shape_pred) - # masked conv is only used during inference for speed-up - if not self.training: - mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr - else: - mask = None - cls_score = self.conv_cls(x, mask) - bbox_pred = self.conv_reg(x, mask) - return cls_score, bbox_pred, shape_pred, loc_pred - - def forward(self, feats): - return multi_apply(self.forward_single, feats) - - def get_sampled_approxs(self, featmap_sizes, img_metas, device='cuda'): - """Get sampled approxs and inside flags according to feature map sizes. - - Args: - featmap_sizes (list[tuple]): Multi-level feature map sizes. - img_metas (list[dict]): Image meta info. - device (torch.device | str): device for returned tensors - - Returns: - tuple: approxes of each image, inside flags of each image - """ - num_imgs = len(img_metas) - - # since feature map sizes of all images are the same, we only compute - # approxes for one time - multi_level_approxs = self.approx_anchor_generator.grid_priors( - featmap_sizes, device=device) - approxs_list = [multi_level_approxs for _ in range(num_imgs)] - - # for each image, we compute inside flags of multi level approxes - inside_flag_list = [] - for img_id, img_meta in enumerate(img_metas): - multi_level_flags = [] - multi_level_approxs = approxs_list[img_id] - - # obtain valid flags for each approx first - multi_level_approx_flags = self.approx_anchor_generator \ - .valid_flags(featmap_sizes, - img_meta['pad_shape'], - device=device) - - for i, flags in enumerate(multi_level_approx_flags): - approxs = multi_level_approxs[i] - inside_flags_list = [] - for i in range(self.approxs_per_octave): - split_valid_flags = flags[i::self.approxs_per_octave] - split_approxs = approxs[i::self.approxs_per_octave, :] - inside_flags = anchor_inside_flags( - split_approxs, split_valid_flags, - img_meta['img_shape'][:2], - self.train_cfg.allowed_border) - inside_flags_list.append(inside_flags) - # inside_flag for a position is true if any anchor in this - # position is true - inside_flags = ( - torch.stack(inside_flags_list, 0).sum(dim=0) > 0) - multi_level_flags.append(inside_flags) - inside_flag_list.append(multi_level_flags) - return approxs_list, inside_flag_list - - def get_anchors(self, - featmap_sizes, - shape_preds, - loc_preds, - img_metas, - use_loc_filter=False, - device='cuda'): - """Get squares according to feature map sizes and guided anchors. - - Args: - featmap_sizes (list[tuple]): Multi-level feature map sizes. - shape_preds (list[tensor]): Multi-level shape predictions. - loc_preds (list[tensor]): Multi-level location predictions. - img_metas (list[dict]): Image meta info. - use_loc_filter (bool): Use loc filter or not. 
- device (torch.device | str): device for returned tensors - - Returns: - tuple: square approxs of each image, guided anchors of each image, - loc masks of each image - """ - num_imgs = len(img_metas) - num_levels = len(featmap_sizes) - - # since feature map sizes of all images are the same, we only compute - # squares for one time - multi_level_squares = self.square_anchor_generator.grid_priors( - featmap_sizes, device=device) - squares_list = [multi_level_squares for _ in range(num_imgs)] - - # for each image, we compute multi level guided anchors - guided_anchors_list = [] - loc_mask_list = [] - for img_id, img_meta in enumerate(img_metas): - multi_level_guided_anchors = [] - multi_level_loc_mask = [] - for i in range(num_levels): - squares = squares_list[img_id][i] - shape_pred = shape_preds[i][img_id] - loc_pred = loc_preds[i][img_id] - guided_anchors, loc_mask = self._get_guided_anchors_single( - squares, - shape_pred, - loc_pred, - use_loc_filter=use_loc_filter) - multi_level_guided_anchors.append(guided_anchors) - multi_level_loc_mask.append(loc_mask) - guided_anchors_list.append(multi_level_guided_anchors) - loc_mask_list.append(multi_level_loc_mask) - return squares_list, guided_anchors_list, loc_mask_list - - def _get_guided_anchors_single(self, - squares, - shape_pred, - loc_pred, - use_loc_filter=False): - """Get guided anchors and loc masks for a single level. - - Args: - square (tensor): Squares of a single level. - shape_pred (tensor): Shape predictions of a single level. - loc_pred (tensor): Loc predictions of a single level. - use_loc_filter (list[tensor]): Use loc filter or not. - - Returns: - tuple: guided anchors, location masks - """ - # calculate location filtering mask - loc_pred = loc_pred.sigmoid().detach() - if use_loc_filter: - loc_mask = loc_pred >= self.loc_filter_thr - else: - loc_mask = loc_pred >= 0.0 - mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_base_priors) - mask = mask.contiguous().view(-1) - # calculate guided anchors - squares = squares[mask] - anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view( - -1, 2).detach()[mask] - bbox_deltas = anchor_deltas.new_full(squares.size(), 0) - bbox_deltas[:, 2:] = anchor_deltas - guided_anchors = self.anchor_coder.decode( - squares, bbox_deltas, wh_ratio_clip=1e-6) - return guided_anchors, mask - - def ga_loc_targets(self, gt_bboxes_list, featmap_sizes): - """Compute location targets for guided anchoring. - - Each feature map is divided into positive, negative and ignore regions. - - positive regions: target 1, weight 1 - - ignore regions: target 0, weight 0 - - negative regions: target 0, weight 0.1 - - Args: - gt_bboxes_list (list[Tensor]): Gt bboxes of each image. - featmap_sizes (list[tuple]): Multi level sizes of each feature - maps. - - Returns: - tuple - """ - anchor_scale = self.approx_anchor_generator.octave_base_scale - anchor_strides = self.approx_anchor_generator.strides - # Currently only supports same stride in x and y direction. 
- for stride in anchor_strides: - assert (stride[0] == stride[1]) - anchor_strides = [stride[0] for stride in anchor_strides] - - center_ratio = self.train_cfg.center_ratio - ignore_ratio = self.train_cfg.ignore_ratio - img_per_gpu = len(gt_bboxes_list) - num_lvls = len(featmap_sizes) - r1 = (1 - center_ratio) / 2 - r2 = (1 - ignore_ratio) / 2 - all_loc_targets = [] - all_loc_weights = [] - all_ignore_map = [] - for lvl_id in range(num_lvls): - h, w = featmap_sizes[lvl_id] - loc_targets = torch.zeros( - img_per_gpu, - 1, - h, - w, - device=gt_bboxes_list[0].device, - dtype=torch.float32) - loc_weights = torch.full_like(loc_targets, -1) - ignore_map = torch.zeros_like(loc_targets) - all_loc_targets.append(loc_targets) - all_loc_weights.append(loc_weights) - all_ignore_map.append(ignore_map) - for img_id in range(img_per_gpu): - gt_bboxes = gt_bboxes_list[img_id] - scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * - (gt_bboxes[:, 3] - gt_bboxes[:, 1])) - min_anchor_size = scale.new_full( - (1, ), float(anchor_scale * anchor_strides[0])) - # assign gt bboxes to different feature levels w.r.t. their scales - target_lvls = torch.floor( - torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) - target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() - for gt_id in range(gt_bboxes.size(0)): - lvl = target_lvls[gt_id].item() - # rescaled to corresponding feature map - gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl] - # calculate ignore regions - ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( - gt_, r2, featmap_sizes[lvl]) - # calculate positive (center) regions - ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region( - gt_, r1, featmap_sizes[lvl]) - all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, - ctr_x1:ctr_x2 + 1] = 1 - all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1, - ignore_x1:ignore_x2 + 1] = 0 - all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, - ctr_x1:ctr_x2 + 1] = 1 - # calculate ignore map on nearby low level feature - if lvl > 0: - d_lvl = lvl - 1 - # rescaled to corresponding feature map - gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl] - ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( - gt_, r2, featmap_sizes[d_lvl]) - all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, - ignore_x1:ignore_x2 + 1] = 1 - # calculate ignore map on nearby high level feature - if lvl < num_lvls - 1: - u_lvl = lvl + 1 - # rescaled to corresponding feature map - gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl] - ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( - gt_, r2, featmap_sizes[u_lvl]) - all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, - ignore_x1:ignore_x2 + 1] = 1 - for lvl_id in range(num_lvls): - # ignore negative regions w.r.t. ignore map - all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0) - & (all_ignore_map[lvl_id] > 0)] = 0 - # set negative regions with weight 0.1 - all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1 - # loc average factor to balance loss - loc_avg_factor = sum( - [t.size(0) * t.size(-1) * t.size(-2) - for t in all_loc_targets]) / 200 - return all_loc_targets, all_loc_weights, loc_avg_factor - - def _ga_shape_target_single(self, - flat_approxs, - inside_flags, - flat_squares, - gt_bboxes, - gt_bboxes_ignore, - img_meta, - unmap_outputs=True): - """Compute guided anchoring targets. - - This function returns sampled anchors and gt bboxes directly - rather than calculates regression targets. 
- - Args: - flat_approxs (Tensor): flat approxs of a single image, - shape (n, 4) - inside_flags (Tensor): inside flags of a single image, - shape (n, ). - flat_squares (Tensor): flat squares of a single image, - shape (approxs_per_octave * n, 4) - gt_bboxes (Tensor): Ground truth bboxes of a single image. - img_meta (dict): Meta info of a single image. - approxs_per_octave (int): number of approxs per octave - cfg (dict): RPN train configs. - unmap_outputs (bool): unmap outputs or not. - - Returns: - tuple - """ - if not inside_flags.any(): - return (None, ) * 5 - # assign gt and sample anchors - expand_inside_flags = inside_flags[:, None].expand( - -1, self.approxs_per_octave).reshape(-1) - approxs = flat_approxs[expand_inside_flags, :] - squares = flat_squares[inside_flags, :] - - assign_result = self.ga_assigner.assign(approxs, squares, - self.approxs_per_octave, - gt_bboxes, gt_bboxes_ignore) - sampling_result = self.ga_sampler.sample(assign_result, squares, - gt_bboxes) - - bbox_anchors = torch.zeros_like(squares) - bbox_gts = torch.zeros_like(squares) - bbox_weights = torch.zeros_like(squares) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes - bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes - bbox_weights[pos_inds, :] = 1.0 - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_squares.size(0) - bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags) - bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags) - bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) - - return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds) - - def ga_shape_targets(self, - approx_list, - inside_flag_list, - square_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - unmap_outputs=True): - """Compute guided anchoring targets. - - Args: - approx_list (list[list]): Multi level approxs of each image. - inside_flag_list (list[list]): Multi level inside flags of each - image. - square_list (list[list]): Multi level squares of each image. - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. - img_metas (list[dict]): Meta info of each image. - gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes. - unmap_outputs (bool): unmap outputs or not. 
- - Returns: - tuple - """ - num_imgs = len(img_metas) - assert len(approx_list) == len(inside_flag_list) == len( - square_list) == num_imgs - # anchor number of multi levels - num_level_squares = [squares.size(0) for squares in square_list[0]] - # concat all level anchors and flags to a single tensor - inside_flag_flat_list = [] - approx_flat_list = [] - square_flat_list = [] - for i in range(num_imgs): - assert len(square_list[i]) == len(inside_flag_list[i]) - inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) - approx_flat_list.append(torch.cat(approx_list[i])) - square_flat_list.append(torch.cat(square_list[i])) - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list, - neg_inds_list) = multi_apply( - self._ga_shape_target_single, - approx_flat_list, - inside_flag_flat_list, - square_flat_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - img_metas, - unmap_outputs=unmap_outputs) - # no valid anchors - if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]): - return None - # sampled anchors of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - # split targets to a list w.r.t. multiple levels - bbox_anchors_list = images_to_levels(all_bbox_anchors, - num_level_squares) - bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares) - bbox_weights_list = images_to_levels(all_bbox_weights, - num_level_squares) - return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, - num_total_pos, num_total_neg) - - def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts, - anchor_weights, anchor_total_num): - shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2) - bbox_anchors = bbox_anchors.contiguous().view(-1, 4) - bbox_gts = bbox_gts.contiguous().view(-1, 4) - anchor_weights = anchor_weights.contiguous().view(-1, 4) - bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0) - bbox_deltas[:, 2:] += shape_pred - # filter out negative samples to speed-up weighted_bounded_iou_loss - inds = torch.nonzero( - anchor_weights[:, 0] > 0, as_tuple=False).squeeze(1) - bbox_deltas_ = bbox_deltas[inds] - bbox_anchors_ = bbox_anchors[inds] - bbox_gts_ = bbox_gts[inds] - anchor_weights_ = anchor_weights[inds] - pred_anchors_ = self.anchor_coder.decode( - bbox_anchors_, bbox_deltas_, wh_ratio_clip=1e-6) - loss_shape = self.loss_shape( - pred_anchors_, - bbox_gts_, - anchor_weights_, - avg_factor=anchor_total_num) - return loss_shape - - def loss_loc_single(self, loc_pred, loc_target, loc_weight, - loc_avg_factor): - loss_loc = self.loss_loc( - loc_pred.reshape(-1, 1), - loc_target.reshape(-1).long(), - loc_weight.reshape(-1), - avg_factor=loc_avg_factor) - return loss_loc - - @force_fp32( - apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) - def loss(self, - cls_scores, - bbox_preds, - shape_preds, - loc_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.approx_anchor_generator.num_levels - - device = cls_scores[0].device - - # get loc targets - loc_targets, loc_weights, loc_avg_factor = self.ga_loc_targets( - gt_bboxes, featmap_sizes) - - # get sampled approxes - approxs_list, inside_flag_list = self.get_sampled_approxs( - featmap_sizes, img_metas, device=device) - # get 
squares and guided anchors - squares_list, guided_anchors_list, _ = self.get_anchors( - featmap_sizes, shape_preds, loc_preds, img_metas, device=device) - - # get shape targets - shape_targets = self.ga_shape_targets(approxs_list, inside_flag_list, - squares_list, gt_bboxes, - img_metas) - if shape_targets is None: - return None - (bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num, - anchor_bg_num) = shape_targets - anchor_total_num = ( - anchor_fg_num if not self.ga_sampling else anchor_fg_num + - anchor_bg_num) - - # get anchor targets - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - guided_anchors_list, - inside_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - num_total_samples = ( - num_total_pos + num_total_neg if self.sampling else num_total_pos) - - # anchor number of multi levels - num_level_anchors = [ - anchors.size(0) for anchors in guided_anchors_list[0] - ] - # concat all level anchors to a single tensor - concat_anchor_list = [] - for i in range(len(guided_anchors_list)): - concat_anchor_list.append(torch.cat(guided_anchors_list[i])) - all_anchor_list = images_to_levels(concat_anchor_list, - num_level_anchors) - - # get classification and bbox regression losses - losses_cls, losses_bbox = multi_apply( - self.loss_single, - cls_scores, - bbox_preds, - all_anchor_list, - labels_list, - label_weights_list, - bbox_targets_list, - bbox_weights_list, - num_total_samples=num_total_samples) - - # get anchor location loss - losses_loc = [] - for i in range(len(loc_preds)): - loss_loc = self.loss_loc_single( - loc_preds[i], - loc_targets[i], - loc_weights[i], - loc_avg_factor=loc_avg_factor) - losses_loc.append(loss_loc) - - # get anchor shape loss - losses_shape = [] - for i in range(len(shape_preds)): - loss_shape = self.loss_shape_single( - shape_preds[i], - bbox_anchors_list[i], - bbox_gts_list[i], - anchor_weights_list[i], - anchor_total_num=anchor_total_num) - losses_shape.append(loss_shape) - - return dict( - loss_cls=losses_cls, - loss_bbox=losses_bbox, - loss_shape=losses_shape, - loss_loc=losses_loc) - - @force_fp32( - apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) - def get_bboxes(self, - cls_scores, - bbox_preds, - shape_preds, - loc_preds, - img_metas, - cfg=None, - rescale=False): - assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len( - loc_preds) - num_levels = len(cls_scores) - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - device = cls_scores[0].device - # get guided anchors - _, guided_anchors, loc_masks = self.get_anchors( - featmap_sizes, - shape_preds, - loc_preds, - img_metas, - use_loc_filter=not self.training, - device=device) - result_list = [] - for img_id in range(len(img_metas)): - cls_score_list = [ - cls_scores[i][img_id].detach() for i in range(num_levels) - ] - bbox_pred_list = [ - bbox_preds[i][img_id].detach() for i in range(num_levels) - ] - guided_anchor_list = [ - guided_anchors[img_id][i].detach() for i in range(num_levels) - ] - loc_mask_list = [ - loc_masks[img_id][i].detach() for i in range(num_levels) - ] - img_shape = img_metas[img_id]['img_shape'] - scale_factor = img_metas[img_id]['scale_factor'] - proposals = self._get_bboxes_single(cls_score_list, 
bbox_pred_list, - guided_anchor_list, - loc_mask_list, img_shape, - scale_factor, cfg, rescale) - result_list.append(proposals) - return result_list - - def _get_bboxes_single(self, - cls_scores, - bbox_preds, - mlvl_anchors, - mlvl_masks, - img_shape, - scale_factor, - cfg, - rescale=False): - cfg = self.test_cfg if cfg is None else cfg - assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) - mlvl_bboxes = [] - mlvl_scores = [] - for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds, - mlvl_anchors, - mlvl_masks): - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - # if no location is kept, end. - if mask.sum() == 0: - continue - # reshape scores and bbox_pred - cls_score = cls_score.permute(1, 2, - 0).reshape(-1, self.cls_out_channels) - if self.use_sigmoid_cls: - scores = cls_score.sigmoid() - else: - scores = cls_score.softmax(-1) - bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) - # filter scores, bbox_pred w.r.t. mask. - # anchors are filtered in get_anchors() beforehand. - scores = scores[mask, :] - bbox_pred = bbox_pred[mask, :] - if scores.dim() == 0: - anchors = anchors.unsqueeze(0) - scores = scores.unsqueeze(0) - bbox_pred = bbox_pred.unsqueeze(0) - # filter anchors, bbox_pred, scores w.r.t. scores - nms_pre = cfg.get('nms_pre', -1) - if nms_pre > 0 and scores.shape[0] > nms_pre: - if self.use_sigmoid_cls: - max_scores, _ = scores.max(dim=1) - else: - # remind that we set FG labels to [0, num_class-1] - # since mmdet v2.0 - # BG cat_id: num_class - max_scores, _ = scores[:, :-1].max(dim=1) - _, topk_inds = max_scores.topk(nms_pre) - anchors = anchors[topk_inds, :] - bbox_pred = bbox_pred[topk_inds, :] - scores = scores[topk_inds, :] - bboxes = self.bbox_coder.decode( - anchors, bbox_pred, max_shape=img_shape) - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_bboxes = torch.cat(mlvl_bboxes) - if rescale: - mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) - mlvl_scores = torch.cat(mlvl_scores) - if self.use_sigmoid_cls: - # Add a dummy background class to the backend when using sigmoid - # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 - # BG cat_id: num_class - padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) - mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) - # multi class NMS - det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, - cfg.score_thr, cfg.nms, - cfg.max_per_img) - return det_bboxes, det_labels diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/lad_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/lad_head.py deleted file mode 100644 index 85273bcb24308dd6f47c8d47362164a6f1393e1e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/lad_head.py +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from mmcv.runner import force_fp32 - -from mmdet.core import bbox_overlaps, multi_apply -from ..builder import HEADS -from .paa_head import PAAHead, levels_to_images - - -@HEADS.register_module() -class LADHead(PAAHead): - """Label Assignment Head from the paper: `Improving Object Detection by - Label Assignment Distillation `_""" - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds')) - def get_label_assignment(self, - cls_scores, - bbox_preds, - iou_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Get label assignment (from teacher). - - Args: - cls_scores (list[Tensor]): Box scores for each scale level. 
- Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - iou_preds (list[Tensor]): iou_preds for each scale - level with shape (N, num_anchors * 1, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor] | None): Specify which bounding - boxes can be ignored when are computing the loss. - - Returns: - tuple: Returns a tuple containing label assignment variables. - - - labels (Tensor): Labels of all anchors, each with - shape (num_anchors,). - - labels_weight (Tensor): Label weights of all anchor. - each with shape (num_anchors,). - - bboxes_target (Tensor): BBox targets of all anchors. - each with shape (num_anchors, 4). - - bboxes_weight (Tensor): BBox weights of all anchors. - each with shape (num_anchors, 4). - - pos_inds_flatten (Tensor): Contains all index of positive - sample in all anchor. - - pos_anchors (Tensor): Positive anchors. - - num_pos (int): Number of positive anchors. - """ - - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels, - ) - (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds, - pos_gt_index) = cls_reg_targets - cls_scores = levels_to_images(cls_scores) - cls_scores = [ - item.reshape(-1, self.cls_out_channels) for item in cls_scores - ] - bbox_preds = levels_to_images(bbox_preds) - bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] - pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list, - cls_scores, bbox_preds, labels, - labels_weight, bboxes_target, - bboxes_weight, pos_inds) - - with torch.no_grad(): - reassign_labels, reassign_label_weight, \ - reassign_bbox_weights, num_pos = multi_apply( - self.paa_reassign, - pos_losses_list, - labels, - labels_weight, - bboxes_weight, - pos_inds, - pos_gt_index, - anchor_list) - num_pos = sum(num_pos) - # convert all tensor list to a flatten tensor - labels = torch.cat(reassign_labels, 0).view(-1) - flatten_anchors = torch.cat( - [torch.cat(item, 0) for item in anchor_list]) - labels_weight = torch.cat(reassign_label_weight, 0).view(-1) - bboxes_target = torch.cat(bboxes_target, - 0).view(-1, bboxes_target[0].size(-1)) - - pos_inds_flatten = ((labels >= 0) - & - (labels < self.num_classes)).nonzero().reshape(-1) - - if num_pos: - pos_anchors = flatten_anchors[pos_inds_flatten] - else: - pos_anchors = None - - label_assignment_results = (labels, labels_weight, bboxes_target, - bboxes_weight, pos_inds_flatten, - pos_anchors, num_pos) - return label_assignment_results - - def forward_train(self, - x, - label_assignment_results, - img_metas, - gt_bboxes, - gt_labels=None, - gt_bboxes_ignore=None, - **kwargs): - """Forward train with the available label assignment (student receives - from teacher). - - Args: - x (list[Tensor]): Features from FPN. 
- label_assignment_results (tuple): As the outputs defined in the
- function `self.get_label_assignment`.
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- gt_bboxes (Tensor): Ground truth bboxes of the image,
- shape (num_gts, 4).
- gt_labels (Tensor): Ground truth labels of each box,
- shape (num_gts,).
- gt_bboxes_ignore (Tensor): Ground truth bboxes to be
- ignored, shape (num_ignored_gts, 4).
-
- Returns:
- losses (dict[str, Tensor]): A dictionary of loss components.
- """
- outs = self(x)
- if gt_labels is None:
- loss_inputs = outs + (gt_bboxes, img_metas)
- else:
- loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)
- losses = self.loss(
- *loss_inputs,
- gt_bboxes_ignore=gt_bboxes_ignore,
- label_assignment_results=label_assignment_results)
- return losses
-
- @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
- def loss(self,
- cls_scores,
- bbox_preds,
- iou_preds,
- gt_bboxes,
- gt_labels,
- img_metas,
- gt_bboxes_ignore=None,
- label_assignment_results=None):
- """Compute losses of the head.
-
- Args:
- cls_scores (list[Tensor]): Box scores for each scale level.
- Has shape (N, num_anchors * num_classes, H, W)
- bbox_preds (list[Tensor]): Box energies / deltas for each scale
- level with shape (N, num_anchors * 4, H, W)
- iou_preds (list[Tensor]): IoU predictions for each scale
- level with shape (N, num_anchors * 1, H, W)
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
- shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
- gt_labels (list[Tensor]): Class indices corresponding to each box.
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
- boxes can be ignored when computing the loss.
- label_assignment_results (tuple): As the outputs defined in the
- function `self.get_label_assignment`.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """ - - (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds_flatten, - pos_anchors, num_pos) = label_assignment_results - - cls_scores = levels_to_images(cls_scores) - cls_scores = [ - item.reshape(-1, self.cls_out_channels) for item in cls_scores - ] - bbox_preds = levels_to_images(bbox_preds) - bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] - iou_preds = levels_to_images(iou_preds) - iou_preds = [item.reshape(-1, 1) for item in iou_preds] - - # convert all tensor list to a flatten tensor - cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1)) - bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1)) - iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1)) - - losses_cls = self.loss_cls( - cls_scores, - labels, - labels_weight, - avg_factor=max(num_pos, len(img_metas))) # avoid num_pos=0 - if num_pos: - pos_bbox_pred = self.bbox_coder.decode( - pos_anchors, bbox_preds[pos_inds_flatten]) - pos_bbox_target = bboxes_target[pos_inds_flatten] - iou_target = bbox_overlaps( - pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True) - losses_iou = self.loss_centerness( - iou_preds[pos_inds_flatten], - iou_target.unsqueeze(-1), - avg_factor=num_pos) - losses_bbox = self.loss_bbox( - pos_bbox_pred, pos_bbox_target, avg_factor=num_pos) - - else: - losses_iou = iou_preds.sum() * 0 - losses_bbox = bbox_preds.sum() * 0 - - return dict( - loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ld_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ld_head.py deleted file mode 100644 index c5a945fe2cc9c6f42f9fdc64e278ccdc27bd9e55..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ld_head.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from mmcv.runner import force_fp32 - -from mmdet.core import bbox_overlaps, multi_apply, reduce_mean -from ..builder import HEADS, build_loss -from .gfl_head import GFLHead - - -@HEADS.register_module() -class LDHead(GFLHead): - """Localization distillation Head. (Short description) - - It utilizes the learned bbox distributions to transfer the localization - dark knowledge from teacher to student. Original paper: `Localization - Distillation for Object Detection. `_ - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - loss_ld (dict): Config of Localization Distillation Loss (LD), - T is the temperature for distillation. - """ - - def __init__(self, - num_classes, - in_channels, - loss_ld=dict( - type='LocalizationDistillationLoss', - loss_weight=0.25, - T=10), - **kwargs): - - super(LDHead, self).__init__(num_classes, in_channels, **kwargs) - self.loss_ld = build_loss(loss_ld) - - def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights, - bbox_targets, stride, soft_targets, num_total_samples): - """Compute loss of a single scale level. - - Args: - anchors (Tensor): Box reference for each scale level with shape - (N, num_total_anchors, 4). - cls_score (Tensor): Cls and quality joint scores for each scale - level has shape (N, num_classes, H, W). - bbox_pred (Tensor): Box distribution logits for each scale - level with shape (N, 4*(n+1), H, W), n is max value of integral - set. - labels (Tensor): Labels of each anchors with shape - (N, num_total_anchors). 
- label_weights (Tensor): Label weights of each anchor with shape - (N, num_total_anchors) - bbox_targets (Tensor): BBox regression targets of each anchor - weight shape (N, num_total_anchors, 4). - stride (tuple): Stride in this scale level. - num_total_samples (int): Number of positive samples that is - reduced over all GPUs. - - Returns: - dict[tuple, Tensor]: Loss components and weight targets. - """ - assert stride[0] == stride[1], 'h stride is not equal to w stride!' - anchors = anchors.reshape(-1, 4) - cls_score = cls_score.permute(0, 2, 3, - 1).reshape(-1, self.cls_out_channels) - bbox_pred = bbox_pred.permute(0, 2, 3, - 1).reshape(-1, 4 * (self.reg_max + 1)) - soft_targets = soft_targets.permute(0, 2, 3, - 1).reshape(-1, - 4 * (self.reg_max + 1)) - - bbox_targets = bbox_targets.reshape(-1, 4) - labels = labels.reshape(-1) - label_weights = label_weights.reshape(-1) - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - bg_class_ind = self.num_classes - pos_inds = ((labels >= 0) - & (labels < bg_class_ind)).nonzero().squeeze(1) - score = label_weights.new_zeros(labels.shape) - - if len(pos_inds) > 0: - pos_bbox_targets = bbox_targets[pos_inds] - pos_bbox_pred = bbox_pred[pos_inds] - pos_anchors = anchors[pos_inds] - pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] - - weight_targets = cls_score.detach().sigmoid() - weight_targets = weight_targets.max(dim=1)[0][pos_inds] - pos_bbox_pred_corners = self.integral(pos_bbox_pred) - pos_decode_bbox_pred = self.bbox_coder.decode( - pos_anchor_centers, pos_bbox_pred_corners) - pos_decode_bbox_targets = pos_bbox_targets / stride[0] - score[pos_inds] = bbox_overlaps( - pos_decode_bbox_pred.detach(), - pos_decode_bbox_targets, - is_aligned=True) - pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) - pos_soft_targets = soft_targets[pos_inds] - soft_corners = pos_soft_targets.reshape(-1, self.reg_max + 1) - - target_corners = self.bbox_coder.encode(pos_anchor_centers, - pos_decode_bbox_targets, - self.reg_max).reshape(-1) - - # regression loss - loss_bbox = self.loss_bbox( - pos_decode_bbox_pred, - pos_decode_bbox_targets, - weight=weight_targets, - avg_factor=1.0) - - # dfl loss - loss_dfl = self.loss_dfl( - pred_corners, - target_corners, - weight=weight_targets[:, None].expand(-1, 4).reshape(-1), - avg_factor=4.0) - - # ld loss - loss_ld = self.loss_ld( - pred_corners, - soft_corners, - weight=weight_targets[:, None].expand(-1, 4).reshape(-1), - avg_factor=4.0) - - else: - loss_ld = bbox_pred.sum() * 0 - loss_bbox = bbox_pred.sum() * 0 - loss_dfl = bbox_pred.sum() * 0 - weight_targets = bbox_pred.new_tensor(0) - - # cls (qfl) loss - loss_cls = self.loss_cls( - cls_score, (labels, score), - weight=label_weights, - avg_factor=num_total_samples) - - return loss_cls, loss_bbox, loss_dfl, loss_ld, weight_targets.sum() - - def forward_train(self, - x, - out_teacher, - img_metas, - gt_bboxes, - gt_labels=None, - gt_bboxes_ignore=None, - proposal_cfg=None, - **kwargs): - """ - Args: - x (list[Tensor]): Features from FPN. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). 
- proposal_cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used - - Returns: - tuple[dict, list]: The loss components and proposals of each image. - - - losses (dict[str, Tensor]): A dictionary of loss components. - - proposal_list (list[Tensor]): Proposals of each image. - """ - outs = self(x) - soft_target = out_teacher[1] - if gt_labels is None: - loss_inputs = outs + (gt_bboxes, soft_target, img_metas) - else: - loss_inputs = outs + (gt_bboxes, gt_labels, soft_target, img_metas) - losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) - if proposal_cfg is None: - return losses - else: - proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg) - return losses, proposal_list - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - soft_target, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Cls and quality scores for each scale - level has shape (N, num_classes, H, W). - bbox_preds (list[Tensor]): Box distribution logits for each scale - level with shape (N, 4*(n+1), H, W), n is max value of integral - set. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor] | None): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels) - if cls_reg_targets is None: - return None - - (anchor_list, labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets - - num_total_samples = reduce_mean( - torch.tensor(num_total_pos, dtype=torch.float, - device=device)).item() - num_total_samples = max(num_total_samples, 1.0) - - losses_cls, losses_bbox, losses_dfl, losses_ld, \ - avg_factor = multi_apply( - self.loss_single, - anchor_list, - cls_scores, - bbox_preds, - labels_list, - label_weights_list, - bbox_targets_list, - self.prior_generator.strides, - soft_target, - num_total_samples=num_total_samples) - - avg_factor = sum(avg_factor) + 1e-6 - avg_factor = reduce_mean(avg_factor).item() - losses_bbox = [x / avg_factor for x in losses_bbox] - losses_dfl = [x / avg_factor for x in losses_dfl] - return dict( - loss_cls=losses_cls, - loss_bbox=losses_bbox, - loss_dfl=losses_dfl, - loss_ld=losses_ld) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/mask2former_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/mask2former_head.py deleted file mode 100644 index 59047bdbb7939ba4fe7bcbdb0d0b165e408ed7be..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/mask2former_head.py +++ /dev/null @@ -1,430 +0,0 @@ 
-# Copyright (c) OpenMMLab. All rights reserved.
-import copy
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import Conv2d, build_plugin_layer, caffe2_xavier_init
-from mmcv.cnn.bricks.transformer import (build_positional_encoding,
- build_transformer_layer_sequence)
-from mmcv.ops import point_sample
-from mmcv.runner import ModuleList
-
-from mmdet.core import build_assigner, build_sampler, reduce_mean
-from mmdet.models.utils import get_uncertain_point_coords_with_randomness
-from ..builder import HEADS, build_loss
-from .anchor_free_head import AnchorFreeHead
-from .maskformer_head import MaskFormerHead
-
-
-@HEADS.register_module()
-class Mask2FormerHead(MaskFormerHead):
- """Implements the Mask2Former head.
-
- See `Masked-attention Mask Transformer for Universal Image
- Segmentation `_ for details.
-
- Args:
- in_channels (list[int]): Number of channels in the input feature map.
- feat_channels (int): Number of channels for features.
- out_channels (int): Number of channels for output.
- num_things_classes (int): Number of things classes.
- num_stuff_classes (int): Number of stuff classes.
- num_queries (int): Number of queries in the Transformer decoder.
- pixel_decoder (:obj:`mmcv.ConfigDict` | dict): Config for pixel
- decoder. Defaults to None.
- enforce_decoder_input_project (bool, optional): Whether to add
- a layer to change the embed_dim of the transformer encoder in the
- pixel decoder to the embed_dim of the transformer decoder.
- Defaults to False.
- transformer_decoder (:obj:`mmcv.ConfigDict` | dict): Config for
- transformer decoder. Defaults to None.
- positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for
- transformer decoder position encoding. Defaults to None.
- loss_cls (:obj:`mmcv.ConfigDict` | dict): Config of the classification
- loss. Defaults to None.
- loss_mask (:obj:`mmcv.ConfigDict` | dict): Config of the mask loss.
- Defaults to None.
- loss_dice (:obj:`mmcv.ConfigDict` | dict): Config of the dice loss.
- Defaults to None.
- train_cfg (:obj:`mmcv.ConfigDict` | dict): Training config of the
- Mask2Former head.
- test_cfg (:obj:`mmcv.ConfigDict` | dict): Testing config of the
- Mask2Former head.
- init_cfg (dict or list[dict], optional): Initialization config dict.
- Defaults to None.
- """ - - def __init__(self, - in_channels, - feat_channels, - out_channels, - num_things_classes=80, - num_stuff_classes=53, - num_queries=100, - num_transformer_feat_level=3, - pixel_decoder=None, - enforce_decoder_input_project=False, - transformer_decoder=None, - positional_encoding=None, - loss_cls=None, - loss_mask=None, - loss_dice=None, - train_cfg=None, - test_cfg=None, - init_cfg=None, - **kwargs): - super(AnchorFreeHead, self).__init__(init_cfg) - self.num_things_classes = num_things_classes - self.num_stuff_classes = num_stuff_classes - self.num_classes = self.num_things_classes + self.num_stuff_classes - self.num_queries = num_queries - self.num_transformer_feat_level = num_transformer_feat_level - self.num_heads = transformer_decoder.transformerlayers.\ - attn_cfgs.num_heads - self.num_transformer_decoder_layers = transformer_decoder.num_layers - assert pixel_decoder.encoder.transformerlayers.\ - attn_cfgs.num_levels == num_transformer_feat_level - pixel_decoder_ = copy.deepcopy(pixel_decoder) - pixel_decoder_.update( - in_channels=in_channels, - feat_channels=feat_channels, - out_channels=out_channels) - self.pixel_decoder = build_plugin_layer(pixel_decoder_)[1] - self.transformer_decoder = build_transformer_layer_sequence( - transformer_decoder) - self.decoder_embed_dims = self.transformer_decoder.embed_dims - - self.decoder_input_projs = ModuleList() - # from low resolution to high resolution - for _ in range(num_transformer_feat_level): - if (self.decoder_embed_dims != feat_channels - or enforce_decoder_input_project): - self.decoder_input_projs.append( - Conv2d( - feat_channels, self.decoder_embed_dims, kernel_size=1)) - else: - self.decoder_input_projs.append(nn.Identity()) - self.decoder_positional_encoding = build_positional_encoding( - positional_encoding) - self.query_embed = nn.Embedding(self.num_queries, feat_channels) - self.query_feat = nn.Embedding(self.num_queries, feat_channels) - # from low resolution to high resolution - self.level_embed = nn.Embedding(self.num_transformer_feat_level, - feat_channels) - - self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) - self.mask_embed = nn.Sequential( - nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), - nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), - nn.Linear(feat_channels, out_channels)) - - self.test_cfg = test_cfg - self.train_cfg = train_cfg - if train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - self.sampler = build_sampler(self.train_cfg.sampler, context=self) - self.num_points = self.train_cfg.get('num_points', 12544) - self.oversample_ratio = self.train_cfg.get('oversample_ratio', 3.0) - self.importance_sample_ratio = self.train_cfg.get( - 'importance_sample_ratio', 0.75) - - self.class_weight = loss_cls.class_weight - self.loss_cls = build_loss(loss_cls) - self.loss_mask = build_loss(loss_mask) - self.loss_dice = build_loss(loss_dice) - - def init_weights(self): - for m in self.decoder_input_projs: - if isinstance(m, Conv2d): - caffe2_xavier_init(m, bias=0) - - self.pixel_decoder.init_weights() - - for p in self.transformer_decoder.parameters(): - if p.dim() > 1: - nn.init.xavier_normal_(p) - - def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks, - img_metas): - """Compute classification and mask targets for one image. - - Args: - cls_score (Tensor): Mask score logits from a single decoder layer - for one image. Shape (num_queries, cls_out_channels). 
- mask_pred (Tensor): Mask logits for a single decoder layer for one - image. Shape (num_queries, h, w). - gt_labels (Tensor): Ground truth class indices for one image with - shape (num_gts, ). - gt_masks (Tensor): Ground truth mask for each image, each with - shape (num_gts, h, w). - img_metas (dict): Image informtation. - - Returns: - tuple[Tensor]: A tuple containing the following for one image. - - - labels (Tensor): Labels of each image. \ - shape (num_queries, ). - - label_weights (Tensor): Label weights of each image. \ - shape (num_queries, ). - - mask_targets (Tensor): Mask targets of each image. \ - shape (num_queries, h, w). - - mask_weights (Tensor): Mask weights of each image. \ - shape (num_queries, ). - - pos_inds (Tensor): Sampled positive indices for each \ - image. - - neg_inds (Tensor): Sampled negative indices for each \ - image. - """ - # sample points - num_queries = cls_score.shape[0] - num_gts = gt_labels.shape[0] - - point_coords = torch.rand((1, self.num_points, 2), - device=cls_score.device) - # shape (num_queries, num_points) - mask_points_pred = point_sample( - mask_pred.unsqueeze(1), point_coords.repeat(num_queries, 1, - 1)).squeeze(1) - # shape (num_gts, num_points) - gt_points_masks = point_sample( - gt_masks.unsqueeze(1).float(), point_coords.repeat(num_gts, 1, - 1)).squeeze(1) - - # assign and sample - assign_result = self.assigner.assign(cls_score, mask_points_pred, - gt_labels, gt_points_masks, - img_metas) - sampling_result = self.sampler.sample(assign_result, mask_pred, - gt_masks) - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - - # label target - labels = gt_labels.new_full((self.num_queries, ), - self.num_classes, - dtype=torch.long) - labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] - label_weights = gt_labels.new_ones((self.num_queries, )) - - # mask target - mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds] - mask_weights = mask_pred.new_zeros((self.num_queries, )) - mask_weights[pos_inds] = 1.0 - - return (labels, label_weights, mask_targets, mask_weights, pos_inds, - neg_inds) - - def loss_single(self, cls_scores, mask_preds, gt_labels_list, - gt_masks_list, img_metas): - """Loss function for outputs from a single decoder layer. - - Args: - cls_scores (Tensor): Mask score logits from a single decoder layer - for all images. Shape (batch_size, num_queries, - cls_out_channels). Note `cls_out_channels` should includes - background. - mask_preds (Tensor): Mask logits for a pixel decoder for all - images. Shape (batch_size, num_queries, h, w). - gt_labels_list (list[Tensor]): Ground truth class indices for each - image, each with shape (num_gts, ). - gt_masks_list (list[Tensor]): Ground truth mask for each image, - each with shape (num_gts, h, w). - img_metas (list[dict]): List of image meta information. - - Returns: - tuple[Tensor]: Loss components for outputs from a single \ - decoder layer. 
- """ - num_imgs = cls_scores.size(0) - cls_scores_list = [cls_scores[i] for i in range(num_imgs)] - mask_preds_list = [mask_preds[i] for i in range(num_imgs)] - (labels_list, label_weights_list, mask_targets_list, mask_weights_list, - num_total_pos, - num_total_neg) = self.get_targets(cls_scores_list, mask_preds_list, - gt_labels_list, gt_masks_list, - img_metas) - # shape (batch_size, num_queries) - labels = torch.stack(labels_list, dim=0) - # shape (batch_size, num_queries) - label_weights = torch.stack(label_weights_list, dim=0) - # shape (num_total_gts, h, w) - mask_targets = torch.cat(mask_targets_list, dim=0) - # shape (batch_size, num_queries) - mask_weights = torch.stack(mask_weights_list, dim=0) - - # classfication loss - # shape (batch_size * num_queries, ) - cls_scores = cls_scores.flatten(0, 1) - labels = labels.flatten(0, 1) - label_weights = label_weights.flatten(0, 1) - - class_weight = cls_scores.new_tensor(self.class_weight) - loss_cls = self.loss_cls( - cls_scores, - labels, - label_weights, - avg_factor=class_weight[labels].sum()) - - num_total_masks = reduce_mean(cls_scores.new_tensor([num_total_pos])) - num_total_masks = max(num_total_masks, 1) - - # extract positive ones - # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w) - mask_preds = mask_preds[mask_weights > 0] - - if mask_targets.shape[0] == 0: - # zero match - loss_dice = mask_preds.sum() - loss_mask = mask_preds.sum() - return loss_cls, loss_mask, loss_dice - - with torch.no_grad(): - points_coords = get_uncertain_point_coords_with_randomness( - mask_preds.unsqueeze(1), None, self.num_points, - self.oversample_ratio, self.importance_sample_ratio) - # shape (num_total_gts, h, w) -> (num_total_gts, num_points) - mask_point_targets = point_sample( - mask_targets.unsqueeze(1).float(), points_coords).squeeze(1) - # shape (num_queries, h, w) -> (num_queries, num_points) - mask_point_preds = point_sample( - mask_preds.unsqueeze(1), points_coords).squeeze(1) - - # dice loss - loss_dice = self.loss_dice( - mask_point_preds, mask_point_targets, avg_factor=num_total_masks) - - # mask loss - # shape (num_queries, num_points) -> (num_queries * num_points, ) - mask_point_preds = mask_point_preds.reshape(-1) - # shape (num_total_gts, num_points) -> (num_total_gts * num_points, ) - mask_point_targets = mask_point_targets.reshape(-1) - loss_mask = self.loss_mask( - mask_point_preds, - mask_point_targets, - avg_factor=num_total_masks * self.num_points) - - return loss_cls, loss_mask, loss_dice - - def forward_head(self, decoder_out, mask_feature, attn_mask_target_size): - """Forward for head part which is called after every decoder layer. - - Args: - decoder_out (Tensor): in shape (num_queries, batch_size, c). - mask_feature (Tensor): in shape (batch_size, c, h, w). - attn_mask_target_size (tuple[int, int]): target attention - mask size. - - Returns: - tuple: A tuple contain three elements. - - - cls_pred (Tensor): Classification scores in shape \ - (batch_size, num_queries, cls_out_channels). \ - Note `cls_out_channels` should includes background. - - mask_pred (Tensor): Mask scores in shape \ - (batch_size, num_queries,h, w). - - attn_mask (Tensor): Attention mask in shape \ - (batch_size * num_heads, num_queries, h, w). 
- """ - decoder_out = self.transformer_decoder.post_norm(decoder_out) - decoder_out = decoder_out.transpose(0, 1) - # shape (batch_size, num_queries, c) - cls_pred = self.cls_embed(decoder_out) - # shape (batch_size, num_queries, c) - mask_embed = self.mask_embed(decoder_out) - # shape (batch_size, num_queries, h, w) - mask_pred = torch.einsum('bqc,bchw->bqhw', mask_embed, mask_feature) - attn_mask = F.interpolate( - mask_pred, - attn_mask_target_size, - mode='bilinear', - align_corners=False) - # shape (batch_size, num_queries, h, w) -> - # (batch_size * num_head, num_queries, h*w) - attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat( - (1, self.num_heads, 1, 1)).flatten(0, 1) - attn_mask = attn_mask.sigmoid() < 0.5 - attn_mask = attn_mask.detach() - - return cls_pred, mask_pred, attn_mask - - def forward(self, feats, img_metas): - """Forward function. - - Args: - feats (list[Tensor]): Multi scale Features from the - upstream network, each is a 4D-tensor. - img_metas (list[dict]): List of image information. - - Returns: - tuple: A tuple contains two elements. - - - cls_pred_list (list[Tensor)]: Classification logits \ - for each decoder layer. Each is a 3D-tensor with shape \ - (batch_size, num_queries, cls_out_channels). \ - Note `cls_out_channels` should includes background. - - mask_pred_list (list[Tensor]): Mask logits for each \ - decoder layer. Each with shape (batch_size, num_queries, \ - h, w). - """ - batch_size = len(img_metas) - mask_features, multi_scale_memorys = self.pixel_decoder(feats) - # multi_scale_memorys (from low resolution to high resolution) - decoder_inputs = [] - decoder_positional_encodings = [] - for i in range(self.num_transformer_feat_level): - decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i]) - # shape (batch_size, c, h, w) -> (h*w, batch_size, c) - decoder_input = decoder_input.flatten(2).permute(2, 0, 1) - level_embed = self.level_embed.weight[i].view(1, 1, -1) - decoder_input = decoder_input + level_embed - # shape (batch_size, c, h, w) -> (h*w, batch_size, c) - mask = decoder_input.new_zeros( - (batch_size, ) + multi_scale_memorys[i].shape[-2:], - dtype=torch.bool) - decoder_positional_encoding = self.decoder_positional_encoding( - mask) - decoder_positional_encoding = decoder_positional_encoding.flatten( - 2).permute(2, 0, 1) - decoder_inputs.append(decoder_input) - decoder_positional_encodings.append(decoder_positional_encoding) - # shape (num_queries, c) -> (num_queries, batch_size, c) - query_feat = self.query_feat.weight.unsqueeze(1).repeat( - (1, batch_size, 1)) - query_embed = self.query_embed.weight.unsqueeze(1).repeat( - (1, batch_size, 1)) - - cls_pred_list = [] - mask_pred_list = [] - cls_pred, mask_pred, attn_mask = self.forward_head( - query_feat, mask_features, multi_scale_memorys[0].shape[-2:]) - cls_pred_list.append(cls_pred) - mask_pred_list.append(mask_pred) - - for i in range(self.num_transformer_decoder_layers): - level_idx = i % self.num_transformer_feat_level - # if a mask is all True(all background), then set it all False. 
- attn_mask[torch.where(
- attn_mask.sum(-1) == attn_mask.shape[-1])] = False
-
- # cross_attn + self_attn
- layer = self.transformer_decoder.layers[i]
- attn_masks = [attn_mask, None]
- query_feat = layer(
- query=query_feat,
- key=decoder_inputs[level_idx],
- value=decoder_inputs[level_idx],
- query_pos=query_embed,
- key_pos=decoder_positional_encodings[level_idx],
- attn_masks=attn_masks,
- query_key_padding_mask=None,
- # here we do not apply masking on padded region
- key_padding_mask=None)
- cls_pred, mask_pred, attn_mask = self.forward_head(
- query_feat, mask_features, multi_scale_memorys[
- (i + 1) % self.num_transformer_feat_level].shape[-2:])
-
- cls_pred_list.append(cls_pred)
- mask_pred_list.append(mask_pred)
-
- return cls_pred_list, mask_pred_list
diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/maskformer_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/maskformer_head.py
deleted file mode 100644
index 566dc074059ef770892d2916e7c44fa54b0f8758..0000000000000000000000000000000000000000
--- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/maskformer_head.py
+++ /dev/null
@@ -1,556 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import Conv2d, build_plugin_layer, caffe2_xavier_init
-from mmcv.cnn.bricks.transformer import (build_positional_encoding,
- build_transformer_layer_sequence)
-from mmcv.runner import force_fp32
-
-from mmdet.core import build_assigner, build_sampler, multi_apply, reduce_mean
-from mmdet.models.utils import preprocess_panoptic_gt
-from ..builder import HEADS, build_loss
-from .anchor_free_head import AnchorFreeHead
-
-
-@HEADS.register_module()
-class MaskFormerHead(AnchorFreeHead):
- """Implements the MaskFormer head.
-
- See `Per-Pixel Classification is Not All You Need for Semantic
- Segmentation `_ for details.
-
- Args:
- in_channels (list[int]): Number of channels in the input feature map.
- feat_channels (int): Number of channels for features.
- out_channels (int): Number of channels for output.
- num_things_classes (int): Number of things classes.
- num_stuff_classes (int): Number of stuff classes.
- num_queries (int): Number of queries in the Transformer decoder.
- pixel_decoder (:obj:`mmcv.ConfigDict` | dict): Config for pixel
- decoder. Defaults to None.
- enforce_decoder_input_project (bool, optional): Whether to add a layer
- to change the embed_dim of the transformer encoder in the pixel
- decoder to the embed_dim of the transformer decoder.
- Defaults to False.
- transformer_decoder (:obj:`mmcv.ConfigDict` | dict): Config for
- transformer decoder. Defaults to None.
- positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for
- transformer decoder position encoding. Defaults to None.
- loss_cls (:obj:`mmcv.ConfigDict` | dict): Config of the classification
- loss. Defaults to `CrossEntropyLoss`.
- loss_mask (:obj:`mmcv.ConfigDict` | dict): Config of the mask loss.
- Defaults to `FocalLoss`.
- loss_dice (:obj:`mmcv.ConfigDict` | dict): Config of the dice loss.
- Defaults to `DiceLoss`.
- train_cfg (:obj:`mmcv.ConfigDict` | dict): Training config of the
- MaskFormer head.
- test_cfg (:obj:`mmcv.ConfigDict` | dict): Testing config of the
- MaskFormer head.
- init_cfg (dict or list[dict], optional): Initialization config dict.
- Defaults to None.
- """ - - def __init__(self, - in_channels, - feat_channels, - out_channels, - num_things_classes=80, - num_stuff_classes=53, - num_queries=100, - pixel_decoder=None, - enforce_decoder_input_project=False, - transformer_decoder=None, - positional_encoding=None, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0, - class_weight=[1.0] * 133 + [0.1]), - loss_mask=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=20.0), - loss_dice=dict( - type='DiceLoss', - use_sigmoid=True, - activate=True, - naive_dice=True, - loss_weight=1.0), - train_cfg=None, - test_cfg=None, - init_cfg=None, - **kwargs): - super(AnchorFreeHead, self).__init__(init_cfg) - self.num_things_classes = num_things_classes - self.num_stuff_classes = num_stuff_classes - self.num_classes = self.num_things_classes + self.num_stuff_classes - self.num_queries = num_queries - - pixel_decoder.update( - in_channels=in_channels, - feat_channels=feat_channels, - out_channels=out_channels) - self.pixel_decoder = build_plugin_layer(pixel_decoder)[1] - self.transformer_decoder = build_transformer_layer_sequence( - transformer_decoder) - self.decoder_embed_dims = self.transformer_decoder.embed_dims - pixel_decoder_type = pixel_decoder.get('type') - if pixel_decoder_type == 'PixelDecoder' and ( - self.decoder_embed_dims != in_channels[-1] - or enforce_decoder_input_project): - self.decoder_input_proj = Conv2d( - in_channels[-1], self.decoder_embed_dims, kernel_size=1) - else: - self.decoder_input_proj = nn.Identity() - self.decoder_pe = build_positional_encoding(positional_encoding) - self.query_embed = nn.Embedding(self.num_queries, out_channels) - - self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) - self.mask_embed = nn.Sequential( - nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), - nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), - nn.Linear(feat_channels, out_channels)) - - self.test_cfg = test_cfg - self.train_cfg = train_cfg - if train_cfg: - self.assigner = build_assigner(train_cfg.get('assigner', None)) - self.sampler = build_sampler( - train_cfg.get('sampler', None), context=self) - - self.class_weight = loss_cls.get('class_weight', None) - self.loss_cls = build_loss(loss_cls) - self.loss_mask = build_loss(loss_mask) - self.loss_dice = build_loss(loss_dice) - - def init_weights(self): - if isinstance(self.decoder_input_proj, Conv2d): - caffe2_xavier_init(self.decoder_input_proj, bias=0) - - self.pixel_decoder.init_weights() - - for p in self.transformer_decoder.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def preprocess_gt(self, gt_labels_list, gt_masks_list, gt_semantic_segs, - img_metas): - """Preprocess the ground truth for all images. - - Args: - gt_labels_list (list[Tensor]): Each is ground truth - labels of each bbox, with shape (num_gts, ). - gt_masks_list (list[BitmapMasks]): Each is ground truth - masks of each instances of a image, shape - (num_gts, h, w). - gt_semantic_seg (Tensor | None): Ground truth of semantic - segmentation with the shape (batch_size, n, h, w). - [0, num_thing_class - 1] means things, - [num_thing_class, num_class-1] means stuff, - 255 means VOID. It's None when training instance segmentation. - img_metas (list[dict]): List of image meta information. - - Returns: - tuple: a tuple containing the following targets. - - labels (list[Tensor]): Ground truth class indices\ - for all images. 
Each with shape (n, ), n is the sum of\ - number of stuff type and number of instance in a image. - - masks (list[Tensor]): Ground truth mask for each\ - image, each with shape (n, h, w). - """ - num_things_list = [self.num_things_classes] * len(gt_labels_list) - num_stuff_list = [self.num_stuff_classes] * len(gt_labels_list) - if gt_semantic_segs is None: - gt_semantic_segs = [None] * len(gt_labels_list) - - targets = multi_apply(preprocess_panoptic_gt, gt_labels_list, - gt_masks_list, gt_semantic_segs, num_things_list, - num_stuff_list, img_metas) - labels, masks = targets - return labels, masks - - def get_targets(self, cls_scores_list, mask_preds_list, gt_labels_list, - gt_masks_list, img_metas): - """Compute classification and mask targets for all images for a decoder - layer. - - Args: - cls_scores_list (list[Tensor]): Mask score logits from a single - decoder layer for all images. Each with shape (num_queries, - cls_out_channels). - mask_preds_list (list[Tensor]): Mask logits from a single decoder - layer for all images. Each with shape (num_queries, h, w). - gt_labels_list (list[Tensor]): Ground truth class indices for all - images. Each with shape (n, ), n is the sum of number of stuff - type and number of instance in a image. - gt_masks_list (list[Tensor]): Ground truth mask for each image, - each with shape (n, h, w). - img_metas (list[dict]): List of image meta information. - - Returns: - tuple[list[Tensor]]: a tuple containing the following targets. - - labels_list (list[Tensor]): Labels of all images.\ - Each with shape (num_queries, ). - - label_weights_list (list[Tensor]): Label weights\ - of all images. Each with shape (num_queries, ). - - mask_targets_list (list[Tensor]): Mask targets of\ - all images. Each with shape (num_queries, h, w). - - mask_weights_list (list[Tensor]): Mask weights of\ - all images. Each with shape (num_queries, ). - - num_total_pos (int): Number of positive samples in\ - all images. - - num_total_neg (int): Number of negative samples in\ - all images. - """ - (labels_list, label_weights_list, mask_targets_list, mask_weights_list, - pos_inds_list, - neg_inds_list) = multi_apply(self._get_target_single, cls_scores_list, - mask_preds_list, gt_labels_list, - gt_masks_list, img_metas) - - num_total_pos = sum((inds.numel() for inds in pos_inds_list)) - num_total_neg = sum((inds.numel() for inds in neg_inds_list)) - return (labels_list, label_weights_list, mask_targets_list, - mask_weights_list, num_total_pos, num_total_neg) - - def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks, - img_metas): - """Compute classification and mask targets for one image. - - Args: - cls_score (Tensor): Mask score logits from a single decoder layer - for one image. Shape (num_queries, cls_out_channels). - mask_pred (Tensor): Mask logits for a single decoder layer for one - image. Shape (num_queries, h, w). - gt_labels (Tensor): Ground truth class indices for one image with - shape (n, ). n is the sum of number of stuff type and number - of instance in a image. - gt_masks (Tensor): Ground truth mask for each image, each with - shape (n, h, w). - img_metas (dict): Image informtation. - - Returns: - tuple[Tensor]: a tuple containing the following for one image. - - labels (Tensor): Labels of each image. - shape (num_queries, ). - - label_weights (Tensor): Label weights of each image. - shape (num_queries, ). - - mask_targets (Tensor): Mask targets of each image. - shape (num_queries, h, w). - - mask_weights (Tensor): Mask weights of each image. 
- shape (num_queries, ). - - pos_inds (Tensor): Sampled positive indices for each image. - - neg_inds (Tensor): Sampled negative indices for each image. - """ - target_shape = mask_pred.shape[-2:] - if gt_masks.shape[0] > 0: - gt_masks_downsampled = F.interpolate( - gt_masks.unsqueeze(1).float(), target_shape, - mode='nearest').squeeze(1).long() - else: - gt_masks_downsampled = gt_masks - - # assign and sample - assign_result = self.assigner.assign(cls_score, mask_pred, gt_labels, - gt_masks_downsampled, img_metas) - sampling_result = self.sampler.sample(assign_result, mask_pred, - gt_masks) - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - - # label target - labels = gt_labels.new_full((self.num_queries, ), - self.num_classes, - dtype=torch.long) - labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] - label_weights = gt_labels.new_ones(self.num_queries) - - # mask target - mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds] - mask_weights = mask_pred.new_zeros((self.num_queries, )) - mask_weights[pos_inds] = 1.0 - - return (labels, label_weights, mask_targets, mask_weights, pos_inds, - neg_inds) - - @force_fp32(apply_to=('all_cls_scores', 'all_mask_preds')) - def loss(self, all_cls_scores, all_mask_preds, gt_labels_list, - gt_masks_list, img_metas): - """Loss function. - - Args: - all_cls_scores (Tensor): Classification scores for all decoder - layers with shape (num_decoder, batch_size, num_queries, - cls_out_channels). Note `cls_out_channels` should includes - background. - all_mask_preds (Tensor): Mask scores for all decoder layers with - shape (num_decoder, batch_size, num_queries, h, w). - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (n, ). n is the sum of number of stuff type - and number of instance in a image. - gt_masks_list (list[Tensor]): Ground truth mask for each image with - shape (n, h, w). - img_metas (list[dict]): List of image meta information. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - num_dec_layers = len(all_cls_scores) - all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] - all_gt_masks_list = [gt_masks_list for _ in range(num_dec_layers)] - img_metas_list = [img_metas for _ in range(num_dec_layers)] - losses_cls, losses_mask, losses_dice = multi_apply( - self.loss_single, all_cls_scores, all_mask_preds, - all_gt_labels_list, all_gt_masks_list, img_metas_list) - - loss_dict = dict() - # loss from the last decoder layer - loss_dict['loss_cls'] = losses_cls[-1] - loss_dict['loss_mask'] = losses_mask[-1] - loss_dict['loss_dice'] = losses_dice[-1] - # loss from other decoder layers - num_dec_layer = 0 - for loss_cls_i, loss_mask_i, loss_dice_i in zip( - losses_cls[:-1], losses_mask[:-1], losses_dice[:-1]): - loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i - loss_dict[f'd{num_dec_layer}.loss_mask'] = loss_mask_i - loss_dict[f'd{num_dec_layer}.loss_dice'] = loss_dice_i - num_dec_layer += 1 - return loss_dict - - def loss_single(self, cls_scores, mask_preds, gt_labels_list, - gt_masks_list, img_metas): - """Loss function for outputs from a single decoder layer. - - Args: - cls_scores (Tensor): Mask score logits from a single decoder layer - for all images. Shape (batch_size, num_queries, - cls_out_channels). Note `cls_out_channels` should includes - background. - mask_preds (Tensor): Mask logits for a pixel decoder for all - images. Shape (batch_size, num_queries, h, w). 
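The deleted `_get_target_single` above fills a `(num_queries,)` label vector with the background index (`num_classes`) and overwrites only the positions matched by the assigner; mask weights are 1 only for those matched queries. A small sketch of that target layout, using toy sizes rather than the real head configuration:

```
import torch

num_queries, num_classes = 5, 3              # toy sizes, not the real config
gt_labels = torch.tensor([0, 2])              # two ground-truth instances
pos_inds = torch.tensor([1, 4])               # queries matched by the assigner
pos_assigned_gt_inds = torch.tensor([1, 0])   # gt index matched by each positive

# Every query starts as "background" (= num_classes), then positives are filled in.
labels = gt_labels.new_full((num_queries, ), num_classes, dtype=torch.long)
labels[pos_inds] = gt_labels[pos_assigned_gt_inds]

# Only matched queries contribute to the mask losses.
mask_weights = torch.zeros(num_queries)
mask_weights[pos_inds] = 1.0

print(labels)        # tensor([3, 2, 3, 3, 0])
print(mask_weights)  # tensor([0., 1., 0., 0., 1.])
```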
- gt_labels_list (list[Tensor]): Ground truth class indices for each - image, each with shape (n, ). n is the sum of number of stuff - types and number of instances in a image. - gt_masks_list (list[Tensor]): Ground truth mask for each image, - each with shape (n, h, w). - img_metas (list[dict]): List of image meta information. - - Returns: - tuple[Tensor]: Loss components for outputs from a single decoder\ - layer. - """ - num_imgs = cls_scores.size(0) - cls_scores_list = [cls_scores[i] for i in range(num_imgs)] - mask_preds_list = [mask_preds[i] for i in range(num_imgs)] - - (labels_list, label_weights_list, mask_targets_list, mask_weights_list, - num_total_pos, - num_total_neg) = self.get_targets(cls_scores_list, mask_preds_list, - gt_labels_list, gt_masks_list, - img_metas) - # shape (batch_size, num_queries) - labels = torch.stack(labels_list, dim=0) - # shape (batch_size, num_queries) - label_weights = torch.stack(label_weights_list, dim=0) - # shape (num_total_gts, h, w) - mask_targets = torch.cat(mask_targets_list, dim=0) - # shape (batch_size, num_queries) - mask_weights = torch.stack(mask_weights_list, dim=0) - - # classfication loss - # shape (batch_size * num_queries, ) - cls_scores = cls_scores.flatten(0, 1) - labels = labels.flatten(0, 1) - label_weights = label_weights.flatten(0, 1) - - class_weight = cls_scores.new_tensor(self.class_weight) - loss_cls = self.loss_cls( - cls_scores, - labels, - label_weights, - avg_factor=class_weight[labels].sum()) - - num_total_masks = reduce_mean(cls_scores.new_tensor([num_total_pos])) - num_total_masks = max(num_total_masks, 1) - - # extract positive ones - # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w) - mask_preds = mask_preds[mask_weights > 0] - target_shape = mask_targets.shape[-2:] - - if mask_targets.shape[0] == 0: - # zero match - loss_dice = mask_preds.sum() - loss_mask = mask_preds.sum() - return loss_cls, loss_mask, loss_dice - - # upsample to shape of target - # shape (num_total_gts, h, w) - mask_preds = F.interpolate( - mask_preds.unsqueeze(1), - target_shape, - mode='bilinear', - align_corners=False).squeeze(1) - - # dice loss - loss_dice = self.loss_dice( - mask_preds, mask_targets, avg_factor=num_total_masks) - - # mask loss - # FocalLoss support input of shape (n, num_class) - h, w = mask_preds.shape[-2:] - # shape (num_total_gts, h, w) -> (num_total_gts * h * w, 1) - mask_preds = mask_preds.reshape(-1, 1) - # shape (num_total_gts, h, w) -> (num_total_gts * h * w) - mask_targets = mask_targets.reshape(-1) - # target is (1 - mask_targets) !!! - loss_mask = self.loss_mask( - mask_preds, 1 - mask_targets, avg_factor=num_total_masks * h * w) - - return loss_cls, loss_mask, loss_dice - - def forward(self, feats, img_metas): - """Forward function. - - Args: - feats (list[Tensor]): Features from the upstream network, each - is a 4D-tensor. - img_metas (list[dict]): List of image information. - - Returns: - tuple: a tuple contains two elements. - - all_cls_scores (Tensor): Classification scores for each\ - scale level. Each is a 4D-tensor with shape\ - (num_decoder, batch_size, num_queries, cls_out_channels).\ - Note `cls_out_channels` should includes background. - - all_mask_preds (Tensor): Mask scores for each decoder\ - layer. Each with shape (num_decoder, batch_size,\ - num_queries, h, w). 
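In the deleted `loss_single`, the positive mask logits are upsampled to the target resolution and then flattened to `(num_total_gts * h * w, 1)` so that a binary focal loss, which expects `(n, num_class)` inputs, can be applied per pixel; the target is also flipped to `1 - mask_targets` to match the label convention used there. A shape-only sketch of that flattening with toy sizes and no mmdet dependency:

```
import torch
import torch.nn.functional as F

num_gts, h, w = 2, 4, 4                       # toy sizes
mask_preds = torch.randn(num_gts, 2, 2)        # low-res logits for matched queries
mask_targets = torch.randint(0, 2, (num_gts, h, w)).float()

# Upsample predictions to the target resolution before the per-pixel loss.
mask_preds = F.interpolate(
    mask_preds.unsqueeze(1), (h, w), mode='bilinear',
    align_corners=False).squeeze(1)

# Treat every pixel as an independent binary sample of shape (n, 1).
flat_preds = mask_preds.reshape(-1, 1)         # (num_gts * h * w, 1)
flat_targets = mask_targets.reshape(-1)        # (num_gts * h * w, )
print(flat_preds.shape, flat_targets.shape)    # torch.Size([32, 1]) torch.Size([32])
```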
- """ - batch_size = len(img_metas) - input_img_h, input_img_w = img_metas[0]['batch_input_shape'] - padding_mask = feats[-1].new_ones( - (batch_size, input_img_h, input_img_w), dtype=torch.float32) - for i in range(batch_size): - img_h, img_w, _ = img_metas[i]['img_shape'] - padding_mask[i, :img_h, :img_w] = 0 - padding_mask = F.interpolate( - padding_mask.unsqueeze(1), - size=feats[-1].shape[-2:], - mode='nearest').to(torch.bool).squeeze(1) - # when backbone is swin, memory is output of last stage of swin. - # when backbone is r50, memory is output of tranformer encoder. - mask_features, memory = self.pixel_decoder(feats, img_metas) - pos_embed = self.decoder_pe(padding_mask) - memory = self.decoder_input_proj(memory) - # shape (batch_size, c, h, w) -> (h*w, batch_size, c) - memory = memory.flatten(2).permute(2, 0, 1) - pos_embed = pos_embed.flatten(2).permute(2, 0, 1) - # shape (batch_size, h * w) - padding_mask = padding_mask.flatten(1) - # shape = (num_queries, embed_dims) - query_embed = self.query_embed.weight - # shape = (num_queries, batch_size, embed_dims) - query_embed = query_embed.unsqueeze(1).repeat(1, batch_size, 1) - target = torch.zeros_like(query_embed) - # shape (num_decoder, num_queries, batch_size, embed_dims) - out_dec = self.transformer_decoder( - query=target, - key=memory, - value=memory, - key_pos=pos_embed, - query_pos=query_embed, - key_padding_mask=padding_mask) - # shape (num_decoder, batch_size, num_queries, embed_dims) - out_dec = out_dec.transpose(1, 2) - - # cls_scores - all_cls_scores = self.cls_embed(out_dec) - - # mask_preds - mask_embed = self.mask_embed(out_dec) - all_mask_preds = torch.einsum('lbqc,bchw->lbqhw', mask_embed, - mask_features) - - return all_cls_scores, all_mask_preds - - def forward_train(self, - feats, - img_metas, - gt_bboxes, - gt_labels, - gt_masks, - gt_semantic_seg, - gt_bboxes_ignore=None): - """Forward function for training mode. - - Args: - feats (list[Tensor]): Multi-level features from the upstream - network, each is a 4D-tensor. - img_metas (list[Dict]): List of image information. - gt_bboxes (list[Tensor]): Each element is ground truth bboxes of - the image, shape (num_gts, 4). Not used here. - gt_labels (list[Tensor]): Each element is ground truth labels of - each box, shape (num_gts,). - gt_masks (list[BitmapMasks]): Each element is masks of instances - of a image, shape (num_gts, h, w). - gt_semantic_seg (list[tensor] | None): Each element is the ground - truth of semantic segmentation with the shape (N, H, W). - [0, num_thing_class - 1] means things, - [num_thing_class, num_class-1] means stuff, - 255 means VOID. It's None when training instance segmentation. - gt_bboxes_ignore (list[Tensor]): Ground truth bboxes to be - ignored. Defaults to None. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - # not consider ignoring bboxes - assert gt_bboxes_ignore is None - - # forward - all_cls_scores, all_mask_preds = self(feats, img_metas) - - # preprocess ground truth - gt_labels, gt_masks = self.preprocess_gt(gt_labels, gt_masks, - gt_semantic_seg, img_metas) - - # loss - losses = self.loss(all_cls_scores, all_mask_preds, gt_labels, gt_masks, - img_metas) - - return losses - - def simple_test(self, feats, img_metas, **kwargs): - """Test without augmentaton. - - Args: - feats (list[Tensor]): Multi-level features from the - upstream network, each is a 4D-tensor. - img_metas (list[dict]): List of image information. - - Returns: - tuple: A tuple contains two tensors. 
- - - mask_cls_results (Tensor): Mask classification logits,\ - shape (batch_size, num_queries, cls_out_channels). - Note `cls_out_channels` should includes background. - - mask_pred_results (Tensor): Mask logits, shape \ - (batch_size, num_queries, h, w). - """ - all_cls_scores, all_mask_preds = self(feats, img_metas) - mask_cls_results = all_cls_scores[-1] - mask_pred_results = all_mask_preds[-1] - - # upsample masks - img_shape = img_metas[0]['batch_input_shape'] - mask_pred_results = F.interpolate( - mask_pred_results, - size=(img_shape[0], img_shape[1]), - mode='bilinear', - align_corners=False) - - return mask_cls_results, mask_pred_results diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/nasfcos_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/nasfcos_head.py deleted file mode 100644 index 380c912c763445a32acad3be6da965966cd9ae53..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/nasfcos_head.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy - -import torch.nn as nn -from mmcv.cnn import ConvModule, Scale - -from mmdet.models.dense_heads.fcos_head import FCOSHead -from ..builder import HEADS - - -@HEADS.register_module() -class NASFCOSHead(FCOSHead): - """Anchor-free head used in `NASFCOS `_. - - It is quite similar with FCOS head, except for the searched structure of - classification branch and bbox regression branch, where a structure of - "dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead. - """ - - def __init__(self, *args, init_cfg=None, **kwargs): - if init_cfg is None: - init_cfg = [ - dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']), - dict( - type='Normal', - std=0.01, - override=[ - dict(name='conv_reg'), - dict(name='conv_centerness'), - dict( - name='conv_cls', - type='Normal', - std=0.01, - bias_prob=0.01) - ]), - ] - super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs) - - def _init_layers(self): - """Initialize layers of the head.""" - dconv3x3_config = dict( - type='DCNv2', - kernel_size=3, - use_bias=True, - deform_groups=2, - padding=1) - conv3x3_config = dict(type='Conv', kernel_size=3, padding=1) - conv1x1_config = dict(type='Conv', kernel_size=1) - - self.arch_config = [ - dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config - ] - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i, op_ in enumerate(self.arch_config): - op = copy.deepcopy(op_) - chn = self.in_channels if i == 0 else self.feat_channels - assert isinstance(op, dict) - use_bias = op.pop('use_bias', False) - padding = op.pop('padding', 0) - kernel_size = op.pop('kernel_size') - module = ConvModule( - chn, - self.feat_channels, - kernel_size, - stride=1, - padding=padding, - norm_cfg=self.norm_cfg, - bias=use_bias, - conv_cfg=op) - - self.cls_convs.append(copy.deepcopy(module)) - self.reg_convs.append(copy.deepcopy(module)) - - self.conv_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) - self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) - - self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/paa_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/paa_head.py deleted file mode 100644 index d79b5b9f40778fb775b76919cadc80579fa00ba0..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/paa_head.py +++ /dev/null @@ -1,756 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch -from mmcv.runner import force_fp32 - -from mmdet.core import multi_apply, multiclass_nms -from mmdet.core.bbox.iou_calculators import bbox_overlaps -from mmdet.models import HEADS -from mmdet.models.dense_heads import ATSSHead - -EPS = 1e-12 -try: - import sklearn.mixture as skm -except ImportError: - skm = None - - -def levels_to_images(mlvl_tensor): - """Concat multi-level feature maps by image. - - [feature_level0, feature_level1...] -> [feature_image0, feature_image1...] - Convert the shape of each element in mlvl_tensor from (N, C, H, W) to - (N, H*W , C), then split the element to N elements with shape (H*W, C), and - concat elements in same image of all level along first dimension. - - Args: - mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from - corresponding level. Each element is of shape (N, C, H, W) - - Returns: - list[torch.Tensor]: A list that contains N tensors and each tensor is - of shape (num_elements, C) - """ - batch_size = mlvl_tensor[0].size(0) - batch_list = [[] for _ in range(batch_size)] - channels = mlvl_tensor[0].size(1) - for t in mlvl_tensor: - t = t.permute(0, 2, 3, 1) - t = t.view(batch_size, -1, channels).contiguous() - for img in range(batch_size): - batch_list[img].append(t[img]) - return [torch.cat(item, 0) for item in batch_list] - - -@HEADS.register_module() -class PAAHead(ATSSHead): - """Head of PAAAssignment: Probabilistic Anchor Assignment with IoU - Prediction for Object Detection. - - Code is modified from the `official github repo - `_. - - More details can be found in the `paper - `_ . - - Args: - topk (int): Select topk samples with smallest loss in - each level. - score_voting (bool): Whether to use score voting in post-process. - covariance_type : String describing the type of covariance parameters - to be used in :class:`sklearn.mixture.GaussianMixture`. - It must be one of: - - - 'full': each component has its own general covariance matrix - - 'tied': all components share the same general covariance matrix - - 'diag': each component has its own diagonal covariance matrix - - 'spherical': each component has its own single variance - Default: 'diag'. From 'full' to 'spherical', the gmm fitting - process is faster yet the performance could be influenced. For most - cases, 'diag' should be a good choice. - """ - - def __init__(self, - *args, - topk=9, - score_voting=True, - covariance_type='diag', - **kwargs): - # topk used in paa reassign process - self.topk = topk - self.with_score_voting = score_voting - self.covariance_type = covariance_type - super(PAAHead, self).__init__(*args, **kwargs) - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds')) - def loss(self, - cls_scores, - bbox_preds, - iou_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - iou_preds (list[Tensor]): iou_preds for each scale - level with shape (N, num_anchors * 1, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
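The `levels_to_images` helper above regroups per-level tensors of shape `(N, C, H, W)` into per-image tensors of shape `(sum_l H_l * W_l, C)`. A small standalone check of that contract with two toy levels, following the same regrouping as the deleted helper:

```
import torch

def levels_to_images(mlvl_tensor):
    # Per-level (N, C, H, W) -> per-image (sum of H*W over levels, C).
    batch_size = mlvl_tensor[0].size(0)
    channels = mlvl_tensor[0].size(1)
    batch_list = [[] for _ in range(batch_size)]
    for t in mlvl_tensor:
        t = t.permute(0, 2, 3, 1).reshape(batch_size, -1, channels)
        for img in range(batch_size):
            batch_list[img].append(t[img])
    return [torch.cat(item, 0) for item in batch_list]

feats = [torch.randn(2, 4, 8, 8), torch.randn(2, 4, 4, 4)]  # two images, two levels
per_image = levels_to_images(feats)
print(len(per_image), per_image[0].shape)  # 2 torch.Size([80, 4])
```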
- gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor] | None): Specify which bounding - boxes can be ignored when are computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss gmm_assignment. - """ - - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels, - ) - (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds, - pos_gt_index) = cls_reg_targets - cls_scores = levels_to_images(cls_scores) - cls_scores = [ - item.reshape(-1, self.cls_out_channels) for item in cls_scores - ] - bbox_preds = levels_to_images(bbox_preds) - bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] - iou_preds = levels_to_images(iou_preds) - iou_preds = [item.reshape(-1, 1) for item in iou_preds] - pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list, - cls_scores, bbox_preds, labels, - labels_weight, bboxes_target, - bboxes_weight, pos_inds) - - with torch.no_grad(): - reassign_labels, reassign_label_weight, \ - reassign_bbox_weights, num_pos = multi_apply( - self.paa_reassign, - pos_losses_list, - labels, - labels_weight, - bboxes_weight, - pos_inds, - pos_gt_index, - anchor_list) - num_pos = sum(num_pos) - # convert all tensor list to a flatten tensor - cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1)) - bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1)) - iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1)) - labels = torch.cat(reassign_labels, 0).view(-1) - flatten_anchors = torch.cat( - [torch.cat(item, 0) for item in anchor_list]) - labels_weight = torch.cat(reassign_label_weight, 0).view(-1) - bboxes_target = torch.cat(bboxes_target, - 0).view(-1, bboxes_target[0].size(-1)) - - pos_inds_flatten = ((labels >= 0) - & - (labels < self.num_classes)).nonzero().reshape(-1) - - losses_cls = self.loss_cls( - cls_scores, - labels, - labels_weight, - avg_factor=max(num_pos, len(img_metas))) # avoid num_pos=0 - if num_pos: - pos_bbox_pred = self.bbox_coder.decode( - flatten_anchors[pos_inds_flatten], - bbox_preds[pos_inds_flatten]) - pos_bbox_target = bboxes_target[pos_inds_flatten] - iou_target = bbox_overlaps( - pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True) - losses_iou = self.loss_centerness( - iou_preds[pos_inds_flatten], - iou_target.unsqueeze(-1), - avg_factor=num_pos) - losses_bbox = self.loss_bbox( - pos_bbox_pred, - pos_bbox_target, - iou_target.clamp(min=EPS), - avg_factor=iou_target.sum()) - else: - losses_iou = iou_preds.sum() * 0 - losses_bbox = bbox_preds.sum() * 0 - - return dict( - loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou) - - def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_weight, - bbox_target, bbox_weight, pos_inds): - """Calculate loss of all potential positive samples obtained from first - match process. - - Args: - anchors (list[Tensor]): Anchors of each scale. 
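In the PAA `loss` above, the decoded positive boxes are compared with their targets through `bbox_overlaps(..., is_aligned=True)`, and the resulting per-box IoU is used both as the target of the IoU-prediction branch and as the weight of the box regression loss. A self-contained sketch of the aligned-IoU computation in plain PyTorch (not the mmdet helper):

```
import torch

def aligned_iou(boxes1, boxes2):
    """IoU between corresponding rows of two (n, 4) xyxy tensors."""
    tl = torch.max(boxes1[:, :2], boxes2[:, :2])
    br = torch.min(boxes1[:, 2:], boxes2[:, 2:])
    wh = (br - tl).clamp(min=0)
    inter = wh[:, 0] * wh[:, 1]
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    return inter / (area1 + area2 - inter).clamp(min=1e-6)

pred = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
target = torch.tensor([[0., 0., 10., 10.], [0., 0., 10., 10.]])
iou = aligned_iou(pred, target)
print(iou)  # tensor([1.0000, 0.1429]) -> iou_target and bbox loss weight
```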
- cls_score (Tensor): Box scores of single image with shape - (num_anchors, num_classes) - bbox_pred (Tensor): Box energies / deltas of single image - with shape (num_anchors, 4) - label (Tensor): classification target of each anchor with - shape (num_anchors,) - label_weight (Tensor): Classification loss weight of each - anchor with shape (num_anchors). - bbox_target (dict): Regression target of each anchor with - shape (num_anchors, 4). - bbox_weight (Tensor): Bbox weight of each anchor with shape - (num_anchors, 4). - pos_inds (Tensor): Index of all positive samples got from - first assign process. - - Returns: - Tensor: Losses of all positive samples in single image. - """ - if not len(pos_inds): - return cls_score.new([]), - anchors_all_level = torch.cat(anchors, 0) - pos_scores = cls_score[pos_inds] - pos_bbox_pred = bbox_pred[pos_inds] - pos_label = label[pos_inds] - pos_label_weight = label_weight[pos_inds] - pos_bbox_target = bbox_target[pos_inds] - pos_bbox_weight = bbox_weight[pos_inds] - pos_anchors = anchors_all_level[pos_inds] - pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred) - - # to keep loss dimension - loss_cls = self.loss_cls( - pos_scores, - pos_label, - pos_label_weight, - avg_factor=1.0, - reduction_override='none') - - loss_bbox = self.loss_bbox( - pos_bbox_pred, - pos_bbox_target, - pos_bbox_weight, - avg_factor=1.0, # keep same loss weight before reassign - reduction_override='none') - - loss_cls = loss_cls.sum(-1) - pos_loss = loss_bbox + loss_cls - return pos_loss, - - def paa_reassign(self, pos_losses, label, label_weight, bbox_weight, - pos_inds, pos_gt_inds, anchors): - """Fit loss to GMM distribution and separate positive, ignore, negative - samples again with GMM model. - - Args: - pos_losses (Tensor): Losses of all positive samples in - single image. - label (Tensor): classification target of each anchor with - shape (num_anchors,) - label_weight (Tensor): Classification loss weight of each - anchor with shape (num_anchors). - bbox_weight (Tensor): Bbox weight of each anchor with shape - (num_anchors, 4). - pos_inds (Tensor): Index of all positive samples got from - first assign process. - pos_gt_inds (Tensor): Gt_index of all positive samples got - from first assign process. - anchors (list[Tensor]): Anchors of each scale. - - Returns: - tuple: Usually returns a tuple containing learning targets. - - - label (Tensor): classification target of each anchor after - paa assign, with shape (num_anchors,) - - label_weight (Tensor): Classification loss weight of each - anchor after paa assign, with shape (num_anchors). - - bbox_weight (Tensor): Bbox weight of each anchor with shape - (num_anchors, 4). - - num_pos (int): The number of positive samples after paa - assign. 
- """ - if not len(pos_inds): - return label, label_weight, bbox_weight, 0 - label = label.clone() - label_weight = label_weight.clone() - bbox_weight = bbox_weight.clone() - num_gt = pos_gt_inds.max() + 1 - num_level = len(anchors) - num_anchors_each_level = [item.size(0) for item in anchors] - num_anchors_each_level.insert(0, 0) - inds_level_interval = np.cumsum(num_anchors_each_level) - pos_level_mask = [] - for i in range(num_level): - mask = (pos_inds >= inds_level_interval[i]) & ( - pos_inds < inds_level_interval[i + 1]) - pos_level_mask.append(mask) - pos_inds_after_paa = [label.new_tensor([])] - ignore_inds_after_paa = [label.new_tensor([])] - for gt_ind in range(num_gt): - pos_inds_gmm = [] - pos_loss_gmm = [] - gt_mask = pos_gt_inds == gt_ind - for level in range(num_level): - level_mask = pos_level_mask[level] - level_gt_mask = level_mask & gt_mask - value, topk_inds = pos_losses[level_gt_mask].topk( - min(level_gt_mask.sum(), self.topk), largest=False) - pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds]) - pos_loss_gmm.append(value) - pos_inds_gmm = torch.cat(pos_inds_gmm) - pos_loss_gmm = torch.cat(pos_loss_gmm) - # fix gmm need at least two sample - if len(pos_inds_gmm) < 2: - continue - device = pos_inds_gmm.device - pos_loss_gmm, sort_inds = pos_loss_gmm.sort() - pos_inds_gmm = pos_inds_gmm[sort_inds] - pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy() - min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max() - means_init = np.array([min_loss, max_loss]).reshape(2, 1) - weights_init = np.array([0.5, 0.5]) - precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1) # full - if self.covariance_type == 'spherical': - precisions_init = precisions_init.reshape(2) - elif self.covariance_type == 'diag': - precisions_init = precisions_init.reshape(2, 1) - elif self.covariance_type == 'tied': - precisions_init = np.array([[1.0]]) - if skm is None: - raise ImportError('Please run "pip install sklearn" ' - 'to install sklearn first.') - gmm = skm.GaussianMixture( - 2, - weights_init=weights_init, - means_init=means_init, - precisions_init=precisions_init, - covariance_type=self.covariance_type) - gmm.fit(pos_loss_gmm) - gmm_assignment = gmm.predict(pos_loss_gmm) - scores = gmm.score_samples(pos_loss_gmm) - gmm_assignment = torch.from_numpy(gmm_assignment).to(device) - scores = torch.from_numpy(scores).to(device) - - pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme( - gmm_assignment, scores, pos_inds_gmm) - pos_inds_after_paa.append(pos_inds_temp) - ignore_inds_after_paa.append(ignore_inds_temp) - - pos_inds_after_paa = torch.cat(pos_inds_after_paa) - ignore_inds_after_paa = torch.cat(ignore_inds_after_paa) - reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1) - reassign_ids = pos_inds[reassign_mask] - label[reassign_ids] = self.num_classes - label_weight[ignore_inds_after_paa] = 0 - bbox_weight[reassign_ids] = 0 - num_pos = len(pos_inds_after_paa) - return label, label_weight, bbox_weight, num_pos - - def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm): - """A general separation scheme for gmm model. - - It separates a GMM distribution of candidate samples into three - parts, 0 1 and uncertain areas, and you can implement other - separation schemes by rewriting this function. - - Args: - gmm_assignment (Tensor): The prediction of GMM which is of shape - (num_samples,). The 0/1 value indicates the distribution - that each sample comes from. - scores (Tensor): The probability of sample coming from the - fit GMM distribution. 
The tensor is of shape (num_samples,). - pos_inds_gmm (Tensor): All the indexes of samples which are used - to fit GMM model. The tensor is of shape (num_samples,) - - Returns: - tuple[Tensor]: The indices of positive and ignored samples. - - - pos_inds_temp (Tensor): Indices of positive samples. - - ignore_inds_temp (Tensor): Indices of ignore samples. - """ - # The implementation is (c) in Fig.3 in origin paper instead of (b). - # You can refer to issues such as - # https://github.com/kkhoot/PAA/issues/8 and - # https://github.com/kkhoot/PAA/issues/9. - fgs = gmm_assignment == 0 - pos_inds_temp = fgs.new_tensor([], dtype=torch.long) - ignore_inds_temp = fgs.new_tensor([], dtype=torch.long) - if fgs.nonzero().numel(): - _, pos_thr_ind = scores[fgs].topk(1) - pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1] - ignore_inds_temp = pos_inds_gmm.new_tensor([]) - return pos_inds_temp, ignore_inds_temp - - def get_targets( - self, - anchor_list, - valid_flag_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=1, - unmap_outputs=True, - ): - """Get targets for PAA head. - - This method is almost the same as `AnchorHead.get_targets()`. We direct - return the results from _get_targets_single instead map it to levels - by images_to_levels function. - - Args: - anchor_list (list[list[Tensor]]): Multi level anchors of each - image. The outer list indicates images, and the inner list - corresponds to feature levels of the image. Each element of - the inner list is a tensor of shape (num_anchors, 4). - valid_flag_list (list[list[Tensor]]): Multi level valid flags of - each image. The outer list indicates images, and the inner list - corresponds to feature levels of the image. Each element of - the inner list is a tensor of shape (num_anchors, ) - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. - img_metas (list[dict]): Meta info of each image. - gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be - ignored. - gt_labels_list (list[Tensor]): Ground truth labels of each box. - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: Usually returns a tuple containing learning targets. - - - labels (list[Tensor]): Labels of all anchors, each with - shape (num_anchors,). - - label_weights (list[Tensor]): Label weights of all anchor. - each with shape (num_anchors,). - - bbox_targets (list[Tensor]): BBox targets of all anchors. - each with shape (num_anchors, 4). - - bbox_weights (list[Tensor]): BBox weights of all anchors. - each with shape (num_anchors, 4). - - pos_inds (list[Tensor]): Contains all index of positive - sample in all anchor. - - gt_inds (list[Tensor]): Contains all gt_index of positive - sample in all anchor. 
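The `gmm_separation_scheme` above keeps, among the candidates assigned to the low-loss component, only those from the lowest loss up to the sample the GMM scores highest, and leaves the ignore set empty. A toy sketch of that selection, with hypothetical anchor indices already sorted by ascending loss:

```
import torch

pos_inds_gmm = torch.tensor([7, 3, 11, 5, 2])           # hypothetical anchor indices
gmm_assignment = torch.tensor([0, 0, 0, 1, 1])           # 0 = low-loss component
scores = torch.tensor([-0.1, -0.5, -2.0, -3.0, -4.0])    # GMM log-likelihoods

fgs = gmm_assignment == 0
pos_inds_temp = pos_inds_gmm.new_tensor([])
if fgs.any():
    # Keep foreground candidates up to the one the GMM is most confident about.
    _, pos_thr_ind = scores[fgs].topk(1)
    pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1]
print(pos_inds_temp)  # tensor([7]) -> only the most certain low-loss anchors remain
```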
- """ - - num_imgs = len(img_metas) - assert len(anchor_list) == len(valid_flag_list) == num_imgs - concat_anchor_list = [] - concat_valid_flag_list = [] - for i in range(num_imgs): - assert len(anchor_list[i]) == len(valid_flag_list[i]) - concat_anchor_list.append(torch.cat(anchor_list[i])) - concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - results = multi_apply( - self._get_targets_single, - concat_anchor_list, - concat_valid_flag_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - unmap_outputs=unmap_outputs) - - (labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds, - valid_neg_inds, sampling_result) = results - - # Due to valid flag of anchors, we have to calculate the real pos_inds - # in origin anchor set. - pos_inds = [] - for i, single_labels in enumerate(labels): - pos_mask = (0 <= single_labels) & ( - single_labels < self.num_classes) - pos_inds.append(pos_mask.nonzero().view(-1)) - - gt_inds = [item.pos_assigned_gt_inds for item in sampling_result] - return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, - gt_inds) - - def _get_targets_single(self, - flat_anchors, - valid_flags, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=1, - unmap_outputs=True): - """Compute regression and classification targets for anchors in a - single image. - - This method is same as `AnchorHead._get_targets_single()`. - """ - assert unmap_outputs, 'We must map outputs back to the original' \ - 'set of anchors in PAAhead' - return super(ATSSHead, self)._get_targets_single( - flat_anchors, - valid_flags, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=1, - unmap_outputs=True) - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def get_bboxes(self, - cls_scores, - bbox_preds, - score_factors=None, - img_metas=None, - cfg=None, - rescale=False, - with_nms=True, - **kwargs): - assert with_nms, 'PAA only supports "with_nms=True" now and it ' \ - 'means PAAHead does not support ' \ - 'test-time augmentation' - return super(ATSSHead, self).get_bboxes(cls_scores, bbox_preds, - score_factors, img_metas, cfg, - rescale, with_nms, **kwargs) - - def _get_bboxes_single(self, - cls_score_list, - bbox_pred_list, - score_factor_list, - mlvl_priors, - img_meta, - cfg, - rescale=False, - with_nms=True, - **kwargs): - """Transform outputs of a single image into bbox predictions. - - Args: - cls_score_list (list[Tensor]): Box scores from all scale - levels of a single image, each item has shape - (num_priors * num_classes, H, W). - bbox_pred_list (list[Tensor]): Box energies / deltas from - all scale levels of a single image, each item has shape - (num_priors * 4, H, W). - score_factor_list (list[Tensor]): Score factors from all scale - levels of a single image, each item has shape - (num_priors * 1, H, W). - mlvl_priors (list[Tensor]): Each element in the list is - the priors of a single level in feature pyramid, has shape - (num_priors, 4). - img_meta (dict): Image meta info. - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. 
- - Returns: - tuple[Tensor]: Results of detected bboxes and labels. If with_nms - is False and mlvl_score_factor is None, return mlvl_bboxes and - mlvl_scores, else return mlvl_bboxes, mlvl_scores and - mlvl_score_factor. Usually with_nms is False is used for aug - test. If with_nms is True, then return the following format - - - det_bboxes (Tensor): Predicted bboxes with shape \ - [num_bboxes, 5], where the first 4 columns are bounding \ - box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ - column are scores between 0 and 1. - - det_labels (Tensor): Predicted labels of the corresponding \ - box with shape [num_bboxes]. - """ - cfg = self.test_cfg if cfg is None else cfg - img_shape = img_meta['img_shape'] - nms_pre = cfg.get('nms_pre', -1) - - mlvl_bboxes = [] - mlvl_scores = [] - mlvl_score_factors = [] - for level_idx, (cls_score, bbox_pred, score_factor, priors) in \ - enumerate(zip(cls_score_list, bbox_pred_list, - score_factor_list, mlvl_priors)): - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - - scores = cls_score.permute(1, 2, 0).reshape( - -1, self.cls_out_channels).sigmoid() - bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) - score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() - - if 0 < nms_pre < scores.shape[0]: - max_scores, _ = (scores * - score_factor[:, None]).sqrt().max(dim=1) - _, topk_inds = max_scores.topk(nms_pre) - priors = priors[topk_inds, :] - bbox_pred = bbox_pred[topk_inds, :] - scores = scores[topk_inds, :] - score_factor = score_factor[topk_inds] - - bboxes = self.bbox_coder.decode( - priors, bbox_pred, max_shape=img_shape) - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_score_factors.append(score_factor) - - return self._bbox_post_process(mlvl_scores, mlvl_bboxes, - img_meta['scale_factor'], cfg, rescale, - with_nms, mlvl_score_factors, **kwargs) - - def _bbox_post_process(self, - mlvl_scores, - mlvl_bboxes, - scale_factor, - cfg, - rescale=False, - with_nms=True, - mlvl_score_factors=None, - **kwargs): - """bbox post-processing method. - - The boxes would be rescaled to the original image scale and do - the nms operation. Usually with_nms is False is used for aug test. - - Args: - mlvl_scores (list[Tensor]): Box scores from all scale - levels of a single image, each item has shape - (num_bboxes, num_class). - mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale - levels of a single image, each item has shape (num_bboxes, 4). - scale_factor (ndarray, optional): Scale factor of the image arange - as (w_scale, h_scale, w_scale, h_scale). - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - mlvl_score_factors (list[Tensor], optional): Score factor from - all scale levels of a single image, each item has shape - (num_bboxes, ). Default: None. - - Returns: - tuple[Tensor]: Results of detected bboxes and labels. If with_nms - is False and mlvl_score_factor is None, return mlvl_bboxes and - mlvl_scores, else return mlvl_bboxes, mlvl_scores and - mlvl_score_factor. Usually with_nms is False is used for aug - test. If with_nms is True, then return the following format - - - det_bboxes (Tensor): Predicted bboxes with shape \ - [num_bboxes, 5], where the first 4 columns are bounding \ - box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ - column are scores between 0 and 1. 
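In `_get_bboxes_single` above, each level is pruned to `nms_pre` priors before decoding, ranked by `sqrt(cls_score * iou_score)`, i.e. the same geometric mean of classification score and predicted IoU that is later fed to NMS. A toy sketch of that pre-filtering with hypothetical sizes:

```
import torch

num_priors, num_classes, nms_pre = 6, 3, 2            # toy sizes
scores = torch.rand(num_priors, num_classes)           # sigmoid cls scores
score_factor = torch.rand(num_priors)                  # sigmoid IoU predictions
priors = torch.rand(num_priors, 4)                     # placeholder priors

if 0 < nms_pre < scores.shape[0]:
    # Rank priors by the geometric mean of class score and predicted IoU.
    max_scores, _ = (scores * score_factor[:, None]).sqrt().max(dim=1)
    _, topk_inds = max_scores.topk(nms_pre)
    priors, scores, score_factor = (priors[topk_inds], scores[topk_inds],
                                    score_factor[topk_inds])
print(priors.shape, scores.shape)  # torch.Size([2, 4]) torch.Size([2, 3])
```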
- - det_labels (Tensor): Predicted labels of the corresponding \ - box with shape [num_bboxes]. - """ - mlvl_bboxes = torch.cat(mlvl_bboxes) - if rescale: - mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) - mlvl_scores = torch.cat(mlvl_scores) - # Add a dummy background class to the backend when using sigmoid - # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 - # BG cat_id: num_class - padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) - mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) - - mlvl_iou_preds = torch.cat(mlvl_score_factors) - mlvl_nms_scores = (mlvl_scores * mlvl_iou_preds[:, None]).sqrt() - det_bboxes, det_labels = multiclass_nms( - mlvl_bboxes, - mlvl_nms_scores, - cfg.score_thr, - cfg.nms, - cfg.max_per_img, - score_factors=None) - if self.with_score_voting and len(det_bboxes) > 0: - det_bboxes, det_labels = self.score_voting(det_bboxes, det_labels, - mlvl_bboxes, - mlvl_nms_scores, - cfg.score_thr) - - return det_bboxes, det_labels - - def score_voting(self, det_bboxes, det_labels, mlvl_bboxes, - mlvl_nms_scores, score_thr): - """Implementation of score voting method works on each remaining boxes - after NMS procedure. - - Args: - det_bboxes (Tensor): Remaining boxes after NMS procedure, - with shape (k, 5), each dimension means - (x1, y1, x2, y2, score). - det_labels (Tensor): The label of remaining boxes, with shape - (k, 1),Labels are 0-based. - mlvl_bboxes (Tensor): All boxes before the NMS procedure, - with shape (num_anchors,4). - mlvl_nms_scores (Tensor): The scores of all boxes which is used - in the NMS procedure, with shape (num_anchors, num_class) - score_thr (float): The score threshold of bboxes. - - Returns: - tuple: Usually returns a tuple containing voting results. - - - det_bboxes_voted (Tensor): Remaining boxes after - score voting procedure, with shape (k, 5), each - dimension means (x1, y1, x2, y2, score). - - det_labels_voted (Tensor): Label of remaining bboxes - after voting, with shape (num_anchors,). 
- """ - candidate_mask = mlvl_nms_scores > score_thr - candidate_mask_nonzeros = candidate_mask.nonzero(as_tuple=False) - candidate_inds = candidate_mask_nonzeros[:, 0] - candidate_labels = candidate_mask_nonzeros[:, 1] - candidate_bboxes = mlvl_bboxes[candidate_inds] - candidate_scores = mlvl_nms_scores[candidate_mask] - det_bboxes_voted = [] - det_labels_voted = [] - for cls in range(self.cls_out_channels): - candidate_cls_mask = candidate_labels == cls - if not candidate_cls_mask.any(): - continue - candidate_cls_scores = candidate_scores[candidate_cls_mask] - candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask] - det_cls_mask = det_labels == cls - det_cls_bboxes = det_bboxes[det_cls_mask].view( - -1, det_bboxes.size(-1)) - det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4], - candidate_cls_bboxes) - for det_ind in range(len(det_cls_bboxes)): - single_det_ious = det_candidate_ious[det_ind] - pos_ious_mask = single_det_ious > 0.01 - pos_ious = single_det_ious[pos_ious_mask] - pos_bboxes = candidate_cls_bboxes[pos_ious_mask] - pos_scores = candidate_cls_scores[pos_ious_mask] - pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) * - pos_scores)[:, None] - voted_box = torch.sum( - pis * pos_bboxes, dim=0) / torch.sum( - pis, dim=0) - voted_score = det_cls_bboxes[det_ind][-1:][None, :] - det_bboxes_voted.append( - torch.cat((voted_box[None, :], voted_score), dim=1)) - det_labels_voted.append(cls) - - det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0) - det_labels_voted = det_labels.new_tensor(det_labels_voted) - return det_bboxes_voted, det_labels_voted diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/pisa_retinanet_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/pisa_retinanet_head.py deleted file mode 100644 index 8654ef453a849f038f68c78df64b4fdc4b26549b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/pisa_retinanet_head.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from mmcv.runner import force_fp32 - -from mmdet.core import images_to_levels -from ..builder import HEADS -from ..losses import carl_loss, isr_p -from .retina_head import RetinaHead - - -@HEADS.register_module() -class PISARetinaHead(RetinaHead): - """PISA Retinanet Head. - - The head owns the same structure with Retinanet Head, but differs in two - aspects: - 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to - change the positive loss weights. - 2. Classification-aware regression loss is adopted as a third loss. - """ - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes of each image - with shape (num_obj, 4). - gt_labels (list[Tensor]): Ground truth labels of each image - with shape (num_obj, 4). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image. - Default: None. - - Returns: - dict: Loss dict, comprise classification loss, regression loss and - carl loss. 
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels, - return_sampling_results=True) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets - num_total_samples = ( - num_total_pos + num_total_neg if self.sampling else num_total_pos) - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - # concat all level anchors and flags to a single tensor - concat_anchor_list = [] - for i in range(len(anchor_list)): - concat_anchor_list.append(torch.cat(anchor_list[i])) - all_anchor_list = images_to_levels(concat_anchor_list, - num_level_anchors) - - num_imgs = len(img_metas) - flatten_cls_scores = [ - cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels) - for cls_score in cls_scores - ] - flatten_cls_scores = torch.cat( - flatten_cls_scores, dim=1).reshape(-1, - flatten_cls_scores[0].size(-1)) - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) - for bbox_pred in bbox_preds - ] - flatten_bbox_preds = torch.cat( - flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1)) - flatten_labels = torch.cat(labels_list, dim=1).reshape(-1) - flatten_label_weights = torch.cat( - label_weights_list, dim=1).reshape(-1) - flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4) - flatten_bbox_targets = torch.cat( - bbox_targets_list, dim=1).reshape(-1, 4) - flatten_bbox_weights = torch.cat( - bbox_weights_list, dim=1).reshape(-1, 4) - - # Apply ISR-P - isr_cfg = self.train_cfg.get('isr', None) - if isr_cfg is not None: - all_targets = (flatten_labels, flatten_label_weights, - flatten_bbox_targets, flatten_bbox_weights) - with torch.no_grad(): - all_targets = isr_p( - flatten_cls_scores, - flatten_bbox_preds, - all_targets, - flatten_anchors, - sampling_results_list, - bbox_coder=self.bbox_coder, - loss_cls=self.loss_cls, - num_class=self.num_classes, - **self.train_cfg.isr) - (flatten_labels, flatten_label_weights, flatten_bbox_targets, - flatten_bbox_weights) = all_targets - - # For convenience we compute loss once instead separating by fpn level, - # so that we don't need to separate the weights by level again. 
- # The result should be the same - losses_cls = self.loss_cls( - flatten_cls_scores, - flatten_labels, - flatten_label_weights, - avg_factor=num_total_samples) - losses_bbox = self.loss_bbox( - flatten_bbox_preds, - flatten_bbox_targets, - flatten_bbox_weights, - avg_factor=num_total_samples) - loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) - - # CARL Loss - carl_cfg = self.train_cfg.get('carl', None) - if carl_cfg is not None: - loss_carl = carl_loss( - flatten_cls_scores, - flatten_labels, - flatten_bbox_preds, - flatten_bbox_targets, - self.loss_bbox, - **self.train_cfg.carl, - avg_factor=num_total_pos, - sigmoid=True, - num_class=self.num_classes) - loss_dict.update(loss_carl) - - return loss_dict diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/pisa_ssd_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/pisa_ssd_head.py deleted file mode 100644 index 86b67abe932262c7f0177a34cb94ea43a12ac5d4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/pisa_ssd_head.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from mmdet.core import multi_apply -from ..builder import HEADS -from ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p -from .ssd_head import SSDHead - - -# TODO: add loss evaluator for SSD -@HEADS.register_module() -class PISASSDHead(SSDHead): - - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes of each image - with shape (num_obj, 4). - gt_labels (list[Tensor]): Ground truth labels of each image - with shape (num_obj, 4). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image. - Default: None. - - Returns: - dict: Loss dict, comprise classification loss regression loss and - carl loss. 
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=1, - unmap_outputs=False, - return_sampling_results=True) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets - - num_images = len(img_metas) - all_cls_scores = torch.cat([ - s.permute(0, 2, 3, 1).reshape( - num_images, -1, self.cls_out_channels) for s in cls_scores - ], 1) - all_labels = torch.cat(labels_list, -1).view(num_images, -1) - all_label_weights = torch.cat(label_weights_list, - -1).view(num_images, -1) - all_bbox_preds = torch.cat([ - b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) - for b in bbox_preds - ], -2) - all_bbox_targets = torch.cat(bbox_targets_list, - -2).view(num_images, -1, 4) - all_bbox_weights = torch.cat(bbox_weights_list, - -2).view(num_images, -1, 4) - - # concat all level anchors to a single tensor - all_anchors = [] - for i in range(num_images): - all_anchors.append(torch.cat(anchor_list[i])) - - isr_cfg = self.train_cfg.get('isr', None) - all_targets = (all_labels.view(-1), all_label_weights.view(-1), - all_bbox_targets.view(-1, - 4), all_bbox_weights.view(-1, 4)) - # apply ISR-P - if isr_cfg is not None: - all_targets = isr_p( - all_cls_scores.view(-1, all_cls_scores.size(-1)), - all_bbox_preds.view(-1, 4), - all_targets, - torch.cat(all_anchors), - sampling_results_list, - loss_cls=CrossEntropyLoss(), - bbox_coder=self.bbox_coder, - **self.train_cfg.isr, - num_class=self.num_classes) - (new_labels, new_label_weights, new_bbox_targets, - new_bbox_weights) = all_targets - all_labels = new_labels.view(all_labels.shape) - all_label_weights = new_label_weights.view(all_label_weights.shape) - all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape) - all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape) - - # add CARL loss - carl_loss_cfg = self.train_cfg.get('carl', None) - if carl_loss_cfg is not None: - loss_carl = carl_loss( - all_cls_scores.view(-1, all_cls_scores.size(-1)), - all_targets[0], - all_bbox_preds.view(-1, 4), - all_targets[2], - SmoothL1Loss(beta=1.), - **self.train_cfg.carl, - avg_factor=num_total_pos, - num_class=self.num_classes) - - # check NaN and Inf - assert torch.isfinite(all_cls_scores).all().item(), \ - 'classification scores become infinite or NaN!' - assert torch.isfinite(all_bbox_preds).all().item(), \ - 'bbox predications become infinite or NaN!' 
- - losses_cls, losses_bbox = multi_apply( - self.loss_single, - all_cls_scores, - all_bbox_preds, - all_anchors, - all_labels, - all_label_weights, - all_bbox_targets, - all_bbox_weights, - num_total_samples=num_total_pos) - loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) - if carl_loss_cfg is not None: - loss_dict.update(loss_carl) - return loss_dict diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/reppoints_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/reppoints_head.py deleted file mode 100644 index f7204141db43a3754031bc175c87876a2d7df3e5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/reppoints_head.py +++ /dev/null @@ -1,764 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.ops import DeformConv2d - -from mmdet.core import (build_assigner, build_sampler, images_to_levels, - multi_apply, unmap) -from mmdet.core.anchor.point_generator import MlvlPointGenerator -from mmdet.core.utils import filter_scores_and_topk -from ..builder import HEADS, build_loss -from .anchor_free_head import AnchorFreeHead - - -@HEADS.register_module() -class RepPointsHead(AnchorFreeHead): - """RepPoint head. - - Args: - point_feat_channels (int): Number of channels of points features. - gradient_mul (float): The multiplier to gradients from - points refinement and recognition. - point_strides (Iterable): points strides. - point_base_scale (int): bbox scale for assigning labels. - loss_cls (dict): Config of classification loss. - loss_bbox_init (dict): Config of initial points loss. - loss_bbox_refine (dict): Config of points loss in refinement. - use_grid_points (bool): If we use bounding box representation, the - reppoints is represented as grid points on the bounding box. - center_init (bool): Whether to use center point assignment. - transform_method (str): The methods to transform RepPoints to bbox. - init_cfg (dict or list[dict], optional): Initialization config dict. - """ # noqa: W605 - - def __init__(self, - num_classes, - in_channels, - point_feat_channels=256, - num_points=9, - gradient_mul=0.1, - point_strides=[8, 16, 32, 64, 128], - point_base_scale=4, - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox_init=dict( - type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5), - loss_bbox_refine=dict( - type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), - use_grid_points=False, - center_init=True, - transform_method='moment', - moment_mul=0.01, - init_cfg=dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', - name='reppoints_cls_out', - std=0.01, - bias_prob=0.01)), - **kwargs): - self.num_points = num_points - self.point_feat_channels = point_feat_channels - self.use_grid_points = use_grid_points - self.center_init = center_init - - # we use deform conv to extract points features - self.dcn_kernel = int(np.sqrt(num_points)) - self.dcn_pad = int((self.dcn_kernel - 1) / 2) - assert self.dcn_kernel * self.dcn_kernel == num_points, \ - 'The points number should be a square number.' - assert self.dcn_kernel % 2 == 1, \ - 'The points number should be an odd square number.' 
- dcn_base = np.arange(-self.dcn_pad, - self.dcn_pad + 1).astype(np.float64) - dcn_base_y = np.repeat(dcn_base, self.dcn_kernel) - dcn_base_x = np.tile(dcn_base, self.dcn_kernel) - dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape( - (-1)) - self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1) - - super().__init__( - num_classes, - in_channels, - loss_cls=loss_cls, - init_cfg=init_cfg, - **kwargs) - - self.gradient_mul = gradient_mul - self.point_base_scale = point_base_scale - self.point_strides = point_strides - self.prior_generator = MlvlPointGenerator( - self.point_strides, offset=0.) - - self.sampling = loss_cls['type'] not in ['FocalLoss'] - if self.train_cfg: - self.init_assigner = build_assigner(self.train_cfg.init.assigner) - self.refine_assigner = build_assigner( - self.train_cfg.refine.assigner) - # use PseudoSampler when sampling is False - if self.sampling and hasattr(self.train_cfg, 'sampler'): - sampler_cfg = self.train_cfg.sampler - else: - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - self.transform_method = transform_method - if self.transform_method == 'moment': - self.moment_transfer = nn.Parameter( - data=torch.zeros(2), requires_grad=True) - self.moment_mul = moment_mul - - self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) - if self.use_sigmoid_cls: - self.cls_out_channels = self.num_classes - else: - self.cls_out_channels = self.num_classes + 1 - self.loss_bbox_init = build_loss(loss_bbox_init) - self.loss_bbox_refine = build_loss(loss_bbox_refine) - - def _init_layers(self): - """Initialize layers of the head.""" - self.relu = nn.ReLU(inplace=True) - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points - self.reppoints_cls_conv = DeformConv2d(self.feat_channels, - self.point_feat_channels, - self.dcn_kernel, 1, - self.dcn_pad) - self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels, - self.cls_out_channels, 1, 1, 0) - self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels, - self.point_feat_channels, 3, - 1, 1) - self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels, - pts_out_dim, 1, 1, 0) - self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels, - self.point_feat_channels, - self.dcn_kernel, 1, - self.dcn_pad) - self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels, - pts_out_dim, 1, 1, 0) - - def points2bbox(self, pts, y_first=True): - """Converting the points set into bounding box. - - :param pts: the input points sets (fields), each points - set (fields) is represented as 2n scalar. - :param y_first: if y_first=True, the point set is represented as - [y1, x1, y2, x2 ... yn, xn], otherwise the point set is - represented as [x1, y1, x2, y2 ... xn, yn]. - :return: each points set is converting to a bbox [x1, y1, x2, y2]. - """ - pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:]) - pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1, - ...] - pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0, - ...] 
- if self.transform_method == 'minmax': - bbox_left = pts_x.min(dim=1, keepdim=True)[0] - bbox_right = pts_x.max(dim=1, keepdim=True)[0] - bbox_up = pts_y.min(dim=1, keepdim=True)[0] - bbox_bottom = pts_y.max(dim=1, keepdim=True)[0] - bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom], - dim=1) - elif self.transform_method == 'partial_minmax': - pts_y = pts_y[:, :4, ...] - pts_x = pts_x[:, :4, ...] - bbox_left = pts_x.min(dim=1, keepdim=True)[0] - bbox_right = pts_x.max(dim=1, keepdim=True)[0] - bbox_up = pts_y.min(dim=1, keepdim=True)[0] - bbox_bottom = pts_y.max(dim=1, keepdim=True)[0] - bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom], - dim=1) - elif self.transform_method == 'moment': - pts_y_mean = pts_y.mean(dim=1, keepdim=True) - pts_x_mean = pts_x.mean(dim=1, keepdim=True) - pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True) - pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True) - moment_transfer = (self.moment_transfer * self.moment_mul) + ( - self.moment_transfer.detach() * (1 - self.moment_mul)) - moment_width_transfer = moment_transfer[0] - moment_height_transfer = moment_transfer[1] - half_width = pts_x_std * torch.exp(moment_width_transfer) - half_height = pts_y_std * torch.exp(moment_height_transfer) - bbox = torch.cat([ - pts_x_mean - half_width, pts_y_mean - half_height, - pts_x_mean + half_width, pts_y_mean + half_height - ], - dim=1) - else: - raise NotImplementedError - return bbox - - def gen_grid_from_reg(self, reg, previous_boxes): - """Base on the previous bboxes and regression values, we compute the - regressed bboxes and generate the grids on the bboxes. - - :param reg: the regression value to previous bboxes. - :param previous_boxes: previous bboxes. - :return: generate grids on the regressed bboxes. - """ - b, _, h, w = reg.shape - bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2. - bwh = (previous_boxes[:, 2:, ...] - - previous_boxes[:, :2, ...]).clamp(min=1e-6) - grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp( - reg[:, 2:, ...]) - grid_wh = bwh * torch.exp(reg[:, 2:, ...]) - grid_left = grid_topleft[:, [0], ...] - grid_top = grid_topleft[:, [1], ...] - grid_width = grid_wh[:, [0], ...] - grid_height = grid_wh[:, [1], ...] - intervel = torch.linspace(0., 1., self.dcn_kernel).view( - 1, self.dcn_kernel, 1, 1).type_as(reg) - grid_x = grid_left + grid_width * intervel - grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1) - grid_x = grid_x.view(b, -1, h, w) - grid_y = grid_top + grid_height * intervel - grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1) - grid_y = grid_y.view(b, -1, h, w) - grid_yx = torch.stack([grid_y, grid_x], dim=2) - grid_yx = grid_yx.view(b, -1, h, w) - regressed_bbox = torch.cat([ - grid_left, grid_top, grid_left + grid_width, grid_top + grid_height - ], 1) - return grid_yx, regressed_bbox - - def forward(self, feats): - return multi_apply(self.forward_single, feats) - - def forward_single(self, x): - """Forward feature map of a single FPN level.""" - dcn_base_offset = self.dcn_base_offset.type_as(x) - # If we use center_init, the initial reppoints is from center points. - # If we use bounding bbox representation, the initial reppoints is - # from regular grid placed on a pre-defined bbox. 
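As a concrete illustration of the `'moment'` transform above, here is a stripped-down version for a single point set; the learned `moment_transfer` is set to zero for simplicity, so the half sizes reduce to the per-axis standard deviations:

```
import torch

pts_x = torch.tensor([1., 3., 5.])
pts_y = torch.tensor([2., 6., 4.])
moment_transfer = torch.zeros(2)   # learned (width, height) log-scales, assumed 0 here

half_w = pts_x.std() * torch.exp(moment_transfer[0])
half_h = pts_y.std() * torch.exp(moment_transfer[1])
bbox = torch.stack([pts_x.mean() - half_w, pts_y.mean() - half_h,
                    pts_x.mean() + half_w, pts_y.mean() + half_h])
print(bbox)  # tensor([1., 2., 5., 6.]) -> [x1, y1, x2, y2] centred on the point-set mean
```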
- if self.use_grid_points or not self.center_init: - scale = self.point_base_scale / 2 - points_init = dcn_base_offset / dcn_base_offset.max() * scale - bbox_init = x.new_tensor([-scale, -scale, scale, - scale]).view(1, 4, 1, 1) - else: - points_init = 0 - cls_feat = x - pts_feat = x - for cls_conv in self.cls_convs: - cls_feat = cls_conv(cls_feat) - for reg_conv in self.reg_convs: - pts_feat = reg_conv(pts_feat) - # initialize reppoints - pts_out_init = self.reppoints_pts_init_out( - self.relu(self.reppoints_pts_init_conv(pts_feat))) - if self.use_grid_points: - pts_out_init, bbox_out_init = self.gen_grid_from_reg( - pts_out_init, bbox_init.detach()) - else: - pts_out_init = pts_out_init + points_init - # refine and classify reppoints - pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach( - ) + self.gradient_mul * pts_out_init - dcn_offset = pts_out_init_grad_mul - dcn_base_offset - cls_out = self.reppoints_cls_out( - self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset))) - pts_out_refine = self.reppoints_pts_refine_out( - self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset))) - if self.use_grid_points: - pts_out_refine, bbox_out_refine = self.gen_grid_from_reg( - pts_out_refine, bbox_out_init.detach()) - else: - pts_out_refine = pts_out_refine + pts_out_init.detach() - - if self.training: - return cls_out, pts_out_init, pts_out_refine - else: - return cls_out, self.points2bbox(pts_out_refine) - - def get_points(self, featmap_sizes, img_metas, device): - """Get points according to feature map sizes. - - Args: - featmap_sizes (list[tuple]): Multi-level feature map sizes. - img_metas (list[dict]): Image meta info. - - Returns: - tuple: points of each image, valid flags of each image - """ - num_imgs = len(img_metas) - - # since feature map sizes of all images are the same, we only compute - # points center for one time - multi_level_points = self.prior_generator.grid_priors( - featmap_sizes, device=device, with_stride=True) - points_list = [[point.clone() for point in multi_level_points] - for _ in range(num_imgs)] - - # for each image, we compute valid flags of multi level grids - valid_flag_list = [] - for img_id, img_meta in enumerate(img_metas): - multi_level_flags = self.prior_generator.valid_flags( - featmap_sizes, img_meta['pad_shape']) - valid_flag_list.append(multi_level_flags) - - return points_list, valid_flag_list - - def centers_to_bboxes(self, point_list): - """Get bboxes according to center points. - - Only used in :class:`MaxIoUAssigner`. 
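The `gradient_mul` mixing in `forward_single` above keeps the forward value of the initial points unchanged while scaling the gradient that flows back into them; a tiny sketch:

```
import torch

gradient_mul = 0.1
pts = torch.tensor([2.0, -1.0], requires_grad=True)

# Same trick as pts_out_init_grad_mul above: forward value == pts,
# but only `gradient_mul` of the gradient reaches pts.
mixed = (1 - gradient_mul) * pts.detach() + gradient_mul * pts
mixed.sum().backward()
print(mixed)      # tensor([ 2., -1.], grad_fn=...)
print(pts.grad)   # tensor([0.1000, 0.1000])
```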
- """ - bbox_list = [] - for i_img, point in enumerate(point_list): - bbox = [] - for i_lvl in range(len(self.point_strides)): - scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5 - bbox_shift = torch.Tensor([-scale, -scale, scale, - scale]).view(1, 4).type_as(point[0]) - bbox_center = torch.cat( - [point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1) - bbox.append(bbox_center + bbox_shift) - bbox_list.append(bbox) - return bbox_list - - def offset_to_pts(self, center_list, pred_list): - """Change from point offset to point coordinate.""" - pts_list = [] - for i_lvl in range(len(self.point_strides)): - pts_lvl = [] - for i_img in range(len(center_list)): - pts_center = center_list[i_img][i_lvl][:, :2].repeat( - 1, self.num_points) - pts_shift = pred_list[i_lvl][i_img] - yx_pts_shift = pts_shift.permute(1, 2, 0).view( - -1, 2 * self.num_points) - y_pts_shift = yx_pts_shift[..., 0::2] - x_pts_shift = yx_pts_shift[..., 1::2] - xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1) - xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1) - pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center - pts_lvl.append(pts) - pts_lvl = torch.stack(pts_lvl, 0) - pts_list.append(pts_lvl) - return pts_list - - def _point_target_single(self, - flat_proposals, - valid_flags, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - stage='init', - unmap_outputs=True): - inside_flags = valid_flags - if not inside_flags.any(): - return (None, ) * 7 - # assign gt and sample proposals - proposals = flat_proposals[inside_flags, :] - - if stage == 'init': - assigner = self.init_assigner - pos_weight = self.train_cfg.init.pos_weight - else: - assigner = self.refine_assigner - pos_weight = self.train_cfg.refine.pos_weight - assign_result = assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore, - None if self.sampling else gt_labels) - sampling_result = self.sampler.sample(assign_result, proposals, - gt_bboxes) - - num_valid_proposals = proposals.shape[0] - bbox_gt = proposals.new_zeros([num_valid_proposals, 4]) - pos_proposals = torch.zeros_like(proposals) - proposals_weights = proposals.new_zeros([num_valid_proposals, 4]) - labels = proposals.new_full((num_valid_proposals, ), - self.num_classes, - dtype=torch.long) - label_weights = proposals.new_zeros( - num_valid_proposals, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - pos_gt_bboxes = sampling_result.pos_gt_bboxes - bbox_gt[pos_inds, :] = pos_gt_bboxes - pos_proposals[pos_inds, :] = proposals[pos_inds, :] - proposals_weights[pos_inds, :] = 1.0 - if gt_labels is None: - # Only rpn gives gt_labels as None - # Foreground is the first class - labels[pos_inds] = 0 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if pos_weight <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = pos_weight - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - # map up to original set of proposals - if unmap_outputs: - num_total_proposals = flat_proposals.size(0) - labels = unmap(labels, num_total_proposals, inside_flags) - label_weights = unmap(label_weights, num_total_proposals, - inside_flags) - bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags) - pos_proposals = unmap(pos_proposals, num_total_proposals, - inside_flags) - proposals_weights = unmap(proposals_weights, num_total_proposals, - inside_flags) - - return (labels, label_weights, bbox_gt, pos_proposals, - proposals_weights, pos_inds, neg_inds) - - def 
get_targets(self, - proposals_list, - valid_flag_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - stage='init', - label_channels=1, - unmap_outputs=True): - """Compute corresponding GT box and classification targets for - proposals. - - Args: - proposals_list (list[list]): Multi level points/bboxes of each - image. - valid_flag_list (list[list]): Multi level valid flags of each - image. - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. - img_metas (list[dict]): Meta info of each image. - gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be - ignored. - gt_bboxes_list (list[Tensor]): Ground truth labels of each box. - stage (str): `init` or `refine`. Generate target for init stage or - refine stage - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: - - labels_list (list[Tensor]): Labels of each level. - - label_weights_list (list[Tensor]): Label weights of each level. # noqa: E501 - - bbox_gt_list (list[Tensor]): Ground truth bbox of each level. - - proposal_list (list[Tensor]): Proposals(points/bboxes) of each level. # noqa: E501 - - proposal_weights_list (list[Tensor]): Proposal weights of each level. # noqa: E501 - - num_total_pos (int): Number of positive samples in all images. # noqa: E501 - - num_total_neg (int): Number of negative samples in all images. # noqa: E501 - """ - assert stage in ['init', 'refine'] - num_imgs = len(img_metas) - assert len(proposals_list) == len(valid_flag_list) == num_imgs - - # points number of multi levels - num_level_proposals = [points.size(0) for points in proposals_list[0]] - - # concat all level points and flags to a single tensor - for i in range(num_imgs): - assert len(proposals_list[i]) == len(valid_flag_list[i]) - proposals_list[i] = torch.cat(proposals_list[i]) - valid_flag_list[i] = torch.cat(valid_flag_list[i]) - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - (all_labels, all_label_weights, all_bbox_gt, all_proposals, - all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply( - self._point_target_single, - proposals_list, - valid_flag_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - stage=stage, - unmap_outputs=unmap_outputs) - # no valid points - if any([labels is None for labels in all_labels]): - return None - # sampled points of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - labels_list = images_to_levels(all_labels, num_level_proposals) - label_weights_list = images_to_levels(all_label_weights, - num_level_proposals) - bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals) - proposals_list = images_to_levels(all_proposals, num_level_proposals) - proposal_weights_list = images_to_levels(all_proposal_weights, - num_level_proposals) - return (labels_list, label_weights_list, bbox_gt_list, proposals_list, - proposal_weights_list, num_total_pos, num_total_neg) - - def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels, - label_weights, bbox_gt_init, bbox_weights_init, - bbox_gt_refine, bbox_weights_refine, stride, - num_total_samples_init, num_total_samples_refine): - # classification loss - labels = labels.reshape(-1) - label_weights = 
label_weights.reshape(-1) - cls_score = cls_score.permute(0, 2, 3, - 1).reshape(-1, self.cls_out_channels) - cls_score = cls_score.contiguous() - loss_cls = self.loss_cls( - cls_score, - labels, - label_weights, - avg_factor=num_total_samples_refine) - - # points loss - bbox_gt_init = bbox_gt_init.reshape(-1, 4) - bbox_weights_init = bbox_weights_init.reshape(-1, 4) - bbox_pred_init = self.points2bbox( - pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False) - bbox_gt_refine = bbox_gt_refine.reshape(-1, 4) - bbox_weights_refine = bbox_weights_refine.reshape(-1, 4) - bbox_pred_refine = self.points2bbox( - pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False) - normalize_term = self.point_base_scale * stride - loss_pts_init = self.loss_bbox_init( - bbox_pred_init / normalize_term, - bbox_gt_init / normalize_term, - bbox_weights_init, - avg_factor=num_total_samples_init) - loss_pts_refine = self.loss_bbox_refine( - bbox_pred_refine / normalize_term, - bbox_gt_refine / normalize_term, - bbox_weights_refine, - avg_factor=num_total_samples_refine) - return loss_cls, loss_pts_init, loss_pts_refine - - def loss(self, - cls_scores, - pts_preds_init, - pts_preds_refine, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - device = cls_scores[0].device - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - - # target for initial stage - center_list, valid_flag_list = self.get_points(featmap_sizes, - img_metas, device) - pts_coordinate_preds_init = self.offset_to_pts(center_list, - pts_preds_init) - if self.train_cfg.init.assigner['type'] == 'PointAssigner': - # Assign target for center list - candidate_list = center_list - else: - # transform center list to bbox list and - # assign target for bbox list - bbox_list = self.centers_to_bboxes(center_list) - candidate_list = bbox_list - cls_reg_targets_init = self.get_targets( - candidate_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - stage='init', - label_channels=label_channels) - (*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init, - num_total_pos_init, num_total_neg_init) = cls_reg_targets_init - num_total_samples_init = ( - num_total_pos_init + - num_total_neg_init if self.sampling else num_total_pos_init) - - # target for refinement stage - center_list, valid_flag_list = self.get_points(featmap_sizes, - img_metas, device) - pts_coordinate_preds_refine = self.offset_to_pts( - center_list, pts_preds_refine) - bbox_list = [] - for i_img, center in enumerate(center_list): - bbox = [] - for i_lvl in range(len(pts_preds_refine)): - bbox_preds_init = self.points2bbox( - pts_preds_init[i_lvl].detach()) - bbox_shift = bbox_preds_init * self.point_strides[i_lvl] - bbox_center = torch.cat( - [center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1) - bbox.append(bbox_center + - bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4)) - bbox_list.append(bbox) - cls_reg_targets_refine = self.get_targets( - bbox_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - stage='refine', - label_channels=label_channels) - (labels_list, label_weights_list, bbox_gt_list_refine, - candidate_list_refine, bbox_weights_list_refine, num_total_pos_refine, - num_total_neg_refine) = cls_reg_targets_refine - num_total_samples_refine = ( - num_total_pos_refine + - num_total_neg_refine if self.sampling else 
num_total_pos_refine) - - # compute loss - losses_cls, losses_pts_init, losses_pts_refine = multi_apply( - self.loss_single, - cls_scores, - pts_coordinate_preds_init, - pts_coordinate_preds_refine, - labels_list, - label_weights_list, - bbox_gt_list_init, - bbox_weights_list_init, - bbox_gt_list_refine, - bbox_weights_list_refine, - self.point_strides, - num_total_samples_init=num_total_samples_init, - num_total_samples_refine=num_total_samples_refine) - loss_dict_all = { - 'loss_cls': losses_cls, - 'loss_pts_init': losses_pts_init, - 'loss_pts_refine': losses_pts_refine - } - return loss_dict_all - - # Same as base_dense_head/_get_bboxes_single except self._bbox_decode - def _get_bboxes_single(self, - cls_score_list, - bbox_pred_list, - score_factor_list, - mlvl_priors, - img_meta, - cfg, - rescale=False, - with_nms=True, - **kwargs): - """Transform outputs of a single image into bbox predictions. - - Args: - cls_score_list (list[Tensor]): Box scores from all scale - levels of a single image, each item has shape - (num_priors * num_classes, H, W). - bbox_pred_list (list[Tensor]): Box energies / deltas from - all scale levels of a single image, each item has shape - (num_priors * 4, H, W). - score_factor_list (list[Tensor]): Score factor from all scale - levels of a single image. RepPoints head does not need - this value. - mlvl_priors (list[Tensor]): Each element in the list is - the priors of a single level in feature pyramid, has shape - (num_priors, 2). - img_meta (dict): Image meta info. - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - tuple[Tensor]: Results of detected bboxes and labels. If with_nms - is False and mlvl_score_factor is None, return mlvl_bboxes and - mlvl_scores, else return mlvl_bboxes, mlvl_scores and - mlvl_score_factor. Usually with_nms is False is used for aug - test. If with_nms is True, then return the following format - - - det_bboxes (Tensor): Predicted bboxes with shape \ - [num_bboxes, 5], where the first 4 columns are bounding \ - box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ - column are scores between 0 and 1. - - det_labels (Tensor): Predicted labels of the corresponding \ - box with shape [num_bboxes]. - """ - cfg = self.test_cfg if cfg is None else cfg - assert len(cls_score_list) == len(bbox_pred_list) - img_shape = img_meta['img_shape'] - nms_pre = cfg.get('nms_pre', -1) - - mlvl_bboxes = [] - mlvl_scores = [] - mlvl_labels = [] - for level_idx, (cls_score, bbox_pred, priors) in enumerate( - zip(cls_score_list, bbox_pred_list, mlvl_priors)): - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) - - cls_score = cls_score.permute(1, 2, - 0).reshape(-1, self.cls_out_channels) - if self.use_sigmoid_cls: - scores = cls_score.sigmoid() - else: - scores = cls_score.softmax(-1)[:, :-1] - - # After https://github.com/open-mmlab/mmdetection/pull/6268/, - # this operation keeps fewer bboxes under the same `nms_pre`. - # There is no difference in performance for most models. If you - # find a slight drop in performance, you can set a larger - # `nms_pre` than before. 
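The filtering call that follows can be pictured with a simplified stand-in; this is not the actual `filter_scores_and_topk` helper from mmdet, just the score-threshold-then-top-k idea it implements:

```
import torch

def filter_and_topk(scores, score_thr, nms_pre):
    """Keep scores above `score_thr`, then retain at most `nms_pre` of them."""
    keep = scores > score_thr
    scores, idxs = scores[keep], keep.nonzero(as_tuple=False).squeeze(1)
    if 0 < nms_pre < scores.numel():
        scores, topk = scores.topk(nms_pre)
        idxs = idxs[topk]
    return scores, idxs

scores, idxs = filter_and_topk(torch.tensor([0.9, 0.05, 0.6, 0.3]),
                               score_thr=0.1, nms_pre=2)
print(scores, idxs)  # tensor([0.9000, 0.6000]) tensor([0, 2])
```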
- results = filter_scores_and_topk( - scores, cfg.score_thr, nms_pre, - dict(bbox_pred=bbox_pred, priors=priors)) - scores, labels, _, filtered_results = results - - bbox_pred = filtered_results['bbox_pred'] - priors = filtered_results['priors'] - - bboxes = self._bbox_decode(priors, bbox_pred, - self.point_strides[level_idx], - img_shape) - - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_labels.append(labels) - - return self._bbox_post_process( - mlvl_scores, - mlvl_labels, - mlvl_bboxes, - img_meta['scale_factor'], - cfg, - rescale=rescale, - with_nms=with_nms) - - def _bbox_decode(self, points, bbox_pred, stride, max_shape): - bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1) - bboxes = bbox_pred * stride + bbox_pos_center - x1 = bboxes[:, 0].clamp(min=0, max=max_shape[1]) - y1 = bboxes[:, 1].clamp(min=0, max=max_shape[0]) - x2 = bboxes[:, 2].clamp(min=0, max=max_shape[1]) - y2 = bboxes[:, 3].clamp(min=0, max=max_shape[0]) - decoded_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) - return decoded_bboxes diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/retina_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/retina_head.py deleted file mode 100644 index a48720c2ee88c47c9602d6e49b3b4f60a129e380..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/retina_head.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -from mmcv.cnn import ConvModule - -from ..builder import HEADS -from .anchor_head import AnchorHead - - -@HEADS.register_module() -class RetinaHead(AnchorHead): - r"""An anchor-based head used in `RetinaNet - `_. - - The head contains two subnetworks. The first classifies anchor boxes and - the second regresses deltas for the anchors. 
- - Example: - >>> import torch - >>> self = RetinaHead(11, 7) - >>> x = torch.rand(1, 7, 32, 32) - >>> cls_score, bbox_pred = self.forward_single(x) - >>> # Each anchor predicts a score for each class except background - >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors - >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors - >>> assert cls_per_anchor == (self.num_classes) - >>> assert box_per_anchor == 4 - """ - - def __init__(self, - num_classes, - in_channels, - stacked_convs=4, - conv_cfg=None, - norm_cfg=None, - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - init_cfg=dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', - name='retina_cls', - std=0.01, - bias_prob=0.01)), - **kwargs): - self.stacked_convs = stacked_convs - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - super(RetinaHead, self).__init__( - num_classes, - in_channels, - anchor_generator=anchor_generator, - init_cfg=init_cfg, - **kwargs) - - def _init_layers(self): - """Initialize layers of the head.""" - self.relu = nn.ReLU(inplace=True) - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.retina_cls = nn.Conv2d( - self.feat_channels, - self.num_base_priors * self.cls_out_channels, - 3, - padding=1) - self.retina_reg = nn.Conv2d( - self.feat_channels, self.num_base_priors * 4, 3, padding=1) - - def forward_single(self, x): - """Forward feature of a single scale level. - - Args: - x (Tensor): Features of a single scale level. - - Returns: - tuple: - cls_score (Tensor): Cls scores for a single scale level - the channels number is num_anchors * num_classes. - bbox_pred (Tensor): Box energies / deltas for a single scale - level, the channels number is num_anchors * 4. - """ - cls_feat = x - reg_feat = x - for cls_conv in self.cls_convs: - cls_feat = cls_conv(cls_feat) - for reg_conv in self.reg_convs: - reg_feat = reg_conv(reg_feat) - cls_score = self.retina_cls(cls_feat) - bbox_pred = self.retina_reg(reg_feat) - return cls_score, bbox_pred diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/retina_sepbn_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/retina_sepbn_head.py deleted file mode 100644 index b385c61816fd24d091589635ad0211d73b8fdd9f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/retina_sepbn_head.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init - -from ..builder import HEADS -from .anchor_head import AnchorHead - - -@HEADS.register_module() -class RetinaSepBNHead(AnchorHead): - """"RetinaHead with separate BN. - - In RetinaHead, conv/norm layers are shared across different FPN levels, - while in RetinaSepBNHead, conv layers are shared across different FPN - levels, but BN layers are separated. 
- """ - - def __init__(self, - num_classes, - num_ins, - in_channels, - stacked_convs=4, - conv_cfg=None, - norm_cfg=None, - init_cfg=None, - **kwargs): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - self.stacked_convs = stacked_convs - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.num_ins = num_ins - super(RetinaSepBNHead, self).__init__( - num_classes, in_channels, init_cfg=init_cfg, **kwargs) - - def _init_layers(self): - """Initialize layers of the head.""" - self.relu = nn.ReLU(inplace=True) - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i in range(self.num_ins): - cls_convs = nn.ModuleList() - reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.cls_convs.append(cls_convs) - self.reg_convs.append(reg_convs) - for i in range(self.stacked_convs): - for j in range(1, self.num_ins): - self.cls_convs[j][i].conv = self.cls_convs[0][i].conv - self.reg_convs[j][i].conv = self.reg_convs[0][i].conv - self.retina_cls = nn.Conv2d( - self.feat_channels, - self.num_base_priors * self.cls_out_channels, - 3, - padding=1) - self.retina_reg = nn.Conv2d( - self.feat_channels, self.num_base_priors * 4, 3, padding=1) - - def init_weights(self): - """Initialize weights of the head.""" - super(RetinaSepBNHead, self).init_weights() - for m in self.cls_convs[0]: - normal_init(m.conv, std=0.01) - for m in self.reg_convs[0]: - normal_init(m.conv, std=0.01) - bias_cls = bias_init_with_prob(0.01) - normal_init(self.retina_cls, std=0.01, bias=bias_cls) - normal_init(self.retina_reg, std=0.01) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: Usually a tuple of classification scores and bbox prediction - cls_scores (list[Tensor]): Classification scores for all scale - levels, each is a 4D-tensor, the channels number is - num_anchors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale - levels, each is a 4D-tensor, the channels number is - num_anchors * 4. - """ - cls_scores = [] - bbox_preds = [] - for i, x in enumerate(feats): - cls_feat = feats[i] - reg_feat = feats[i] - for cls_conv in self.cls_convs[i]: - cls_feat = cls_conv(cls_feat) - for reg_conv in self.reg_convs[i]: - reg_feat = reg_conv(reg_feat) - cls_score = self.retina_cls(cls_feat) - bbox_pred = self.retina_reg(reg_feat) - cls_scores.append(cls_score) - bbox_preds.append(bbox_pred) - return cls_scores, bbox_preds diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/rpn_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/rpn_head.py deleted file mode 100644 index 54cd39a213e4da120435e972addd40553d880a20..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/rpn_head.py +++ /dev/null @@ -1,265 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import copy - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.ops import batched_nms - -from ..builder import HEADS -from .anchor_head import AnchorHead - - -@HEADS.register_module() -class RPNHead(AnchorHead): - """RPN head. - - Args: - in_channels (int): Number of channels in the input feature map. - init_cfg (dict or list[dict], optional): Initialization config dict. - num_convs (int): Number of convolution layers in the head. Default 1. - """ # noqa: W605 - - def __init__(self, - in_channels, - init_cfg=dict(type='Normal', layer='Conv2d', std=0.01), - num_convs=1, - **kwargs): - self.num_convs = num_convs - super(RPNHead, self).__init__( - 1, in_channels, init_cfg=init_cfg, **kwargs) - - def _init_layers(self): - """Initialize layers of the head.""" - if self.num_convs > 1: - rpn_convs = [] - for i in range(self.num_convs): - if i == 0: - in_channels = self.in_channels - else: - in_channels = self.feat_channels - # use ``inplace=False`` to avoid error: one of the variables - # needed for gradient computation has been modified by an - # inplace operation. - rpn_convs.append( - ConvModule( - in_channels, - self.feat_channels, - 3, - padding=1, - inplace=False)) - self.rpn_conv = nn.Sequential(*rpn_convs) - else: - self.rpn_conv = nn.Conv2d( - self.in_channels, self.feat_channels, 3, padding=1) - self.rpn_cls = nn.Conv2d(self.feat_channels, - self.num_base_priors * self.cls_out_channels, - 1) - self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_base_priors * 4, - 1) - - def forward_single(self, x): - """Forward feature map of a single scale level.""" - x = self.rpn_conv(x) - x = F.relu(x, inplace=False) - rpn_cls_score = self.rpn_cls(x) - rpn_bbox_pred = self.rpn_reg(x) - return rpn_cls_score, rpn_bbox_pred - - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - losses = super(RPNHead, self).loss( - cls_scores, - bbox_preds, - gt_bboxes, - None, - img_metas, - gt_bboxes_ignore=gt_bboxes_ignore) - return dict( - loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox']) - - def _get_bboxes_single(self, - cls_score_list, - bbox_pred_list, - score_factor_list, - mlvl_anchors, - img_meta, - cfg, - rescale=False, - with_nms=True, - **kwargs): - """Transform outputs of a single image into bbox predictions. - - Args: - cls_score_list (list[Tensor]): Box scores from all scale - levels of a single image, each item has shape - (num_anchors * num_classes, H, W). - bbox_pred_list (list[Tensor]): Box energies / deltas from - all scale levels of a single image, each item has - shape (num_anchors * 4, H, W). - score_factor_list (list[Tensor]): Score factor from all scale - levels of a single image. RPN head does not need this value. 
- mlvl_anchors (list[Tensor]): Anchors of all scale level - each item has shape (num_anchors, 4). - img_meta (dict): Image meta info. - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - Tensor: Labeled boxes in shape (n, 5), where the first 4 columns - are bounding box positions (tl_x, tl_y, br_x, br_y) and the - 5-th column is a score between 0 and 1. - """ - cfg = self.test_cfg if cfg is None else cfg - cfg = copy.deepcopy(cfg) - img_shape = img_meta['img_shape'] - - # bboxes from different level should be independent during NMS, - # level_ids are used as labels for batched NMS to separate them - level_ids = [] - mlvl_scores = [] - mlvl_bbox_preds = [] - mlvl_valid_anchors = [] - nms_pre = cfg.get('nms_pre', -1) - for level_idx in range(len(cls_score_list)): - rpn_cls_score = cls_score_list[level_idx] - rpn_bbox_pred = bbox_pred_list[level_idx] - assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] - rpn_cls_score = rpn_cls_score.permute(1, 2, 0) - if self.use_sigmoid_cls: - rpn_cls_score = rpn_cls_score.reshape(-1) - scores = rpn_cls_score.sigmoid() - else: - rpn_cls_score = rpn_cls_score.reshape(-1, 2) - # We set FG labels to [0, num_class-1] and BG label to - # num_class in RPN head since mmdet v2.5, which is unified to - # be consistent with other head since mmdet v2.0. In mmdet v2.0 - # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. - scores = rpn_cls_score.softmax(dim=1)[:, 0] - rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) - - anchors = mlvl_anchors[level_idx] - if 0 < nms_pre < scores.shape[0]: - # sort is faster than topk - # _, topk_inds = scores.topk(cfg.nms_pre) - ranked_scores, rank_inds = scores.sort(descending=True) - topk_inds = rank_inds[:nms_pre] - scores = ranked_scores[:nms_pre] - rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] - anchors = anchors[topk_inds, :] - - mlvl_scores.append(scores) - mlvl_bbox_preds.append(rpn_bbox_pred) - mlvl_valid_anchors.append(anchors) - level_ids.append( - scores.new_full((scores.size(0), ), - level_idx, - dtype=torch.long)) - - return self._bbox_post_process(mlvl_scores, mlvl_bbox_preds, - mlvl_valid_anchors, level_ids, cfg, - img_shape) - - def _bbox_post_process(self, mlvl_scores, mlvl_bboxes, mlvl_valid_anchors, - level_ids, cfg, img_shape, **kwargs): - """bbox post-processing method. - - Do the nms operation for bboxes in same level. - - Args: - mlvl_scores (list[Tensor]): Box scores from all scale - levels of a single image, each item has shape - (num_bboxes, ). - mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale - levels of a single image, each item has shape (num_bboxes, 4). - mlvl_valid_anchors (list[Tensor]): Anchors of all scale level - each item has shape (num_bboxes, 4). - level_ids (list[Tensor]): Indexes from all scale levels of a - single image, each item has shape (num_bboxes, ). - cfg (mmcv.Config): Test / postprocessing configuration, - if None, `self.test_cfg` would be used. - img_shape (tuple(int)): The shape of model's input image. - - Returns: - Tensor: Labeled boxes in shape (n, 5), where the first 4 columns - are bounding box positions (tl_x, tl_y, br_x, br_y) and the - 5-th column is a score between 0 and 1. 
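The level-wise NMS described above can be reproduced with any batched NMS implementation; the sketch below uses torchvision's `batched_nms` as a stand-in for `mmcv.ops.batched_nms` to show that boxes with different `level_ids` never suppress each other:

```
import torch
from torchvision.ops import batched_nms

boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],     # overlaps box 0 (same level)
                      [0., 0., 10., 10.]])    # identical to box 0, different level
scores = torch.tensor([0.9, 0.8, 0.7])
level_ids = torch.tensor([0, 0, 1])

keep = batched_nms(boxes, scores, level_ids, iou_threshold=0.5)
print(keep)  # tensor([0, 2]); box 1 is suppressed, box 2 survives on its own level
```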
- """ - scores = torch.cat(mlvl_scores) - anchors = torch.cat(mlvl_valid_anchors) - rpn_bbox_pred = torch.cat(mlvl_bboxes) - proposals = self.bbox_coder.decode( - anchors, rpn_bbox_pred, max_shape=img_shape) - ids = torch.cat(level_ids) - - if cfg.min_bbox_size >= 0: - w = proposals[:, 2] - proposals[:, 0] - h = proposals[:, 3] - proposals[:, 1] - valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size) - if not valid_mask.all(): - proposals = proposals[valid_mask] - scores = scores[valid_mask] - ids = ids[valid_mask] - - if proposals.numel() > 0: - dets, _ = batched_nms(proposals, scores, ids, cfg.nms) - else: - return proposals.new_zeros(0, 5) - - return dets[:cfg.max_per_img] - - def onnx_export(self, x, img_metas): - """Test without augmentation. - - Args: - x (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - img_metas (list[dict]): Meta info of each image. - Returns: - Tensor: dets of shape [N, num_det, 5]. - """ - cls_scores, bbox_preds = self(x) - - assert len(cls_scores) == len(bbox_preds) - - batch_bboxes, batch_scores = super(RPNHead, self).onnx_export( - cls_scores, bbox_preds, img_metas=img_metas, with_nms=False) - # Use ONNX::NonMaxSuppression in deployment - from mmdet.core.export import add_dummy_nms_for_onnx - cfg = copy.deepcopy(self.test_cfg) - score_threshold = cfg.nms.get('score_thr', 0.0) - nms_pre = cfg.get('deploy_nms_pre', -1) - # Different from the normal forward doing NMS level by level, - # we do NMS across all levels when exporting ONNX. - dets, _ = add_dummy_nms_for_onnx(batch_bboxes, batch_scores, - cfg.max_per_img, - cfg.nms.iou_threshold, - score_threshold, nms_pre, - cfg.max_per_img) - return dets diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/sabl_retina_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/sabl_retina_head.py deleted file mode 100644 index 4fede7109dfcb36ab4e43df3da6900cef6a6a1c8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/sabl_retina_head.py +++ /dev/null @@ -1,630 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import numpy as np -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.runner import force_fp32 - -from mmdet.core import (build_assigner, build_bbox_coder, - build_prior_generator, build_sampler, images_to_levels, - multi_apply, unmap) -from mmdet.core.utils import filter_scores_and_topk -from ..builder import HEADS, build_loss -from .base_dense_head import BaseDenseHead -from .dense_test_mixins import BBoxTestMixin -from .guided_anchor_head import GuidedAnchorHead - - -@HEADS.register_module() -class SABLRetinaHead(BaseDenseHead, BBoxTestMixin): - """Side-Aware Boundary Localization (SABL) for RetinaNet. - - The anchor generation, assigning and sampling in SABLRetinaHead - are the same as GuidedAnchorHead for guided anchoring. - - Please refer to https://arxiv.org/abs/1912.04260 for more details. - - Args: - num_classes (int): Number of classes. - in_channels (int): Number of channels in the input feature map. - stacked_convs (int): Number of Convs for classification \ - and regression branches. Defaults to 4. - feat_channels (int): Number of hidden channels. \ - Defaults to 256. - approx_anchor_generator (dict): Config dict for approx generator. - square_anchor_generator (dict): Config dict for square generator. - conv_cfg (dict): Config dict for ConvModule. Defaults to None. - norm_cfg (dict): Config dict for Norm Layer. Defaults to None. 
- bbox_coder (dict): Config dict for bbox coder. - reg_decoded_bbox (bool): If true, the regression loss would be - applied directly on decoded bounding boxes, converting both - the predicted boxes and regression targets to absolute - coordinates format. Default False. It should be `True` when - using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. - train_cfg (dict): Training config of SABLRetinaHead. - test_cfg (dict): Testing config of SABLRetinaHead. - loss_cls (dict): Config of classification loss. - loss_bbox_cls (dict): Config of classification loss for bbox branch. - loss_bbox_reg (dict): Config of regression loss for bbox branch. - init_cfg (dict or list[dict], optional): Initialization config dict. - """ - - def __init__(self, - num_classes, - in_channels, - stacked_convs=4, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[4], - strides=[8, 16, 32, 64, 128]), - conv_cfg=None, - norm_cfg=None, - bbox_coder=dict( - type='BucketingBBoxCoder', - num_buckets=14, - scale_factor=3.0), - reg_decoded_bbox=False, - train_cfg=None, - test_cfg=None, - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.5), - loss_bbox_reg=dict( - type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5), - init_cfg=dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', - name='retina_cls', - std=0.01, - bias_prob=0.01))): - super(SABLRetinaHead, self).__init__(init_cfg) - self.in_channels = in_channels - self.num_classes = num_classes - self.feat_channels = feat_channels - self.num_buckets = bbox_coder['num_buckets'] - self.side_num = int(np.ceil(self.num_buckets / 2)) - - assert (approx_anchor_generator['octave_base_scale'] == - square_anchor_generator['scales'][0]) - assert (approx_anchor_generator['strides'] == - square_anchor_generator['strides']) - - self.approx_anchor_generator = build_prior_generator( - approx_anchor_generator) - self.square_anchor_generator = build_prior_generator( - square_anchor_generator) - self.approxs_per_octave = ( - self.approx_anchor_generator.num_base_priors[0]) - - # one anchor per location - self.num_base_priors = self.square_anchor_generator.num_base_priors[0] - - self.stacked_convs = stacked_convs - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - self.reg_decoded_bbox = reg_decoded_bbox - - self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) - self.sampling = loss_cls['type'] not in [ - 'FocalLoss', 'GHMC', 'QualityFocalLoss' - ] - if self.use_sigmoid_cls: - self.cls_out_channels = num_classes - else: - self.cls_out_channels = num_classes + 1 - - self.bbox_coder = build_bbox_coder(bbox_coder) - self.loss_cls = build_loss(loss_cls) - self.loss_bbox_cls = build_loss(loss_bbox_cls) - self.loss_bbox_reg = build_loss(loss_bbox_reg) - - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - # use PseudoSampler when sampling is False - if self.sampling and hasattr(self.train_cfg, 'sampler'): - sampler_cfg = self.train_cfg.sampler - else: - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - - self.fp16_enabled = False - self._init_layers() - - @property 
- def num_anchors(self): - warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' - 'please use "num_base_priors" instead') - return self.square_anchor_generator.num_base_priors[0] - - def _init_layers(self): - self.relu = nn.ReLU(inplace=True) - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.retina_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - self.retina_bbox_reg = nn.Conv2d( - self.feat_channels, self.side_num * 4, 3, padding=1) - self.retina_bbox_cls = nn.Conv2d( - self.feat_channels, self.side_num * 4, 3, padding=1) - - def forward_single(self, x): - cls_feat = x - reg_feat = x - for cls_conv in self.cls_convs: - cls_feat = cls_conv(cls_feat) - for reg_conv in self.reg_convs: - reg_feat = reg_conv(reg_feat) - cls_score = self.retina_cls(cls_feat) - bbox_cls_pred = self.retina_bbox_cls(reg_feat) - bbox_reg_pred = self.retina_bbox_reg(reg_feat) - bbox_pred = (bbox_cls_pred, bbox_reg_pred) - return cls_score, bbox_pred - - def forward(self, feats): - return multi_apply(self.forward_single, feats) - - def get_anchors(self, featmap_sizes, img_metas, device='cuda'): - """Get squares according to feature map sizes and guided anchors. - - Args: - featmap_sizes (list[tuple]): Multi-level feature map sizes. - img_metas (list[dict]): Image meta info. - device (torch.device | str): device for returned tensors - - Returns: - tuple: square approxs of each image - """ - num_imgs = len(img_metas) - - # since feature map sizes of all images are the same, we only compute - # squares for one time - multi_level_squares = self.square_anchor_generator.grid_priors( - featmap_sizes, device=device) - squares_list = [multi_level_squares for _ in range(num_imgs)] - - return squares_list - - def get_target(self, - approx_list, - inside_flag_list, - square_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=None, - sampling=True, - unmap_outputs=True): - """Compute bucketing targets. - Args: - approx_list (list[list]): Multi level approxs of each image. - inside_flag_list (list[list]): Multi level inside flags of each - image. - square_list (list[list]): Multi level squares of each image. - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. - img_metas (list[dict]): Meta info of each image. - gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes. - gt_bboxes_list (list[Tensor]): Gt bboxes of each image. - label_channels (int): Channel of label. - sampling (bool): Sample Anchors or not. - unmap_outputs (bool): unmap outputs or not. - - Returns: - tuple: Returns a tuple containing learning targets. - - - labels_list (list[Tensor]): Labels of each level. - - label_weights_list (list[Tensor]): Label weights of each \ - level. - - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \ - each level. - - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \ - each level. - - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \ - each level. - - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \ - each level. 
- - num_total_pos (int): Number of positive samples in all \ - images. - - num_total_neg (int): Number of negative samples in all \ - images. - """ - num_imgs = len(img_metas) - assert len(approx_list) == len(inside_flag_list) == len( - square_list) == num_imgs - # anchor number of multi levels - num_level_squares = [squares.size(0) for squares in square_list[0]] - # concat all level anchors and flags to a single tensor - inside_flag_flat_list = [] - approx_flat_list = [] - square_flat_list = [] - for i in range(num_imgs): - assert len(square_list[i]) == len(inside_flag_list[i]) - inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) - approx_flat_list.append(torch.cat(approx_list[i])) - square_flat_list.append(torch.cat(square_list[i])) - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - (all_labels, all_label_weights, all_bbox_cls_targets, - all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights, - pos_inds_list, neg_inds_list) = multi_apply( - self._get_target_single, - approx_flat_list, - inside_flag_flat_list, - square_flat_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - sampling=sampling, - unmap_outputs=unmap_outputs) - # no valid anchors - if any([labels is None for labels in all_labels]): - return None - # sampled anchors of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - # split targets to a list w.r.t. multiple levels - labels_list = images_to_levels(all_labels, num_level_squares) - label_weights_list = images_to_levels(all_label_weights, - num_level_squares) - bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets, - num_level_squares) - bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights, - num_level_squares) - bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets, - num_level_squares) - bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights, - num_level_squares) - return (labels_list, label_weights_list, bbox_cls_targets_list, - bbox_cls_weights_list, bbox_reg_targets_list, - bbox_reg_weights_list, num_total_pos, num_total_neg) - - def _get_target_single(self, - flat_approxs, - inside_flags, - flat_squares, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=None, - sampling=True, - unmap_outputs=True): - """Compute regression and classification targets for anchors in a - single image. - - Args: - flat_approxs (Tensor): flat approxs of a single image, - shape (n, 4) - inside_flags (Tensor): inside flags of a single image, - shape (n, ). - flat_squares (Tensor): flat squares of a single image, - shape (approxs_per_octave * n, 4) - gt_bboxes (Tensor): Ground truth bboxes of a single image, \ - shape (num_gts, 4). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - img_meta (dict): Meta info of the image. - label_channels (int): Channel of label. - sampling (bool): Sample Anchors or not. - unmap_outputs (bool): unmap outputs or not. 
- - Returns: - tuple: - - - labels_list (Tensor): Labels in a single image - - label_weights (Tensor): Label weights in a single image - - bbox_cls_targets (Tensor): BBox cls targets in a single image - - bbox_cls_weights (Tensor): BBox cls weights in a single image - - bbox_reg_targets (Tensor): BBox reg targets in a single image - - bbox_reg_weights (Tensor): BBox reg weights in a single image - - num_total_pos (int): Number of positive samples \ - in a single image - - num_total_neg (int): Number of negative samples \ - in a single image - """ - if not inside_flags.any(): - return (None, ) * 8 - # assign gt and sample anchors - expand_inside_flags = inside_flags[:, None].expand( - -1, self.approxs_per_octave).reshape(-1) - approxs = flat_approxs[expand_inside_flags, :] - squares = flat_squares[inside_flags, :] - - assign_result = self.assigner.assign(approxs, squares, - self.approxs_per_octave, - gt_bboxes, gt_bboxes_ignore) - sampling_result = self.sampler.sample(assign_result, squares, - gt_bboxes) - - num_valid_squares = squares.shape[0] - bbox_cls_targets = squares.new_zeros( - (num_valid_squares, self.side_num * 4)) - bbox_cls_weights = squares.new_zeros( - (num_valid_squares, self.side_num * 4)) - bbox_reg_targets = squares.new_zeros( - (num_valid_squares, self.side_num * 4)) - bbox_reg_weights = squares.new_zeros( - (num_valid_squares, self.side_num * 4)) - labels = squares.new_full((num_valid_squares, ), - self.num_classes, - dtype=torch.long) - label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets, - pos_bbox_cls_weights) = self.bbox_coder.encode( - sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) - - bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets - bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets - bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights - bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights - if gt_labels is None: - # Only rpn gives gt_labels as None - # Foreground is the first class - labels[pos_inds] = 0 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if self.train_cfg.pos_weight <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg.pos_weight - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_squares.size(0) - labels = unmap( - labels, num_total_anchors, inside_flags, fill=self.num_classes) - label_weights = unmap(label_weights, num_total_anchors, - inside_flags) - bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors, - inside_flags) - bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors, - inside_flags) - bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors, - inside_flags) - bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors, - inside_flags) - return (labels, label_weights, bbox_cls_targets, bbox_cls_weights, - bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds) - - def loss_single(self, cls_score, bbox_pred, labels, label_weights, - bbox_cls_targets, bbox_cls_weights, bbox_reg_targets, - bbox_reg_weights, num_total_samples): - # classification loss - labels = labels.reshape(-1) - label_weights = label_weights.reshape(-1) - cls_score = cls_score.permute(0, 2, 3, - 1).reshape(-1, self.cls_out_channels) - loss_cls = self.loss_cls( - cls_score, labels, 
label_weights, avg_factor=num_total_samples) - # regression loss - bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4) - bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4) - bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4) - bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4) - (bbox_cls_pred, bbox_reg_pred) = bbox_pred - bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape( - -1, self.side_num * 4) - bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape( - -1, self.side_num * 4) - loss_bbox_cls = self.loss_bbox_cls( - bbox_cls_pred, - bbox_cls_targets.long(), - bbox_cls_weights, - avg_factor=num_total_samples * 4 * self.side_num) - loss_bbox_reg = self.loss_bbox_reg( - bbox_reg_pred, - bbox_reg_targets, - bbox_reg_weights, - avg_factor=num_total_samples * 4 * self.bbox_coder.offset_topk) - return loss_cls, loss_bbox_cls, loss_bbox_reg - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.approx_anchor_generator.num_levels - - device = cls_scores[0].device - - # get sampled approxes - approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs( - self, featmap_sizes, img_metas, device=device) - - square_list = self.get_anchors(featmap_sizes, img_metas, device=device) - - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - - cls_reg_targets = self.get_target( - approxs_list, - inside_flag_list, - square_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels, - sampling=self.sampling) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_cls_targets_list, - bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - num_total_samples = ( - num_total_pos + num_total_neg if self.sampling else num_total_pos) - losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply( - self.loss_single, - cls_scores, - bbox_preds, - labels_list, - label_weights_list, - bbox_cls_targets_list, - bbox_cls_weights_list, - bbox_reg_targets_list, - bbox_reg_weights_list, - num_total_samples=num_total_samples) - return dict( - loss_cls=losses_cls, - loss_bbox_cls=losses_bbox_cls, - loss_bbox_reg=losses_bbox_reg) - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def get_bboxes(self, - cls_scores, - bbox_preds, - img_metas, - cfg=None, - rescale=False): - assert len(cls_scores) == len(bbox_preds) - num_levels = len(cls_scores) - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - - device = cls_scores[0].device - mlvl_anchors = self.get_anchors( - featmap_sizes, img_metas, device=device) - result_list = [] - for img_id in range(len(img_metas)): - cls_score_list = [ - cls_scores[i][img_id].detach() for i in range(num_levels) - ] - bbox_cls_pred_list = [ - bbox_preds[i][0][img_id].detach() for i in range(num_levels) - ] - bbox_reg_pred_list = [ - bbox_preds[i][1][img_id].detach() for i in range(num_levels) - ] - img_shape = img_metas[img_id]['img_shape'] - scale_factor = img_metas[img_id]['scale_factor'] - proposals = self._get_bboxes_single( - cls_score_list, bbox_cls_pred_list, bbox_reg_pred_list, - mlvl_anchors[img_id], img_shape, scale_factor, cfg, rescale) - result_list.append(proposals) - return result_list - - def 
_get_bboxes_single(self, - cls_scores, - bbox_cls_preds, - bbox_reg_preds, - mlvl_anchors, - img_shape, - scale_factor, - cfg, - rescale=False): - cfg = self.test_cfg if cfg is None else cfg - nms_pre = cfg.get('nms_pre', -1) - - mlvl_bboxes = [] - mlvl_scores = [] - mlvl_confids = [] - mlvl_labels = [] - assert len(cls_scores) == len(bbox_cls_preds) == len( - bbox_reg_preds) == len(mlvl_anchors) - for cls_score, bbox_cls_pred, bbox_reg_pred, anchors in zip( - cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors): - assert cls_score.size()[-2:] == bbox_cls_pred.size( - )[-2:] == bbox_reg_pred.size()[-2::] - cls_score = cls_score.permute(1, 2, - 0).reshape(-1, self.cls_out_channels) - if self.use_sigmoid_cls: - scores = cls_score.sigmoid() - else: - scores = cls_score.softmax(-1)[:, :-1] - bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape( - -1, self.side_num * 4) - bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape( - -1, self.side_num * 4) - - # After https://github.com/open-mmlab/mmdetection/pull/6268/, - # this operation keeps fewer bboxes under the same `nms_pre`. - # There is no difference in performance for most models. If you - # find a slight drop in performance, you can set a larger - # `nms_pre` than before. - results = filter_scores_and_topk( - scores, cfg.score_thr, nms_pre, - dict( - anchors=anchors, - bbox_cls_pred=bbox_cls_pred, - bbox_reg_pred=bbox_reg_pred)) - scores, labels, _, filtered_results = results - - anchors = filtered_results['anchors'] - bbox_cls_pred = filtered_results['bbox_cls_pred'] - bbox_reg_pred = filtered_results['bbox_reg_pred'] - - bbox_preds = [ - bbox_cls_pred.contiguous(), - bbox_reg_pred.contiguous() - ] - bboxes, confids = self.bbox_coder.decode( - anchors.contiguous(), bbox_preds, max_shape=img_shape) - - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_confids.append(confids) - mlvl_labels.append(labels) - return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, - scale_factor, cfg, rescale, True, - mlvl_confids) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/solo_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/solo_head.py deleted file mode 100644 index e89aacb420af4f5df11183e656e04c87f3dc8fe4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/solo_head.py +++ /dev/null @@ -1,1197 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule - -from mmdet.core import InstanceData, mask_matrix_nms, multi_apply -from mmdet.core.utils import center_of_mass, generate_coordinate -from mmdet.models.builder import HEADS, build_loss -from mmdet.utils.misc import floordiv -from .base_mask_head import BaseMaskHead - - -@HEADS.register_module() -class SOLOHead(BaseMaskHead): - """SOLO mask head used in `SOLO: Segmenting Objects by Locations. - - `_ - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - feat_channels (int): Number of hidden channels. Used in child classes. - Default: 256. - stacked_convs (int): Number of stacking convs of the head. - Default: 4. - strides (tuple): Downsample factor of each feature map. - scale_ranges (tuple[tuple[int, int]]): Area range of multiple - level masks, in the format [(min1, max1), (min2, max2), ...]. 
- A range of (16, 64) means the area range between (16, 64). - pos_scale (float): Constant scale factor to control the center region. - num_grids (list[int]): Divided image into a uniform grids, each - feature map has a different grid value. The number of output - channels is grid ** 2. Default: [40, 36, 24, 16, 12]. - cls_down_index (int): The index of downsample operation in - classification branch. Default: 0. - loss_mask (dict): Config of mask loss. - loss_cls (dict): Config of classification loss. - norm_cfg (dict): dictionary to construct and config norm layer. - Default: norm_cfg=dict(type='GN', num_groups=32, - requires_grad=True). - train_cfg (dict): Training config of head. - test_cfg (dict): Testing config of head. - init_cfg (dict or list[dict], optional): Initialization config dict. - """ - - def __init__( - self, - num_classes, - in_channels, - feat_channels=256, - stacked_convs=4, - strides=(4, 8, 16, 32, 64), - scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)), - pos_scale=0.2, - num_grids=[40, 36, 24, 16, 12], - cls_down_index=0, - loss_mask=None, - loss_cls=None, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), - train_cfg=None, - test_cfg=None, - init_cfg=[ - dict(type='Normal', layer='Conv2d', std=0.01), - dict( - type='Normal', - std=0.01, - bias_prob=0.01, - override=dict(name='conv_mask_list')), - dict( - type='Normal', - std=0.01, - bias_prob=0.01, - override=dict(name='conv_cls')) - ], - ): - super(SOLOHead, self).__init__(init_cfg) - self.num_classes = num_classes - self.cls_out_channels = self.num_classes - self.in_channels = in_channels - self.feat_channels = feat_channels - self.stacked_convs = stacked_convs - self.strides = strides - self.num_grids = num_grids - # number of FPN feats - self.num_levels = len(strides) - assert self.num_levels == len(scale_ranges) == len(num_grids) - self.scale_ranges = scale_ranges - self.pos_scale = pos_scale - - self.cls_down_index = cls_down_index - self.loss_cls = build_loss(loss_cls) - self.loss_mask = build_loss(loss_mask) - self.norm_cfg = norm_cfg - self.init_cfg = init_cfg - self.train_cfg = train_cfg - self.test_cfg = test_cfg - self._init_layers() - - def _init_layers(self): - self.mask_convs = nn.ModuleList() - self.cls_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels + 2 if i == 0 else self.feat_channels - self.mask_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - norm_cfg=self.norm_cfg)) - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - norm_cfg=self.norm_cfg)) - self.conv_mask_list = nn.ModuleList() - for num_grid in self.num_grids: - self.conv_mask_list.append( - nn.Conv2d(self.feat_channels, num_grid**2, 1)) - - self.conv_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - - def resize_feats(self, feats): - """Downsample the first feat and upsample last feat in feats.""" - out = [] - for i in range(len(feats)): - if i == 0: - out.append( - F.interpolate( - feats[0], - size=feats[i + 1].shape[-2:], - mode='bilinear', - align_corners=False)) - elif i == len(feats) - 1: - out.append( - F.interpolate( - feats[i], - size=feats[i - 1].shape[-2:], - mode='bilinear', - align_corners=False)) - else: - out.append(feats[i]) - return out - - def forward(self, feats): - assert len(feats) == self.num_levels - feats = self.resize_feats(feats) - mlvl_mask_preds = [] - mlvl_cls_preds = [] 
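        # The per-level loop that follows uses a CoordConv-style trick: two extra
        # channels of normalized x/y coordinates are concatenated to the features
        # before the mask convs, which is why the first mask conv takes
        # `in_channels + 2` input channels. A minimal sketch of what
        # `generate_coordinate` is assumed to produce (`n`, `h`, `w`, `dev` are
        # placeholder names here, not from the original file):
        #
        #     x = torch.linspace(-1, 1, w, device=dev).view(1, 1, 1, w).expand(n, 1, h, w)
        #     y = torch.linspace(-1, 1, h, device=dev).view(1, 1, h, 1).expand(n, 1, h, w)
        #     coord_feat = torch.cat([x, y], dim=1)   # shape (n, 2, h, w)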
- for i in range(self.num_levels): - x = feats[i] - mask_feat = x - cls_feat = x - # generate and concat the coordinate - coord_feat = generate_coordinate(mask_feat.size(), - mask_feat.device) - mask_feat = torch.cat([mask_feat, coord_feat], 1) - - for mask_layer in (self.mask_convs): - mask_feat = mask_layer(mask_feat) - - mask_feat = F.interpolate( - mask_feat, scale_factor=2, mode='bilinear') - mask_pred = self.conv_mask_list[i](mask_feat) - - # cls branch - for j, cls_layer in enumerate(self.cls_convs): - if j == self.cls_down_index: - num_grid = self.num_grids[i] - cls_feat = F.interpolate( - cls_feat, size=num_grid, mode='bilinear') - cls_feat = cls_layer(cls_feat) - - cls_pred = self.conv_cls(cls_feat) - - if not self.training: - feat_wh = feats[0].size()[-2:] - upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) - mask_pred = F.interpolate( - mask_pred.sigmoid(), size=upsampled_size, mode='bilinear') - cls_pred = cls_pred.sigmoid() - # get local maximum - local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) - keep_mask = local_max[:, :, :-1, :-1] == cls_pred - cls_pred = cls_pred * keep_mask - - mlvl_mask_preds.append(mask_pred) - mlvl_cls_preds.append(cls_pred) - return mlvl_mask_preds, mlvl_cls_preds - - def loss(self, - mlvl_mask_preds, - mlvl_cls_preds, - gt_labels, - gt_masks, - img_metas, - gt_bboxes=None, - **kwargs): - """Calculate the loss of total batch. - - Args: - mlvl_mask_preds (list[Tensor]): Multi-level mask prediction. - Each element in the list has shape - (batch_size, num_grids**2 ,h ,w). - mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element - in the list has shape - (batch_size, num_classes, num_grids ,num_grids). - gt_labels (list[Tensor]): Labels of multiple images. - gt_masks (list[Tensor]): Ground truth masks of multiple images. - Each has shape (num_instances, h, w). - img_metas (list[dict]): Meta information of multiple images. - gt_bboxes (list[Tensor]): Ground truth bboxes of multiple - images. Default: None. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
- """ - num_levels = self.num_levels - num_imgs = len(gt_labels) - - featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds] - - # `BoolTensor` in `pos_masks` represent - # whether the corresponding point is - # positive - pos_mask_targets, labels, pos_masks = multi_apply( - self._get_targets_single, - gt_bboxes, - gt_labels, - gt_masks, - featmap_sizes=featmap_sizes) - - # change from the outside list meaning multi images - # to the outside list meaning multi levels - mlvl_pos_mask_targets = [[] for _ in range(num_levels)] - mlvl_pos_mask_preds = [[] for _ in range(num_levels)] - mlvl_pos_masks = [[] for _ in range(num_levels)] - mlvl_labels = [[] for _ in range(num_levels)] - for img_id in range(num_imgs): - assert num_levels == len(pos_mask_targets[img_id]) - for lvl in range(num_levels): - mlvl_pos_mask_targets[lvl].append( - pos_mask_targets[img_id][lvl]) - mlvl_pos_mask_preds[lvl].append( - mlvl_mask_preds[lvl][img_id, pos_masks[img_id][lvl], ...]) - mlvl_pos_masks[lvl].append(pos_masks[img_id][lvl].flatten()) - mlvl_labels[lvl].append(labels[img_id][lvl].flatten()) - - # cat multiple image - temp_mlvl_cls_preds = [] - for lvl in range(num_levels): - mlvl_pos_mask_targets[lvl] = torch.cat( - mlvl_pos_mask_targets[lvl], dim=0) - mlvl_pos_mask_preds[lvl] = torch.cat( - mlvl_pos_mask_preds[lvl], dim=0) - mlvl_pos_masks[lvl] = torch.cat(mlvl_pos_masks[lvl], dim=0) - mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0) - temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute( - 0, 2, 3, 1).reshape(-1, self.cls_out_channels)) - - num_pos = sum(item.sum() for item in mlvl_pos_masks) - # dice loss - loss_mask = [] - for pred, target in zip(mlvl_pos_mask_preds, mlvl_pos_mask_targets): - if pred.size()[0] == 0: - loss_mask.append(pred.sum().unsqueeze(0)) - continue - loss_mask.append( - self.loss_mask(pred, target, reduction_override='none')) - if num_pos > 0: - loss_mask = torch.cat(loss_mask).sum() / num_pos - else: - loss_mask = torch.cat(loss_mask).mean() - - flatten_labels = torch.cat(mlvl_labels) - flatten_cls_preds = torch.cat(temp_mlvl_cls_preds) - loss_cls = self.loss_cls( - flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) - return dict(loss_mask=loss_mask, loss_cls=loss_cls) - - def _get_targets_single(self, - gt_bboxes, - gt_labels, - gt_masks, - featmap_sizes=None): - """Compute targets for predictions of single image. - - Args: - gt_bboxes (Tensor): Ground truth bbox of each instance, - shape (num_gts, 4). - gt_labels (Tensor): Ground truth label of each instance, - shape (num_gts,). - gt_masks (Tensor): Ground truth mask of each instance, - shape (num_gts, h, w). - featmap_sizes (list[:obj:`torch.size`]): Size of each - feature map from feature pyramid, each element - means (feat_h, feat_w). Default: None. - - Returns: - Tuple: Usually returns a tuple containing targets for predictions. - - - mlvl_pos_mask_targets (list[Tensor]): Each element represent - the binary mask targets for positive points in this - level, has shape (num_pos, out_h, out_w). - - mlvl_labels (list[Tensor]): Each element is - classification labels for all - points in this level, has shape - (num_grid, num_grid). - - mlvl_pos_masks (list[Tensor]): Each element is - a `BoolTensor` to represent whether the - corresponding point in single level - is positive, has shape (num_grid **2). 
- """ - device = gt_labels.device - gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * - (gt_bboxes[:, 3] - gt_bboxes[:, 1])) - - mlvl_pos_mask_targets = [] - mlvl_labels = [] - mlvl_pos_masks = [] - for (lower_bound, upper_bound), stride, featmap_size, num_grid \ - in zip(self.scale_ranges, self.strides, - featmap_sizes, self.num_grids): - - mask_target = torch.zeros( - [num_grid**2, featmap_size[0], featmap_size[1]], - dtype=torch.uint8, - device=device) - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - labels = torch.zeros([num_grid, num_grid], - dtype=torch.int64, - device=device) + self.num_classes - pos_mask = torch.zeros([num_grid**2], - dtype=torch.bool, - device=device) - - gt_inds = ((gt_areas >= lower_bound) & - (gt_areas <= upper_bound)).nonzero().flatten() - if len(gt_inds) == 0: - mlvl_pos_mask_targets.append( - mask_target.new_zeros(0, featmap_size[0], featmap_size[1])) - mlvl_labels.append(labels) - mlvl_pos_masks.append(pos_mask) - continue - hit_gt_bboxes = gt_bboxes[gt_inds] - hit_gt_labels = gt_labels[gt_inds] - hit_gt_masks = gt_masks[gt_inds, ...] - - pos_w_ranges = 0.5 * (hit_gt_bboxes[:, 2] - - hit_gt_bboxes[:, 0]) * self.pos_scale - pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] - - hit_gt_bboxes[:, 1]) * self.pos_scale - - # Make sure hit_gt_masks has a value - valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0 - output_stride = stride / 2 - - for gt_mask, gt_label, pos_h_range, pos_w_range, \ - valid_mask_flag in \ - zip(hit_gt_masks, hit_gt_labels, pos_h_ranges, - pos_w_ranges, valid_mask_flags): - if not valid_mask_flag: - continue - upsampled_size = (featmap_sizes[0][0] * 4, - featmap_sizes[0][1] * 4) - center_h, center_w = center_of_mass(gt_mask) - - coord_w = int( - floordiv((center_w / upsampled_size[1]), (1. / num_grid), - rounding_mode='trunc')) - coord_h = int( - floordiv((center_h / upsampled_size[0]), (1. / num_grid), - rounding_mode='trunc')) - - # left, top, right, down - top_box = max( - 0, - int( - floordiv( - (center_h - pos_h_range) / upsampled_size[0], - (1. / num_grid), - rounding_mode='trunc'))) - down_box = min( - num_grid - 1, - int( - floordiv( - (center_h + pos_h_range) / upsampled_size[0], - (1. / num_grid), - rounding_mode='trunc'))) - left_box = max( - 0, - int( - floordiv( - (center_w - pos_w_range) / upsampled_size[1], - (1. / num_grid), - rounding_mode='trunc'))) - right_box = min( - num_grid - 1, - int( - floordiv( - (center_w + pos_w_range) / upsampled_size[1], - (1. / num_grid), - rounding_mode='trunc'))) - - top = max(top_box, coord_h - 1) - down = min(down_box, coord_h + 1) - left = max(coord_w - 1, left_box) - right = min(right_box, coord_w + 1) - - labels[top:(down + 1), left:(right + 1)] = gt_label - # ins - gt_mask = np.uint8(gt_mask.cpu().numpy()) - # Follow the original implementation, F.interpolate is - # different from cv2 and opencv - gt_mask = mmcv.imrescale(gt_mask, scale=1. / output_stride) - gt_mask = torch.from_numpy(gt_mask).to(device=device) - - for i in range(top, down + 1): - for j in range(left, right + 1): - index = int(i * num_grid + j) - mask_target[index, :gt_mask.shape[0], :gt_mask. - shape[1]] = gt_mask - pos_mask[index] = True - mlvl_pos_mask_targets.append(mask_target[pos_mask]) - mlvl_labels.append(labels) - mlvl_pos_masks.append(pos_mask) - return mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks - - def get_results(self, mlvl_mask_preds, mlvl_cls_scores, img_metas, - **kwargs): - """Get multi-image mask results. 
- - Args: - mlvl_mask_preds (list[Tensor]): Multi-level mask prediction. - Each element in the list has shape - (batch_size, num_grids**2 ,h ,w). - mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element - in the list has shape - (batch_size, num_classes, num_grids ,num_grids). - img_metas (list[dict]): Meta information of all images. - - Returns: - list[:obj:`InstanceData`]: Processed results of multiple - images.Each :obj:`InstanceData` usually contains - following keys. - - - scores (Tensor): Classification scores, has shape - (num_instance,). - - labels (Tensor): Has shape (num_instances,). - - masks (Tensor): Processed mask results, has - shape (num_instances, h, w). - """ - mlvl_cls_scores = [ - item.permute(0, 2, 3, 1) for item in mlvl_cls_scores - ] - assert len(mlvl_mask_preds) == len(mlvl_cls_scores) - num_levels = len(mlvl_cls_scores) - - results_list = [] - for img_id in range(len(img_metas)): - cls_pred_list = [ - mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels) - for lvl in range(num_levels) - ] - mask_pred_list = [ - mlvl_mask_preds[lvl][img_id] for lvl in range(num_levels) - ] - - cls_pred_list = torch.cat(cls_pred_list, dim=0) - mask_pred_list = torch.cat(mask_pred_list, dim=0) - - results = self._get_results_single( - cls_pred_list, mask_pred_list, img_meta=img_metas[img_id]) - results_list.append(results) - - return results_list - - def _get_results_single(self, cls_scores, mask_preds, img_meta, cfg=None): - """Get processed mask related results of single image. - - Args: - cls_scores (Tensor): Classification score of all points - in single image, has shape (num_points, num_classes). - mask_preds (Tensor): Mask prediction of all points in - single image, has shape (num_points, feat_h, feat_w). - img_meta (dict): Meta information of corresponding image. - cfg (dict, optional): Config used in test phase. - Default: None. - - Returns: - :obj:`InstanceData`: Processed results of single image. - it usually contains following keys. - - - scores (Tensor): Classification scores, has shape - (num_instance,). - - labels (Tensor): Has shape (num_instances,). - - masks (Tensor): Processed mask results, has - shape (num_instances, h, w). 
- """ - - def empty_results(results, cls_scores): - """Generate a empty results.""" - results.scores = cls_scores.new_ones(0) - results.masks = cls_scores.new_zeros(0, *results.ori_shape[:2]) - results.labels = cls_scores.new_ones(0) - return results - - cfg = self.test_cfg if cfg is None else cfg - assert len(cls_scores) == len(mask_preds) - results = InstanceData(img_meta) - - featmap_size = mask_preds.size()[-2:] - - img_shape = results.img_shape - ori_shape = results.ori_shape - - h, w, _ = img_shape - upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4) - - score_mask = (cls_scores > cfg.score_thr) - cls_scores = cls_scores[score_mask] - if len(cls_scores) == 0: - return empty_results(results, cls_scores) - - inds = score_mask.nonzero() - cls_labels = inds[:, 1] - - # Filter the mask mask with an area is smaller than - # stride of corresponding feature level - lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0) - strides = cls_scores.new_ones(lvl_interval[-1]) - strides[:lvl_interval[0]] *= self.strides[0] - for lvl in range(1, self.num_levels): - strides[lvl_interval[lvl - - 1]:lvl_interval[lvl]] *= self.strides[lvl] - strides = strides[inds[:, 0]] - mask_preds = mask_preds[inds[:, 0]] - - masks = mask_preds > cfg.mask_thr - sum_masks = masks.sum((1, 2)).float() - keep = sum_masks > strides - if keep.sum() == 0: - return empty_results(results, cls_scores) - masks = masks[keep] - mask_preds = mask_preds[keep] - sum_masks = sum_masks[keep] - cls_scores = cls_scores[keep] - cls_labels = cls_labels[keep] - - # maskness. - mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks - cls_scores *= mask_scores - - scores, labels, _, keep_inds = mask_matrix_nms( - masks, - cls_labels, - cls_scores, - mask_area=sum_masks, - nms_pre=cfg.nms_pre, - max_num=cfg.max_per_img, - kernel=cfg.kernel, - sigma=cfg.sigma, - filter_thr=cfg.filter_thr) - mask_preds = mask_preds[keep_inds] - mask_preds = F.interpolate( - mask_preds.unsqueeze(0), size=upsampled_size, - mode='bilinear')[:, :, :h, :w] - mask_preds = F.interpolate( - mask_preds, size=ori_shape[:2], mode='bilinear').squeeze(0) - masks = mask_preds > cfg.mask_thr - - results.masks = masks - results.labels = labels - results.scores = scores - - return results - - -@HEADS.register_module() -class DecoupledSOLOHead(SOLOHead): - """Decoupled SOLO mask head used in `SOLO: Segmenting Objects by Locations. - - `_ - - Args: - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - *args, - init_cfg=[ - dict(type='Normal', layer='Conv2d', std=0.01), - dict( - type='Normal', - std=0.01, - bias_prob=0.01, - override=dict(name='conv_mask_list_x')), - dict( - type='Normal', - std=0.01, - bias_prob=0.01, - override=dict(name='conv_mask_list_y')), - dict( - type='Normal', - std=0.01, - bias_prob=0.01, - override=dict(name='conv_cls')) - ], - **kwargs): - super(DecoupledSOLOHead, self).__init__( - *args, init_cfg=init_cfg, **kwargs) - - def _init_layers(self): - self.mask_convs_x = nn.ModuleList() - self.mask_convs_y = nn.ModuleList() - self.cls_convs = nn.ModuleList() - - for i in range(self.stacked_convs): - chn = self.in_channels + 1 if i == 0 else self.feat_channels - self.mask_convs_x.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - norm_cfg=self.norm_cfg)) - self.mask_convs_y.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - norm_cfg=self.norm_cfg)) - - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - norm_cfg=self.norm_cfg)) - - self.conv_mask_list_x = nn.ModuleList() - self.conv_mask_list_y = nn.ModuleList() - for num_grid in self.num_grids: - self.conv_mask_list_x.append( - nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) - self.conv_mask_list_y.append( - nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) - self.conv_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - - def forward(self, feats): - assert len(feats) == self.num_levels - feats = self.resize_feats(feats) - mask_preds_x = [] - mask_preds_y = [] - cls_preds = [] - for i in range(self.num_levels): - x = feats[i] - mask_feat = x - cls_feat = x - # generate and concat the coordinate - coord_feat = generate_coordinate(mask_feat.size(), - mask_feat.device) - mask_feat_x = torch.cat([mask_feat, coord_feat[:, 0:1, ...]], 1) - mask_feat_y = torch.cat([mask_feat, coord_feat[:, 1:2, ...]], 1) - - for mask_layer_x, mask_layer_y in \ - zip(self.mask_convs_x, self.mask_convs_y): - mask_feat_x = mask_layer_x(mask_feat_x) - mask_feat_y = mask_layer_y(mask_feat_y) - - mask_feat_x = F.interpolate( - mask_feat_x, scale_factor=2, mode='bilinear') - mask_feat_y = F.interpolate( - mask_feat_y, scale_factor=2, mode='bilinear') - - mask_pred_x = self.conv_mask_list_x[i](mask_feat_x) - mask_pred_y = self.conv_mask_list_y[i](mask_feat_y) - - # cls branch - for j, cls_layer in enumerate(self.cls_convs): - if j == self.cls_down_index: - num_grid = self.num_grids[i] - cls_feat = F.interpolate( - cls_feat, size=num_grid, mode='bilinear') - cls_feat = cls_layer(cls_feat) - - cls_pred = self.conv_cls(cls_feat) - - if not self.training: - feat_wh = feats[0].size()[-2:] - upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) - mask_pred_x = F.interpolate( - mask_pred_x.sigmoid(), - size=upsampled_size, - mode='bilinear') - mask_pred_y = F.interpolate( - mask_pred_y.sigmoid(), - size=upsampled_size, - mode='bilinear') - cls_pred = cls_pred.sigmoid() - # get local maximum - local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) - keep_mask = local_max[:, :, :-1, :-1] == cls_pred - cls_pred = cls_pred * keep_mask - - mask_preds_x.append(mask_pred_x) - mask_preds_y.append(mask_pred_y) - cls_preds.append(cls_pred) - return mask_preds_x, mask_preds_y, cls_preds - - def loss(self, - mlvl_mask_preds_x, - mlvl_mask_preds_y, - mlvl_cls_preds, - gt_labels, - gt_masks, - img_metas, - gt_bboxes=None, - 
**kwargs): - """Calculate the loss of total batch. - - Args: - mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction - from x branch. Each element in the list has shape - (batch_size, num_grids ,h ,w). - mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction - from y branch. Each element in the list has shape - (batch_size, num_grids ,h ,w). - mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element - in the list has shape - (batch_size, num_classes, num_grids ,num_grids). - gt_labels (list[Tensor]): Labels of multiple images. - gt_masks (list[Tensor]): Ground truth masks of multiple images. - Each has shape (num_instances, h, w). - img_metas (list[dict]): Meta information of multiple images. - gt_bboxes (list[Tensor]): Ground truth bboxes of multiple - images. Default: None. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - num_levels = self.num_levels - num_imgs = len(gt_labels) - featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds_x] - - pos_mask_targets, labels, \ - xy_pos_indexes = \ - multi_apply(self._get_targets_single, - gt_bboxes, - gt_labels, - gt_masks, - featmap_sizes=featmap_sizes) - - # change from the outside list meaning multi images - # to the outside list meaning multi levels - mlvl_pos_mask_targets = [[] for _ in range(num_levels)] - mlvl_pos_mask_preds_x = [[] for _ in range(num_levels)] - mlvl_pos_mask_preds_y = [[] for _ in range(num_levels)] - mlvl_labels = [[] for _ in range(num_levels)] - for img_id in range(num_imgs): - - for lvl in range(num_levels): - mlvl_pos_mask_targets[lvl].append( - pos_mask_targets[img_id][lvl]) - mlvl_pos_mask_preds_x[lvl].append( - mlvl_mask_preds_x[lvl][img_id, - xy_pos_indexes[img_id][lvl][:, 1]]) - mlvl_pos_mask_preds_y[lvl].append( - mlvl_mask_preds_y[lvl][img_id, - xy_pos_indexes[img_id][lvl][:, 0]]) - mlvl_labels[lvl].append(labels[img_id][lvl].flatten()) - - # cat multiple image - temp_mlvl_cls_preds = [] - for lvl in range(num_levels): - mlvl_pos_mask_targets[lvl] = torch.cat( - mlvl_pos_mask_targets[lvl], dim=0) - mlvl_pos_mask_preds_x[lvl] = torch.cat( - mlvl_pos_mask_preds_x[lvl], dim=0) - mlvl_pos_mask_preds_y[lvl] = torch.cat( - mlvl_pos_mask_preds_y[lvl], dim=0) - mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0) - temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute( - 0, 2, 3, 1).reshape(-1, self.cls_out_channels)) - - num_pos = 0. - # dice loss - loss_mask = [] - for pred_x, pred_y, target in \ - zip(mlvl_pos_mask_preds_x, - mlvl_pos_mask_preds_y, mlvl_pos_mask_targets): - num_masks = pred_x.size(0) - if num_masks == 0: - # make sure can get grad - loss_mask.append((pred_x.sum() + pred_y.sum()).unsqueeze(0)) - continue - num_pos += num_masks - pred_mask = pred_y.sigmoid() * pred_x.sigmoid() - loss_mask.append( - self.loss_mask(pred_mask, target, reduction_override='none')) - if num_pos > 0: - loss_mask = torch.cat(loss_mask).sum() / num_pos - else: - loss_mask = torch.cat(loss_mask).mean() - - # cate - flatten_labels = torch.cat(mlvl_labels) - flatten_cls_preds = torch.cat(temp_mlvl_cls_preds) - - loss_cls = self.loss_cls( - flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) - return dict(loss_mask=loss_mask, loss_cls=loss_cls) - - def _get_targets_single(self, - gt_bboxes, - gt_labels, - gt_masks, - featmap_sizes=None): - """Compute targets for predictions of single image. - - Args: - gt_bboxes (Tensor): Ground truth bbox of each instance, - shape (num_gts, 4). - gt_labels (Tensor): Ground truth label of each instance, - shape (num_gts,). 
- gt_masks (Tensor): Ground truth mask of each instance, - shape (num_gts, h, w). - featmap_sizes (list[:obj:`torch.size`]): Size of each - feature map from feature pyramid, each element - means (feat_h, feat_w). Default: None. - - Returns: - Tuple: Usually returns a tuple containing targets for predictions. - - - mlvl_pos_mask_targets (list[Tensor]): Each element represent - the binary mask targets for positive points in this - level, has shape (num_pos, out_h, out_w). - - mlvl_labels (list[Tensor]): Each element is - classification labels for all - points in this level, has shape - (num_grid, num_grid). - - mlvl_xy_pos_indexes (list[Tensor]): Each element - in the list contains the index of positive samples in - corresponding level, has shape (num_pos, 2), last - dimension 2 present (index_x, index_y). - """ - mlvl_pos_mask_targets, mlvl_labels, \ - mlvl_pos_masks = \ - super()._get_targets_single(gt_bboxes, gt_labels, gt_masks, - featmap_sizes=featmap_sizes) - - mlvl_xy_pos_indexes = [(item - self.num_classes).nonzero() - for item in mlvl_labels] - - return mlvl_pos_mask_targets, mlvl_labels, mlvl_xy_pos_indexes - - def get_results(self, - mlvl_mask_preds_x, - mlvl_mask_preds_y, - mlvl_cls_scores, - img_metas, - rescale=None, - **kwargs): - """Get multi-image mask results. - - Args: - mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction - from x branch. Each element in the list has shape - (batch_size, num_grids ,h ,w). - mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction - from y branch. Each element in the list has shape - (batch_size, num_grids ,h ,w). - mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element - in the list has shape - (batch_size, num_classes ,num_grids ,num_grids). - img_metas (list[dict]): Meta information of all images. - - Returns: - list[:obj:`InstanceData`]: Processed results of multiple - images.Each :obj:`InstanceData` usually contains - following keys. - - - scores (Tensor): Classification scores, has shape - (num_instance,). - - labels (Tensor): Has shape (num_instances,). - - masks (Tensor): Processed mask results, has - shape (num_instances, h, w). - """ - mlvl_cls_scores = [ - item.permute(0, 2, 3, 1) for item in mlvl_cls_scores - ] - assert len(mlvl_mask_preds_x) == len(mlvl_cls_scores) - num_levels = len(mlvl_cls_scores) - - results_list = [] - for img_id in range(len(img_metas)): - cls_pred_list = [ - mlvl_cls_scores[i][img_id].view( - -1, self.cls_out_channels).detach() - for i in range(num_levels) - ] - mask_pred_list_x = [ - mlvl_mask_preds_x[i][img_id] for i in range(num_levels) - ] - mask_pred_list_y = [ - mlvl_mask_preds_y[i][img_id] for i in range(num_levels) - ] - - cls_pred_list = torch.cat(cls_pred_list, dim=0) - mask_pred_list_x = torch.cat(mask_pred_list_x, dim=0) - mask_pred_list_y = torch.cat(mask_pred_list_y, dim=0) - - results = self._get_results_single( - cls_pred_list, - mask_pred_list_x, - mask_pred_list_y, - img_meta=img_metas[img_id], - cfg=self.test_cfg) - results_list.append(results) - return results_list - - def _get_results_single(self, cls_scores, mask_preds_x, mask_preds_y, - img_meta, cfg): - """Get processed mask related results of single image. - - Args: - cls_scores (Tensor): Classification score of all points - in single image, has shape (num_points, num_classes). - mask_preds_x (Tensor): Mask prediction of x branch of - all points in single image, has shape - (sum_num_grids, feat_h, feat_w). 
- mask_preds_y (Tensor): Mask prediction of y branch of - all points in single image, has shape - (sum_num_grids, feat_h, feat_w). - img_meta (dict): Meta information of corresponding image. - cfg (dict): Config used in test phase. - - Returns: - :obj:`InstanceData`: Processed results of single image. - it usually contains following keys. - - - scores (Tensor): Classification scores, has shape - (num_instance,). - - labels (Tensor): Has shape (num_instances,). - - masks (Tensor): Processed mask results, has - shape (num_instances, h, w). - """ - - def empty_results(results, cls_scores): - """Generate a empty results.""" - results.scores = cls_scores.new_ones(0) - results.masks = cls_scores.new_zeros(0, *results.ori_shape[:2]) - results.labels = cls_scores.new_ones(0) - return results - - cfg = self.test_cfg if cfg is None else cfg - - results = InstanceData(img_meta) - img_shape = results.img_shape - ori_shape = results.ori_shape - h, w, _ = img_shape - featmap_size = mask_preds_x.size()[-2:] - upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4) - - score_mask = (cls_scores > cfg.score_thr) - cls_scores = cls_scores[score_mask] - inds = score_mask.nonzero() - lvl_interval = inds.new_tensor(self.num_grids).pow(2).cumsum(0) - num_all_points = lvl_interval[-1] - lvl_start_index = inds.new_ones(num_all_points) - num_grids = inds.new_ones(num_all_points) - seg_size = inds.new_tensor(self.num_grids).cumsum(0) - mask_lvl_start_index = inds.new_ones(num_all_points) - strides = inds.new_ones(num_all_points) - - lvl_start_index[:lvl_interval[0]] *= 0 - mask_lvl_start_index[:lvl_interval[0]] *= 0 - num_grids[:lvl_interval[0]] *= self.num_grids[0] - strides[:lvl_interval[0]] *= self.strides[0] - - for lvl in range(1, self.num_levels): - lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ - lvl_interval[lvl - 1] - mask_lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ - seg_size[lvl - 1] - num_grids[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ - self.num_grids[lvl] - strides[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \ - self.strides[lvl] - - lvl_start_index = lvl_start_index[inds[:, 0]] - mask_lvl_start_index = mask_lvl_start_index[inds[:, 0]] - num_grids = num_grids[inds[:, 0]] - strides = strides[inds[:, 0]] - - y_lvl_offset = (inds[:, 0] - lvl_start_index) // num_grids - x_lvl_offset = (inds[:, 0] - lvl_start_index) % num_grids - y_inds = mask_lvl_start_index + y_lvl_offset - x_inds = mask_lvl_start_index + x_lvl_offset - - cls_labels = inds[:, 1] - mask_preds = mask_preds_x[x_inds, ...] * mask_preds_y[y_inds, ...] - - masks = mask_preds > cfg.mask_thr - sum_masks = masks.sum((1, 2)).float() - keep = sum_masks > strides - if keep.sum() == 0: - return empty_results(results, cls_scores) - - masks = masks[keep] - mask_preds = mask_preds[keep] - sum_masks = sum_masks[keep] - cls_scores = cls_scores[keep] - cls_labels = cls_labels[keep] - - # maskness. 
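        # "Maskness" rescoring: each candidate's classification score is scaled by
        # the mean predicted probability inside its binarized mask, so cleanly
        # predicted masks rank higher before Matrix NMS. In effect, for candidate i:
        #
        #     maskness_i   = mask_preds_i[masks_i].sum() / masks_i.sum()
        #     cls_scores_i = cls_scores_i * maskness_i
        #
        # which is what the two lines below compute in batched form.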
- mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks - cls_scores *= mask_scores - - scores, labels, _, keep_inds = mask_matrix_nms( - masks, - cls_labels, - cls_scores, - mask_area=sum_masks, - nms_pre=cfg.nms_pre, - max_num=cfg.max_per_img, - kernel=cfg.kernel, - sigma=cfg.sigma, - filter_thr=cfg.filter_thr) - mask_preds = mask_preds[keep_inds] - mask_preds = F.interpolate( - mask_preds.unsqueeze(0), size=upsampled_size, - mode='bilinear')[:, :, :h, :w] - mask_preds = F.interpolate( - mask_preds, size=ori_shape[:2], mode='bilinear').squeeze(0) - masks = mask_preds > cfg.mask_thr - - results.masks = masks - results.labels = labels - results.scores = scores - - return results - - -@HEADS.register_module() -class DecoupledSOLOLightHead(DecoupledSOLOHead): - """Decoupled Light SOLO mask head used in `SOLO: Segmenting Objects by - Locations `_ - - Args: - with_dcn (bool): Whether use dcn in mask_convs and cls_convs, - default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - """ - - def __init__(self, - *args, - dcn_cfg=None, - init_cfg=[ - dict(type='Normal', layer='Conv2d', std=0.01), - dict( - type='Normal', - std=0.01, - bias_prob=0.01, - override=dict(name='conv_mask_list_x')), - dict( - type='Normal', - std=0.01, - bias_prob=0.01, - override=dict(name='conv_mask_list_y')), - dict( - type='Normal', - std=0.01, - bias_prob=0.01, - override=dict(name='conv_cls')) - ], - **kwargs): - assert dcn_cfg is None or isinstance(dcn_cfg, dict) - self.dcn_cfg = dcn_cfg - super(DecoupledSOLOLightHead, self).__init__( - *args, init_cfg=init_cfg, **kwargs) - - def _init_layers(self): - self.mask_convs = nn.ModuleList() - self.cls_convs = nn.ModuleList() - - for i in range(self.stacked_convs): - if self.dcn_cfg is not None\ - and i == self.stacked_convs - 1: - conv_cfg = self.dcn_cfg - else: - conv_cfg = None - - chn = self.in_channels + 2 if i == 0 else self.feat_channels - self.mask_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=self.norm_cfg)) - - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=self.norm_cfg)) - - self.conv_mask_list_x = nn.ModuleList() - self.conv_mask_list_y = nn.ModuleList() - for num_grid in self.num_grids: - self.conv_mask_list_x.append( - nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) - self.conv_mask_list_y.append( - nn.Conv2d(self.feat_channels, num_grid, 3, padding=1)) - self.conv_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - - def forward(self, feats): - assert len(feats) == self.num_levels - feats = self.resize_feats(feats) - mask_preds_x = [] - mask_preds_y = [] - cls_preds = [] - for i in range(self.num_levels): - x = feats[i] - mask_feat = x - cls_feat = x - # generate and concat the coordinate - coord_feat = generate_coordinate(mask_feat.size(), - mask_feat.device) - mask_feat = torch.cat([mask_feat, coord_feat], 1) - - for mask_layer in self.mask_convs: - mask_feat = mask_layer(mask_feat) - - mask_feat = F.interpolate( - mask_feat, scale_factor=2, mode='bilinear') - - mask_pred_x = self.conv_mask_list_x[i](mask_feat) - mask_pred_y = self.conv_mask_list_y[i](mask_feat) - - # cls branch - for j, cls_layer in enumerate(self.cls_convs): - if j == self.cls_down_index: - num_grid = self.num_grids[i] - cls_feat = F.interpolate( - cls_feat, size=num_grid, mode='bilinear') - cls_feat = 
cls_layer(cls_feat) - - cls_pred = self.conv_cls(cls_feat) - - if not self.training: - feat_wh = feats[0].size()[-2:] - upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2) - mask_pred_x = F.interpolate( - mask_pred_x.sigmoid(), - size=upsampled_size, - mode='bilinear') - mask_pred_y = F.interpolate( - mask_pred_y.sigmoid(), - size=upsampled_size, - mode='bilinear') - cls_pred = cls_pred.sigmoid() - # get local maximum - local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1) - keep_mask = local_max[:, :, :-1, :-1] == cls_pred - cls_pred = cls_pred * keep_mask - - mask_preds_x.append(mask_pred_x) - mask_preds_y.append(mask_pred_y) - cls_preds.append(cls_pred) - return mask_preds_x, mask_preds_y, cls_preds diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/solov2_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/solov2_head.py deleted file mode 100644 index 9edf99d8c26249ad9b659183de8e7cf1b6ef8534..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/solov2_head.py +++ /dev/null @@ -1,766 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import mmcv -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule, auto_fp16, force_fp32 - -from mmdet.core import InstanceData, mask_matrix_nms, multi_apply -from mmdet.core.utils import center_of_mass, generate_coordinate -from mmdet.models.builder import HEADS -from mmdet.utils.misc import floordiv -from .solo_head import SOLOHead - - -class MaskFeatModule(BaseModule): - """SOLOv2 mask feature map branch used in `SOLOv2: Dynamic and Fast - Instance Segmentation. `_ - - Args: - in_channels (int): Number of channels in the input feature map. - feat_channels (int): Number of hidden channels of the mask feature - map branch. - start_level (int): The starting feature map level from RPN that - will be used to predict the mask feature map. - end_level (int): The ending feature map level from rpn that - will be used to predict the mask feature map. - out_channels (int): Number of output channels of the mask feature - map branch. This is the channel count of the mask - feature map that to be dynamically convolved with the predicted - kernel. - mask_stride (int): Downsample factor of the mask feature map output. - Default: 4. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Config dict for normalization layer. Default: None. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - in_channels, - feat_channels, - start_level, - end_level, - out_channels, - mask_stride=4, - conv_cfg=None, - norm_cfg=None, - init_cfg=[dict(type='Normal', layer='Conv2d', std=0.01)]): - super().__init__(init_cfg=init_cfg) - - self.in_channels = in_channels - self.feat_channels = feat_channels - self.start_level = start_level - self.end_level = end_level - self.mask_stride = mask_stride - assert start_level >= 0 and end_level >= start_level - self.out_channels = out_channels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self._init_layers() - self.fp16_enabled = False - - def _init_layers(self): - self.convs_all_levels = nn.ModuleList() - for i in range(self.start_level, self.end_level + 1): - convs_per_level = nn.Sequential() - if i == 0: - convs_per_level.add_module( - f'conv{i}', - ConvModule( - self.in_channels, - self.feat_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - inplace=False)) - self.convs_all_levels.append(convs_per_level) - continue - - for j in range(i): - if j == 0: - if i == self.end_level: - chn = self.in_channels + 2 - else: - chn = self.in_channels - convs_per_level.add_module( - f'conv{j}', - ConvModule( - chn, - self.feat_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - inplace=False)) - convs_per_level.add_module( - f'upsample{j}', - nn.Upsample( - scale_factor=2, - mode='bilinear', - align_corners=False)) - continue - - convs_per_level.add_module( - f'conv{j}', - ConvModule( - self.feat_channels, - self.feat_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - inplace=False)) - convs_per_level.add_module( - f'upsample{j}', - nn.Upsample( - scale_factor=2, mode='bilinear', align_corners=False)) - - self.convs_all_levels.append(convs_per_level) - - self.conv_pred = ConvModule( - self.feat_channels, - self.out_channels, - 1, - padding=0, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) - - @auto_fp16() - def forward(self, feats): - inputs = feats[self.start_level:self.end_level + 1] - assert len(inputs) == (self.end_level - self.start_level + 1) - feature_add_all_level = self.convs_all_levels[0](inputs[0]) - for i in range(1, len(inputs)): - input_p = inputs[i] - if i == len(inputs) - 1: - coord_feat = generate_coordinate(input_p.size(), - input_p.device) - input_p = torch.cat([input_p, coord_feat], 1) - - # fix runtime error of "+=" inplace operation in PyTorch 1.10 - feature_add_all_level = feature_add_all_level + \ - self.convs_all_levels[i](input_p) - - feature_pred = self.conv_pred(feature_add_all_level) - return feature_pred - - -@HEADS.register_module() -class SOLOV2Head(SOLOHead): - """SOLOv2 mask head used in `SOLOv2: Dynamic and Fast Instance - Segmentation. `_ - - Args: - mask_feature_head (dict): Config of SOLOv2MaskFeatHead. - dynamic_conv_size (int): Dynamic Conv kernel size. Default: 1. - dcn_cfg (dict): Dcn conv configurations in kernel_convs and cls_conv. - default: None. - dcn_apply_to_all_conv (bool): Whether to use dcn in every layer of - kernel_convs and cls_convs, or only the last layer. It shall be set - `True` for the normal version of SOLOv2 and `False` for the - light-weight version. default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - *args, - mask_feature_head, - dynamic_conv_size=1, - dcn_cfg=None, - dcn_apply_to_all_conv=True, - init_cfg=[ - dict(type='Normal', layer='Conv2d', std=0.01), - dict( - type='Normal', - std=0.01, - bias_prob=0.01, - override=dict(name='conv_cls')) - ], - **kwargs): - assert dcn_cfg is None or isinstance(dcn_cfg, dict) - self.dcn_cfg = dcn_cfg - self.with_dcn = dcn_cfg is not None - self.dcn_apply_to_all_conv = dcn_apply_to_all_conv - self.dynamic_conv_size = dynamic_conv_size - mask_out_channels = mask_feature_head.get('out_channels') - self.kernel_out_channels = \ - mask_out_channels * self.dynamic_conv_size * self.dynamic_conv_size - - super().__init__(*args, init_cfg=init_cfg, **kwargs) - - # update the in_channels of mask_feature_head - if mask_feature_head.get('in_channels', None) is not None: - if mask_feature_head.in_channels != self.in_channels: - warnings.warn('The `in_channels` of SOLOv2MaskFeatHead and ' - 'SOLOv2Head should be same, changing ' - 'mask_feature_head.in_channels to ' - f'{self.in_channels}') - mask_feature_head.update(in_channels=self.in_channels) - else: - mask_feature_head.update(in_channels=self.in_channels) - - self.mask_feature_head = MaskFeatModule(**mask_feature_head) - self.mask_stride = self.mask_feature_head.mask_stride - self.fp16_enabled = False - - def _init_layers(self): - self.cls_convs = nn.ModuleList() - self.kernel_convs = nn.ModuleList() - conv_cfg = None - for i in range(self.stacked_convs): - if self.with_dcn: - if self.dcn_apply_to_all_conv: - conv_cfg = self.dcn_cfg - elif i == self.stacked_convs - 1: - # light head - conv_cfg = self.dcn_cfg - - chn = self.in_channels + 2 if i == 0 else self.feat_channels - self.kernel_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=self.norm_cfg, - bias=self.norm_cfg is None)) - - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=self.norm_cfg, - bias=self.norm_cfg is None)) - - self.conv_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - - self.conv_kernel = nn.Conv2d( - self.feat_channels, self.kernel_out_channels, 3, padding=1) - - @auto_fp16() - def forward(self, feats): - assert len(feats) == self.num_levels - mask_feats = self.mask_feature_head(feats) - feats = self.resize_feats(feats) - mlvl_kernel_preds = [] - mlvl_cls_preds = [] - for i in range(self.num_levels): - ins_kernel_feat = feats[i] - # ins branch - # concat coord - coord_feat = generate_coordinate(ins_kernel_feat.size(), - ins_kernel_feat.device) - ins_kernel_feat = torch.cat([ins_kernel_feat, coord_feat], 1) - - # kernel branch - kernel_feat = ins_kernel_feat - kernel_feat = F.interpolate( - kernel_feat, - size=self.num_grids[i], - mode='bilinear', - align_corners=False) - - cate_feat = kernel_feat[:, :-2, :, :] - - kernel_feat = kernel_feat.contiguous() - for i, kernel_conv in enumerate(self.kernel_convs): - kernel_feat = kernel_conv(kernel_feat) - kernel_pred = self.conv_kernel(kernel_feat) - - # cate branch - cate_feat = cate_feat.contiguous() - for i, cls_conv in enumerate(self.cls_convs): - cate_feat = cls_conv(cate_feat) - cate_pred = self.conv_cls(cate_feat) - - mlvl_kernel_preds.append(kernel_pred) - mlvl_cls_preds.append(cate_pred) - - return mlvl_kernel_preds, mlvl_cls_preds, mask_feats - - def _get_targets_single(self, - gt_bboxes, - gt_labels, - gt_masks, - 
featmap_size=None): - """Compute targets for predictions of single image. - - Args: - gt_bboxes (Tensor): Ground truth bbox of each instance, - shape (num_gts, 4). - gt_labels (Tensor): Ground truth label of each instance, - shape (num_gts,). - gt_masks (Tensor): Ground truth mask of each instance, - shape (num_gts, h, w). - featmap_sizes (:obj:`torch.size`): Size of UNified mask - feature map used to generate instance segmentation - masks by dynamic convolution, each element means - (feat_h, feat_w). Default: None. - - Returns: - Tuple: Usually returns a tuple containing targets for predictions. - - - mlvl_pos_mask_targets (list[Tensor]): Each element represent - the binary mask targets for positive points in this - level, has shape (num_pos, out_h, out_w). - - mlvl_labels (list[Tensor]): Each element is - classification labels for all - points in this level, has shape - (num_grid, num_grid). - - mlvl_pos_masks (list[Tensor]): Each element is - a `BoolTensor` to represent whether the - corresponding point in single level - is positive, has shape (num_grid **2). - - mlvl_pos_indexes (list[list]): Each element - in the list contains the positive index in - corresponding level, has shape (num_pos). - """ - - device = gt_labels.device - gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * - (gt_bboxes[:, 3] - gt_bboxes[:, 1])) - - mlvl_pos_mask_targets = [] - mlvl_pos_indexes = [] - mlvl_labels = [] - mlvl_pos_masks = [] - for (lower_bound, upper_bound), num_grid \ - in zip(self.scale_ranges, self.num_grids): - mask_target = [] - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - pos_index = [] - labels = torch.zeros([num_grid, num_grid], - dtype=torch.int64, - device=device) + self.num_classes - pos_mask = torch.zeros([num_grid**2], - dtype=torch.bool, - device=device) - - gt_inds = ((gt_areas >= lower_bound) & - (gt_areas <= upper_bound)).nonzero().flatten() - if len(gt_inds) == 0: - mlvl_pos_mask_targets.append( - torch.zeros([0, featmap_size[0], featmap_size[1]], - dtype=torch.uint8, - device=device)) - mlvl_labels.append(labels) - mlvl_pos_masks.append(pos_mask) - mlvl_pos_indexes.append([]) - continue - hit_gt_bboxes = gt_bboxes[gt_inds] - hit_gt_labels = gt_labels[gt_inds] - hit_gt_masks = gt_masks[gt_inds, ...] - - pos_w_ranges = 0.5 * (hit_gt_bboxes[:, 2] - - hit_gt_bboxes[:, 0]) * self.pos_scale - pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] - - hit_gt_bboxes[:, 1]) * self.pos_scale - - # Make sure hit_gt_masks has a value - valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0 - - for gt_mask, gt_label, pos_h_range, pos_w_range, \ - valid_mask_flag in \ - zip(hit_gt_masks, hit_gt_labels, pos_h_ranges, - pos_w_ranges, valid_mask_flags): - if not valid_mask_flag: - continue - upsampled_size = (featmap_size[0] * self.mask_stride, - featmap_size[1] * self.mask_stride) - center_h, center_w = center_of_mass(gt_mask) - - coord_w = int( - floordiv((center_w / upsampled_size[1]), (1. / num_grid), - rounding_mode='trunc')) - coord_h = int( - floordiv((center_h / upsampled_size[0]), (1. / num_grid), - rounding_mode='trunc')) - - # left, top, right, down - top_box = max( - 0, - int( - floordiv( - (center_h - pos_h_range) / upsampled_size[0], - (1. / num_grid), - rounding_mode='trunc'))) - down_box = min( - num_grid - 1, - int( - floordiv( - (center_h + pos_h_range) / upsampled_size[0], - (1. / num_grid), - rounding_mode='trunc'))) - left_box = max( - 0, - int( - floordiv( - (center_w - pos_w_range) / upsampled_size[1], - (1. 
/ num_grid), - rounding_mode='trunc'))) - right_box = min( - num_grid - 1, - int( - floordiv( - (center_w + pos_w_range) / upsampled_size[1], - (1. / num_grid), - rounding_mode='trunc'))) - - top = max(top_box, coord_h - 1) - down = min(down_box, coord_h + 1) - left = max(coord_w - 1, left_box) - right = min(right_box, coord_w + 1) - - labels[top:(down + 1), left:(right + 1)] = gt_label - # ins - gt_mask = np.uint8(gt_mask.cpu().numpy()) - # Follow the original implementation, F.interpolate is - # different from cv2 and opencv - gt_mask = mmcv.imrescale(gt_mask, scale=1. / self.mask_stride) - gt_mask = torch.from_numpy(gt_mask).to(device=device) - - for i in range(top, down + 1): - for j in range(left, right + 1): - index = int(i * num_grid + j) - this_mask_target = torch.zeros( - [featmap_size[0], featmap_size[1]], - dtype=torch.uint8, - device=device) - this_mask_target[:gt_mask.shape[0], :gt_mask. - shape[1]] = gt_mask - mask_target.append(this_mask_target) - pos_mask[index] = True - pos_index.append(index) - if len(mask_target) == 0: - mask_target = torch.zeros( - [0, featmap_size[0], featmap_size[1]], - dtype=torch.uint8, - device=device) - else: - mask_target = torch.stack(mask_target, 0) - mlvl_pos_mask_targets.append(mask_target) - mlvl_labels.append(labels) - mlvl_pos_masks.append(pos_mask) - mlvl_pos_indexes.append(pos_index) - return (mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks, - mlvl_pos_indexes) - - @force_fp32(apply_to=('mlvl_kernel_preds', 'mlvl_cls_preds', 'mask_feats')) - def loss(self, - mlvl_kernel_preds, - mlvl_cls_preds, - mask_feats, - gt_labels, - gt_masks, - img_metas, - gt_bboxes=None, - **kwargs): - """Calculate the loss of total batch. - - Args: - mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel - prediction. The kernel is used to generate instance - segmentation masks by dynamic convolution. Each element in the - list has shape - (batch_size, kernel_out_channels, num_grids, num_grids). - mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element - in the list has shape - (batch_size, num_classes, num_grids, num_grids). - mask_feats (Tensor): Unified mask feature map used to generate - instance segmentation masks by dynamic convolution. Has shape - (batch_size, mask_out_channels, h, w). - gt_labels (list[Tensor]): Labels of multiple images. - gt_masks (list[Tensor]): Ground truth masks of multiple images. - Each has shape (num_instances, h, w). - img_metas (list[dict]): Meta information of multiple images. - gt_bboxes (list[Tensor]): Ground truth bboxes of multiple - images. Default: None. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
- """ - featmap_size = mask_feats.size()[-2:] - - pos_mask_targets, labels, pos_masks, pos_indexes = multi_apply( - self._get_targets_single, - gt_bboxes, - gt_labels, - gt_masks, - featmap_size=featmap_size) - - mlvl_mask_targets = [ - torch.cat(lvl_mask_targets, 0) - for lvl_mask_targets in zip(*pos_mask_targets) - ] - - mlvl_pos_kernel_preds = [] - for lvl_kernel_preds, lvl_pos_indexes in zip(mlvl_kernel_preds, - zip(*pos_indexes)): - lvl_pos_kernel_preds = [] - for img_lvl_kernel_preds, img_lvl_pos_indexes in zip( - lvl_kernel_preds, lvl_pos_indexes): - img_lvl_pos_kernel_preds = img_lvl_kernel_preds.view( - img_lvl_kernel_preds.shape[0], -1)[:, img_lvl_pos_indexes] - lvl_pos_kernel_preds.append(img_lvl_pos_kernel_preds) - mlvl_pos_kernel_preds.append(lvl_pos_kernel_preds) - - # make multilevel mlvl_mask_pred - mlvl_mask_preds = [] - for lvl_pos_kernel_preds in mlvl_pos_kernel_preds: - lvl_mask_preds = [] - for img_id, img_lvl_pos_kernel_pred in enumerate( - lvl_pos_kernel_preds): - if img_lvl_pos_kernel_pred.size()[-1] == 0: - continue - img_mask_feats = mask_feats[[img_id]] - h, w = img_mask_feats.shape[-2:] - num_kernel = img_lvl_pos_kernel_pred.shape[1] - img_lvl_mask_pred = F.conv2d( - img_mask_feats, - img_lvl_pos_kernel_pred.permute(1, 0).view( - num_kernel, -1, self.dynamic_conv_size, - self.dynamic_conv_size), - stride=1).view(-1, h, w) - lvl_mask_preds.append(img_lvl_mask_pred) - if len(lvl_mask_preds) == 0: - lvl_mask_preds = None - else: - lvl_mask_preds = torch.cat(lvl_mask_preds, 0) - mlvl_mask_preds.append(lvl_mask_preds) - # dice loss - num_pos = 0 - for img_pos_masks in pos_masks: - for lvl_img_pos_masks in img_pos_masks: - num_pos += lvl_img_pos_masks.count_nonzero() - - loss_mask = [] - for lvl_mask_preds, lvl_mask_targets in zip(mlvl_mask_preds, - mlvl_mask_targets): - if lvl_mask_preds is None: - continue - loss_mask.append( - self.loss_mask( - lvl_mask_preds, - lvl_mask_targets, - reduction_override='none')) - if num_pos > 0: - loss_mask = torch.cat(loss_mask).sum() / num_pos - else: - loss_mask = torch.cat(loss_mask).mean() - - # cate - flatten_labels = [ - torch.cat( - [img_lvl_labels.flatten() for img_lvl_labels in lvl_labels]) - for lvl_labels in zip(*labels) - ] - flatten_labels = torch.cat(flatten_labels) - - flatten_cls_preds = [ - lvl_cls_preds.permute(0, 2, 3, 1).reshape(-1, self.num_classes) - for lvl_cls_preds in mlvl_cls_preds - ] - flatten_cls_preds = torch.cat(flatten_cls_preds) - - loss_cls = self.loss_cls( - flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1) - return dict(loss_mask=loss_mask, loss_cls=loss_cls) - - @force_fp32( - apply_to=('mlvl_kernel_preds', 'mlvl_cls_scores', 'mask_feats')) - def get_results(self, mlvl_kernel_preds, mlvl_cls_scores, mask_feats, - img_metas, **kwargs): - """Get multi-image mask results. - - Args: - mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel - prediction. The kernel is used to generate instance - segmentation masks by dynamic convolution. Each element in the - list has shape - (batch_size, kernel_out_channels, num_grids, num_grids). - mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element - in the list has shape - (batch_size, num_classes, num_grids, num_grids). - mask_feats (Tensor): Unified mask feature map used to generate - instance segmentation masks by dynamic convolution. Has shape - (batch_size, mask_out_channels, h, w). - img_metas (list[dict]): Meta information of all images. 
- - Returns: - list[:obj:`InstanceData`]: Processed results of multiple - images.Each :obj:`InstanceData` usually contains - following keys. - - - scores (Tensor): Classification scores, has shape - (num_instance,). - - labels (Tensor): Has shape (num_instances,). - - masks (Tensor): Processed mask results, has - shape (num_instances, h, w). - """ - num_levels = len(mlvl_cls_scores) - assert len(mlvl_kernel_preds) == len(mlvl_cls_scores) - - for lvl in range(num_levels): - cls_scores = mlvl_cls_scores[lvl] - cls_scores = cls_scores.sigmoid() - local_max = F.max_pool2d(cls_scores, 2, stride=1, padding=1) - keep_mask = local_max[:, :, :-1, :-1] == cls_scores - cls_scores = cls_scores * keep_mask - mlvl_cls_scores[lvl] = cls_scores.permute(0, 2, 3, 1) - - result_list = [] - for img_id in range(len(img_metas)): - img_cls_pred = [ - mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels) - for lvl in range(num_levels) - ] - img_mask_feats = mask_feats[[img_id]] - img_kernel_pred = [ - mlvl_kernel_preds[lvl][img_id].permute(1, 2, 0).view( - -1, self.kernel_out_channels) for lvl in range(num_levels) - ] - img_cls_pred = torch.cat(img_cls_pred, dim=0) - img_kernel_pred = torch.cat(img_kernel_pred, dim=0) - result = self._get_results_single( - img_kernel_pred, - img_cls_pred, - img_mask_feats, - img_meta=img_metas[img_id]) - result_list.append(result) - return result_list - - def _get_results_single(self, - kernel_preds, - cls_scores, - mask_feats, - img_meta, - cfg=None): - """Get processed mask related results of single image. - - Args: - kernel_preds (Tensor): Dynamic kernel prediction of all points - in single image, has shape - (num_points, kernel_out_channels). - cls_scores (Tensor): Classification score of all points - in single image, has shape (num_points, num_classes). - mask_preds (Tensor): Mask prediction of all points in - single image, has shape (num_points, feat_h, feat_w). - img_meta (dict): Meta information of corresponding image. - cfg (dict, optional): Config used in test phase. - Default: None. - - Returns: - :obj:`InstanceData`: Processed results of single image. - it usually contains following keys. - - scores (Tensor): Classification scores, has shape - (num_instance,). - - labels (Tensor): Has shape (num_instances,). - - masks (Tensor): Processed mask results, has - shape (num_instances, h, w). - """ - - def empty_results(results, cls_scores): - """Generate a empty results.""" - results.scores = cls_scores.new_ones(0) - results.masks = cls_scores.new_zeros(0, *results.ori_shape[:2]) - results.labels = cls_scores.new_ones(0) - return results - - cfg = self.test_cfg if cfg is None else cfg - assert len(kernel_preds) == len(cls_scores) - results = InstanceData(img_meta) - - featmap_size = mask_feats.size()[-2:] - - img_shape = results.img_shape - ori_shape = results.ori_shape - - # overall info - h, w, _ = img_shape - upsampled_size = (featmap_size[0] * self.mask_stride, - featmap_size[1] * self.mask_stride) - - # process. - score_mask = (cls_scores > cfg.score_thr) - cls_scores = cls_scores[score_mask] - if len(cls_scores) == 0: - return empty_results(results, cls_scores) - - # cate_labels & kernel_preds - inds = score_mask.nonzero() - cls_labels = inds[:, 1] - kernel_preds = kernel_preds[inds[:, 0]] - - # trans vector. 
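        # Points from all levels were flattened into one vector of
        # sum(num_grids[l] ** 2) entries, so the cumulative sums built below recover
        # each kept point's FPN level and assign it that level's stride, which is
        # later used as the minimum-area threshold for its mask. A worked example,
        # assuming the default num_grids = [40, 36, 24, 16, 12] from SOLOHead:
        #
        #     lvl_interval = [1600, 2896, 3472, 3728, 3872]
        #     points    0..1599 -> self.strides[0]
        #     points 1600..2895 -> self.strides[1], and so on for the remaining levels.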
- lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0) - strides = kernel_preds.new_ones(lvl_interval[-1]) - - strides[:lvl_interval[0]] *= self.strides[0] - for lvl in range(1, self.num_levels): - strides[lvl_interval[lvl - - 1]:lvl_interval[lvl]] *= self.strides[lvl] - strides = strides[inds[:, 0]] - - # mask encoding. - kernel_preds = kernel_preds.view( - kernel_preds.size(0), -1, self.dynamic_conv_size, - self.dynamic_conv_size) - mask_preds = F.conv2d( - mask_feats, kernel_preds, stride=1).squeeze(0).sigmoid() - # mask. - masks = mask_preds > cfg.mask_thr - sum_masks = masks.sum((1, 2)).float() - keep = sum_masks > strides - if keep.sum() == 0: - return empty_results(results, cls_scores) - masks = masks[keep] - mask_preds = mask_preds[keep] - sum_masks = sum_masks[keep] - cls_scores = cls_scores[keep] - cls_labels = cls_labels[keep] - - # maskness. - mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks - cls_scores *= mask_scores - - scores, labels, _, keep_inds = mask_matrix_nms( - masks, - cls_labels, - cls_scores, - mask_area=sum_masks, - nms_pre=cfg.nms_pre, - max_num=cfg.max_per_img, - kernel=cfg.kernel, - sigma=cfg.sigma, - filter_thr=cfg.filter_thr) - mask_preds = mask_preds[keep_inds] - mask_preds = F.interpolate( - mask_preds.unsqueeze(0), - size=upsampled_size, - mode='bilinear', - align_corners=False)[:, :, :h, :w] - mask_preds = F.interpolate( - mask_preds, - size=ori_shape[:2], - mode='bilinear', - align_corners=False).squeeze(0) - masks = mask_preds > cfg.mask_thr - - results.masks = masks - results.labels = labels - results.scores = scores - - return results diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ssd_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ssd_head.py deleted file mode 100644 index e362fd8016a0b0f7d0d371adb4fc39249ceb2f6a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/ssd_head.py +++ /dev/null @@ -1,357 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule -from mmcv.runner import force_fp32 - -from mmdet.core import (build_assigner, build_bbox_coder, - build_prior_generator, build_sampler, multi_apply) -from ..builder import HEADS -from ..losses import smooth_l1_loss -from .anchor_head import AnchorHead - - -# TODO: add loss evaluator for SSD -@HEADS.register_module() -class SSDHead(AnchorHead): - """SSD head used in https://arxiv.org/abs/1512.02325. - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - stacked_convs (int): Number of conv layers in cls and reg tower. - Default: 0. - feat_channels (int): Number of hidden channels when stacked_convs - > 0. Default: 256. - use_depthwise (bool): Whether to use DepthwiseSeparableConv. - Default: False. - conv_cfg (dict): Dictionary to construct and config conv layer. - Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: None. - act_cfg (dict): Dictionary to construct and config activation layer. - Default: None. - anchor_generator (dict): Config dict for anchor generator - bbox_coder (dict): Config of bounding box coder. 
- reg_decoded_bbox (bool): If true, the regression loss would be - applied directly on decoded bounding boxes, converting both - the predicted boxes and regression targets to absolute - coordinates format. Default False. It should be `True` when - using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. - train_cfg (dict): Training config of anchor head. - test_cfg (dict): Testing config of anchor head. - init_cfg (dict or list[dict], optional): Initialization config dict. - """ # noqa: W605 - - def __init__(self, - num_classes=80, - in_channels=(512, 1024, 512, 256, 256, 256), - stacked_convs=0, - feat_channels=256, - use_depthwise=False, - conv_cfg=None, - norm_cfg=None, - act_cfg=None, - anchor_generator=dict( - type='SSDAnchorGenerator', - scale_major=False, - input_size=300, - strides=[8, 16, 32, 64, 100, 300], - ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), - basesize_ratio_range=(0.1, 0.9)), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - clip_border=True, - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0], - ), - reg_decoded_bbox=False, - train_cfg=None, - test_cfg=None, - init_cfg=dict( - type='Xavier', - layer='Conv2d', - distribution='uniform', - bias=0)): - super(AnchorHead, self).__init__(init_cfg) - self.num_classes = num_classes - self.in_channels = in_channels - self.stacked_convs = stacked_convs - self.feat_channels = feat_channels - self.use_depthwise = use_depthwise - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - - self.cls_out_channels = num_classes + 1 # add background class - self.prior_generator = build_prior_generator(anchor_generator) - - # Usually the numbers of anchors for each level are the same - # except SSD detectors. So it is an int in the most dense - # heads but a list of int in SSDHead - self.num_base_priors = self.prior_generator.num_base_priors - - self._init_layers() - - self.bbox_coder = build_bbox_coder(bbox_coder) - self.reg_decoded_bbox = reg_decoded_bbox - self.use_sigmoid_cls = False - self.cls_focal_loss = False - self.train_cfg = train_cfg - self.test_cfg = test_cfg - # set sampling=False for archor_target - self.sampling = False - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - # SSD sampling=False so use PseudoSampler - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - self.fp16_enabled = False - - @property - def num_anchors(self): - """ - Returns: - list[int]: Number of base_anchors on each point of each level. 
- """ - warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' - 'please use "num_base_priors" instead') - return self.num_base_priors - - def _init_layers(self): - """Initialize layers of the head.""" - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - # TODO: Use registry to choose ConvModule type - conv = DepthwiseSeparableConvModule \ - if self.use_depthwise else ConvModule - - for channel, num_base_priors in zip(self.in_channels, - self.num_base_priors): - cls_layers = [] - reg_layers = [] - in_channel = channel - # build stacked conv tower, not used in default ssd - for i in range(self.stacked_convs): - cls_layers.append( - conv( - in_channel, - self.feat_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - reg_layers.append( - conv( - in_channel, - self.feat_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - in_channel = self.feat_channels - # SSD-Lite head - if self.use_depthwise: - cls_layers.append( - ConvModule( - in_channel, - in_channel, - 3, - padding=1, - groups=in_channel, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - reg_layers.append( - ConvModule( - in_channel, - in_channel, - 3, - padding=1, - groups=in_channel, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - cls_layers.append( - nn.Conv2d( - in_channel, - num_base_priors * self.cls_out_channels, - kernel_size=1 if self.use_depthwise else 3, - padding=0 if self.use_depthwise else 1)) - reg_layers.append( - nn.Conv2d( - in_channel, - num_base_priors * 4, - kernel_size=1 if self.use_depthwise else 3, - padding=0 if self.use_depthwise else 1)) - self.cls_convs.append(nn.Sequential(*cls_layers)) - self.reg_convs.append(nn.Sequential(*reg_layers)) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: - cls_scores (list[Tensor]): Classification scores for all scale - levels, each is a 4D-tensor, the channels number is - num_anchors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale - levels, each is a 4D-tensor, the channels number is - num_anchors * 4. - """ - cls_scores = [] - bbox_preds = [] - for feat, reg_conv, cls_conv in zip(feats, self.reg_convs, - self.cls_convs): - cls_scores.append(cls_conv(feat)) - bbox_preds.append(reg_conv(feat)) - return cls_scores, bbox_preds - - def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights, - bbox_targets, bbox_weights, num_total_samples): - """Compute loss of a single image. - - Args: - cls_score (Tensor): Box scores for eachimage - Has shape (num_total_anchors, num_classes). - bbox_pred (Tensor): Box energies / deltas for each image - level with shape (num_total_anchors, 4). - anchors (Tensor): Box reference for each scale level with shape - (num_total_anchors, 4). - labels (Tensor): Labels of each anchors with shape - (num_total_anchors,). - label_weights (Tensor): Label weights of each anchor with shape - (num_total_anchors,) - bbox_targets (Tensor): BBox regression targets of each anchor - weight shape (num_total_anchors, 4). - bbox_weights (Tensor): BBox regression loss weights of each anchor - with shape (num_total_anchors, 4). - num_total_samples (int): If sampling, num total samples equal to - the number of total anchors; Otherwise, it is the number of - positive anchors. 
- - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - - loss_cls_all = F.cross_entropy( - cls_score, labels, reduction='none') * label_weights - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero( - as_tuple=False).reshape(-1) - neg_inds = (labels == self.num_classes).nonzero( - as_tuple=False).view(-1) - - num_pos_samples = pos_inds.size(0) - num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples - if num_neg_samples > neg_inds.size(0): - num_neg_samples = neg_inds.size(0) - topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) - loss_cls_pos = loss_cls_all[pos_inds].sum() - loss_cls_neg = topk_loss_cls_neg.sum() - loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples - - if self.reg_decoded_bbox: - # When the regression loss (e.g. `IouLoss`, `GIouLoss`) - # is applied directly on the decoded bounding boxes, it - # decodes the already encoded coordinates to absolute format. - bbox_pred = self.bbox_coder.decode(anchor, bbox_pred) - - loss_bbox = smooth_l1_loss( - bbox_pred, - bbox_targets, - bbox_weights, - beta=self.train_cfg.smoothl1_beta, - avg_factor=num_total_samples) - return loss_cls[None], loss_bbox - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): each item are the truth boxes for each - image in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
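# --- Illustrative sketch (standalone; tensors are made up) --------------------
# The hard negative mining performed in `loss_single` above: all positive
# anchors contribute to the classification loss, plus only the
# `neg_pos_ratio` hardest negatives (largest per-anchor losses).
import torch
import torch.nn.functional as F

def hard_negative_mining_demo(cls_logits, labels, num_classes=80, neg_pos_ratio=3):
    loss_all = F.cross_entropy(cls_logits, labels, reduction='none')
    pos_inds = ((labels >= 0) & (labels < num_classes)).nonzero(as_tuple=False).reshape(-1)
    neg_inds = (labels == num_classes).nonzero(as_tuple=False).reshape(-1)
    num_neg = min(neg_pos_ratio * pos_inds.numel(), neg_inds.numel())
    topk_neg_loss, _ = loss_all[neg_inds].topk(num_neg)
    # normalised by the positive count here for simplicity
    return (loss_all[pos_inds].sum() + topk_neg_loss.sum()) / max(pos_inds.numel(), 1)

logits = torch.randn(6, 81)                        # 6 anchors, 80 classes + background
labels = torch.tensor([3, 80, 80, 80, 80, 80])     # one positive, five negatives
print(hard_negative_mining_demo(logits, labels))
# -----------------------------------------------------------------------------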
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=1, - unmap_outputs=True) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - - num_images = len(img_metas) - all_cls_scores = torch.cat([ - s.permute(0, 2, 3, 1).reshape( - num_images, -1, self.cls_out_channels) for s in cls_scores - ], 1) - all_labels = torch.cat(labels_list, -1).view(num_images, -1) - all_label_weights = torch.cat(label_weights_list, - -1).view(num_images, -1) - all_bbox_preds = torch.cat([ - b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) - for b in bbox_preds - ], -2) - all_bbox_targets = torch.cat(bbox_targets_list, - -2).view(num_images, -1, 4) - all_bbox_weights = torch.cat(bbox_weights_list, - -2).view(num_images, -1, 4) - - # concat all level anchors to a single tensor - all_anchors = [] - for i in range(num_images): - all_anchors.append(torch.cat(anchor_list[i])) - - losses_cls, losses_bbox = multi_apply( - self.loss_single, - all_cls_scores, - all_bbox_preds, - all_anchors, - all_labels, - all_label_weights, - all_bbox_targets, - all_bbox_weights, - num_total_samples=num_total_pos) - return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/tood_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/tood_head.py deleted file mode 100644 index c64ebf7a8ce6d428e4e7f8cc60be06baed5752c9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/tood_head.py +++ /dev/null @@ -1,778 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init -from mmcv.ops import deform_conv2d -from mmcv.runner import force_fp32 - -from mmdet.core import (anchor_inside_flags, build_assigner, distance2bbox, - images_to_levels, multi_apply, reduce_mean, unmap) -from mmdet.core.utils import filter_scores_and_topk -from mmdet.models.utils import sigmoid_geometric_mean -from ..builder import HEADS, build_loss -from .atss_head import ATSSHead - - -class TaskDecomposition(nn.Module): - """Task decomposition module in task-aligned predictor of TOOD. - - Args: - feat_channels (int): Number of feature channels in TOOD head. - stacked_convs (int): Number of conv layers in TOOD head. - la_down_rate (int): Downsample rate of layer attention. - conv_cfg (dict): Config dict for convolution layer. - norm_cfg (dict): Config dict for normalization layer. 
- """ - - def __init__(self, - feat_channels, - stacked_convs, - la_down_rate=8, - conv_cfg=None, - norm_cfg=None): - super(TaskDecomposition, self).__init__() - self.feat_channels = feat_channels - self.stacked_convs = stacked_convs - self.in_channels = self.feat_channels * self.stacked_convs - self.norm_cfg = norm_cfg - self.layer_attention = nn.Sequential( - nn.Conv2d(self.in_channels, self.in_channels // la_down_rate, 1), - nn.ReLU(inplace=True), - nn.Conv2d( - self.in_channels // la_down_rate, - self.stacked_convs, - 1, - padding=0), nn.Sigmoid()) - - self.reduction_conv = ConvModule( - self.in_channels, - self.feat_channels, - 1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - bias=norm_cfg is None) - - def init_weights(self): - for m in self.layer_attention.modules(): - if isinstance(m, nn.Conv2d): - normal_init(m, std=0.001) - normal_init(self.reduction_conv.conv, std=0.01) - - def forward(self, feat, avg_feat=None): - b, c, h, w = feat.shape - if avg_feat is None: - avg_feat = F.adaptive_avg_pool2d(feat, (1, 1)) - weight = self.layer_attention(avg_feat) - - # here we first compute the product between layer attention weight and - # conv weight, and then compute the convolution between new conv weight - # and feature map, in order to save memory and FLOPs. - conv_weight = weight.reshape( - b, 1, self.stacked_convs, - 1) * self.reduction_conv.conv.weight.reshape( - 1, self.feat_channels, self.stacked_convs, self.feat_channels) - conv_weight = conv_weight.reshape(b, self.feat_channels, - self.in_channels) - feat = feat.reshape(b, self.in_channels, h * w) - feat = torch.bmm(conv_weight, feat).reshape(b, self.feat_channels, h, - w) - if self.norm_cfg is not None: - feat = self.reduction_conv.norm(feat) - feat = self.reduction_conv.activate(feat) - - return feat - - -@HEADS.register_module() -class TOODHead(ATSSHead): - """TOODHead used in `TOOD: Task-aligned One-stage Object Detection. - - `_. - - TOOD uses Task-aligned head (T-head) and is optimized by Task Alignment - Learning (TAL). - - Args: - num_dcn (int): Number of deformable convolution in the head. - Default: 0. - anchor_type (str): If set to `anchor_free`, the head will use centers - to regress bboxes. If set to `anchor_based`, the head will - regress bboxes based on anchors. Default: `anchor_free`. - initial_loss_cls (dict): Config of initial loss. - - Example: - >>> self = TOODHead(11, 7) - >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] - >>> cls_score, bbox_pred = self.forward(feats) - >>> assert len(cls_score) == len(self.scales) - """ - - def __init__(self, - num_classes, - in_channels, - num_dcn=0, - anchor_type='anchor_free', - initial_loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - activated=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - **kwargs): - assert anchor_type in ['anchor_free', 'anchor_based'] - self.num_dcn = num_dcn - self.anchor_type = anchor_type - self.epoch = 0 # which would be update in SetEpochInfoHook! 
- super(TOODHead, self).__init__(num_classes, in_channels, **kwargs) - - if self.train_cfg: - self.initial_epoch = self.train_cfg.initial_epoch - self.initial_assigner = build_assigner( - self.train_cfg.initial_assigner) - self.initial_loss_cls = build_loss(initial_loss_cls) - self.assigner = self.initial_assigner - self.alignment_assigner = build_assigner(self.train_cfg.assigner) - self.alpha = self.train_cfg.alpha - self.beta = self.train_cfg.beta - - def _init_layers(self): - """Initialize layers of the head.""" - self.relu = nn.ReLU(inplace=True) - self.inter_convs = nn.ModuleList() - for i in range(self.stacked_convs): - if i < self.num_dcn: - conv_cfg = dict(type='DCNv2', deform_groups=4) - else: - conv_cfg = self.conv_cfg - chn = self.in_channels if i == 0 else self.feat_channels - self.inter_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=self.norm_cfg)) - - self.cls_decomp = TaskDecomposition(self.feat_channels, - self.stacked_convs, - self.stacked_convs * 8, - self.conv_cfg, self.norm_cfg) - self.reg_decomp = TaskDecomposition(self.feat_channels, - self.stacked_convs, - self.stacked_convs * 8, - self.conv_cfg, self.norm_cfg) - - self.tood_cls = nn.Conv2d( - self.feat_channels, - self.num_base_priors * self.cls_out_channels, - 3, - padding=1) - self.tood_reg = nn.Conv2d( - self.feat_channels, self.num_base_priors * 4, 3, padding=1) - - self.cls_prob_module = nn.Sequential( - nn.Conv2d(self.feat_channels * self.stacked_convs, - self.feat_channels // 4, 1), nn.ReLU(inplace=True), - nn.Conv2d(self.feat_channels // 4, 1, 3, padding=1)) - self.reg_offset_module = nn.Sequential( - nn.Conv2d(self.feat_channels * self.stacked_convs, - self.feat_channels // 4, 1), nn.ReLU(inplace=True), - nn.Conv2d(self.feat_channels // 4, 4 * 2, 3, padding=1)) - - self.scales = nn.ModuleList( - [Scale(1.0) for _ in self.prior_generator.strides]) - - def init_weights(self): - """Initialize weights of the head.""" - bias_cls = bias_init_with_prob(0.01) - for m in self.inter_convs: - normal_init(m.conv, std=0.01) - for m in self.cls_prob_module: - if isinstance(m, nn.Conv2d): - normal_init(m, std=0.01) - for m in self.reg_offset_module: - if isinstance(m, nn.Conv2d): - normal_init(m, std=0.001) - normal_init(self.cls_prob_module[-1], std=0.01, bias=bias_cls) - - self.cls_decomp.init_weights() - self.reg_decomp.init_weights() - - normal_init(self.tood_cls, std=0.01, bias=bias_cls) - normal_init(self.tood_reg, std=0.01) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: Usually a tuple of classification scores and bbox prediction - cls_scores (list[Tensor]): Classification scores for all scale - levels, each is a 4D-tensor, the channels number is - num_anchors * num_classes. - bbox_preds (list[Tensor]): Decoded box for all scale levels, - each is a 4D-tensor, the channels number is - num_anchors * 4. In [tl_x, tl_y, br_x, br_y] format. 
- """ - cls_scores = [] - bbox_preds = [] - for idx, (x, scale, stride) in enumerate( - zip(feats, self.scales, self.prior_generator.strides)): - b, c, h, w = x.shape - anchor = self.prior_generator.single_level_grid_priors( - (h, w), idx, device=x.device) - anchor = torch.cat([anchor for _ in range(b)]) - # extract task interactive features - inter_feats = [] - for inter_conv in self.inter_convs: - x = inter_conv(x) - inter_feats.append(x) - feat = torch.cat(inter_feats, 1) - - # task decomposition - avg_feat = F.adaptive_avg_pool2d(feat, (1, 1)) - cls_feat = self.cls_decomp(feat, avg_feat) - reg_feat = self.reg_decomp(feat, avg_feat) - - # cls prediction and alignment - cls_logits = self.tood_cls(cls_feat) - cls_prob = self.cls_prob_module(feat) - cls_score = sigmoid_geometric_mean(cls_logits, cls_prob) - - # reg prediction and alignment - if self.anchor_type == 'anchor_free': - reg_dist = scale(self.tood_reg(reg_feat).exp()).float() - reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4) - reg_bbox = distance2bbox( - self.anchor_center(anchor) / stride[0], - reg_dist).reshape(b, h, w, 4).permute(0, 3, 1, - 2) # (b, c, h, w) - elif self.anchor_type == 'anchor_based': - reg_dist = scale(self.tood_reg(reg_feat)).float() - reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4) - reg_bbox = self.bbox_coder.decode(anchor, reg_dist).reshape( - b, h, w, 4).permute(0, 3, 1, 2) / stride[0] - else: - raise NotImplementedError( - f'Unknown anchor type: {self.anchor_type}.' - f'Please use `anchor_free` or `anchor_based`.') - reg_offset = self.reg_offset_module(feat) - bbox_pred = self.deform_sampling(reg_bbox.contiguous(), - reg_offset.contiguous()) - - # After deform_sampling, some boxes will become invalid (The - # left-top point is at the right or bottom of the right-bottom - # point), which will make the GIoULoss negative. - invalid_bbox_idx = (bbox_pred[:, [0]] > bbox_pred[:, [2]]) | \ - (bbox_pred[:, [1]] > bbox_pred[:, [3]]) - invalid_bbox_idx = invalid_bbox_idx.expand_as(bbox_pred) - bbox_pred = torch.where(invalid_bbox_idx, reg_bbox, bbox_pred) - - cls_scores.append(cls_score) - bbox_preds.append(bbox_pred) - return tuple(cls_scores), tuple(bbox_preds) - - def deform_sampling(self, feat, offset): - """Sampling the feature x according to offset. - - Args: - feat (Tensor): Feature - offset (Tensor): Spatial offset for feature sampling - """ - # it is an equivalent implementation of bilinear interpolation - b, c, h, w = feat.shape - weight = feat.new_ones(c, 1, 1, 1) - y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c) - return y - - def anchor_center(self, anchors): - """Get anchor centers from anchors. - - Args: - anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format. - - Returns: - Tensor: Anchor centers with shape (N, 2), "xy" format. - """ - anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 - anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 - return torch.stack([anchors_cx, anchors_cy], dim=-1) - - def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights, - bbox_targets, alignment_metrics, stride): - """Compute loss of a single scale level. - - Args: - anchors (Tensor): Box reference for each scale level with shape - (N, num_total_anchors, 4). - cls_score (Tensor): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W). - bbox_pred (Tensor): Decoded bboxes for each scale - level with shape (N, num_anchors * 4, H, W). - labels (Tensor): Labels of each anchors with shape - (N, num_total_anchors). 
- label_weights (Tensor): Label weights of each anchor with shape - (N, num_total_anchors). - bbox_targets (Tensor): BBox regression targets of each anchor with - shape (N, num_total_anchors, 4). - alignment_metrics (Tensor): Alignment metrics with shape - (N, num_total_anchors). - stride (tuple[int]): Downsample stride of the feature map. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert stride[0] == stride[1], 'h stride is not equal to w stride!' - anchors = anchors.reshape(-1, 4) - cls_score = cls_score.permute(0, 2, 3, 1).reshape( - -1, self.cls_out_channels).contiguous() - bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) - bbox_targets = bbox_targets.reshape(-1, 4) - labels = labels.reshape(-1) - alignment_metrics = alignment_metrics.reshape(-1) - label_weights = label_weights.reshape(-1) - targets = labels if self.epoch < self.initial_epoch else ( - labels, alignment_metrics) - cls_loss_func = self.initial_loss_cls \ - if self.epoch < self.initial_epoch else self.loss_cls - - loss_cls = cls_loss_func( - cls_score, targets, label_weights, avg_factor=1.0) - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - bg_class_ind = self.num_classes - pos_inds = ((labels >= 0) - & (labels < bg_class_ind)).nonzero().squeeze(1) - - if len(pos_inds) > 0: - pos_bbox_targets = bbox_targets[pos_inds] - pos_bbox_pred = bbox_pred[pos_inds] - pos_anchors = anchors[pos_inds] - - pos_decode_bbox_pred = pos_bbox_pred - pos_decode_bbox_targets = pos_bbox_targets / stride[0] - - # regression loss - pos_bbox_weight = self.centerness_target( - pos_anchors, pos_bbox_targets - ) if self.epoch < self.initial_epoch else alignment_metrics[ - pos_inds] - - loss_bbox = self.loss_bbox( - pos_decode_bbox_pred, - pos_decode_bbox_targets, - weight=pos_bbox_weight, - avg_factor=1.0) - else: - loss_bbox = bbox_pred.sum() * 0 - pos_bbox_weight = bbox_targets.new_tensor(0.) - - return loss_cls, loss_bbox, alignment_metrics.sum( - ), pos_bbox_weight.sum() - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Decoded box for each scale - level with shape (N, num_anchors * 4, H, W) in - [tl_x, tl_y, br_x, br_y] format. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor] | None): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
- """ - num_imgs = len(img_metas) - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - - flatten_cls_scores = torch.cat([ - cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, - self.cls_out_channels) - for cls_score in cls_scores - ], 1) - flatten_bbox_preds = torch.cat([ - bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) * stride[0] - for bbox_pred, stride in zip(bbox_preds, - self.prior_generator.strides) - ], 1) - - cls_reg_targets = self.get_targets( - flatten_cls_scores, - flatten_bbox_preds, - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels) - (anchor_list, labels_list, label_weights_list, bbox_targets_list, - alignment_metrics_list) = cls_reg_targets - - losses_cls, losses_bbox,\ - cls_avg_factors, bbox_avg_factors = multi_apply( - self.loss_single, - anchor_list, - cls_scores, - bbox_preds, - labels_list, - label_weights_list, - bbox_targets_list, - alignment_metrics_list, - self.prior_generator.strides) - - cls_avg_factor = reduce_mean(sum(cls_avg_factors)).clamp_(min=1).item() - losses_cls = list(map(lambda x: x / cls_avg_factor, losses_cls)) - - bbox_avg_factor = reduce_mean( - sum(bbox_avg_factors)).clamp_(min=1).item() - losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) - return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) - - def _get_bboxes_single(self, - cls_score_list, - bbox_pred_list, - score_factor_list, - mlvl_priors, - img_meta, - cfg, - rescale=False, - with_nms=True, - **kwargs): - """Transform outputs of a single image into bbox predictions. - - Args: - cls_score_list (list[Tensor]): Box scores from all scale - levels of a single image, each item has shape - (num_priors * num_classes, H, W). - bbox_pred_list (list[Tensor]): Box energies / deltas from - all scale levels of a single image, each item has shape - (num_priors * 4, H, W). - score_factor_list (list[Tensor]): Score factor from all scale - levels of a single image, each item has shape - (num_priors * 1, H, W). - mlvl_priors (list[Tensor]): Each element in the list is - the priors of a single level in feature pyramid. In all - anchor-based methods, it has shape (num_priors, 4). In - all anchor-free methods, it has shape (num_priors, 2) - when `with_stride=True`, otherwise it still has shape - (num_priors, 4). - img_meta (dict): Image meta info. - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - tuple[Tensor]: Results of detected bboxes and labels. If with_nms - is False and mlvl_score_factor is None, return mlvl_bboxes and - mlvl_scores, else return mlvl_bboxes, mlvl_scores and - mlvl_score_factor. Usually with_nms is False is used for aug - test. If with_nms is True, then return the following format - - - det_bboxes (Tensor): Predicted bboxes with shape \ - [num_bboxes, 5], where the first 4 columns are bounding \ - box positions (tl_x, tl_y, br_x, br_y) and the 5-th \ - column are scores between 0 and 1. 
- - det_labels (Tensor): Predicted labels of the corresponding \ - box with shape [num_bboxes]. - """ - - cfg = self.test_cfg if cfg is None else cfg - nms_pre = cfg.get('nms_pre', -1) - - mlvl_bboxes = [] - mlvl_scores = [] - mlvl_labels = [] - for cls_score, bbox_pred, priors, stride in zip( - cls_score_list, bbox_pred_list, mlvl_priors, - self.prior_generator.strides): - - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - - bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) * stride[0] - scores = cls_score.permute(1, 2, - 0).reshape(-1, self.cls_out_channels) - - # After https://github.com/open-mmlab/mmdetection/pull/6268/, - # this operation keeps fewer bboxes under the same `nms_pre`. - # There is no difference in performance for most models. If you - # find a slight drop in performance, you can set a larger - # `nms_pre` than before. - results = filter_scores_and_topk( - scores, cfg.score_thr, nms_pre, - dict(bbox_pred=bbox_pred, priors=priors)) - scores, labels, keep_idxs, filtered_results = results - - bboxes = filtered_results['bbox_pred'] - - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_labels.append(labels) - - return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes, - img_meta['scale_factor'], cfg, rescale, - with_nms, None, **kwargs) - - def get_targets(self, - cls_scores, - bbox_preds, - anchor_list, - valid_flag_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=1, - unmap_outputs=True): - """Compute regression and classification targets for anchors in - multiple images. - - Args: - cls_scores (Tensor): Classification predictions of images, - a 3D-Tensor with shape [num_imgs, num_priors, num_classes]. - bbox_preds (Tensor): Decoded bboxes predictions of one image, - a 3D-Tensor with shape [num_imgs, num_priors, 4] in [tl_x, - tl_y, br_x, br_y] format. - anchor_list (list[list[Tensor]]): Multi level anchors of each - image. The outer list indicates images, and the inner list - corresponds to feature levels of the image. Each element of - the inner list is a tensor of shape (num_anchors, 4). - valid_flag_list (list[list[Tensor]]): Multi level valid flags of - each image. The outer list indicates images, and the inner list - corresponds to feature levels of the image. Each element of - the inner list is a tensor of shape (num_anchors, ) - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. - img_metas (list[dict]): Meta info of each image. - gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be - ignored. - gt_labels_list (list[Tensor]): Ground truth labels of each box. - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: a tuple containing learning targets. - - - anchors_list (list[list[Tensor]]): Anchors of each level. - - labels_list (list[Tensor]): Labels of each level. - - label_weights_list (list[Tensor]): Label weights of each - level. - - bbox_targets_list (list[Tensor]): BBox targets of each level. - - norm_alignment_metrics_list (list[Tensor]): Normalized - alignment metrics of each level. 
- """ - num_imgs = len(img_metas) - assert len(anchor_list) == len(valid_flag_list) == num_imgs - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - num_level_anchors_list = [num_level_anchors] * num_imgs - - # concat all level anchors and flags to a single tensor - for i in range(num_imgs): - assert len(anchor_list[i]) == len(valid_flag_list[i]) - anchor_list[i] = torch.cat(anchor_list[i]) - valid_flag_list[i] = torch.cat(valid_flag_list[i]) - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - # anchor_list: list(b * [-1, 4]) - - if self.epoch < self.initial_epoch: - (all_anchors, all_labels, all_label_weights, all_bbox_targets, - all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( - super()._get_target_single, - anchor_list, - valid_flag_list, - num_level_anchors_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - unmap_outputs=unmap_outputs) - all_assign_metrics = [ - weight[..., 0] for weight in all_bbox_weights - ] - else: - (all_anchors, all_labels, all_label_weights, all_bbox_targets, - all_assign_metrics) = multi_apply( - self._get_target_single, - cls_scores, - bbox_preds, - anchor_list, - valid_flag_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - unmap_outputs=unmap_outputs) - # no valid anchors - if any([labels is None for labels in all_labels]): - return None - - # split targets to a list w.r.t. multiple levels - anchors_list = images_to_levels(all_anchors, num_level_anchors) - labels_list = images_to_levels(all_labels, num_level_anchors) - label_weights_list = images_to_levels(all_label_weights, - num_level_anchors) - bbox_targets_list = images_to_levels(all_bbox_targets, - num_level_anchors) - norm_alignment_metrics_list = images_to_levels(all_assign_metrics, - num_level_anchors) - - return (anchors_list, labels_list, label_weights_list, - bbox_targets_list, norm_alignment_metrics_list) - - def _get_target_single(self, - cls_scores, - bbox_preds, - flat_anchors, - valid_flags, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=1, - unmap_outputs=True): - """Compute regression, classification targets for anchors in a single - image. - - Args: - cls_scores (list(Tensor)): Box scores for each image. - bbox_preds (list(Tensor)): Box energies / deltas for each image. - flat_anchors (Tensor): Multi-level anchors of the image, which are - concatenated into a single tensor of shape (num_anchors ,4) - valid_flags (Tensor): Multi level valid flags of the image, - which are concatenated into a single tensor of - shape (num_anchors,). - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - img_meta (dict): Meta info of the image. - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: N is the number of total anchors in the image. - anchors (Tensor): All anchors in the image with shape (N, 4). - labels (Tensor): Labels of all anchors in the image with shape - (N,). 
- label_weights (Tensor): Label weights of all anchor in the - image with shape (N,). - bbox_targets (Tensor): BBox targets of all anchors in the - image with shape (N, 4). - norm_alignment_metrics (Tensor): Normalized alignment metrics - of all priors in the image with shape (N,). - """ - inside_flags = anchor_inside_flags(flat_anchors, valid_flags, - img_meta['img_shape'][:2], - self.train_cfg.allowed_border) - if not inside_flags.any(): - return (None, ) * 7 - # assign gt and sample anchors - anchors = flat_anchors[inside_flags, :] - assign_result = self.alignment_assigner.assign( - cls_scores[inside_flags, :], bbox_preds[inside_flags, :], anchors, - gt_bboxes, gt_bboxes_ignore, gt_labels, self.alpha, self.beta) - assign_ious = assign_result.max_overlaps - assign_metrics = assign_result.assign_metrics - - sampling_result = self.sampler.sample(assign_result, anchors, - gt_bboxes) - - num_valid_anchors = anchors.shape[0] - bbox_targets = torch.zeros_like(anchors) - labels = anchors.new_full((num_valid_anchors, ), - self.num_classes, - dtype=torch.long) - label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) - norm_alignment_metrics = anchors.new_zeros( - num_valid_anchors, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - # point-based - pos_bbox_targets = sampling_result.pos_gt_bboxes - bbox_targets[pos_inds, :] = pos_bbox_targets - - if gt_labels is None: - # Only rpn gives gt_labels as None - # Foreground is the first class since v2.5.0 - labels[pos_inds] = 0 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if self.train_cfg.pos_weight <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg.pos_weight - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - class_assigned_gt_inds = torch.unique( - sampling_result.pos_assigned_gt_inds) - for gt_inds in class_assigned_gt_inds: - gt_class_inds = pos_inds[sampling_result.pos_assigned_gt_inds == - gt_inds] - pos_alignment_metrics = assign_metrics[gt_class_inds] - pos_ious = assign_ious[gt_class_inds] - pos_norm_alignment_metrics = pos_alignment_metrics / ( - pos_alignment_metrics.max() + 10e-8) * pos_ious.max() - norm_alignment_metrics[gt_class_inds] = pos_norm_alignment_metrics - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_anchors.size(0) - anchors = unmap(anchors, num_total_anchors, inside_flags) - labels = unmap( - labels, num_total_anchors, inside_flags, fill=self.num_classes) - label_weights = unmap(label_weights, num_total_anchors, - inside_flags) - bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) - norm_alignment_metrics = unmap(norm_alignment_metrics, - num_total_anchors, inside_flags) - return (anchors, labels, label_weights, bbox_targets, - norm_alignment_metrics) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/vfnet_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/vfnet_head.py deleted file mode 100644 index ba285e22e32f3764ffa86f06246ffd5d2fbdd03d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/vfnet_head.py +++ /dev/null @@ -1,740 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import warnings - -import numpy as np -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, Scale -from mmcv.ops import DeformConv2d -from mmcv.runner import force_fp32 - -from mmdet.core import (MlvlPointGenerator, bbox_overlaps, build_assigner, - build_prior_generator, build_sampler, multi_apply, - reduce_mean) -from ..builder import HEADS, build_loss -from .atss_head import ATSSHead -from .fcos_head import FCOSHead - -INF = 1e8 - - -@HEADS.register_module() -class VFNetHead(ATSSHead, FCOSHead): - """Head of `VarifocalNet (VFNet): An IoU-aware Dense Object - Detector.`_. - - The VFNet predicts IoU-aware classification scores which mix the - object presence confidence and object localization accuracy as the - detection score. It is built on the FCOS architecture and uses ATSS - for defining positive/negative training examples. The VFNet is trained - with Varifocal Loss and empolys star-shaped deformable convolution to - extract features for a bbox. - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - regress_ranges (tuple[tuple[int, int]]): Regress range of multiple - level points. - center_sampling (bool): If true, use center sampling. Default: False. - center_sample_radius (float): Radius of center sampling. Default: 1.5. - sync_num_pos (bool): If true, synchronize the number of positive - examples across GPUs. Default: True - gradient_mul (float): The multiplier to gradients from bbox refinement - and recognition. Default: 0.1. - bbox_norm_type (str): The bbox normalization type, 'reg_denom' or - 'stride'. Default: reg_denom - loss_cls_fl (dict): Config of focal loss. - use_vfl (bool): If true, use varifocal loss for training. - Default: True. - loss_cls (dict): Config of varifocal loss. - loss_bbox (dict): Config of localization loss, GIoU Loss. - loss_bbox (dict): Config of localization refinement loss, GIoU Loss. - norm_cfg (dict): dictionary to construct and config norm layer. - Default: norm_cfg=dict(type='GN', num_groups=32, - requires_grad=True). - use_atss (bool): If true, use ATSS to define positive/negative - examples. Default: True. - anchor_generator (dict): Config of anchor generator for ATSS. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- - Example: - >>> self = VFNetHead(11, 7) - >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] - >>> cls_score, bbox_pred, bbox_pred_refine= self.forward(feats) - >>> assert len(cls_score) == len(self.scales) - """ # noqa: E501 - - def __init__(self, - num_classes, - in_channels, - regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), - (512, INF)), - center_sampling=False, - center_sample_radius=1.5, - sync_num_pos=True, - gradient_mul=0.1, - bbox_norm_type='reg_denom', - loss_cls_fl=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - use_vfl=True, - loss_cls=dict( - type='VarifocalLoss', - use_sigmoid=True, - alpha=0.75, - gamma=2.0, - iou_weighted=True, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=1.5), - loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0), - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), - use_atss=True, - reg_decoded_bbox=True, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - center_offset=0.0, - strides=[8, 16, 32, 64, 128]), - init_cfg=dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', - name='vfnet_cls', - std=0.01, - bias_prob=0.01)), - **kwargs): - # dcn base offsets, adapted from reppoints_head.py - self.num_dconv_points = 9 - self.dcn_kernel = int(np.sqrt(self.num_dconv_points)) - self.dcn_pad = int((self.dcn_kernel - 1) / 2) - dcn_base = np.arange(-self.dcn_pad, - self.dcn_pad + 1).astype(np.float64) - dcn_base_y = np.repeat(dcn_base, self.dcn_kernel) - dcn_base_x = np.tile(dcn_base, self.dcn_kernel) - dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape( - (-1)) - self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1) - - super(FCOSHead, self).__init__( - num_classes, - in_channels, - norm_cfg=norm_cfg, - init_cfg=init_cfg, - **kwargs) - self.regress_ranges = regress_ranges - self.reg_denoms = [ - regress_range[-1] for regress_range in regress_ranges - ] - self.reg_denoms[-1] = self.reg_denoms[-2] * 2 - self.center_sampling = center_sampling - self.center_sample_radius = center_sample_radius - self.sync_num_pos = sync_num_pos - self.bbox_norm_type = bbox_norm_type - self.gradient_mul = gradient_mul - self.use_vfl = use_vfl - if self.use_vfl: - self.loss_cls = build_loss(loss_cls) - else: - self.loss_cls = build_loss(loss_cls_fl) - self.loss_bbox = build_loss(loss_bbox) - self.loss_bbox_refine = build_loss(loss_bbox_refine) - - # for getting ATSS targets - self.use_atss = use_atss - self.reg_decoded_bbox = reg_decoded_bbox - self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) - - self.anchor_center_offset = anchor_generator['center_offset'] - - self.num_base_priors = self.prior_generator.num_base_priors[0] - - self.sampling = False - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - # only be used in `get_atss_targets` when `use_atss` is True - self.atss_prior_generator = build_prior_generator(anchor_generator) - - self.fcos_prior_generator = MlvlPointGenerator( - anchor_generator['strides'], - self.anchor_center_offset if self.use_atss else 0.5) - - # In order to reuse the `get_bboxes` in `BaseDenseHead. - # Only be used in testing phase. 
- self.prior_generator = self.fcos_prior_generator - - @property - def num_anchors(self): - """ - Returns: - int: Number of anchors on each point of feature map. - """ - warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' - 'please use "num_base_priors" instead') - return self.num_base_priors - - @property - def anchor_generator(self): - warnings.warn('DeprecationWarning: anchor_generator is deprecated, ' - 'please use "atss_prior_generator" instead') - return self.prior_generator - - def _init_layers(self): - """Initialize layers of the head.""" - super(FCOSHead, self)._init_cls_convs() - super(FCOSHead, self)._init_reg_convs() - self.relu = nn.ReLU(inplace=True) - self.vfnet_reg_conv = ConvModule( - self.feat_channels, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - bias=self.conv_bias) - self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) - self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) - - self.vfnet_reg_refine_dconv = DeformConv2d( - self.feat_channels, - self.feat_channels, - self.dcn_kernel, - 1, - padding=self.dcn_pad) - self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1) - self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides]) - - self.vfnet_cls_dconv = DeformConv2d( - self.feat_channels, - self.feat_channels, - self.dcn_kernel, - 1, - padding=self.dcn_pad) - self.vfnet_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: - cls_scores (list[Tensor]): Box iou-aware scores for each scale - level, each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box offsets for each - scale level, each is a 4D-tensor, the channel number is - num_points * 4. - bbox_preds_refine (list[Tensor]): Refined Box offsets for - each scale level, each is a 4D-tensor, the channel - number is num_points * 4. - """ - return multi_apply(self.forward_single, feats, self.scales, - self.scales_refine, self.strides, self.reg_denoms) - - def forward_single(self, x, scale, scale_refine, stride, reg_denom): - """Forward features of a single scale level. - - Args: - x (Tensor): FPN feature maps of the specified stride. - scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize - the bbox prediction. - scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to - resize the refined bbox prediction. - stride (int): The corresponding stride for feature maps, - used to normalize the bbox prediction when - bbox_norm_type = 'stride'. - reg_denom (int): The corresponding regression range for feature - maps, only used to normalize the bbox prediction when - bbox_norm_type = 'reg_denom'. - - Returns: - tuple: iou-aware cls scores for each box, bbox predictions and - refined bbox predictions of input feature maps. 
- """ - cls_feat = x - reg_feat = x - - for cls_layer in self.cls_convs: - cls_feat = cls_layer(cls_feat) - - for reg_layer in self.reg_convs: - reg_feat = reg_layer(reg_feat) - - # predict the bbox_pred of different level - reg_feat_init = self.vfnet_reg_conv(reg_feat) - if self.bbox_norm_type == 'reg_denom': - bbox_pred = scale( - self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom - elif self.bbox_norm_type == 'stride': - bbox_pred = scale( - self.vfnet_reg(reg_feat_init)).float().exp() * stride - else: - raise NotImplementedError - - # compute star deformable convolution offsets - # converting dcn_offset to reg_feat.dtype thus VFNet can be - # trained with FP16 - dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul, - stride).to(reg_feat.dtype) - - # refine the bbox_pred - reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset)) - bbox_pred_refine = scale_refine( - self.vfnet_reg_refine(reg_feat)).float().exp() - bbox_pred_refine = bbox_pred_refine * bbox_pred.detach() - - # predict the iou-aware cls score - cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset)) - cls_score = self.vfnet_cls(cls_feat) - - if self.training: - return cls_score, bbox_pred, bbox_pred_refine - else: - return cls_score, bbox_pred_refine - - def star_dcn_offset(self, bbox_pred, gradient_mul, stride): - """Compute the star deformable conv offsets. - - Args: - bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, b). - gradient_mul (float): Gradient multiplier. - stride (int): The corresponding stride for feature maps, - used to project the bbox onto the feature map. - - Returns: - dcn_offsets (Tensor): The offsets for deformable convolution. - """ - dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred) - bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \ - gradient_mul * bbox_pred - # map to the feature map scale - bbox_pred_grad_mul = bbox_pred_grad_mul / stride - N, C, H, W = bbox_pred.size() - - x1 = bbox_pred_grad_mul[:, 0, :, :] - y1 = bbox_pred_grad_mul[:, 1, :, :] - x2 = bbox_pred_grad_mul[:, 2, :, :] - y2 = bbox_pred_grad_mul[:, 3, :, :] - bbox_pred_grad_mul_offset = bbox_pred.new_zeros( - N, 2 * self.num_dconv_points, H, W) - bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1 # -y1 - bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1 # -x1 - bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1 # -y1 - bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1 # -y1 - bbox_pred_grad_mul_offset[:, 5, :, :] = x2 # x2 - bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1 # -x1 - bbox_pred_grad_mul_offset[:, 11, :, :] = x2 # x2 - bbox_pred_grad_mul_offset[:, 12, :, :] = y2 # y2 - bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1 # -x1 - bbox_pred_grad_mul_offset[:, 14, :, :] = y2 # y2 - bbox_pred_grad_mul_offset[:, 16, :, :] = y2 # y2 - bbox_pred_grad_mul_offset[:, 17, :, :] = x2 # x2 - dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset - - return dcn_offset - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine')) - def loss(self, - cls_scores, - bbox_preds, - bbox_preds_refine, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute loss of the head. - - Args: - cls_scores (list[Tensor]): Box iou-aware scores for each scale - level, each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box offsets for each - scale level, each is a 4D-tensor, the channel number is - num_points * 4. 
- bbox_preds_refine (list[Tensor]): Refined Box offsets for - each scale level, each is a 4D-tensor, the channel - number is num_points * 4. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - Default: None. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine) - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - all_level_points = self.fcos_prior_generator.grid_priors( - featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device) - labels, label_weights, bbox_targets, bbox_weights = self.get_targets( - cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas, - gt_bboxes_ignore) - - num_imgs = cls_scores[0].size(0) - # flatten cls_scores, bbox_preds and bbox_preds_refine - flatten_cls_scores = [ - cls_score.permute(0, 2, 3, - 1).reshape(-1, - self.cls_out_channels).contiguous() - for cls_score in cls_scores - ] - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous() - for bbox_pred in bbox_preds - ] - flatten_bbox_preds_refine = [ - bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous() - for bbox_pred_refine in bbox_preds_refine - ] - flatten_cls_scores = torch.cat(flatten_cls_scores) - flatten_bbox_preds = torch.cat(flatten_bbox_preds) - flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine) - flatten_labels = torch.cat(labels) - flatten_bbox_targets = torch.cat(bbox_targets) - # repeat points to align with bbox_preds - flatten_points = torch.cat( - [points.repeat(num_imgs, 1) for points in all_level_points]) - - # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes - bg_class_ind = self.num_classes - pos_inds = torch.where( - ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0] - num_pos = len(pos_inds) - - pos_bbox_preds = flatten_bbox_preds[pos_inds] - pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds] - pos_labels = flatten_labels[pos_inds] - - # sync num_pos across all gpus - if self.sync_num_pos: - num_pos_avg_per_gpu = reduce_mean( - pos_inds.new_tensor(num_pos).float()).item() - num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0) - else: - num_pos_avg_per_gpu = num_pos - - pos_bbox_targets = flatten_bbox_targets[pos_inds] - pos_points = flatten_points[pos_inds] - - pos_decoded_bbox_preds = self.bbox_coder.decode( - pos_points, pos_bbox_preds) - pos_decoded_target_preds = self.bbox_coder.decode( - pos_points, pos_bbox_targets) - iou_targets_ini = bbox_overlaps( - pos_decoded_bbox_preds, - pos_decoded_target_preds.detach(), - is_aligned=True).clamp(min=1e-6) - bbox_weights_ini = iou_targets_ini.clone().detach() - bbox_avg_factor_ini = reduce_mean( - bbox_weights_ini.sum()).clamp_(min=1).item() - - pos_decoded_bbox_preds_refine = \ - self.bbox_coder.decode(pos_points, pos_bbox_preds_refine) - iou_targets_rf = bbox_overlaps( - pos_decoded_bbox_preds_refine, - pos_decoded_target_preds.detach(), - is_aligned=True).clamp(min=1e-6) - bbox_weights_rf = iou_targets_rf.clone().detach() - bbox_avg_factor_rf = reduce_mean( - bbox_weights_rf.sum()).clamp_(min=1).item() - - if num_pos > 0: - loss_bbox = self.loss_bbox( - pos_decoded_bbox_preds, - 
pos_decoded_target_preds.detach(), - weight=bbox_weights_ini, - avg_factor=bbox_avg_factor_ini) - - loss_bbox_refine = self.loss_bbox_refine( - pos_decoded_bbox_preds_refine, - pos_decoded_target_preds.detach(), - weight=bbox_weights_rf, - avg_factor=bbox_avg_factor_rf) - - # build IoU-aware cls_score targets - if self.use_vfl: - pos_ious = iou_targets_rf.clone().detach() - cls_iou_targets = torch.zeros_like(flatten_cls_scores) - cls_iou_targets[pos_inds, pos_labels] = pos_ious - else: - loss_bbox = pos_bbox_preds.sum() * 0 - loss_bbox_refine = pos_bbox_preds_refine.sum() * 0 - if self.use_vfl: - cls_iou_targets = torch.zeros_like(flatten_cls_scores) - - if self.use_vfl: - loss_cls = self.loss_cls( - flatten_cls_scores, - cls_iou_targets, - avg_factor=num_pos_avg_per_gpu) - else: - loss_cls = self.loss_cls( - flatten_cls_scores, - flatten_labels, - weight=label_weights, - avg_factor=num_pos_avg_per_gpu) - - return dict( - loss_cls=loss_cls, - loss_bbox=loss_bbox, - loss_bbox_rf=loss_bbox_refine) - - def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels, - img_metas, gt_bboxes_ignore): - """A wrapper for computing ATSS and FCOS targets for points in multiple - images. - - Args: - cls_scores (list[Tensor]): Box iou-aware scores for each scale - level with shape (N, num_points * num_classes, H, W). - mlvl_points (list[Tensor]): Points of each fpn level, each has - shape (num_points, 2). - gt_bboxes (list[Tensor]): Ground truth bboxes of each image, - each has shape (num_gt, 4). - gt_labels (list[Tensor]): Ground truth labels of each box, - each has shape (num_gt,). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - - Returns: - tuple: - labels_list (list[Tensor]): Labels of each level. - label_weights (Tensor/None): Label weights of all levels. - bbox_targets_list (list[Tensor]): Regression targets of each - level, (l, t, r, b). - bbox_weights (Tensor/None): Bbox weights of all levels. - """ - if self.use_atss: - return self.get_atss_targets(cls_scores, mlvl_points, gt_bboxes, - gt_labels, img_metas, - gt_bboxes_ignore) - else: - self.norm_on_bbox = False - return self.get_fcos_targets(mlvl_points, gt_bboxes, gt_labels) - - def _get_target_single(self, *args, **kwargs): - """Avoid ambiguity in multiple inheritance.""" - if self.use_atss: - return ATSSHead._get_target_single(self, *args, **kwargs) - else: - return FCOSHead._get_target_single(self, *args, **kwargs) - - def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list): - """Compute FCOS regression and classification targets for points in - multiple images. - - Args: - points (list[Tensor]): Points of each fpn level, each has shape - (num_points, 2). - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, - each has shape (num_gt, 4). - gt_labels_list (list[Tensor]): Ground truth labels of each box, - each has shape (num_gt,). - - Returns: - tuple: - labels (list[Tensor]): Labels of each level. - label_weights: None, to be compatible with ATSS targets. - bbox_targets (list[Tensor]): BBox targets of each level. - bbox_weights: None, to be compatible with ATSS targets. 
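# --- Illustrative sketch (hypothetical indices and IoUs) ----------------------
# Building the IoU-aware classification target used with the varifocal loss
# above: each positive entry stores the IoU of its refined box with the ground
# truth instead of a hard 1, and all negative entries stay zero.
import torch

num_points, num_classes = 5, 3
flatten_cls_scores = torch.randn(num_points, num_classes)

pos_inds = torch.tensor([0, 3])                    # assumed positive locations
pos_labels = torch.tensor([2, 1])                  # their assigned classes
pos_ious = torch.tensor([0.81, 0.47])              # IoU of refined preds vs. GT

cls_iou_targets = torch.zeros_like(flatten_cls_scores)
cls_iou_targets[pos_inds, pos_labels] = pos_ious
print(cls_iou_targets)
# -----------------------------------------------------------------------------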
- """ - labels, bbox_targets = FCOSHead.get_targets(self, points, - gt_bboxes_list, - gt_labels_list) - label_weights = None - bbox_weights = None - return labels, label_weights, bbox_targets, bbox_weights - - def get_anchors(self, featmap_sizes, img_metas, device='cuda'): - """Get anchors according to feature map sizes. - - Args: - featmap_sizes (list[tuple]): Multi-level feature map sizes. - img_metas (list[dict]): Image meta info. - device (torch.device | str): Device for returned tensors - - Returns: - tuple: - anchor_list (list[Tensor]): Anchors of each image. - valid_flag_list (list[Tensor]): Valid flags of each image. - """ - num_imgs = len(img_metas) - - # since feature map sizes of all images are the same, we only compute - # anchors for one time - multi_level_anchors = self.atss_prior_generator.grid_priors( - featmap_sizes, device=device) - anchor_list = [multi_level_anchors for _ in range(num_imgs)] - - # for each image, we compute valid flags of multi level anchors - valid_flag_list = [] - for img_id, img_meta in enumerate(img_metas): - multi_level_flags = self.atss_prior_generator.valid_flags( - featmap_sizes, img_meta['pad_shape'], device=device) - valid_flag_list.append(multi_level_flags) - - return anchor_list, valid_flag_list - - def get_atss_targets(self, - cls_scores, - mlvl_points, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """A wrapper for computing ATSS targets for points in multiple images. - - Args: - cls_scores (list[Tensor]): Box iou-aware scores for each scale - level with shape (N, num_points * num_classes, H, W). - mlvl_points (list[Tensor]): Points of each fpn level, each has - shape (num_points, 2). - gt_bboxes (list[Tensor]): Ground truth bboxes of each image, - each has shape (num_gt, 4). - gt_labels (list[Tensor]): Ground truth labels of each box, - each has shape (num_gt,). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). Default: None. - - Returns: - tuple: - labels_list (list[Tensor]): Labels of each level. - label_weights (Tensor): Label weights of all levels. - bbox_targets_list (list[Tensor]): Regression targets of each - level, (l, t, r, b). - bbox_weights (Tensor): Bbox weights of all levels. 
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len( - featmap_sizes - ) == self.atss_prior_generator.num_levels == \ - self.fcos_prior_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - - cls_reg_targets = ATSSHead.get_targets( - self, - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels, - unmap_outputs=True) - if cls_reg_targets is None: - return None - - (anchor_list, labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets - - bbox_targets_list = [ - bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list - ] - - num_imgs = len(img_metas) - # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format - bbox_targets_list = self.transform_bbox_targets( - bbox_targets_list, mlvl_points, num_imgs) - - labels_list = [labels.reshape(-1) for labels in labels_list] - label_weights_list = [ - label_weights.reshape(-1) for label_weights in label_weights_list - ] - bbox_weights_list = [ - bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list - ] - label_weights = torch.cat(label_weights_list) - bbox_weights = torch.cat(bbox_weights_list) - return labels_list, label_weights, bbox_targets_list, bbox_weights - - def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs): - """Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format. - - Args: - decoded_bboxes (list[Tensor]): Regression targets of each level, - in the form of (x1, y1, x2, y2). - mlvl_points (list[Tensor]): Points of each fpn level, each has - shape (num_points, 2). - num_imgs (int): the number of images in a batch. - - Returns: - bbox_targets (list[Tensor]): Regression targets of each level in - the form of (l, t, r, b). - """ - # TODO: Re-implemented in Class PointCoder - assert len(decoded_bboxes) == len(mlvl_points) - num_levels = len(decoded_bboxes) - mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points] - bbox_targets = [] - for i in range(num_levels): - bbox_target = self.bbox_coder.encode(mlvl_points[i], - decoded_bboxes[i]) - bbox_targets.append(bbox_target) - - return bbox_targets - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - """Override the method in the parent class to avoid changing para's - name.""" - pass - - def _get_points_single(self, - featmap_size, - stride, - dtype, - device, - flatten=False): - """Get points according to feature map size. - - This function will be deprecated soon. 
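# `transform_bbox_targets` above delegates the actual conversion to
# `self.bbox_coder.encode`.  For a point-based head this amounts to expressing
# a box as (left, top, right, bottom) distances from the sampling point; a
# stand-alone sketch of that encode/decode pair (helper names are illustrative,
# not the coder's real API):
import torch

def distance_encode(points, bboxes):
    # points: (n, 2) as (x, y); bboxes: (n, 4) as (x1, y1, x2, y2).
    left = points[:, 0] - bboxes[:, 0]
    top = points[:, 1] - bboxes[:, 1]
    right = bboxes[:, 2] - points[:, 0]
    bottom = bboxes[:, 3] - points[:, 1]
    return torch.stack([left, top, right, bottom], dim=-1)

def distance_decode(points, distances):
    # Inverse mapping back to (x1, y1, x2, y2).
    x1y1 = points - distances[:, :2]
    x2y2 = points + distances[:, 2:]
    return torch.cat([x1y1, x2y2], dim=-1)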
- """ - - warnings.warn( - '`_get_points_single` in `VFNetHead` will be ' - 'deprecated soon, we support a multi level point generator now' - 'you can get points of a single level feature map' - 'with `self.fcos_prior_generator.single_level_grid_priors` ') - - h, w = featmap_size - x_range = torch.arange( - 0, w * stride, stride, dtype=dtype, device=device) - y_range = torch.arange( - 0, h * stride, stride, dtype=dtype, device=device) - y, x = torch.meshgrid(y_range, x_range) - # to be compatible with anchor points in ATSS - if self.use_atss: - points = torch.stack( - (x.reshape(-1), y.reshape(-1)), dim=-1) + \ - stride * self.anchor_center_offset - else: - points = torch.stack( - (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2 - return points diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/yolact_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/yolact_head.py deleted file mode 100644 index 8f89a271baf2fd75eb63dc16e8343870fe640760..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/yolact_head.py +++ /dev/null @@ -1,1018 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule, ModuleList, force_fp32 - -from mmdet.core import build_sampler, fast_nms, images_to_levels, multi_apply -from mmdet.core.utils import select_single_mlvl -from ..builder import HEADS, build_loss -from .anchor_head import AnchorHead - - -@HEADS.register_module() -class YOLACTHead(AnchorHead): - """YOLACT box head used in https://arxiv.org/abs/1904.02689. - - Note that YOLACT head is a light version of RetinaNet head. - Four differences are described as follows: - - 1. YOLACT box head has three-times fewer anchors. - 2. YOLACT box head shares the convs for box and cls branches. - 3. YOLACT box head uses OHEM instead of Focal loss. - 4. YOLACT box head predicts a set of mask coefficients for each box. - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - anchor_generator (dict): Config dict for anchor generator - loss_cls (dict): Config of classification loss. - loss_bbox (dict): Config of localization loss. - num_head_convs (int): Number of the conv layers shared by - box and cls branches. - num_protos (int): Number of the mask coefficients. - use_ohem (bool): If true, ``loss_single_OHEM`` will be used for - cls loss calculation. If false, ``loss_single`` will be used. - conv_cfg (dict): Dictionary to construct and config conv layer. - norm_cfg (dict): Dictionary to construct and config norm layer. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - num_classes, - in_channels, - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=3, - scales_per_octave=1, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - reduction='none', - loss_weight=1.0), - loss_bbox=dict( - type='SmoothL1Loss', beta=1.0, loss_weight=1.5), - num_head_convs=1, - num_protos=32, - use_ohem=True, - conv_cfg=None, - norm_cfg=None, - init_cfg=dict( - type='Xavier', - distribution='uniform', - bias=0, - layer='Conv2d'), - **kwargs): - self.num_head_convs = num_head_convs - self.num_protos = num_protos - self.use_ohem = use_ohem - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - super(YOLACTHead, self).__init__( - num_classes, - in_channels, - loss_cls=loss_cls, - loss_bbox=loss_bbox, - anchor_generator=anchor_generator, - init_cfg=init_cfg, - **kwargs) - if self.use_ohem: - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - self.sampling = False - - def _init_layers(self): - """Initialize layers of the head.""" - self.relu = nn.ReLU(inplace=True) - self.head_convs = ModuleList() - for i in range(self.num_head_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.head_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.conv_cls = nn.Conv2d( - self.feat_channels, - self.num_base_priors * self.cls_out_channels, - 3, - padding=1) - self.conv_reg = nn.Conv2d( - self.feat_channels, self.num_base_priors * 4, 3, padding=1) - self.conv_coeff = nn.Conv2d( - self.feat_channels, - self.num_base_priors * self.num_protos, - 3, - padding=1) - - def forward_single(self, x): - """Forward feature of a single scale level. - - Args: - x (Tensor): Features of a single scale level. - - Returns: - tuple: - cls_score (Tensor): Cls scores for a single scale level \ - the channels number is num_anchors * num_classes. - bbox_pred (Tensor): Box energies / deltas for a single scale \ - level, the channels number is num_anchors * 4. - coeff_pred (Tensor): Mask coefficients for a single scale \ - level, the channels number is num_anchors * num_protos. - """ - for head_conv in self.head_convs: - x = head_conv(x) - cls_score = self.conv_cls(x) - bbox_pred = self.conv_reg(x) - coeff_pred = self.conv_coeff(x).tanh() - return cls_score, bbox_pred, coeff_pred - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """A combination of the func:``AnchorHead.loss`` and - func:``SSDHead.loss``. - - When ``self.use_ohem == True``, it functions like ``SSDHead.loss``, - otherwise, it follows ``AnchorHead.loss``. Besides, it additionally - returns ``sampling_results``. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): Class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): Specify which bounding - boxes can be ignored when computing the loss. 
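# The loss below (like the VFNet loss above) repeatedly flattens per-level
# outputs of shape (N, num_priors * K, H, W) into rows of K values so that all
# levels and images can be indexed together.  The same idiom in isolation, with
# an illustrative helper name and sample shapes:
import torch

def flatten_head_output(feat, last_dim):
    # (N, A * last_dim, H, W) -> (N * H * W * A, last_dim)
    return feat.permute(0, 2, 3, 1).reshape(feat.size(0), -1, last_dim).reshape(-1, last_dim)

# e.g. a classification map with 3 base priors and 81 channels per prior:
flat = flatten_head_output(torch.randn(2, 3 * 81, 69, 69), last_dim=81)  # (2*69*69*3, 81)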
Default: None - - Returns: - tuple: - dict[str, Tensor]: A dictionary of loss components. - List[:obj:``SamplingResult``]: Sampler results for each image. - """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels, - unmap_outputs=not self.use_ohem, - return_sampling_results=True) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg, sampling_results) = cls_reg_targets - - if self.use_ohem: - num_images = len(img_metas) - all_cls_scores = torch.cat([ - s.permute(0, 2, 3, 1).reshape( - num_images, -1, self.cls_out_channels) for s in cls_scores - ], 1) - all_labels = torch.cat(labels_list, -1).view(num_images, -1) - all_label_weights = torch.cat(label_weights_list, - -1).view(num_images, -1) - all_bbox_preds = torch.cat([ - b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) - for b in bbox_preds - ], -2) - all_bbox_targets = torch.cat(bbox_targets_list, - -2).view(num_images, -1, 4) - all_bbox_weights = torch.cat(bbox_weights_list, - -2).view(num_images, -1, 4) - - # concat all level anchors to a single tensor - all_anchors = [] - for i in range(num_images): - all_anchors.append(torch.cat(anchor_list[i])) - - # check NaN and Inf - assert torch.isfinite(all_cls_scores).all().item(), \ - 'classification scores become infinite or NaN!' - assert torch.isfinite(all_bbox_preds).all().item(), \ - 'bbox predications become infinite or NaN!' 
- - losses_cls, losses_bbox = multi_apply( - self.loss_single_OHEM, - all_cls_scores, - all_bbox_preds, - all_anchors, - all_labels, - all_label_weights, - all_bbox_targets, - all_bbox_weights, - num_total_samples=num_total_pos) - else: - num_total_samples = ( - num_total_pos + - num_total_neg if self.sampling else num_total_pos) - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - # concat all level anchors and flags to a single tensor - concat_anchor_list = [] - for i in range(len(anchor_list)): - concat_anchor_list.append(torch.cat(anchor_list[i])) - all_anchor_list = images_to_levels(concat_anchor_list, - num_level_anchors) - losses_cls, losses_bbox = multi_apply( - self.loss_single, - cls_scores, - bbox_preds, - all_anchor_list, - labels_list, - label_weights_list, - bbox_targets_list, - bbox_weights_list, - num_total_samples=num_total_samples) - - return dict( - loss_cls=losses_cls, loss_bbox=losses_bbox), sampling_results - - def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels, - label_weights, bbox_targets, bbox_weights, - num_total_samples): - """"See func:``SSDHead.loss``.""" - loss_cls_all = self.loss_cls(cls_score, labels, label_weights) - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero( - as_tuple=False).reshape(-1) - neg_inds = (labels == self.num_classes).nonzero( - as_tuple=False).view(-1) - - num_pos_samples = pos_inds.size(0) - if num_pos_samples == 0: - num_neg_samples = neg_inds.size(0) - else: - num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples - if num_neg_samples > neg_inds.size(0): - num_neg_samples = neg_inds.size(0) - topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) - loss_cls_pos = loss_cls_all[pos_inds].sum() - loss_cls_neg = topk_loss_cls_neg.sum() - loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples - if self.reg_decoded_bbox: - # When the regression loss (e.g. `IouLoss`, `GIouLoss`) - # is applied directly on the decoded bounding boxes, it - # decodes the already encoded coordinates to absolute format. - bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) - loss_bbox = self.loss_bbox( - bbox_pred, - bbox_targets, - bbox_weights, - avg_factor=num_total_samples) - return loss_cls[None], loss_bbox - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'coeff_preds')) - def get_bboxes(self, - cls_scores, - bbox_preds, - coeff_preds, - img_metas, - cfg=None, - rescale=False): - """"Similar to func:``AnchorHead.get_bboxes``, but additionally - processes coeff_preds. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - with shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - coeff_preds (list[Tensor]): Mask coefficients for each scale - level with shape (N, num_anchors * num_protos, H, W) - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - cfg (mmcv.Config | None): Test / postprocessing configuration, - if None, test_cfg would be used - rescale (bool): If True, return boxes in original image space. - Default: False. - - Returns: - list[tuple[Tensor, Tensor, Tensor]]: Each item in result_list is - a 3-tuple. The first item is an (n, 5) tensor, where the - first 4 columns are bounding box positions - (tl_x, tl_y, br_x, br_y) and the 5-th column is a score - between 0 and 1. 
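# `loss_single_OHEM` above keeps every positive anchor but only the hardest
# negatives, capped at `neg_pos_ratio` times the number of positives.  A
# slightly simplified sketch of that selection (the ratio of 3 is only an
# illustrative default, not read from this head's train_cfg):
import torch

def ohem_cls_loss(loss_per_anchor, labels, num_classes, neg_pos_ratio=3):
    # Foreground labels lie in [0, num_classes - 1]; background is num_classes.
    pos_inds = ((labels >= 0) & (labels < num_classes)).nonzero(as_tuple=False).reshape(-1)
    neg_inds = (labels == num_classes).nonzero(as_tuple=False).reshape(-1)
    if pos_inds.numel() > 0:
        num_neg = min(neg_pos_ratio * pos_inds.numel(), neg_inds.numel())
    else:
        num_neg = neg_inds.numel()
    hard_neg_loss, _ = loss_per_anchor[neg_inds].topk(num_neg)
    return loss_per_anchor[pos_inds].sum() + hard_neg_loss.sum()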
The second item is an (n,) tensor where each - item is the predicted class label of the corresponding box. - The third item is an (n, num_protos) tensor where each item - is the predicted mask coefficients of instance inside the - corresponding box. - """ - assert len(cls_scores) == len(bbox_preds) - num_levels = len(cls_scores) - - device = cls_scores[0].device - featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] - mlvl_anchors = self.prior_generator.grid_priors( - featmap_sizes, device=device) - - det_bboxes = [] - det_labels = [] - det_coeffs = [] - for img_id in range(len(img_metas)): - cls_score_list = select_single_mlvl(cls_scores, img_id) - bbox_pred_list = select_single_mlvl(bbox_preds, img_id) - coeff_pred_list = select_single_mlvl(coeff_preds, img_id) - img_shape = img_metas[img_id]['img_shape'] - scale_factor = img_metas[img_id]['scale_factor'] - bbox_res = self._get_bboxes_single(cls_score_list, bbox_pred_list, - coeff_pred_list, mlvl_anchors, - img_shape, scale_factor, cfg, - rescale) - det_bboxes.append(bbox_res[0]) - det_labels.append(bbox_res[1]) - det_coeffs.append(bbox_res[2]) - return det_bboxes, det_labels, det_coeffs - - def _get_bboxes_single(self, - cls_score_list, - bbox_pred_list, - coeff_preds_list, - mlvl_anchors, - img_shape, - scale_factor, - cfg, - rescale=False): - """"Similar to func:``AnchorHead._get_bboxes_single``, but additionally - processes coeff_preds_list and uses fast NMS instead of traditional - NMS. - - Args: - cls_score_list (list[Tensor]): Box scores for a single scale level - Has shape (num_anchors * num_classes, H, W). - bbox_pred_list (list[Tensor]): Box energies / deltas for a single - scale level with shape (num_anchors * 4, H, W). - coeff_preds_list (list[Tensor]): Mask coefficients for a single - scale level with shape (num_anchors * num_protos, H, W). - mlvl_anchors (list[Tensor]): Box reference for a single scale level - with shape (num_total_anchors, 4). - img_shape (tuple[int]): Shape of the input image, - (height, width, 3). - scale_factor (ndarray): Scale factor of the image arange as - (w_scale, h_scale, w_scale, h_scale). - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - - Returns: - tuple[Tensor, Tensor, Tensor]: The first item is an (n, 5) tensor, - where the first 4 columns are bounding box positions - (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between - 0 and 1. The second item is an (n,) tensor where each item is - the predicted class label of the corresponding box. The third - item is an (n, num_protos) tensor where each item is the - predicted mask coefficients of instance inside the - corresponding box. - """ - cfg = self.test_cfg if cfg is None else cfg - assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors) - nms_pre = cfg.get('nms_pre', -1) - mlvl_bboxes = [] - mlvl_scores = [] - mlvl_coeffs = [] - for cls_score, bbox_pred, coeff_pred, anchors in \ - zip(cls_score_list, bbox_pred_list, - coeff_preds_list, mlvl_anchors): - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - cls_score = cls_score.permute(1, 2, - 0).reshape(-1, self.cls_out_channels) - if self.use_sigmoid_cls: - scores = cls_score.sigmoid() - else: - scores = cls_score.softmax(-1) - bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) - coeff_pred = coeff_pred.permute(1, 2, - 0).reshape(-1, self.num_protos) - - if 0 < nms_pre < scores.shape[0]: - # Get maximum scores for foreground classes. 
- if self.use_sigmoid_cls: - max_scores, _ = scores.max(dim=1) - else: - # remind that we set FG labels to [0, num_class-1] - # since mmdet v2.0 - # BG cat_id: num_class - max_scores, _ = scores[:, :-1].max(dim=1) - _, topk_inds = max_scores.topk(nms_pre) - anchors = anchors[topk_inds, :] - bbox_pred = bbox_pred[topk_inds, :] - scores = scores[topk_inds, :] - coeff_pred = coeff_pred[topk_inds, :] - bboxes = self.bbox_coder.decode( - anchors, bbox_pred, max_shape=img_shape) - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_coeffs.append(coeff_pred) - mlvl_bboxes = torch.cat(mlvl_bboxes) - if rescale: - mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) - mlvl_scores = torch.cat(mlvl_scores) - mlvl_coeffs = torch.cat(mlvl_coeffs) - if self.use_sigmoid_cls: - # Add a dummy background class to the backend when using sigmoid - # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 - # BG cat_id: num_class - padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) - mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) - det_bboxes, det_labels, det_coeffs = fast_nms(mlvl_bboxes, mlvl_scores, - mlvl_coeffs, - cfg.score_thr, - cfg.iou_thr, cfg.top_k, - cfg.max_per_img) - return det_bboxes, det_labels, det_coeffs - - -@HEADS.register_module() -class YOLACTSegmHead(BaseModule): - """YOLACT segmentation head used in https://arxiv.org/abs/1904.02689. - - Apply a semantic segmentation loss on feature space using layers that are - only evaluated during training to increase performance with no speed - penalty. - - Args: - in_channels (int): Number of channels in the input feature map. - num_classes (int): Number of categories excluding the background - category. - loss_segm (dict): Config of semantic segmentation loss. - init_cfg (dict or list[dict], optional): Initialization config dict. - """ - - def __init__(self, - num_classes, - in_channels=256, - loss_segm=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - init_cfg=dict( - type='Xavier', - distribution='uniform', - override=dict(name='segm_conv'))): - super(YOLACTSegmHead, self).__init__(init_cfg) - self.in_channels = in_channels - self.num_classes = num_classes - self.loss_segm = build_loss(loss_segm) - self._init_layers() - self.fp16_enabled = False - - def _init_layers(self): - """Initialize layers of the head.""" - self.segm_conv = nn.Conv2d( - self.in_channels, self.num_classes, kernel_size=1) - - def forward(self, x): - """Forward feature from the upstream network. - - Args: - x (Tensor): Feature from the upstream network, which is - a 4D-tensor. - - Returns: - Tensor: Predicted semantic segmentation map with shape - (N, num_classes, H, W). - """ - return self.segm_conv(x) - - @force_fp32(apply_to=('segm_pred', )) - def loss(self, segm_pred, gt_masks, gt_labels): - """Compute loss of the head. - - Args: - segm_pred (list[Tensor]): Predicted semantic segmentation map - with shape (N, num_classes, H, W). - gt_masks (list[Tensor]): Ground truth masks for each image with - the same shape of the input image. - gt_labels (list[Tensor]): Class indices corresponding to each box. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
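# `_get_bboxes_single` above hands the concatenated boxes, scores and mask
# coefficients to `fast_nms` (imported from mmdet.core), YOLACT's replacement
# for sequential NMS: every box is compared against all higher-scoring boxes of
# the same class in a single matrix operation and dropped if any of them
# overlaps it beyond the IoU threshold.  A rough single-class sketch of the
# idea, using torchvision's box_iou as a stand-in; the real helper also handles
# the coefficients, top_k and score_thr arguments:
import torch
from torchvision.ops import box_iou

def fast_nms_single_class(boxes, scores, iou_thr=0.5):
    order = scores.argsort(descending=True)
    boxes, scores = boxes[order], scores[order]
    iou = box_iou(boxes, boxes)
    iou.triu_(diagonal=1)                      # compare each box only with better-scoring ones
    max_iou_with_better, _ = iou.max(dim=0)
    keep = max_iou_with_better <= iou_thr
    return boxes[keep], scores[keep], order[keep]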
- """ - loss_segm = [] - num_imgs, num_classes, mask_h, mask_w = segm_pred.size() - for idx in range(num_imgs): - cur_segm_pred = segm_pred[idx] - cur_gt_masks = gt_masks[idx].float() - cur_gt_labels = gt_labels[idx] - segm_targets = self.get_targets(cur_segm_pred, cur_gt_masks, - cur_gt_labels) - if segm_targets is None: - loss = self.loss_segm(cur_segm_pred, - torch.zeros_like(cur_segm_pred), - torch.zeros_like(cur_segm_pred)) - else: - loss = self.loss_segm( - cur_segm_pred, - segm_targets, - avg_factor=num_imgs * mask_h * mask_w) - loss_segm.append(loss) - return dict(loss_segm=loss_segm) - - def get_targets(self, segm_pred, gt_masks, gt_labels): - """Compute semantic segmentation targets for each image. - - Args: - segm_pred (Tensor): Predicted semantic segmentation map - with shape (num_classes, H, W). - gt_masks (Tensor): Ground truth masks for each image with - the same shape of the input image. - gt_labels (Tensor): Class indices corresponding to each box. - - Returns: - Tensor: Semantic segmentation targets with shape - (num_classes, H, W). - """ - if gt_masks.size(0) == 0: - return None - num_classes, mask_h, mask_w = segm_pred.size() - with torch.no_grad(): - downsampled_masks = F.interpolate( - gt_masks.unsqueeze(0), (mask_h, mask_w), - mode='bilinear', - align_corners=False).squeeze(0) - downsampled_masks = downsampled_masks.gt(0.5).float() - segm_targets = torch.zeros_like(segm_pred, requires_grad=False) - for obj_idx in range(downsampled_masks.size(0)): - segm_targets[gt_labels[obj_idx] - 1] = torch.max( - segm_targets[gt_labels[obj_idx] - 1], - downsampled_masks[obj_idx]) - return segm_targets - - def simple_test(self, feats, img_metas, rescale=False): - """Test function without test-time augmentation.""" - raise NotImplementedError( - 'simple_test of YOLACTSegmHead is not implemented ' - 'because this head is only evaluated during training') - - -@HEADS.register_module() -class YOLACTProtonet(BaseModule): - """YOLACT mask head used in https://arxiv.org/abs/1904.02689. - - This head outputs the mask prototypes for YOLACT. - - Args: - in_channels (int): Number of channels in the input feature map. - proto_channels (tuple[int]): Output channels of protonet convs. - proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs. - include_last_relu (Bool): If keep the last relu of protonet. - num_protos (int): Number of prototypes. - num_classes (int): Number of categories excluding the background - category. - loss_mask_weight (float): Reweight the mask loss by this factor. - max_masks_to_train (int): Maximum number of masks to train for - each image. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - num_classes, - in_channels=256, - proto_channels=(256, 256, 256, None, 256, 32), - proto_kernel_sizes=(3, 3, 3, -2, 3, 1), - include_last_relu=True, - num_protos=32, - loss_mask_weight=1.0, - max_masks_to_train=100, - init_cfg=dict( - type='Xavier', - distribution='uniform', - override=dict(name='protonet'))): - super(YOLACTProtonet, self).__init__(init_cfg) - self.in_channels = in_channels - self.proto_channels = proto_channels - self.proto_kernel_sizes = proto_kernel_sizes - self.include_last_relu = include_last_relu - self.protonet = self._init_layers() - - self.loss_mask_weight = loss_mask_weight - self.num_protos = num_protos - self.num_classes = num_classes - self.max_masks_to_train = max_masks_to_train - self.fp16_enabled = False - - def _init_layers(self): - """A helper function to take a config setting and turn it into a - network.""" - # Possible patterns: - # ( 256, 3) -> conv - # ( 256,-2) -> deconv - # (None,-2) -> bilinear interpolate - in_channels = self.in_channels - protonets = ModuleList() - for num_channels, kernel_size in zip(self.proto_channels, - self.proto_kernel_sizes): - if kernel_size > 0: - layer = nn.Conv2d( - in_channels, - num_channels, - kernel_size, - padding=kernel_size // 2) - else: - if num_channels is None: - layer = InterpolateModule( - scale_factor=-kernel_size, - mode='bilinear', - align_corners=False) - else: - layer = nn.ConvTranspose2d( - in_channels, - num_channels, - -kernel_size, - padding=kernel_size // 2) - protonets.append(layer) - protonets.append(nn.ReLU(inplace=True)) - in_channels = num_channels if num_channels is not None \ - else in_channels - if not self.include_last_relu: - protonets = protonets[:-1] - return nn.Sequential(*protonets) - - def forward_dummy(self, x): - prototypes = self.protonet(x) - return prototypes - - def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=None): - """Forward feature from the upstream network to get prototypes and - linearly combine the prototypes, using masks coefficients, into - instance masks. Finally, crop the instance masks with given bboxes. - - Args: - x (Tensor): Feature from the upstream network, which is - a 4D-tensor. - coeff_pred (list[Tensor]): Mask coefficients for each scale - level with shape (N, num_anchors * num_protos, H, W). - bboxes (list[Tensor]): Box used for cropping with shape - (N, num_anchors * 4, H, W). During training, they are - ground truth boxes. During testing, they are predicted - boxes. - img_meta (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - sampling_results (List[:obj:``SamplingResult``]): Sampler results - for each image. - - Returns: - list[Tensor]: Predicted instance segmentation masks. - """ - prototypes = self.protonet(x) - prototypes = prototypes.permute(0, 2, 3, 1).contiguous() - - num_imgs = x.size(0) - - # The reason for not using self.training is that - # val workflow will have a dimension mismatch error. - # Note that this writing method is very tricky. 
- # Fix https://github.com/open-mmlab/mmdetection/issues/5978 - is_train_or_val_workflow = (coeff_pred[0].dim() == 4) - - # Train or val workflow - if is_train_or_val_workflow: - coeff_pred_list = [] - for coeff_pred_per_level in coeff_pred: - coeff_pred_per_level = \ - coeff_pred_per_level.permute( - 0, 2, 3, 1).reshape(num_imgs, -1, self.num_protos) - coeff_pred_list.append(coeff_pred_per_level) - coeff_pred = torch.cat(coeff_pred_list, dim=1) - - mask_pred_list = [] - for idx in range(num_imgs): - cur_prototypes = prototypes[idx] - cur_coeff_pred = coeff_pred[idx] - cur_bboxes = bboxes[idx] - cur_img_meta = img_meta[idx] - - # Testing state - if not is_train_or_val_workflow: - bboxes_for_cropping = cur_bboxes - else: - cur_sampling_results = sampling_results[idx] - pos_assigned_gt_inds = \ - cur_sampling_results.pos_assigned_gt_inds - bboxes_for_cropping = cur_bboxes[pos_assigned_gt_inds].clone() - pos_inds = cur_sampling_results.pos_inds - cur_coeff_pred = cur_coeff_pred[pos_inds] - - # Linearly combine the prototypes with the mask coefficients - mask_pred = cur_prototypes @ cur_coeff_pred.t() - mask_pred = torch.sigmoid(mask_pred) - - h, w = cur_img_meta['img_shape'][:2] - bboxes_for_cropping[:, 0] /= w - bboxes_for_cropping[:, 1] /= h - bboxes_for_cropping[:, 2] /= w - bboxes_for_cropping[:, 3] /= h - - mask_pred = self.crop(mask_pred, bboxes_for_cropping) - mask_pred = mask_pred.permute(2, 0, 1).contiguous() - mask_pred_list.append(mask_pred) - return mask_pred_list - - @force_fp32(apply_to=('mask_pred', )) - def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_results): - """Compute loss of the head. - - Args: - mask_pred (list[Tensor]): Predicted prototypes with shape - (num_classes, H, W). - gt_masks (list[Tensor]): Ground truth masks for each image with - the same shape of the input image. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - img_meta (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - sampling_results (List[:obj:``SamplingResult``]): Sampler results - for each image. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - loss_mask = [] - num_imgs = len(mask_pred) - total_pos = 0 - for idx in range(num_imgs): - cur_mask_pred = mask_pred[idx] - cur_gt_masks = gt_masks[idx].float() - cur_gt_bboxes = gt_bboxes[idx] - cur_img_meta = img_meta[idx] - cur_sampling_results = sampling_results[idx] - - pos_assigned_gt_inds = cur_sampling_results.pos_assigned_gt_inds - num_pos = pos_assigned_gt_inds.size(0) - # Since we're producing (near) full image masks, - # it'd take too much vram to backprop on every single mask. - # Thus we select only a subset. - if num_pos > self.max_masks_to_train: - perm = torch.randperm(num_pos) - select = perm[:self.max_masks_to_train] - cur_mask_pred = cur_mask_pred[select] - pos_assigned_gt_inds = pos_assigned_gt_inds[select] - num_pos = self.max_masks_to_train - total_pos += num_pos - - gt_bboxes_for_reweight = cur_gt_bboxes[pos_assigned_gt_inds] - - mask_targets = self.get_targets(cur_mask_pred, cur_gt_masks, - pos_assigned_gt_inds) - if num_pos == 0: - loss = cur_mask_pred.sum() * 0. 
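# The forward pass above is YOLACT's core mask assembly: per-image prototypes
# of shape (H, W, num_protos) are linearly combined with one coefficient vector
# per instance, squashed with a sigmoid and then cropped by `self.crop`.
# Reduced to its essence (illustrative helper, cropping omitted):
import torch

def assemble_masks(prototypes, coeffs):
    # prototypes: (H, W, P) for one image; coeffs: (num_instances, P).
    masks = torch.sigmoid(prototypes @ coeffs.t())   # (H, W, num_instances)
    return masks.permute(2, 0, 1).contiguous()       # (num_instances, H, W)

# e.g. 32 prototypes at 138x138 resolution combined for 5 detections:
masks = assemble_masks(torch.randn(138, 138, 32), torch.randn(5, 32))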
- elif mask_targets is None: - loss = F.binary_cross_entropy(cur_mask_pred, - torch.zeros_like(cur_mask_pred), - torch.zeros_like(cur_mask_pred)) - else: - cur_mask_pred = torch.clamp(cur_mask_pred, 0, 1) - loss = F.binary_cross_entropy( - cur_mask_pred, mask_targets, - reduction='none') * self.loss_mask_weight - - h, w = cur_img_meta['img_shape'][:2] - gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] - - gt_bboxes_for_reweight[:, 0]) / w - gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] - - gt_bboxes_for_reweight[:, 1]) / h - loss = loss.mean(dim=(1, - 2)) / gt_bboxes_width / gt_bboxes_height - loss = torch.sum(loss) - loss_mask.append(loss) - - if total_pos == 0: - total_pos += 1 # avoid nan - loss_mask = [x / total_pos for x in loss_mask] - - return dict(loss_mask=loss_mask) - - def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds): - """Compute instance segmentation targets for each image. - - Args: - mask_pred (Tensor): Predicted prototypes with shape - (num_classes, H, W). - gt_masks (Tensor): Ground truth masks for each image with - the same shape of the input image. - pos_assigned_gt_inds (Tensor): GT indices of the corresponding - positive samples. - Returns: - Tensor: Instance segmentation targets with shape - (num_instances, H, W). - """ - if gt_masks.size(0) == 0: - return None - mask_h, mask_w = mask_pred.shape[-2:] - gt_masks = F.interpolate( - gt_masks.unsqueeze(0), (mask_h, mask_w), - mode='bilinear', - align_corners=False).squeeze(0) - gt_masks = gt_masks.gt(0.5).float() - mask_targets = gt_masks[pos_assigned_gt_inds] - return mask_targets - - def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale): - """Resize, binarize, and format the instance mask predictions. - - Args: - mask_pred (Tensor): shape (N, H, W). - label_pred (Tensor): shape (N, ). - img_meta (dict): Meta information of each image, e.g., - image size, scaling factor, etc. - rescale (bool): If rescale is False, then returned masks will - fit the scale of imgs[0]. - Returns: - list[ndarray]: Mask predictions grouped by their predicted classes. - """ - ori_shape = img_meta['ori_shape'] - scale_factor = img_meta['scale_factor'] - if rescale: - img_h, img_w = ori_shape[:2] - else: - img_h = np.round(ori_shape[0] * scale_factor[1]).astype(np.int32) - img_w = np.round(ori_shape[1] * scale_factor[0]).astype(np.int32) - - cls_segms = [[] for _ in range(self.num_classes)] - if mask_pred.size(0) == 0: - return cls_segms - - mask_pred = F.interpolate( - mask_pred.unsqueeze(0), (img_h, img_w), - mode='bilinear', - align_corners=False).squeeze(0) > 0.5 - mask_pred = mask_pred.cpu().numpy().astype(np.uint8) - - for m, l in zip(mask_pred, label_pred): - cls_segms[l].append(m) - return cls_segms - - def crop(self, masks, boxes, padding=1): - """Crop predicted masks by zeroing out everything not in the predicted - bbox. - - Args: - masks (Tensor): shape [H, W, N]. - boxes (Tensor): bbox coords in relative point form with - shape [N, 4]. - - Return: - Tensor: The cropped masks. 
- """ - h, w, n = masks.size() - x1, x2 = self.sanitize_coordinates( - boxes[:, 0], boxes[:, 2], w, padding, cast=False) - y1, y2 = self.sanitize_coordinates( - boxes[:, 1], boxes[:, 3], h, padding, cast=False) - - rows = torch.arange( - w, device=masks.device, dtype=x1.dtype).view(1, -1, - 1).expand(h, w, n) - cols = torch.arange( - h, device=masks.device, dtype=x1.dtype).view(-1, 1, - 1).expand(h, w, n) - - masks_left = rows >= x1.view(1, 1, -1) - masks_right = rows < x2.view(1, 1, -1) - masks_up = cols >= y1.view(1, 1, -1) - masks_down = cols < y2.view(1, 1, -1) - - crop_mask = masks_left * masks_right * masks_up * masks_down - - return masks * crop_mask.float() - - def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True): - """Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0, - and x2 <= image_size. Also converts from relative to absolute - coordinates and casts the results to long tensors. - - Warning: this does things in-place behind the scenes so - copy if necessary. - - Args: - _x1 (Tensor): shape (N, ). - _x2 (Tensor): shape (N, ). - img_size (int): Size of the input image. - padding (int): x1 >= padding, x2 <= image_size-padding. - cast (bool): If cast is false, the result won't be cast to longs. - - Returns: - tuple: - x1 (Tensor): Sanitized _x1. - x2 (Tensor): Sanitized _x2. - """ - x1 = x1 * img_size - x2 = x2 * img_size - if cast: - x1 = x1.long() - x2 = x2.long() - x1 = torch.min(x1, x2) - x2 = torch.max(x1, x2) - x1 = torch.clamp(x1 - padding, min=0) - x2 = torch.clamp(x2 + padding, max=img_size) - return x1, x2 - - def simple_test(self, - feats, - det_bboxes, - det_labels, - det_coeffs, - img_metas, - rescale=False): - """Test function without test-time augmentation. - - Args: - feats (tuple[torch.Tensor]): Multi-level features from the - upstream network, each is a 4D-tensor. - det_bboxes (list[Tensor]): BBox results of each image. each - element is (n, 5) tensor, where 5 represent - (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. - det_labels (list[Tensor]): BBox results of each image. each - element is (n, ) tensor, each element represents the class - label of the corresponding box. - det_coeffs (list[Tensor]): BBox coefficient of each image. each - element is (n, m) tensor, m is vector length. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[list]: encoded masks. The c-th item in the outer list - corresponds to the c-th class. Given the c-th outer list, the - i-th item in that inner list is the mask for the i-th box with - class label c. - """ - num_imgs = len(img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - segm_results = [[[] for _ in range(self.num_classes)] - for _ in range(num_imgs)] - else: - # if det_bboxes is rescaled to the original image size, we need to - # rescale it back to the testing scale to obtain RoIs. 
- if rescale and not isinstance(scale_factors[0], float): - scale_factors = [ - torch.from_numpy(scale_factor).to(det_bboxes[0].device) - for scale_factor in scale_factors - ] - _bboxes = [ - det_bboxes[i][:, :4] * - scale_factors[i] if rescale else det_bboxes[i][:, :4] - for i in range(len(det_bboxes)) - ] - mask_preds = self.forward(feats[0], det_coeffs, _bboxes, img_metas) - # apply mask post-processing to each image individually - segm_results = [] - for i in range(num_imgs): - if det_bboxes[i].shape[0] == 0: - segm_results.append([[] for _ in range(self.num_classes)]) - else: - segm_result = self.get_seg_masks(mask_preds[i], - det_labels[i], - img_metas[i], rescale) - segm_results.append(segm_result) - return segm_results - - -class InterpolateModule(BaseModule): - """This is a module version of F.interpolate. - - Any arguments you give it just get passed along for the ride. - """ - - def __init__(self, *args, init_cfg=None, **kwargs): - super().__init__(init_cfg) - - self.args = args - self.kwargs = kwargs - - def forward(self, x): - """Forward features from the upstream network.""" - return F.interpolate(x, *self.args, **self.kwargs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/yolo_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/yolo_head.py deleted file mode 100644 index b446cb7eb24b6608ba217713a36c917dc4b93407..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/yolo_head.py +++ /dev/null @@ -1,621 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# Copyright (c) 2019 Western Digital Corporation or its affiliates. - -import warnings - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import (ConvModule, bias_init_with_prob, constant_init, is_norm, - normal_init) -from mmcv.runner import force_fp32 - -from mmdet.core import (build_assigner, build_bbox_coder, - build_prior_generator, build_sampler, images_to_levels, - multi_apply, multiclass_nms) -from ..builder import HEADS, build_loss -from .base_dense_head import BaseDenseHead -from .dense_test_mixins import BBoxTestMixin - - -@HEADS.register_module() -class YOLOV3Head(BaseDenseHead, BBoxTestMixin): - """YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767. - - Args: - num_classes (int): The number of object classes (w/o background) - in_channels (List[int]): Number of input channels per scale. - out_channels (List[int]): The number of output channels per scale - before the final 1x1 layer. Default: (1024, 512, 256). - anchor_generator (dict): Config dict for anchor generator - bbox_coder (dict): Config of bounding box coder. - featmap_strides (List[int]): The stride of each scale. - Should be in descending order. Default: (32, 16, 8). - one_hot_smoother (float): Set a non-zero value to enable label-smooth - Default: 0. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN', requires_grad=True) - act_cfg (dict): Config dict for activation layer. - Default: dict(type='LeakyReLU', negative_slope=0.1). - loss_cls (dict): Config of classification loss. - loss_conf (dict): Config of confidence loss. - loss_xy (dict): Config of xy coordinate loss. - loss_wh (dict): Config of wh coordinate loss. - train_cfg (dict): Training config of YOLOV3 head. Default: None. - test_cfg (dict): Testing config of YOLOV3 head. Default: None. 
- init_cfg (dict or list[dict], optional): Initialization config dict. - """ - - def __init__(self, - num_classes, - in_channels, - out_channels=(1024, 512, 256), - anchor_generator=dict( - type='YOLOAnchorGenerator', - base_sizes=[[(116, 90), (156, 198), (373, 326)], - [(30, 61), (62, 45), (59, 119)], - [(10, 13), (16, 30), (33, 23)]], - strides=[32, 16, 8]), - bbox_coder=dict(type='YOLOBBoxCoder'), - featmap_strides=[32, 16, 8], - one_hot_smoother=0., - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - loss_conf=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - loss_xy=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - loss_wh=dict(type='MSELoss', loss_weight=1.0), - train_cfg=None, - test_cfg=None, - init_cfg=dict( - type='Normal', std=0.01, - override=dict(name='convs_pred'))): - super(YOLOV3Head, self).__init__(init_cfg) - # Check params - assert (len(in_channels) == len(out_channels) == len(featmap_strides)) - - self.num_classes = num_classes - self.in_channels = in_channels - self.out_channels = out_channels - self.featmap_strides = featmap_strides - self.train_cfg = train_cfg - self.test_cfg = test_cfg - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - if hasattr(self.train_cfg, 'sampler'): - sampler_cfg = self.train_cfg.sampler - else: - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - self.fp16_enabled = False - - self.one_hot_smoother = one_hot_smoother - - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - - self.bbox_coder = build_bbox_coder(bbox_coder) - - self.prior_generator = build_prior_generator(anchor_generator) - - self.loss_cls = build_loss(loss_cls) - self.loss_conf = build_loss(loss_conf) - self.loss_xy = build_loss(loss_xy) - self.loss_wh = build_loss(loss_wh) - - self.num_base_priors = self.prior_generator.num_base_priors[0] - assert len( - self.prior_generator.num_base_priors) == len(featmap_strides) - self._init_layers() - - @property - def anchor_generator(self): - - warnings.warn('DeprecationWarning: `anchor_generator` is deprecated, ' - 'please use "prior_generator" instead') - return self.prior_generator - - @property - def num_anchors(self): - """ - Returns: - int: Number of anchors on each point of feature map. 
- """ - warnings.warn('DeprecationWarning: `num_anchors` is deprecated, ' - 'please use "num_base_priors" instead') - return self.num_base_priors - - @property - def num_levels(self): - return len(self.featmap_strides) - - @property - def num_attrib(self): - """int: number of attributes in pred_map, bboxes (4) + - objectness (1) + num_classes""" - - return 5 + self.num_classes - - def _init_layers(self): - self.convs_bridge = nn.ModuleList() - self.convs_pred = nn.ModuleList() - for i in range(self.num_levels): - conv_bridge = ConvModule( - self.in_channels[i], - self.out_channels[i], - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - conv_pred = nn.Conv2d(self.out_channels[i], - self.num_base_priors * self.num_attrib, 1) - - self.convs_bridge.append(conv_bridge) - self.convs_pred.append(conv_pred) - - def init_weights(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - normal_init(m, mean=0, std=0.01) - if is_norm(m): - constant_init(m, 1) - - # Use prior in model initialization to improve stability - for conv_pred, stride in zip(self.convs_pred, self.featmap_strides): - bias = conv_pred.bias.reshape(self.num_base_priors, -1) - # init objectness with prior of 8 objects per feature map - # refer to https://github.com/ultralytics/yolov3 - nn.init.constant_(bias.data[:, 4], - bias_init_with_prob(8 / (608 / stride)**2)) - nn.init.constant_(bias.data[:, 5:], bias_init_with_prob(0.01)) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple[Tensor]: A tuple of multi-level predication map, each is a - 4D-tensor of shape (batch_size, 5+num_classes, height, width). - """ - - assert len(feats) == self.num_levels - pred_maps = [] - for i in range(self.num_levels): - x = feats[i] - x = self.convs_bridge[i](x) - pred_map = self.convs_pred[i](x) - pred_maps.append(pred_map) - - return tuple(pred_maps), - - @force_fp32(apply_to=('pred_maps', )) - def get_bboxes(self, - pred_maps, - img_metas, - cfg=None, - rescale=False, - with_nms=True): - """Transform network output for a batch into bbox predictions. It has - been accelerated since PR #5991. - - Args: - pred_maps (list[Tensor]): Raw predictions for a batch of images. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - cfg (mmcv.Config | None): Test / postprocessing configuration, - if None, test_cfg would be used. Default: None. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is an (n, 5) tensor, where 5 represent - (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. - The shape of the second tensor in the tuple is (n,), and - each element represents the class label of the corresponding - box. 
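# Each per-level map produced by `forward` packs num_base_priors * num_attrib
# channels, where num_attrib = 5 + num_classes covers the box terms
# (tx, ty, tw, th), the objectness logit and the class logits.  A small sketch
# of unpacking one map, mirroring the reshaping done in `get_bboxes` and
# `loss_single` below (helper name and sample shapes are illustrative):
import torch

def unpack_yolo_pred(pred_map, num_anchors, num_classes):
    num_attrib = 5 + num_classes
    assert pred_map.size(1) == num_anchors * num_attrib
    pred = pred_map.permute(0, 2, 3, 1).reshape(pred_map.size(0), -1, num_attrib)
    return dict(
        xy=pred[..., :2].sigmoid(),        # centre offsets inside each grid cell
        wh=pred[..., 2:4],                 # raw width/height terms, decoded against anchors
        objectness=pred[..., 4].sigmoid(),
        cls_scores=pred[..., 5:].sigmoid())

# e.g. one 13x13 level with 3 anchors and 80 classes (255 channels):
out = unpack_yolo_pred(torch.randn(2, 3 * 85, 13, 13), num_anchors=3, num_classes=80)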
- """ - assert len(pred_maps) == self.num_levels - cfg = self.test_cfg if cfg is None else cfg - scale_factors = np.array( - [img_meta['scale_factor'] for img_meta in img_metas]) - - num_imgs = len(img_metas) - featmap_sizes = [pred_map.shape[-2:] for pred_map in pred_maps] - - mlvl_anchors = self.prior_generator.grid_priors( - featmap_sizes, device=pred_maps[0].device) - flatten_preds = [] - flatten_strides = [] - for pred, stride in zip(pred_maps, self.featmap_strides): - pred = pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, - self.num_attrib) - pred[..., :2].sigmoid_() - flatten_preds.append(pred) - flatten_strides.append( - pred.new_tensor(stride).expand(pred.size(1))) - - flatten_preds = torch.cat(flatten_preds, dim=1) - flatten_bbox_preds = flatten_preds[..., :4] - flatten_objectness = flatten_preds[..., 4].sigmoid() - flatten_cls_scores = flatten_preds[..., 5:].sigmoid() - flatten_anchors = torch.cat(mlvl_anchors) - flatten_strides = torch.cat(flatten_strides) - flatten_bboxes = self.bbox_coder.decode(flatten_anchors, - flatten_bbox_preds, - flatten_strides.unsqueeze(-1)) - - if with_nms and (flatten_objectness.size(0) == 0): - return torch.zeros((0, 5)), torch.zeros((0, )) - - if rescale: - flatten_bboxes /= flatten_bboxes.new_tensor( - scale_factors).unsqueeze(1) - - padding = flatten_bboxes.new_zeros(num_imgs, flatten_bboxes.shape[1], - 1) - flatten_cls_scores = torch.cat([flatten_cls_scores, padding], dim=-1) - - det_results = [] - for (bboxes, scores, objectness) in zip(flatten_bboxes, - flatten_cls_scores, - flatten_objectness): - # Filtering out all predictions with conf < conf_thr - conf_thr = cfg.get('conf_thr', -1) - if conf_thr > 0: - conf_inds = objectness >= conf_thr - bboxes = bboxes[conf_inds, :] - scores = scores[conf_inds, :] - objectness = objectness[conf_inds] - - det_bboxes, det_labels = multiclass_nms( - bboxes, - scores, - cfg.score_thr, - cfg.nms, - cfg.max_per_img, - score_factors=objectness) - det_results.append(tuple([det_bboxes, det_labels])) - return det_results - - @force_fp32(apply_to=('pred_maps', )) - def loss(self, - pred_maps, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute loss of the head. - - Args: - pred_maps (list[Tensor]): Prediction map for each scale level, - shape (N, num_anchors * num_attrib, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
- """ - num_imgs = len(img_metas) - device = pred_maps[0][0].device - - featmap_sizes = [ - pred_maps[i].shape[-2:] for i in range(self.num_levels) - ] - mlvl_anchors = self.prior_generator.grid_priors( - featmap_sizes, device=device) - anchor_list = [mlvl_anchors for _ in range(num_imgs)] - - responsible_flag_list = [] - for img_id in range(len(img_metas)): - responsible_flag_list.append( - self.prior_generator.responsible_flags(featmap_sizes, - gt_bboxes[img_id], - device)) - - target_maps_list, neg_maps_list = self.get_targets( - anchor_list, responsible_flag_list, gt_bboxes, gt_labels) - - losses_cls, losses_conf, losses_xy, losses_wh = multi_apply( - self.loss_single, pred_maps, target_maps_list, neg_maps_list) - - return dict( - loss_cls=losses_cls, - loss_conf=losses_conf, - loss_xy=losses_xy, - loss_wh=losses_wh) - - def loss_single(self, pred_map, target_map, neg_map): - """Compute loss of a single image from a batch. - - Args: - pred_map (Tensor): Raw predictions for a single level. - target_map (Tensor): The Ground-Truth target for a single level. - neg_map (Tensor): The negative masks for a single level. - - Returns: - tuple: - loss_cls (Tensor): Classification loss. - loss_conf (Tensor): Confidence loss. - loss_xy (Tensor): Regression loss of x, y coordinate. - loss_wh (Tensor): Regression loss of w, h coordinate. - """ - - num_imgs = len(pred_map) - pred_map = pred_map.permute(0, 2, 3, - 1).reshape(num_imgs, -1, self.num_attrib) - neg_mask = neg_map.float() - pos_mask = target_map[..., 4] - pos_and_neg_mask = neg_mask + pos_mask - pos_mask = pos_mask.unsqueeze(dim=-1) - if torch.max(pos_and_neg_mask) > 1.: - warnings.warn('There is overlap between pos and neg sample.') - pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.) - - pred_xy = pred_map[..., :2] - pred_wh = pred_map[..., 2:4] - pred_conf = pred_map[..., 4] - pred_label = pred_map[..., 5:] - - target_xy = target_map[..., :2] - target_wh = target_map[..., 2:4] - target_conf = target_map[..., 4] - target_label = target_map[..., 5:] - - loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask) - loss_conf = self.loss_conf( - pred_conf, target_conf, weight=pos_and_neg_mask) - loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask) - loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask) - - return loss_cls, loss_conf, loss_xy, loss_wh - - def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list, - gt_labels_list): - """Compute target maps for anchors in multiple images. - - Args: - anchor_list (list[list[Tensor]]): Multi level anchors of each - image. The outer list indicates images, and the inner list - corresponds to feature levels of the image. Each element of - the inner list is a tensor of shape (num_total_anchors, 4). - responsible_flag_list (list[list[Tensor]]): Multi level responsible - flags of each image. Each element is a tensor of shape - (num_total_anchors, ) - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. - gt_labels_list (list[Tensor]): Ground truth labels of each box. - - Returns: - tuple: Usually returns a tuple containing learning targets. - - target_map_list (list[Tensor]): Target map of each level. - - neg_map_list (list[Tensor]): Negative map of each level. 
- """ - num_imgs = len(anchor_list) - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - - results = multi_apply(self._get_targets_single, anchor_list, - responsible_flag_list, gt_bboxes_list, - gt_labels_list) - - all_target_maps, all_neg_maps = results - assert num_imgs == len(all_target_maps) == len(all_neg_maps) - target_maps_list = images_to_levels(all_target_maps, num_level_anchors) - neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors) - - return target_maps_list, neg_maps_list - - def _get_targets_single(self, anchors, responsible_flags, gt_bboxes, - gt_labels): - """Generate matching bounding box prior and converted GT. - - Args: - anchors (list[Tensor]): Multi-level anchors of the image. - responsible_flags (list[Tensor]): Multi-level responsible flags of - anchors - gt_bboxes (Tensor): Ground truth bboxes of single image. - gt_labels (Tensor): Ground truth labels of single image. - - Returns: - tuple: - target_map (Tensor): Predication target map of each - scale level, shape (num_total_anchors, - 5+num_classes) - neg_map (Tensor): Negative map of each scale level, - shape (num_total_anchors,) - """ - - anchor_strides = [] - for i in range(len(anchors)): - anchor_strides.append( - torch.tensor(self.featmap_strides[i], - device=gt_bboxes.device).repeat(len(anchors[i]))) - concat_anchors = torch.cat(anchors) - concat_responsible_flags = torch.cat(responsible_flags) - - anchor_strides = torch.cat(anchor_strides) - assert len(anchor_strides) == len(concat_anchors) == \ - len(concat_responsible_flags) - assign_result = self.assigner.assign(concat_anchors, - concat_responsible_flags, - gt_bboxes) - sampling_result = self.sampler.sample(assign_result, concat_anchors, - gt_bboxes) - - target_map = concat_anchors.new_zeros( - concat_anchors.size(0), self.num_attrib) - - target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode( - sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes, - anchor_strides[sampling_result.pos_inds]) - - target_map[sampling_result.pos_inds, 4] = 1 - - gt_labels_one_hot = F.one_hot( - gt_labels, num_classes=self.num_classes).float() - if self.one_hot_smoother != 0: # label smooth - gt_labels_one_hot = gt_labels_one_hot * ( - 1 - self.one_hot_smoother - ) + self.one_hot_smoother / self.num_classes - target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[ - sampling_result.pos_assigned_gt_inds] - - neg_map = concat_anchors.new_zeros( - concat_anchors.size(0), dtype=torch.uint8) - neg_map[sampling_result.neg_inds] = 1 - - return target_map, neg_map - - def aug_test(self, feats, img_metas, rescale=False): - """Test function with test time augmentation. - - Args: - feats (list[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains features for all images in the batch. - img_metas (list[list[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. each dict has image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. 
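# `_get_targets_single` above applies one-hot label smoothing whenever
# `one_hot_smoother` is non-zero, moving a small amount of probability mass
# uniformly onto the other classes.  The transformation on its own (the 0.1
# default is only an example value):
import torch
import torch.nn.functional as F

def smooth_one_hot(labels, num_classes, smoother=0.1):
    # y_smooth = y_onehot * (1 - eps) + eps / C, as in the expression above.
    one_hot = F.one_hot(labels, num_classes=num_classes).float()
    return one_hot * (1 - smoother) + smoother / num_classes

# smooth_one_hot(torch.tensor([2, 0]), num_classes=3) -> each row still sums to 1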
- - Returns: - list[ndarray]: bbox results of each class - """ - return self.aug_test_bboxes(feats, img_metas, rescale=rescale) - - @force_fp32(apply_to=('pred_maps', )) - def onnx_export(self, pred_maps, img_metas, with_nms=True): - num_levels = len(pred_maps) - pred_maps_list = [pred_maps[i].detach() for i in range(num_levels)] - - cfg = self.test_cfg - assert len(pred_maps_list) == self.num_levels - - device = pred_maps_list[0].device - batch_size = pred_maps_list[0].shape[0] - - featmap_sizes = [ - pred_maps_list[i].shape[-2:] for i in range(self.num_levels) - ] - mlvl_anchors = self.prior_generator.grid_priors( - featmap_sizes, device=device) - # convert to tensor to keep tracing - nms_pre_tensor = torch.tensor( - cfg.get('nms_pre', -1), device=device, dtype=torch.long) - - multi_lvl_bboxes = [] - multi_lvl_cls_scores = [] - multi_lvl_conf_scores = [] - for i in range(self.num_levels): - # get some key info for current scale - pred_map = pred_maps_list[i] - stride = self.featmap_strides[i] - # (b, h, w, num_anchors*num_attrib) -> - # (b, h*w*num_anchors, num_attrib) - pred_map = pred_map.permute(0, 2, 3, - 1).reshape(batch_size, -1, - self.num_attrib) - # Inplace operation like - # ```pred_map[..., :2] = torch.sigmoid(pred_map[..., :2])``` - # would create constant tensor when exporting to onnx - pred_map_conf = torch.sigmoid(pred_map[..., :2]) - pred_map_rest = pred_map[..., 2:] - pred_map = torch.cat([pred_map_conf, pred_map_rest], dim=-1) - pred_map_boxes = pred_map[..., :4] - multi_lvl_anchor = mlvl_anchors[i] - multi_lvl_anchor = multi_lvl_anchor.expand_as(pred_map_boxes) - bbox_pred = self.bbox_coder.decode(multi_lvl_anchor, - pred_map_boxes, stride) - # conf and cls - conf_pred = torch.sigmoid(pred_map[..., 4]) - cls_pred = torch.sigmoid(pred_map[..., 5:]).view( - batch_size, -1, self.num_classes) # Cls pred one-hot.
- - # Get top-k prediction - from mmdet.core.export import get_k_for_topk - nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1]) - if nms_pre > 0: - _, topk_inds = conf_pred.topk(nms_pre) - batch_inds = torch.arange(batch_size).view( - -1, 1).expand_as(topk_inds).long() - # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 - transformed_inds = ( - bbox_pred.shape[1] * batch_inds + topk_inds) - bbox_pred = bbox_pred.reshape(-1, - 4)[transformed_inds, :].reshape( - batch_size, -1, 4) - cls_pred = cls_pred.reshape( - -1, self.num_classes)[transformed_inds, :].reshape( - batch_size, -1, self.num_classes) - conf_pred = conf_pred.reshape(-1, 1)[transformed_inds].reshape( - batch_size, -1) - - # Save the result of current scale - multi_lvl_bboxes.append(bbox_pred) - multi_lvl_cls_scores.append(cls_pred) - multi_lvl_conf_scores.append(conf_pred) - - # Merge the results of different scales together - batch_mlvl_bboxes = torch.cat(multi_lvl_bboxes, dim=1) - batch_mlvl_scores = torch.cat(multi_lvl_cls_scores, dim=1) - batch_mlvl_conf_scores = torch.cat(multi_lvl_conf_scores, dim=1) - - # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment - from mmdet.core.export import add_dummy_nms_for_onnx - conf_thr = cfg.get('conf_thr', -1) - score_thr = cfg.get('score_thr', -1) - # follow original pipeline of YOLOv3 - if conf_thr > 0: - mask = (batch_mlvl_conf_scores >= conf_thr).float() - batch_mlvl_conf_scores *= mask - if score_thr > 0: - mask = (batch_mlvl_scores > score_thr).float() - batch_mlvl_scores *= mask - batch_mlvl_conf_scores = batch_mlvl_conf_scores.unsqueeze(2).expand_as( - batch_mlvl_scores) - batch_mlvl_scores = batch_mlvl_scores * batch_mlvl_conf_scores - if with_nms: - max_output_boxes_per_class = cfg.nms.get( - 'max_output_boxes_per_class', 200) - iou_threshold = cfg.nms.get('iou_threshold', 0.5) - # keep aligned with original pipeline, improve - # mAP by 1% for YOLOv3 in ONNX - score_threshold = 0 - nms_pre = cfg.get('deploy_nms_pre', -1) - return add_dummy_nms_for_onnx( - batch_mlvl_bboxes, - batch_mlvl_scores, - max_output_boxes_per_class, - iou_threshold, - score_threshold, - nms_pre, - cfg.max_per_img, - ) - else: - return batch_mlvl_bboxes, batch_mlvl_scores diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/yolof_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/yolof_head.py deleted file mode 100644 index 1063524a7d17f2bb037ca64c35f5ce3e658771eb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/yolof_head.py +++ /dev/null @@ -1,416 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -from mmcv.cnn import (ConvModule, bias_init_with_prob, constant_init, is_norm, - normal_init) -from mmcv.runner import force_fp32 - -from mmdet.core import anchor_inside_flags, multi_apply, reduce_mean, unmap -from ..builder import HEADS -from .anchor_head import AnchorHead - -INF = 1e8 - - -def levels_to_images(mlvl_tensor): - """Concat multi-level feature maps by image. - - [feature_level0, feature_level1...] -> [feature_image0, feature_image1...] - Convert the shape of each element in mlvl_tensor from (N, C, H, W) to - (N, H*W , C), then split the element to N elements with shape (H*W, C), and - concat elements in same image of all level along first dimension. - - Args: - mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from - corresponding level. 
Each element is of shape (N, C, H, W) - - Returns: - list[torch.Tensor]: A list that contains N tensors and each tensor is - of shape (num_elements, C) - """ - batch_size = mlvl_tensor[0].size(0) - batch_list = [[] for _ in range(batch_size)] - channels = mlvl_tensor[0].size(1) - for t in mlvl_tensor: - t = t.permute(0, 2, 3, 1) - t = t.view(batch_size, -1, channels).contiguous() - for img in range(batch_size): - batch_list[img].append(t[img]) - return [torch.cat(item, 0) for item in batch_list] - - -@HEADS.register_module() -class YOLOFHead(AnchorHead): - """YOLOFHead Paper link: https://arxiv.org/abs/2103.09460. - - Args: - num_classes (int): The number of object classes (w/o background) - in_channels (List[int]): The number of input channels per scale. - num_cls_convs (int): The number of convolutions of the cls branch. - Default 2. - num_reg_convs (int): The number of convolutions of the reg branch. - Default 4. - norm_cfg (dict): Dictionary to construct and config norm layer. - """ - - def __init__(self, - num_classes, - in_channels, - num_cls_convs=2, - num_reg_convs=4, - norm_cfg=dict(type='BN', requires_grad=True), - **kwargs): - self.num_cls_convs = num_cls_convs - self.num_reg_convs = num_reg_convs - self.norm_cfg = norm_cfg - super(YOLOFHead, self).__init__(num_classes, in_channels, **kwargs) - - def _init_layers(self): - cls_subnet = [] - bbox_subnet = [] - for i in range(self.num_cls_convs): - cls_subnet.append( - ConvModule( - self.in_channels, - self.in_channels, - kernel_size=3, - padding=1, - norm_cfg=self.norm_cfg)) - for i in range(self.num_reg_convs): - bbox_subnet.append( - ConvModule( - self.in_channels, - self.in_channels, - kernel_size=3, - padding=1, - norm_cfg=self.norm_cfg)) - self.cls_subnet = nn.Sequential(*cls_subnet) - self.bbox_subnet = nn.Sequential(*bbox_subnet) - self.cls_score = nn.Conv2d( - self.in_channels, - self.num_base_priors * self.num_classes, - kernel_size=3, - stride=1, - padding=1) - self.bbox_pred = nn.Conv2d( - self.in_channels, - self.num_base_priors * 4, - kernel_size=3, - stride=1, - padding=1) - self.object_pred = nn.Conv2d( - self.in_channels, - self.num_base_priors, - kernel_size=3, - stride=1, - padding=1) - - def init_weights(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - normal_init(m, mean=0, std=0.01) - if is_norm(m): - constant_init(m, 1) - - # Use prior in model initialization to improve stability - bias_cls = bias_init_with_prob(0.01) - torch.nn.init.constant_(self.cls_score.bias, bias_cls) - - def forward_single(self, feature): - cls_score = self.cls_score(self.cls_subnet(feature)) - N, _, H, W = cls_score.shape - cls_score = cls_score.view(N, -1, self.num_classes, H, W) - - reg_feat = self.bbox_subnet(feature) - bbox_reg = self.bbox_pred(reg_feat) - objectness = self.object_pred(reg_feat) - - # implicit objectness - objectness = objectness.view(N, -1, 1, H, W) - normalized_cls_score = cls_score + objectness - torch.log( - 1. + torch.clamp(cls_score.exp(), max=INF) + - torch.clamp(objectness.exp(), max=INF)) - normalized_cls_score = normalized_cls_score.view(N, -1, H, W) - return normalized_cls_score, bbox_reg - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head.
- - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (batch, num_anchors * num_classes, h, w) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (batch, num_anchors * 4, h, w) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. Default: None - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert len(cls_scores) == 1 - assert self.prior_generator.num_levels == 1 - - device = cls_scores[0].device - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - - # The output level is always 1 - anchor_list = [anchors[0] for anchors in anchor_list] - valid_flag_list = [valid_flags[0] for valid_flags in valid_flag_list] - - cls_scores_list = levels_to_images(cls_scores) - bbox_preds_list = levels_to_images(bbox_preds) - - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - cls_scores_list, - bbox_preds_list, - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels) - if cls_reg_targets is None: - return None - (batch_labels, batch_label_weights, num_total_pos, num_total_neg, - batch_bbox_weights, batch_pos_predicted_boxes, - batch_target_boxes) = cls_reg_targets - - flatten_labels = batch_labels.reshape(-1) - batch_label_weights = batch_label_weights.reshape(-1) - cls_score = cls_scores[0].permute(0, 2, 3, - 1).reshape(-1, self.cls_out_channels) - - num_total_samples = (num_total_pos + - num_total_neg) if self.sampling else num_total_pos - num_total_samples = reduce_mean( - cls_score.new_tensor(num_total_samples)).clamp_(1.0).item() - - # classification loss - loss_cls = self.loss_cls( - cls_score, - flatten_labels, - batch_label_weights, - avg_factor=num_total_samples) - - # regression loss - if batch_pos_predicted_boxes.shape[0] == 0: - # no pos sample - loss_bbox = batch_pos_predicted_boxes.sum() * 0 - else: - loss_bbox = self.loss_bbox( - batch_pos_predicted_boxes, - batch_target_boxes, - batch_bbox_weights.float(), - avg_factor=num_total_samples) - - return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) - - def get_targets(self, - cls_scores_list, - bbox_preds_list, - anchor_list, - valid_flag_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=1, - unmap_outputs=True): - """Compute regression and classification targets for anchors in - multiple images. - - Args: - cls_scores_list (list[Tensor]): Classification scores of - each image. each is a 4D-tensor, the shape is - (h * w, num_anchors * num_classes). - bbox_preds_list (list[Tensor]): Bbox preds of each image. - each is a 4D-tensor, the shape is (h * w, num_anchors * 4). - anchor_list (list[Tensor]): Anchors of each image. Each element of - is a tensor of shape (h * w * num_anchors, 4). - valid_flag_list (list[Tensor]): Valid flags of each image. Each - element of is a tensor of shape (h * w * num_anchors, ) - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. 
- img_metas (list[dict]): Meta info of each image. - gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be - ignored. - gt_labels_list (list[Tensor]): Ground truth labels of each box. - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: Usually returns a tuple containing learning targets. - - - batch_labels (Tensor): Labels of all images, a tensor of shape - (batch, h * w * num_anchors). - - batch_label_weights (Tensor): Label weights of all images, a tensor - of shape (batch, h * w * num_anchors). - - num_total_pos (int): Number of positive samples in all - images. - - num_total_neg (int): Number of negative samples in all - images. - additional_returns: This function enables user-defined returns from - `self._get_targets_single`. These returns are currently refined - to properties at each feature map (i.e. having HxW dimension). - The results will be concatenated at the end. - """ - num_imgs = len(img_metas) - assert len(anchor_list) == len(valid_flag_list) == num_imgs - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - results = multi_apply( - self._get_targets_single, - bbox_preds_list, - anchor_list, - valid_flag_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - unmap_outputs=unmap_outputs) - (all_labels, all_label_weights, pos_inds_list, neg_inds_list, - sampling_results_list) = results[:5] - rest_results = list(results[5:]) # user-added return values - # no valid anchors - if any([labels is None for labels in all_labels]): - return None - # sampled anchors of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - - batch_labels = torch.stack(all_labels, 0) - batch_label_weights = torch.stack(all_label_weights, 0) - - res = (batch_labels, batch_label_weights, num_total_pos, num_total_neg) - for i, rests in enumerate(rest_results): # user-added return values - rest_results[i] = torch.cat(rests, 0) - - return res + tuple(rest_results) - - def _get_targets_single(self, - bbox_preds, - flat_anchors, - valid_flags, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=1, - unmap_outputs=True): - """Compute regression and classification targets for anchors in a - single image. - - Args: - bbox_preds (Tensor): Bbox prediction of the image, whose - shape is (h * w, 4). - flat_anchors (Tensor): Anchors of the image, whose shape is - (h * w * num_anchors, 4). - valid_flags (Tensor): Valid flags of the image, whose shape is - (h * w * num_anchors,). - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - img_meta (dict): Meta info of the image. - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: - labels (Tensor): Labels of the image, whose shape is - (h * w * num_anchors, ). - label_weights (Tensor): Label weights of the image, whose shape is - (h * w * num_anchors, ). - pos_inds (Tensor): Pos index of image.
- neg_inds (Tensor): Neg index of image. - sampling_result (obj:`SamplingResult`): Sampling result. - pos_bbox_weights (Tensor): The Weight of using to calculate - the bbox branch loss, which shape is (num, ). - pos_predicted_boxes (Tensor): boxes predicted value of - using to calculate the bbox branch loss, which shape is - (num, 4). - pos_target_boxes (Tensor): boxes target value of - using to calculate the bbox branch loss, which shape is - (num, 4). - """ - inside_flags = anchor_inside_flags(flat_anchors, valid_flags, - img_meta['img_shape'][:2], - self.train_cfg.allowed_border) - if not inside_flags.any(): - return (None, ) * 8 - # assign gt and sample anchors - anchors = flat_anchors[inside_flags, :] - bbox_preds = bbox_preds.reshape(-1, 4) - bbox_preds = bbox_preds[inside_flags, :] - - # decoded bbox - decoder_bbox_preds = self.bbox_coder.decode(anchors, bbox_preds) - assign_result = self.assigner.assign( - decoder_bbox_preds, anchors, gt_bboxes, gt_bboxes_ignore, - None if self.sampling else gt_labels) - - pos_bbox_weights = assign_result.get_extra_property('pos_idx') - pos_predicted_boxes = assign_result.get_extra_property( - 'pos_predicted_boxes') - pos_target_boxes = assign_result.get_extra_property('target_boxes') - - sampling_result = self.sampler.sample(assign_result, anchors, - gt_bboxes) - num_valid_anchors = anchors.shape[0] - labels = anchors.new_full((num_valid_anchors, ), - self.num_classes, - dtype=torch.long) - label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - if gt_labels is None: - # Only rpn gives gt_labels as None - # Foreground is the first class since v2.5.0 - labels[pos_inds] = 0 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if self.train_cfg.pos_weight <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg.pos_weight - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_anchors.size(0) - labels = unmap( - labels, num_total_anchors, inside_flags, - fill=self.num_classes) # fill bg label - label_weights = unmap(label_weights, num_total_anchors, - inside_flags) - - return (labels, label_weights, pos_inds, neg_inds, sampling_result, - pos_bbox_weights, pos_predicted_boxes, pos_target_boxes) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/yolox_head.py b/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/yolox_head.py deleted file mode 100644 index f317e14760b2948609309016e6b4a87eae2e26a8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/dense_heads/yolox_head.py +++ /dev/null @@ -1,493 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, - bias_init_with_prob) -from mmcv.ops.nms import batched_nms -from mmcv.runner import force_fp32 - -from mmdet.core import (MlvlPointGenerator, bbox_xyxy_to_cxcywh, - build_assigner, build_sampler, multi_apply, - reduce_mean) -from ..builder import HEADS, build_loss -from .base_dense_head import BaseDenseHead -from .dense_test_mixins import BBoxTestMixin - - -@HEADS.register_module() -class YOLOXHead(BaseDenseHead, BBoxTestMixin): - """YOLOXHead head used in `YOLOX `_. 
- - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - feat_channels (int): Number of hidden channels in stacking convs. - Default: 256 - stacked_convs (int): Number of stacking convs of the head. - Default: 2. - strides (tuple): Downsample factor of each feature map. - use_depthwise (bool): Whether to use depthwise separable convolution in - blocks. Default: False - dcn_on_last_conv (bool): If True, use DCN in the last layer of - towers. Default: False. - conv_bias (bool | str): If specified as `auto`, it will be decided by - the norm_cfg. Bias of conv will be set as True if `norm_cfg` is - None, otherwise False. Default: "auto". - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Config dict for normalization layer. Default: None. - act_cfg (dict): Config dict for activation layer. Default: None. - loss_cls (dict): Config of classification loss. - loss_bbox (dict): Config of localization loss. - loss_obj (dict): Config of objectness loss. - loss_l1 (dict): Config of L1 loss. - train_cfg (dict): Training config of anchor head. - test_cfg (dict): Testing config of anchor head. - init_cfg (dict or list[dict], optional): Initialization config dict. - """ - - def __init__(self, - num_classes, - in_channels, - feat_channels=256, - stacked_convs=2, - strides=[8, 16, 32], - use_depthwise=False, - dcn_on_last_conv=False, - conv_bias='auto', - conv_cfg=None, - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='Swish'), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - reduction='sum', - loss_weight=1.0), - loss_bbox=dict( - type='IoULoss', - mode='square', - eps=1e-16, - reduction='sum', - loss_weight=5.0), - loss_obj=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - reduction='sum', - loss_weight=1.0), - loss_l1=dict(type='L1Loss', reduction='sum', loss_weight=1.0), - train_cfg=None, - test_cfg=None, - init_cfg=dict( - type='Kaiming', - layer='Conv2d', - a=math.sqrt(5), - distribution='uniform', - mode='fan_in', - nonlinearity='leaky_relu')): - - super().__init__(init_cfg=init_cfg) - self.num_classes = num_classes - self.cls_out_channels = num_classes - self.in_channels = in_channels - self.feat_channels = feat_channels - self.stacked_convs = stacked_convs - self.strides = strides - self.use_depthwise = use_depthwise - self.dcn_on_last_conv = dcn_on_last_conv - assert conv_bias == 'auto' or isinstance(conv_bias, bool) - self.conv_bias = conv_bias - self.use_sigmoid_cls = True - - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - - self.loss_cls = build_loss(loss_cls) - self.loss_bbox = build_loss(loss_bbox) - self.loss_obj = build_loss(loss_obj) - - self.use_l1 = False # This flag will be modified by hooks.
- self.loss_l1 = build_loss(loss_l1) - - self.prior_generator = MlvlPointGenerator(strides, offset=0) - - self.test_cfg = test_cfg - self.train_cfg = train_cfg - - self.sampling = False - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - # sampling=False so use PseudoSampler - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - - self.fp16_enabled = False - self._init_layers() - - def _init_layers(self): - self.multi_level_cls_convs = nn.ModuleList() - self.multi_level_reg_convs = nn.ModuleList() - self.multi_level_conv_cls = nn.ModuleList() - self.multi_level_conv_reg = nn.ModuleList() - self.multi_level_conv_obj = nn.ModuleList() - for _ in self.strides: - self.multi_level_cls_convs.append(self._build_stacked_convs()) - self.multi_level_reg_convs.append(self._build_stacked_convs()) - conv_cls, conv_reg, conv_obj = self._build_predictor() - self.multi_level_conv_cls.append(conv_cls) - self.multi_level_conv_reg.append(conv_reg) - self.multi_level_conv_obj.append(conv_obj) - - def _build_stacked_convs(self): - """Initialize conv layers of a single level head.""" - conv = DepthwiseSeparableConvModule \ - if self.use_depthwise else ConvModule - stacked_convs = [] - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - if self.dcn_on_last_conv and i == self.stacked_convs - 1: - conv_cfg = dict(type='DCNv2') - else: - conv_cfg = self.conv_cfg - stacked_convs.append( - conv( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - bias=self.conv_bias)) - return nn.Sequential(*stacked_convs) - - def _build_predictor(self): - """Initialize predictor layers of a single level head.""" - conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1) - conv_reg = nn.Conv2d(self.feat_channels, 4, 1) - conv_obj = nn.Conv2d(self.feat_channels, 1, 1) - return conv_cls, conv_reg, conv_obj - - def init_weights(self): - super(YOLOXHead, self).init_weights() - # Use prior in model initialization to improve stability - bias_init = bias_init_with_prob(0.01) - for conv_cls, conv_obj in zip(self.multi_level_conv_cls, - self.multi_level_conv_obj): - conv_cls.bias.data.fill_(bias_init) - conv_obj.bias.data.fill_(bias_init) - - def forward_single(self, x, cls_convs, reg_convs, conv_cls, conv_reg, - conv_obj): - """Forward feature of a single scale level.""" - - cls_feat = cls_convs(x) - reg_feat = reg_convs(x) - - cls_score = conv_cls(cls_feat) - bbox_pred = conv_reg(reg_feat) - objectness = conv_obj(reg_feat) - - return cls_score, bbox_pred, objectness - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - Returns: - tuple[Tensor]: A tuple of multi-level predication map, each is a - 4D-tensor of shape (batch_size, 5+num_classes, height, width). - """ - - return multi_apply(self.forward_single, feats, - self.multi_level_cls_convs, - self.multi_level_reg_convs, - self.multi_level_conv_cls, - self.multi_level_conv_reg, - self.multi_level_conv_obj) - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses')) - def get_bboxes(self, - cls_scores, - bbox_preds, - objectnesses, - img_metas=None, - cfg=None, - rescale=False, - with_nms=True): - """Transform network outputs of a batch into bbox results. 
- Args: - cls_scores (list[Tensor]): Classification scores for all - scale levels, each is a 4D-tensor, has shape - (batch_size, num_priors * num_classes, H, W). - bbox_preds (list[Tensor]): Box energies / deltas for all - scale levels, each is a 4D-tensor, has shape - (batch_size, num_priors * 4, H, W). - objectnesses (list[Tensor], Optional): Score factor for - all scale level, each is a 4D-tensor, has shape - (batch_size, 1, H, W). - img_metas (list[dict], Optional): Image meta info. Default None. - cfg (mmcv.Config, Optional): Test / postprocessing configuration, - if None, test_cfg would be used. Default None. - rescale (bool): If True, return boxes in original image space. - Default False. - with_nms (bool): If True, do nms before return boxes. - Default True. - Returns: - list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is an (n, 5) tensor, where the first 4 columns - are bounding box positions (tl_x, tl_y, br_x, br_y) and the - 5-th column is a score between 0 and 1. The second item is a - (n,) tensor where each item is the predicted class label of - the corresponding box. - """ - assert len(cls_scores) == len(bbox_preds) == len(objectnesses) - cfg = self.test_cfg if cfg is None else cfg - scale_factors = np.array( - [img_meta['scale_factor'] for img_meta in img_metas]) - - num_imgs = len(img_metas) - featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] - mlvl_priors = self.prior_generator.grid_priors( - featmap_sizes, - dtype=cls_scores[0].dtype, - device=cls_scores[0].device, - with_stride=True) - - # flatten cls_scores, bbox_preds and objectness - flatten_cls_scores = [ - cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, - self.cls_out_channels) - for cls_score in cls_scores - ] - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) - for bbox_pred in bbox_preds - ] - flatten_objectness = [ - objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) - for objectness in objectnesses - ] - - flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid() - flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) - flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid() - flatten_priors = torch.cat(mlvl_priors) - - flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds) - - if rescale: - flatten_bboxes[..., :4] /= flatten_bboxes.new_tensor( - scale_factors).unsqueeze(1) - - result_list = [] - for img_id in range(len(img_metas)): - cls_scores = flatten_cls_scores[img_id] - score_factor = flatten_objectness[img_id] - bboxes = flatten_bboxes[img_id] - - result_list.append( - self._bboxes_nms(cls_scores, bboxes, score_factor, cfg)) - - return result_list - - def _bbox_decode(self, priors, bbox_preds): - xys = (bbox_preds[..., :2] * priors[:, 2:]) + priors[:, :2] - whs = bbox_preds[..., 2:].exp() * priors[:, 2:] - - tl_x = (xys[..., 0] - whs[..., 0] / 2) - tl_y = (xys[..., 1] - whs[..., 1] / 2) - br_x = (xys[..., 0] + whs[..., 0] / 2) - br_y = (xys[..., 1] + whs[..., 1] / 2) - - decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1) - return decoded_bboxes - - def _bboxes_nms(self, cls_scores, bboxes, score_factor, cfg): - max_scores, labels = torch.max(cls_scores, 1) - valid_mask = score_factor * max_scores >= cfg.score_thr - - bboxes = bboxes[valid_mask] - scores = max_scores[valid_mask] * score_factor[valid_mask] - labels = labels[valid_mask] - - if labels.numel() == 0: - return bboxes, labels - else: - dets, keep = batched_nms(bboxes, scores, labels, cfg.nms) - return dets, 
labels[keep] - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses')) - def loss(self, - cls_scores, - bbox_preds, - objectnesses, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute loss of the head. - Args: - cls_scores (list[Tensor]): Box scores for each scale level, - each is a 4D-tensor, the channel number is - num_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level, each is a 4D-tensor, the channel number is - num_priors * 4. - objectnesses (list[Tensor], Optional): Score factor for - all scale level, each is a 4D-tensor, has shape - (batch_size, 1, H, W). - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - """ - num_imgs = len(img_metas) - featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores] - mlvl_priors = self.prior_generator.grid_priors( - featmap_sizes, - dtype=cls_scores[0].dtype, - device=cls_scores[0].device, - with_stride=True) - - flatten_cls_preds = [ - cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, - self.cls_out_channels) - for cls_pred in cls_scores - ] - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) - for bbox_pred in bbox_preds - ] - flatten_objectness = [ - objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1) - for objectness in objectnesses - ] - - flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1) - flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1) - flatten_objectness = torch.cat(flatten_objectness, dim=1) - flatten_priors = torch.cat(mlvl_priors) - flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds) - - (pos_masks, cls_targets, obj_targets, bbox_targets, l1_targets, - num_fg_imgs) = multi_apply( - self._get_target_single, flatten_cls_preds.detach(), - flatten_objectness.detach(), - flatten_priors.unsqueeze(0).repeat(num_imgs, 1, 1), - flatten_bboxes.detach(), gt_bboxes, gt_labels) - - # The experimental results show that ‘reduce_mean’ can improve - # performance on the COCO dataset. - num_pos = torch.tensor( - sum(num_fg_imgs), - dtype=torch.float, - device=flatten_cls_preds.device) - num_total_samples = max(reduce_mean(num_pos), 1.0) - - pos_masks = torch.cat(pos_masks, 0) - cls_targets = torch.cat(cls_targets, 0) - obj_targets = torch.cat(obj_targets, 0) - bbox_targets = torch.cat(bbox_targets, 0) - if self.use_l1: - l1_targets = torch.cat(l1_targets, 0) - - loss_bbox = self.loss_bbox( - flatten_bboxes.view(-1, 4)[pos_masks], - bbox_targets) / num_total_samples - loss_obj = self.loss_obj(flatten_objectness.view(-1, 1), - obj_targets) / num_total_samples - loss_cls = self.loss_cls( - flatten_cls_preds.view(-1, self.num_classes)[pos_masks], - cls_targets) / num_total_samples - - loss_dict = dict( - loss_cls=loss_cls, loss_bbox=loss_bbox, loss_obj=loss_obj) - - if self.use_l1: - loss_l1 = self.loss_l1( - flatten_bbox_preds.view(-1, 4)[pos_masks], - l1_targets) / num_total_samples - loss_dict.update(loss_l1=loss_l1) - - return loss_dict - - @torch.no_grad() - def _get_target_single(self, cls_preds, objectness, priors, decoded_bboxes, - gt_bboxes, gt_labels): - """Compute classification, regression, and objectness targets for - priors in a single image. 
- Args: - cls_preds (Tensor): Classification predictions of one image, - a 2D-Tensor with shape [num_priors, num_classes] - objectness (Tensor): Objectness predictions of one image, - a 1D-Tensor with shape [num_priors] - priors (Tensor): All priors of one image, a 2D-Tensor with shape - [num_priors, 4] in [cx, cy, stride_w, stride_h] format. - decoded_bboxes (Tensor): Decoded bbox predictions of one image, - a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y, - br_x, br_y] format. - gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor - with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format. - gt_labels (Tensor): Ground truth labels of one image, a Tensor - with shape [num_gts]. - """ - - num_priors = priors.size(0) - num_gts = gt_labels.size(0) - gt_bboxes = gt_bboxes.to(decoded_bboxes.dtype) - # No target - if num_gts == 0: - cls_target = cls_preds.new_zeros((0, self.num_classes)) - bbox_target = cls_preds.new_zeros((0, 4)) - l1_target = cls_preds.new_zeros((0, 4)) - obj_target = cls_preds.new_zeros((num_priors, 1)) - foreground_mask = cls_preds.new_zeros(num_priors).bool() - return (foreground_mask, cls_target, obj_target, bbox_target, - l1_target, 0) - - # YOLOX uses center priors with 0.5 offset to assign targets, - # but uses center priors without offset to regress bboxes. - offset_priors = torch.cat( - [priors[:, :2] + priors[:, 2:] * 0.5, priors[:, 2:]], dim=-1) - - assign_result = self.assigner.assign( - cls_preds.sigmoid() * objectness.unsqueeze(1).sigmoid(), - offset_priors, decoded_bboxes, gt_bboxes, gt_labels) - - sampling_result = self.sampler.sample(assign_result, priors, gt_bboxes) - pos_inds = sampling_result.pos_inds - num_pos_per_img = pos_inds.size(0) - - pos_ious = assign_result.max_overlaps[pos_inds] - # IOU aware classification score - cls_target = F.one_hot(sampling_result.pos_gt_labels, - self.num_classes) * pos_ious.unsqueeze(-1) - obj_target = torch.zeros_like(objectness).unsqueeze(-1) - obj_target[pos_inds] = 1 - bbox_target = sampling_result.pos_gt_bboxes - l1_target = cls_preds.new_zeros((num_pos_per_img, 4)) - if self.use_l1: - l1_target = self._get_l1_target(l1_target, bbox_target, - priors[pos_inds]) - foreground_mask = torch.zeros_like(objectness).to(torch.bool) - foreground_mask[pos_inds] = 1 - return (foreground_mask, cls_target, obj_target, bbox_target, - l1_target, num_pos_per_img) - - def _get_l1_target(self, l1_target, gt_bboxes, priors, eps=1e-8): - """Convert gt bboxes to center offset and log width height.""" - gt_cxcywh = bbox_xyxy_to_cxcywh(gt_bboxes) - l1_target[:, :2] = (gt_cxcywh[:, :2] - priors[:, :2]) / priors[:, 2:] - l1_target[:, 2:] = torch.log(gt_cxcywh[:, 2:] / priors[:, 2:] + eps) - return l1_target diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/__init__.py deleted file mode 100644 index a0a89b87ece2271f1d769413a2712a7bcf3c8620..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/__init__.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved.
-from .atss import ATSS -from .autoassign import AutoAssign -from .base import BaseDetector -from .cascade_rcnn import CascadeRCNN -from .centernet import CenterNet -from .cornernet import CornerNet -from .ddod import DDOD -from .deformable_detr import DeformableDETR -from .detr import DETR -from .fast_rcnn import FastRCNN -from .faster_rcnn import FasterRCNN -from .fcos import FCOS -from .fovea import FOVEA -from .fsaf import FSAF -from .gfl import GFL -from .grid_rcnn import GridRCNN -from .htc import HybridTaskCascade -from .kd_one_stage import KnowledgeDistillationSingleStageDetector -from .lad import LAD -from .mask2former import Mask2Former -from .mask_rcnn import MaskRCNN -from .mask_scoring_rcnn import MaskScoringRCNN -from .maskformer import MaskFormer -from .nasfcos import NASFCOS -from .paa import PAA -from .panoptic_fpn import PanopticFPN -from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor -from .point_rend import PointRend -from .queryinst import QueryInst -from .reppoints_detector import RepPointsDetector -from .retinanet import RetinaNet -from .rpn import RPN -from .scnet import SCNet -from .single_stage import SingleStageDetector -from .solo import SOLO -from .solov2 import SOLOv2 -from .sparse_rcnn import SparseRCNN -from .tood import TOOD -from .trident_faster_rcnn import TridentFasterRCNN -from .two_stage import TwoStageDetector -from .vfnet import VFNet -from .yolact import YOLACT -from .yolo import YOLOV3 -from .yolof import YOLOF -from .yolox import YOLOX - -__all__ = [ - 'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN', - 'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN', - 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS', - 'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF', - 'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT', - 'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO', - 'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX', - 'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD', - 'MaskFormer', 'DDOD', 'Mask2Former' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/atss.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/atss.py deleted file mode 100644 index 00f1acd9a1595ecea0fd7a19ccd63cd991130657..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/atss.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class ATSS(SingleStageDetector): - """Implementation of `ATSS `_.""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/autoassign.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/autoassign.py deleted file mode 100644 index 30ab72075807fbe565ede7e15bbf5ad1ebbec001..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/autoassign.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class AutoAssign(SingleStageDetector): - """Implementation of `AutoAssign: Differentiable Label Assignment for Dense - Object Detection `_.""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None): - super(AutoAssign, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/base.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/base.py deleted file mode 100644 index bf64bce63e8174b56b168d7295cb5a7926b732d2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/base.py +++ /dev/null @@ -1,360 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta, abstractmethod -from collections import OrderedDict - -import mmcv -import numpy as np -import torch -import torch.distributed as dist -from mmcv.runner import BaseModule, auto_fp16 - -from mmdet.core.visualization import imshow_det_bboxes - - -class BaseDetector(BaseModule, metaclass=ABCMeta): - """Base class for detectors.""" - - def __init__(self, init_cfg=None): - super(BaseDetector, self).__init__(init_cfg) - self.fp16_enabled = False - - @property - def with_neck(self): - """bool: whether the detector has a neck""" - return hasattr(self, 'neck') and self.neck is not None - - # TODO: these properties need to be carefully handled - # for both single stage & two stage detectors - @property - def with_shared_head(self): - """bool: whether the detector has a shared head in the RoI Head""" - return hasattr(self, 'roi_head') and self.roi_head.with_shared_head - - @property - def with_bbox(self): - """bool: whether the detector has a bbox head""" - return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox) - or (hasattr(self, 'bbox_head') and self.bbox_head is not None)) - - @property - def with_mask(self): - """bool: whether the detector has a mask head""" - return ((hasattr(self, 'roi_head') and self.roi_head.with_mask) - or (hasattr(self, 'mask_head') and self.mask_head is not None)) - - @abstractmethod - def extract_feat(self, imgs): - """Extract features from images.""" - pass - - def extract_feats(self, imgs): - """Extract features from multiple images. - - Args: - imgs (list[torch.Tensor]): A list of images. The images are - augmented from the same image but in different ways. - - Returns: - list[torch.Tensor]: Features of different images - """ - assert isinstance(imgs, list) - return [self.extract_feat(img) for img in imgs] - - def forward_train(self, imgs, img_metas, **kwargs): - """ - Args: - img (Tensor): of shape (N, C, H, W) encoding input images. - Typically these should be mean centered and std scaled. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys, see - :class:`mmdet.datasets.pipelines.Collect`. - kwargs (keyword arguments): Specific to concrete implementation. - """ - # NOTE the batched image size information may be useful, e.g. - # in DETR, this is needed for the construction of masks, which is - # then used for the transformer_head. 
- batch_input_shape = tuple(imgs[0].size()[-2:]) - for img_meta in img_metas: - img_meta['batch_input_shape'] = batch_input_shape - - async def async_simple_test(self, img, img_metas, **kwargs): - raise NotImplementedError - - @abstractmethod - def simple_test(self, img, img_metas, **kwargs): - pass - - @abstractmethod - def aug_test(self, imgs, img_metas, **kwargs): - """Test function with test time augmentation.""" - pass - - async def aforward_test(self, *, img, img_metas, **kwargs): - for var, name in [(img, 'img'), (img_metas, 'img_metas')]: - if not isinstance(var, list): - raise TypeError(f'{name} must be a list, but got {type(var)}') - - num_augs = len(img) - if num_augs != len(img_metas): - raise ValueError(f'num of augmentations ({len(img)}) ' - f'!= num of image metas ({len(img_metas)})') - # TODO: remove the restriction of samples_per_gpu == 1 when prepared - samples_per_gpu = img[0].size(0) - assert samples_per_gpu == 1 - - if num_augs == 1: - return await self.async_simple_test(img[0], img_metas[0], **kwargs) - else: - raise NotImplementedError - - def forward_test(self, imgs, img_metas, **kwargs): - """ - Args: - imgs (List[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains all images in the batch. - img_metas (List[List[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. - """ - for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: - if not isinstance(var, list): - raise TypeError(f'{name} must be a list, but got {type(var)}') - - num_augs = len(imgs) - if num_augs != len(img_metas): - raise ValueError(f'num of augmentations ({len(imgs)}) ' - f'!= num of image metas ({len(img_metas)})') - - # NOTE the batched image size information may be useful, e.g. - # in DETR, this is needed for the construction of masks, which is - # then used for the transformer_head. - for img, img_meta in zip(imgs, img_metas): - batch_size = len(img_meta) - for img_id in range(batch_size): - img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:]) - - if num_augs == 1: - # proposals (List[List[Tensor]]): the outer list indicates - # test-time augs (multiscale, flip, etc.) and the inner list - # indicates images in a batch. - # The Tensor should have a shape Px4, where P is the number of - # proposals. - if 'proposals' in kwargs: - kwargs['proposals'] = kwargs['proposals'][0] - return self.simple_test(imgs[0], img_metas[0], **kwargs) - else: - assert imgs[0].size(0) == 1, 'aug test does not support ' \ - 'inference with batch size ' \ - f'{imgs[0].size(0)}' - # TODO: support test augmentation for predefined proposals - assert 'proposals' not in kwargs - return self.aug_test(imgs, img_metas, **kwargs) - - @auto_fp16(apply_to=('img', )) - def forward(self, img, img_metas, return_loss=True, **kwargs): - """Calls either :func:`forward_train` or :func:`forward_test` depending - on whether ``return_loss`` is ``True``. - - Note this setting will change the expected inputs. When - ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor - and List[dict]), and when ``return_loss=False``, img and img_meta - should be double nested (i.e. List[Tensor], List[List[dict]]), with - the outer list indicating test time augmentations.
- """ - if torch.onnx.is_in_onnx_export(): - assert len(img_metas) == 1 - return self.onnx_export(img[0], img_metas[0]) - - if return_loss: - return self.forward_train(img, img_metas, **kwargs) - else: - return self.forward_test(img, img_metas, **kwargs) - - def _parse_losses(self, losses): - """Parse the raw outputs (losses) of the network. - - Args: - losses (dict): Raw output of the network, which usually contain - losses and other necessary information. - - Returns: - tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \ - which may be a weighted sum of all losses, log_vars contains \ - all the variables to be sent to the logger. - """ - log_vars = OrderedDict() - for loss_name, loss_value in losses.items(): - if isinstance(loss_value, torch.Tensor): - log_vars[loss_name] = loss_value.mean() - elif isinstance(loss_value, list): - log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) - else: - raise TypeError( - f'{loss_name} is not a tensor or list of tensors') - - loss = sum(_value for _key, _value in log_vars.items() - if 'loss' in _key) - - # If the loss_vars has different length, GPUs will wait infinitely - if dist.is_available() and dist.is_initialized(): - log_var_length = torch.tensor(len(log_vars), device=loss.device) - dist.all_reduce(log_var_length) - message = (f'rank {dist.get_rank()}' + - f' len(log_vars): {len(log_vars)}' + ' keys: ' + - ','.join(log_vars.keys())) - assert log_var_length == len(log_vars) * dist.get_world_size(), \ - 'loss log variables are different across GPUs!\n' + message - - log_vars['loss'] = loss - for loss_name, loss_value in log_vars.items(): - # reduce loss when distributed training - if dist.is_available() and dist.is_initialized(): - loss_value = loss_value.data.clone() - dist.all_reduce(loss_value.div_(dist.get_world_size())) - log_vars[loss_name] = loss_value.item() - - return loss, log_vars - - def train_step(self, data, optimizer): - """The iteration step during training. - - This method defines an iteration step during training, except for the - back propagation and optimizer updating, which are done in an optimizer - hook. Note that in some complicated cases or models, the whole process - including back propagation and optimizer updating is also defined in - this method, such as GAN. - - Args: - data (dict): The output of dataloader. - optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of - runner is passed to ``train_step()``. This argument is unused - and reserved. - - Returns: - dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \ - ``num_samples``. - - - ``loss`` is a tensor for back propagation, which can be a - weighted sum of multiple losses. - - ``log_vars`` contains all the variables to be sent to the - logger. - - ``num_samples`` indicates the batch size (when the model is - DDP, it means the batch size on each GPU), which is used for - averaging the logs. - """ - losses = self(**data) - loss, log_vars = self._parse_losses(losses) - - outputs = dict( - loss=loss, log_vars=log_vars, num_samples=len(data['img_metas'])) - - return outputs - - def val_step(self, data, optimizer=None): - """The iteration step during validation. - - This method shares the same signature as :func:`train_step`, but used - during val epochs. Note that the evaluation after training epochs is - not implemented with this method, but an evaluation hook. 
- """ - losses = self(**data) - loss, log_vars = self._parse_losses(losses) - - outputs = dict( - loss=loss, log_vars=log_vars, num_samples=len(data['img_metas'])) - - return outputs - - def show_result(self, - img, - result, - score_thr=0.3, - bbox_color=(72, 101, 241), - text_color=(72, 101, 241), - mask_color=None, - thickness=2, - font_size=13, - win_name='', - show=False, - wait_time=0, - out_file=None): - """Draw `result` over `img`. - - Args: - img (str or Tensor): The image to be displayed. - result (Tensor or tuple): The results to draw over `img` - bbox_result or (bbox_result, segm_result). - score_thr (float, optional): Minimum score of bboxes to be shown. - Default: 0.3. - bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. - The tuple of color should be in BGR order. Default: 'green' - text_color (str or tuple(int) or :obj:`Color`):Color of texts. - The tuple of color should be in BGR order. Default: 'green' - mask_color (None or str or tuple(int) or :obj:`Color`): - Color of masks. The tuple of color should be in BGR order. - Default: None - thickness (int): Thickness of lines. Default: 2 - font_size (int): Font size of texts. Default: 13 - win_name (str): The window name. Default: '' - wait_time (float): Value of waitKey param. - Default: 0. - show (bool): Whether to show the image. - Default: False. - out_file (str or None): The filename to write the image. - Default: None. - - Returns: - img (Tensor): Only if not `show` or `out_file` - """ - img = mmcv.imread(img) - img = img.copy() - if isinstance(result, tuple): - bbox_result, segm_result = result - if isinstance(segm_result, tuple): - segm_result = segm_result[0] # ms rcnn - else: - bbox_result, segm_result = result, None - bboxes = np.vstack(bbox_result) - labels = [ - np.full(bbox.shape[0], i, dtype=np.int32) - for i, bbox in enumerate(bbox_result) - ] - labels = np.concatenate(labels) - # draw segmentation masks - segms = None - if segm_result is not None and len(labels) > 0: # non empty - segms = mmcv.concat_list(segm_result) - if isinstance(segms[0], torch.Tensor): - segms = torch.stack(segms, dim=0).detach().cpu().numpy() - else: - segms = np.stack(segms, axis=0) - # if out_file specified, do not show image in window - if out_file is not None: - show = False - # draw bounding boxes - img = imshow_det_bboxes( - img, - bboxes, - labels, - segms, - class_names=self.CLASSES, - score_thr=score_thr, - bbox_color=bbox_color, - text_color=text_color, - mask_color=mask_color, - thickness=thickness, - font_size=font_size, - win_name=win_name, - show=show, - wait_time=wait_time, - out_file=out_file) - - if not (show or out_file): - return img - - def onnx_export(self, img, img_metas): - raise NotImplementedError(f'{self.__class__.__name__} does ' - f'not support ONNX EXPORT') diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/cascade_rcnn.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/cascade_rcnn.py deleted file mode 100644 index d8c738271d1c8bdc374a6deeab19902ad8d74b38..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/cascade_rcnn.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class CascadeRCNN(TwoStageDetector): - r"""Implementation of `Cascade R-CNN: Delving into High Quality Object - Detection `_""" - - def __init__(self, - backbone, - neck=None, - rpn_head=None, - roi_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(CascadeRCNN, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg) - - def show_result(self, data, result, **kwargs): - """Show prediction results of the detector. - - Args: - data (str or np.ndarray): Image filename or loaded image. - result (Tensor or tuple): The results to draw over `img` - bbox_result or (bbox_result, segm_result). - - Returns: - np.ndarray: The image with bboxes drawn on it. - """ - if self.with_mask: - ms_bbox_result, ms_segm_result = result - if isinstance(ms_bbox_result, dict): - result = (ms_bbox_result['ensemble'], - ms_segm_result['ensemble']) - else: - if isinstance(result, dict): - result = result['ensemble'] - return super(CascadeRCNN, self).show_result(data, result, **kwargs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/centernet.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/centernet.py deleted file mode 100644 index e1e3fd3ccd4d49832f7450ed359a0bbea13bf631..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/centernet.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from mmdet.core import bbox2result -from mmdet.models.builder import DETECTORS -from ...core.utils import flip_tensor -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class CenterNet(SingleStageDetector): - """Implementation of CenterNet(Objects as Points) - - . - """ - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(CenterNet, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) - - def merge_aug_results(self, aug_results, with_nms): - """Merge augmented detection bboxes and score. - - Args: - aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each - image. - with_nms (bool): If True, do nms before return boxes. - - Returns: - tuple: (out_bboxes, out_labels) - """ - recovered_bboxes, aug_labels = [], [] - for single_result in aug_results: - recovered_bboxes.append(single_result[0][0]) - aug_labels.append(single_result[0][1]) - - bboxes = torch.cat(recovered_bboxes, dim=0).contiguous() - labels = torch.cat(aug_labels).contiguous() - if with_nms: - out_bboxes, out_labels = self.bbox_head._bboxes_nms( - bboxes, labels, self.bbox_head.test_cfg) - else: - out_bboxes, out_labels = bboxes, labels - - return out_bboxes, out_labels - - def aug_test(self, imgs, img_metas, rescale=True): - """Augment testing of CenterNet. Aug test must have flipped image pair, - and unlike CornerNet, it will perform an averaging operation on the - feature map instead of detecting bbox. - - Args: - imgs (list[Tensor]): Augmented images. - img_metas (list[list[dict]]): Meta information of each image, e.g., - image size, scaling factor, etc. - rescale (bool): If True, return boxes in original image space. - Default: True. - - Note: - ``imgs`` must including flipped image pairs. 
- - Returns: - list[list[np.ndarray]]: BBox results of each image and classes. - The outer list corresponds to each image. The inner list - corresponds to each class. - """ - img_inds = list(range(len(imgs))) - assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], ( - 'aug test must have flipped image pair') - aug_results = [] - for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]): - flip_direction = img_metas[flip_ind][0]['flip_direction'] - img_pair = torch.cat([imgs[ind], imgs[flip_ind]]) - x = self.extract_feat(img_pair) - center_heatmap_preds, wh_preds, offset_preds = self.bbox_head(x) - assert len(center_heatmap_preds) == len(wh_preds) == len( - offset_preds) == 1 - - # Feature map averaging - center_heatmap_preds[0] = ( - center_heatmap_preds[0][0:1] + - flip_tensor(center_heatmap_preds[0][1:2], flip_direction)) / 2 - wh_preds[0] = (wh_preds[0][0:1] + - flip_tensor(wh_preds[0][1:2], flip_direction)) / 2 - - bbox_list = self.bbox_head.get_bboxes( - center_heatmap_preds, - wh_preds, [offset_preds[0][0:1]], - img_metas[ind], - rescale=rescale, - with_nms=False) - aug_results.append(bbox_list) - - nms_cfg = self.bbox_head.test_cfg.get('nms_cfg', None) - if nms_cfg is None: - with_nms = False - else: - with_nms = True - bbox_list = [self.merge_aug_results(aug_results, with_nms)] - bbox_results = [ - bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) - for det_bboxes, det_labels in bbox_list - ] - return bbox_results diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/cornernet.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/cornernet.py deleted file mode 100644 index ce921cc3b38e81c1629abeea0cd4e3b317bf7a83..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/cornernet.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from mmdet.core import bbox2result, bbox_mapping_back -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class CornerNet(SingleStageDetector): - """CornerNet. - - This detector is the implementation of the paper `CornerNet: Detecting - Objects as Paired Keypoints `_ . - """ - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) - - def merge_aug_results(self, aug_results, img_metas): - """Merge augmented detection bboxes and score. - - Args: - aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each - image. - img_metas (list[list[dict]]): Meta information of each image, e.g., - image size, scaling factor, etc. 
- - Returns: - tuple: (bboxes, labels) - """ - recovered_bboxes, aug_labels = [], [] - for bboxes_labels, img_info in zip(aug_results, img_metas): - img_shape = img_info[0]['img_shape'] # using shape before padding - scale_factor = img_info[0]['scale_factor'] - flip = img_info[0]['flip'] - bboxes, labels = bboxes_labels - bboxes, scores = bboxes[:, :4], bboxes[:, -1:] - bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip) - recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1)) - aug_labels.append(labels) - - bboxes = torch.cat(recovered_bboxes, dim=0) - labels = torch.cat(aug_labels) - - if bboxes.shape[0] > 0: - out_bboxes, out_labels = self.bbox_head._bboxes_nms( - bboxes, labels, self.bbox_head.test_cfg) - else: - out_bboxes, out_labels = bboxes, labels - - return out_bboxes, out_labels - - def aug_test(self, imgs, img_metas, rescale=False): - """Augment testing of CornerNet. - - Args: - imgs (list[Tensor]): Augmented images. - img_metas (list[list[dict]]): Meta information of each image, e.g., - image size, scaling factor, etc. - rescale (bool): If True, return boxes in original image space. - Default: False. - - Note: - ``imgs`` must include flipped image pairs. - - Returns: - list[list[np.ndarray]]: BBox results of each image and classes. - The outer list corresponds to each image. The inner list - corresponds to each class. - """ - img_inds = list(range(len(imgs))) - - assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], ( - 'aug test must have flipped image pair') - aug_results = [] - for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]): - img_pair = torch.cat([imgs[ind], imgs[flip_ind]]) - x = self.extract_feat(img_pair) - outs = self.bbox_head(x) - bbox_list = self.bbox_head.get_bboxes( - *outs, [img_metas[ind], img_metas[flip_ind]], False, False) - aug_results.append(bbox_list[0]) - aug_results.append(bbox_list[1]) - - bboxes, labels = self.merge_aug_results(aug_results, img_metas) - bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes) - - return [bbox_results] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/ddod.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/ddod.py deleted file mode 100644 index 2ae0a74172ecca07aa8fad399425b19b4ce63eab..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/ddod.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class DDOD(SingleStageDetector): - """Implementation of `DDOD `_.""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(DDOD, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/deformable_detr.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/deformable_detr.py deleted file mode 100644 index b1f164221d2f6ac21448eeb04d685d93f7b86853..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/deformable_detr.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved.
-from ..builder import DETECTORS -from .detr import DETR - - -@DETECTORS.register_module() -class DeformableDETR(DETR): - - def __init__(self, *args, **kwargs): - super(DETR, self).__init__(*args, **kwargs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/detr.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/detr.py deleted file mode 100644 index 06d76913be64b98e3a497c043cf71c7d2d4491ae..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/detr.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch - -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class DETR(SingleStageDetector): - r"""Implementation of `DETR: End-to-End Object Detection with - Transformers `_""" - - def __init__(self, - backbone, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(DETR, self).__init__(backbone, None, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) - - # over-write `forward_dummy` because: - # the forward of bbox_head requires img_metas - def forward_dummy(self, img): - """Used for computing network flops. - - See `mmdetection/tools/analysis_tools/get_flops.py` - """ - warnings.warn('Warning! MultiheadAttention in DETR does not ' - 'support flops computation! Do not use the ' - 'results in your papers!') - - batch_size, _, height, width = img.shape - dummy_img_metas = [ - dict( - batch_input_shape=(height, width), - img_shape=(height, width, 3)) for _ in range(batch_size) - ] - x = self.extract_feat(img) - outs = self.bbox_head(x, dummy_img_metas) - return outs - - # over-write `onnx_export` because: - # (1) the forward of bbox_head requires img_metas - # (2) the different behavior (e.g. construction of `masks`) between - # torch and ONNX model, during the forward of bbox_head - def onnx_export(self, img, img_metas): - """Test function for exporting to ONNX, without test time augmentation. - - Args: - img (torch.Tensor): input images. - img_metas (list[dict]): List of image information. - - Returns: - tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] - and class labels of shape [N, num_det]. - """ - x = self.extract_feat(img) - # forward of this head requires img_metas - outs = self.bbox_head.forward_onnx(x, img_metas) - # get shape as tensor - img_shape = torch._shape_as_tensor(img)[2:] - img_metas[0]['img_shape_for_onnx'] = img_shape - - det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas) - - return det_bboxes, det_labels diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/fast_rcnn.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/fast_rcnn.py deleted file mode 100644 index 7aebe151feb22354573b7b06675e15be3f610fe6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/fast_rcnn.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class FastRCNN(TwoStageDetector): - """Implementation of `Fast R-CNN `_""" - - def __init__(self, - backbone, - roi_head, - train_cfg, - test_cfg, - neck=None, - pretrained=None, - init_cfg=None): - super(FastRCNN, self).__init__( - backbone=backbone, - neck=neck, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg) - - def forward_test(self, imgs, img_metas, proposals, **kwargs): - """ - Args: - imgs (List[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains all images in the batch. - img_metas (List[List[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. - proposals (List[List[Tensor]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. The Tensor should have a shape Px4, where - P is the number of proposals. - """ - for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: - if not isinstance(var, list): - raise TypeError(f'{name} must be a list, but got {type(var)}') - - num_augs = len(imgs) - if num_augs != len(img_metas): - raise ValueError(f'num of augmentations ({len(imgs)}) ' - f'!= num of image meta ({len(img_metas)})') - - if num_augs == 1: - return self.simple_test(imgs[0], img_metas[0], proposals[0], - **kwargs) - else: - # TODO: support test-time augmentation - raise NotImplementedError diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/faster_rcnn.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/faster_rcnn.py deleted file mode 100644 index 70fb662f1705997be8d899f4760ab9a3aafec18d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/faster_rcnn.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class FasterRCNN(TwoStageDetector): - """Implementation of `Faster R-CNN `_""" - - def __init__(self, - backbone, - rpn_head, - roi_head, - train_cfg, - test_cfg, - neck=None, - pretrained=None, - init_cfg=None): - super(FasterRCNN, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/fcos.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/fcos.py deleted file mode 100644 index d985bd02d7ca5c13e86dfdb9a7a5ed9b29d890cc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/fcos.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved.
-from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class FCOS(SingleStageDetector): - """Implementation of `FCOS `_""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/fovea.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/fovea.py deleted file mode 100644 index 6fd908c7e1795f3f216481d7a3f6975e710a33b5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/fovea.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class FOVEA(SingleStageDetector): - """Implementation of `FoveaBox `_""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/fsaf.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/fsaf.py deleted file mode 100644 index 81ed1bdef1a8957077788397422725c83e3ffed2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/fsaf.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class FSAF(SingleStageDetector): - """Implementation of `FSAF `_""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/gfl.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/gfl.py deleted file mode 100644 index 4628e2e7c929bb7195ef51f741da9ca66bf9c3d8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/gfl.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class GFL(SingleStageDetector): - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/grid_rcnn.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/grid_rcnn.py deleted file mode 100644 index bba7873bcf3df1ca82f471a86cce5a3f15ccf724..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/grid_rcnn.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class GridRCNN(TwoStageDetector): - """Grid R-CNN. 
- - This detector is the implementation of: - - Grid R-CNN (https://arxiv.org/abs/1811.12030) - - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688) - """ - - def __init__(self, - backbone, - rpn_head, - roi_head, - train_cfg, - test_cfg, - neck=None, - pretrained=None, - init_cfg=None): - super(GridRCNN, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/htc.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/htc.py deleted file mode 100644 index f7c95338a78fad03ffa7db3a479865a416d0d70c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/htc.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .cascade_rcnn import CascadeRCNN - - -@DETECTORS.register_module() -class HybridTaskCascade(CascadeRCNN): - """Implementation of `HTC `_""" - - def __init__(self, **kwargs): - super(HybridTaskCascade, self).__init__(**kwargs) - - @property - def with_semantic(self): - """bool: whether the detector has a semantic head""" - return self.roi_head.with_semantic diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/kd_one_stage.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/kd_one_stage.py deleted file mode 100644 index fb66b5152cdeb1dd9698cff011108de3f3f12ac2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/kd_one_stage.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from pathlib import Path - -import mmcv -import torch -from mmcv.runner import load_checkpoint - -from .. import build_detector -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class KnowledgeDistillationSingleStageDetector(SingleStageDetector): - r"""Implementation of `Distilling the Knowledge in a Neural Network. - `_. - - Args: - teacher_config (str | dict): Config file path - or the config object of teacher model. - teacher_ckpt (str, optional): Checkpoint path of teacher model. - If left as None, the model will not load any weights. - """ - - def __init__(self, - backbone, - neck, - bbox_head, - teacher_config, - teacher_ckpt=None, - eval_teacher=True, - train_cfg=None, - test_cfg=None, - pretrained=None): - super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg, - pretrained) - self.eval_teacher = eval_teacher - # Build teacher model - if isinstance(teacher_config, (str, Path)): - teacher_config = mmcv.Config.fromfile(teacher_config) - self.teacher_model = build_detector(teacher_config['model']) - if teacher_ckpt is not None: - load_checkpoint( - self.teacher_model, teacher_ckpt, map_location='cpu') - - def forward_train(self, - img, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None): - """ - Args: - img (Tensor): Input images of shape (N, C, H, W). - Typically these should be mean centered and std scaled. - img_metas (list[dict]): A List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - :class:`mmdet.datasets.pipelines.Collect`. - gt_bboxes (list[Tensor]): Each item are the truth boxes for each - image in [tl_x, tl_y, br_x, br_y] format. 
- gt_labels (list[Tensor]): Class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor]): Specify which bounding - boxes can be ignored when computing the loss. - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - x = self.extract_feat(img) - with torch.no_grad(): - teacher_x = self.teacher_model.extract_feat(img) - out_teacher = self.teacher_model.bbox_head(teacher_x) - losses = self.bbox_head.forward_train(x, out_teacher, img_metas, - gt_bboxes, gt_labels, - gt_bboxes_ignore) - return losses - - def cuda(self, device=None): - """Since teacher_model is registered as a plain object, it is necessary - to put the teacher model to cuda when calling cuda function.""" - self.teacher_model.cuda(device=device) - return super().cuda(device=device) - - def train(self, mode=True): - """Set the same train mode for teacher and student model.""" - if self.eval_teacher: - self.teacher_model.train(False) - else: - self.teacher_model.train(mode) - super().train(mode) - - def __setattr__(self, name, value): - """Set attribute, i.e. self.name = value - - This reloading prevent the teacher model from being registered as a - nn.Module. The teacher module is registered as a plain object, so that - the teacher parameters will not show up when calling - ``self.parameters``, ``self.modules``, ``self.children`` methods. - """ - if name == 'teacher_model': - object.__setattr__(self, name, value) - else: - super().__setattr__(name, value) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/lad.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/lad.py deleted file mode 100644 index c6cc1e0b2d9fd91dabc606da5192522e908ccebf..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/lad.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import torch -import torch.nn as nn -from mmcv.runner import load_checkpoint - -from ..builder import DETECTORS, build_backbone, build_head, build_neck -from .kd_one_stage import KnowledgeDistillationSingleStageDetector - - -@DETECTORS.register_module() -class LAD(KnowledgeDistillationSingleStageDetector): - """Implementation of `LAD `_.""" - - def __init__(self, - backbone, - neck, - bbox_head, - teacher_backbone, - teacher_neck, - teacher_bbox_head, - teacher_ckpt, - eval_teacher=True, - train_cfg=None, - test_cfg=None, - pretrained=None): - super(KnowledgeDistillationSingleStageDetector, - self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, - pretrained) - self.eval_teacher = eval_teacher - self.teacher_model = nn.Module() - self.teacher_model.backbone = build_backbone(teacher_backbone) - if teacher_neck is not None: - self.teacher_model.neck = build_neck(teacher_neck) - teacher_bbox_head.update(train_cfg=train_cfg) - teacher_bbox_head.update(test_cfg=test_cfg) - self.teacher_model.bbox_head = build_head(teacher_bbox_head) - if teacher_ckpt is not None: - load_checkpoint( - self.teacher_model, teacher_ckpt, map_location='cpu') - - @property - def with_teacher_neck(self): - """bool: whether the detector has a teacher_neck""" - return hasattr(self.teacher_model, 'neck') and \ - self.teacher_model.neck is not None - - def extract_teacher_feat(self, img): - """Directly extract teacher features from the backbone+neck.""" - x = self.teacher_model.backbone(img) - if self.with_teacher_neck: - x = self.teacher_model.neck(x) - return x - - def forward_train(self, - img, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None): - """ - Args: - img (Tensor): Input images of shape (N, C, H, W). - Typically these should be mean centered and std scaled. - img_metas (list[dict]): A List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - :class:`mmdet.datasets.pipelines.Collect`. - gt_bboxes (list[Tensor]): Each item are the truth boxes for each - image in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): Class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor]): Specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - # get label assignment from the teacher - with torch.no_grad(): - x_teacher = self.extract_teacher_feat(img) - outs_teacher = self.teacher_model.bbox_head(x_teacher) - label_assignment_results = \ - self.teacher_model.bbox_head.get_label_assignment( - *outs_teacher, gt_bboxes, gt_labels, img_metas, - gt_bboxes_ignore) - - # the student use the label assignment from the teacher to learn - x = self.extract_feat(img) - losses = self.bbox_head.forward_train(x, label_assignment_results, - img_metas, gt_bboxes, gt_labels, - gt_bboxes_ignore) - return losses diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/mask2former.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/mask2former.py deleted file mode 100644 index b9ad2ed25d30072aeb8ec99e4a865c9cad092444..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/mask2former.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from ..builder import DETECTORS -from .maskformer import MaskFormer - - -@DETECTORS.register_module() -class Mask2Former(MaskFormer): - r"""Implementation of `Masked-attention Mask - Transformer for Universal Image Segmentation - `_.""" - - def __init__(self, - backbone, - neck=None, - panoptic_head=None, - panoptic_fusion_head=None, - train_cfg=None, - test_cfg=None, - init_cfg=None): - super().__init__( - backbone, - neck=neck, - panoptic_head=panoptic_head, - panoptic_fusion_head=panoptic_fusion_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - init_cfg=init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/mask_rcnn.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/mask_rcnn.py deleted file mode 100644 index c68489f9c22e112ceae9c265e916cc3c1a6ae301..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/mask_rcnn.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class MaskRCNN(TwoStageDetector): - """Implementation of `Mask R-CNN `_""" - - def __init__(self, - backbone, - rpn_head, - roi_head, - train_cfg, - test_cfg, - neck=None, - pretrained=None, - init_cfg=None): - super(MaskRCNN, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/mask_scoring_rcnn.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/mask_scoring_rcnn.py deleted file mode 100644 index 5f55656f3043564c7f974739c764180c9230738b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/mask_scoring_rcnn.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class MaskScoringRCNN(TwoStageDetector): - """Mask Scoring RCNN. - - https://arxiv.org/abs/1903.00241 - """ - - def __init__(self, - backbone, - rpn_head, - roi_head, - train_cfg, - test_cfg, - neck=None, - pretrained=None, - init_cfg=None): - super(MaskScoringRCNN, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/maskformer.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/maskformer.py deleted file mode 100644 index 3d251adad139997d28827e3ad7ed79a48bcce8bb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/maskformer.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import copy - -import mmcv -import numpy as np - -from mmdet.core import INSTANCE_OFFSET, bbox2result -from mmdet.core.visualization import imshow_det_bboxes -from ..builder import DETECTORS, build_backbone, build_head, build_neck -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class MaskFormer(SingleStageDetector): - r"""Implementation of `Per-Pixel Classification is - NOT All You Need for Semantic Segmentation - `_.""" - - def __init__(self, - backbone, - neck=None, - panoptic_head=None, - panoptic_fusion_head=None, - train_cfg=None, - test_cfg=None, - init_cfg=None): - super(SingleStageDetector, self).__init__(init_cfg=init_cfg) - self.backbone = build_backbone(backbone) - if neck is not None: - self.neck = build_neck(neck) - - panoptic_head_ = copy.deepcopy(panoptic_head) - panoptic_head_.update(train_cfg=train_cfg) - panoptic_head_.update(test_cfg=test_cfg) - self.panoptic_head = build_head(panoptic_head_) - - panoptic_fusion_head_ = copy.deepcopy(panoptic_fusion_head) - panoptic_fusion_head_.update(test_cfg=test_cfg) - self.panoptic_fusion_head = build_head(panoptic_fusion_head_) - - self.num_things_classes = self.panoptic_head.num_things_classes - self.num_stuff_classes = self.panoptic_head.num_stuff_classes - self.num_classes = self.panoptic_head.num_classes - - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - # BaseDetector.show_result default for instance segmentation - if self.num_stuff_classes > 0: - self.show_result = self._show_pan_result - - def forward_dummy(self, img, img_metas): - """Used for computing network flops. See - `mmdetection/tools/analysis_tools/get_flops.py` - - Args: - img (Tensor): of shape (N, C, H, W) encoding input images. - Typically these should be mean centered and std scaled. - img_metas (list[Dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - """ - super(SingleStageDetector, self).forward_train(img, img_metas) - x = self.extract_feat(img) - outs = self.panoptic_head(x, img_metas) - return outs - - def forward_train(self, - img, - img_metas, - gt_bboxes, - gt_labels, - gt_masks, - gt_semantic_seg=None, - gt_bboxes_ignore=None, - **kargs): - """ - Args: - img (Tensor): of shape (N, C, H, W) encoding input images. - Typically these should be mean centered and std scaled. - img_metas (list[Dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box. - gt_masks (list[BitmapMasks]): true segmentation masks for each box - used if the architecture supports a segmentation task. - gt_semantic_seg (list[tensor]): semantic segmentation mask for - images for panoptic segmentation. - Defaults to None for instance segmentation. - gt_bboxes_ignore (list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - Defaults to None. 
- - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - # add batch_input_shape in img_metas - super(SingleStageDetector, self).forward_train(img, img_metas) - x = self.extract_feat(img) - losses = self.panoptic_head.forward_train(x, img_metas, gt_bboxes, - gt_labels, gt_masks, - gt_semantic_seg, - gt_bboxes_ignore) - - return losses - - def simple_test(self, imgs, img_metas, **kwargs): - """Test without augmentation. - - Args: - imgs (Tensor): A batch of images. - img_metas (list[dict]): List of image information. - - Returns: - list[dict[str, np.array | tuple[list]] | tuple[list]]: - Semantic segmentation results and panoptic segmentation \ - results of each image for panoptic segmentation, or formatted \ - bbox and mask results of each image for instance segmentation. - - .. code-block:: none - - [ - # panoptic segmentation - { - 'pan_results': np.array, # shape = [h, w] - 'ins_results': tuple[list], - # semantic segmentation results are not supported yet - 'sem_results': np.array - }, - ... - ] - - or - - .. code-block:: none - - [ - # instance segmentation - ( - bboxes, # list[np.array] - masks # list[list[np.array]] - ), - ... - ] - """ - feats = self.extract_feat(imgs) - mask_cls_results, mask_pred_results = self.panoptic_head.simple_test( - feats, img_metas, **kwargs) - results = self.panoptic_fusion_head.simple_test( - mask_cls_results, mask_pred_results, img_metas, **kwargs) - for i in range(len(results)): - if 'pan_results' in results[i]: - results[i]['pan_results'] = results[i]['pan_results'].detach( - ).cpu().numpy() - - if 'ins_results' in results[i]: - labels_per_image, bboxes, mask_pred_binary = results[i][ - 'ins_results'] - bbox_results = bbox2result(bboxes, labels_per_image, - self.num_things_classes) - mask_results = [[] for _ in range(self.num_things_classes)] - for j, label in enumerate(labels_per_image): - mask = mask_pred_binary[j].detach().cpu().numpy() - mask_results[label].append(mask) - results[i]['ins_results'] = bbox_results, mask_results - - assert 'sem_results' not in results[i], 'segmantic segmentation '\ - 'results are not supported yet.' - - if self.num_stuff_classes == 0: - results = [res['ins_results'] for res in results] - - return results - - def aug_test(self, imgs, img_metas, **kwargs): - raise NotImplementedError - - def onnx_export(self, img, img_metas): - raise NotImplementedError - - def _show_pan_result(self, - img, - result, - score_thr=0.3, - bbox_color=(72, 101, 241), - text_color=(72, 101, 241), - mask_color=None, - thickness=2, - font_size=13, - win_name='', - show=False, - wait_time=0, - out_file=None): - """Draw `panoptic result` over `img`. - - Args: - img (str or Tensor): The image to be displayed. - result (dict): The results. - - score_thr (float, optional): Minimum score of bboxes to be shown. - Default: 0.3. - bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. - The tuple of color should be in BGR order. Default: 'green'. - text_color (str or tuple(int) or :obj:`Color`):Color of texts. - The tuple of color should be in BGR order. Default: 'green'. - mask_color (None or str or tuple(int) or :obj:`Color`): - Color of masks. The tuple of color should be in BGR order. - Default: None. - thickness (int): Thickness of lines. Default: 2. - font_size (int): Font size of texts. Default: 13. - win_name (str): The window name. Default: ''. - wait_time (float): Value of waitKey param. - Default: 0. - show (bool): Whether to show the image. - Default: False. 
- out_file (str or None): The filename to write the image. - Default: None. - - Returns: - img (Tensor): Only if not `show` or `out_file`. - """ - img = mmcv.imread(img) - img = img.copy() - pan_results = result['pan_results'] - # keep objects ahead - ids = np.unique(pan_results)[::-1] - legal_indices = ids != self.num_classes # for VOID label - ids = ids[legal_indices] - labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64) - segms = (pan_results[None] == ids[:, None, None]) - - # if out_file specified, do not show image in window - if out_file is not None: - show = False - # draw bounding boxes - img = imshow_det_bboxes( - img, - segms=segms, - labels=labels, - class_names=self.CLASSES, - bbox_color=bbox_color, - text_color=text_color, - mask_color=mask_color, - thickness=thickness, - font_size=font_size, - win_name=win_name, - show=show, - wait_time=wait_time, - out_file=out_file) - - if not (show or out_file): - return img diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/nasfcos.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/nasfcos.py deleted file mode 100644 index a34c2280f59f93139e716b54ef1799fc0941149f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/nasfcos.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class NASFCOS(SingleStageDetector): - """NAS-FCOS: Fast Neural Architecture Search for Object Detection. - - https://arxiv.org/abs/1906.0442 - """ - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/paa.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/paa.py deleted file mode 100644 index f5cb8372a02e84fc1405c05cd814e8109bc19d20..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/paa.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class PAA(SingleStageDetector): - """Implementation of `PAA `_.""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/panoptic_fpn.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/panoptic_fpn.py deleted file mode 100644 index f8ac751fad188a85a75a87678ee76693c5609df2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/panoptic_fpn.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from ..builder import DETECTORS -from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor - - -@DETECTORS.register_module() -class PanopticFPN(TwoStagePanopticSegmentor): - r"""Implementation of `Panoptic feature pyramid - networks `_""" - - def __init__( - self, - backbone, - neck=None, - rpn_head=None, - roi_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None, - # for panoptic segmentation - semantic_head=None, - panoptic_fusion_head=None): - super(PanopticFPN, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg, - semantic_head=semantic_head, - panoptic_fusion_head=panoptic_fusion_head) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/panoptic_two_stage_segmentor.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/panoptic_two_stage_segmentor.py deleted file mode 100644 index 5ad49bac705a677d1656cf95d2686fd83d2b1b47..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/panoptic_two_stage_segmentor.py +++ /dev/null @@ -1,279 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import numpy as np -import torch - -from mmdet.core import INSTANCE_OFFSET, bbox2roi, multiclass_nms -from mmdet.core.visualization import imshow_det_bboxes -from ..builder import DETECTORS, build_head -from ..roi_heads.mask_heads.fcn_mask_head import _do_paste_mask -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class TwoStagePanopticSegmentor(TwoStageDetector): - """Base class of Two-stage Panoptic Segmentor. - - As well as the components in TwoStageDetector, Panoptic Segmentor has extra - semantic_head and panoptic_fusion_head. - """ - - def __init__( - self, - backbone, - neck=None, - rpn_head=None, - roi_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None, - # for panoptic segmentation - semantic_head=None, - panoptic_fusion_head=None): - super(TwoStagePanopticSegmentor, - self).__init__(backbone, neck, rpn_head, roi_head, train_cfg, - test_cfg, pretrained, init_cfg) - if semantic_head is not None: - self.semantic_head = build_head(semantic_head) - if panoptic_fusion_head is not None: - panoptic_cfg = test_cfg.panoptic if test_cfg is not None else None - panoptic_fusion_head_ = panoptic_fusion_head.deepcopy() - panoptic_fusion_head_.update(test_cfg=panoptic_cfg) - self.panoptic_fusion_head = build_head(panoptic_fusion_head_) - - self.num_things_classes = self.panoptic_fusion_head.\ - num_things_classes - self.num_stuff_classes = self.panoptic_fusion_head.\ - num_stuff_classes - self.num_classes = self.panoptic_fusion_head.num_classes - - @property - def with_semantic_head(self): - return hasattr(self, - 'semantic_head') and self.semantic_head is not None - - @property - def with_panoptic_fusion_head(self): - return hasattr(self, 'panoptic_fusion_heads') and \ - self.panoptic_fusion_head is not None - - def forward_dummy(self, img): - """Used for computing network flops. 
- - See `mmdetection/tools/get_flops.py` - """ - raise NotImplementedError( - f'`forward_dummy` is not implemented in {self.__class__.__name__}') - - def forward_train(self, - img, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None, - gt_semantic_seg=None, - proposals=None, - **kwargs): - x = self.extract_feat(img) - losses = dict() - - # RPN forward and loss - if self.with_rpn: - proposal_cfg = self.train_cfg.get('rpn_proposal', - self.test_cfg.rpn) - rpn_losses, proposal_list = self.rpn_head.forward_train( - x, - img_metas, - gt_bboxes, - gt_labels=None, - gt_bboxes_ignore=gt_bboxes_ignore, - proposal_cfg=proposal_cfg) - losses.update(rpn_losses) - else: - proposal_list = proposals - - roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list, - gt_bboxes, gt_labels, - gt_bboxes_ignore, gt_masks, - **kwargs) - losses.update(roi_losses) - - semantic_loss = self.semantic_head.forward_train(x, gt_semantic_seg) - losses.update(semantic_loss) - - return losses - - def simple_test_mask(self, - x, - img_metas, - det_bboxes, - det_labels, - rescale=False): - """Simple test for mask head without augmentation.""" - img_shapes = tuple(meta['ori_shape'] - for meta in img_metas) if rescale else tuple( - meta['pad_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - masks = [] - for img_shape in img_shapes: - out_shape = (0, self.roi_head.bbox_head.num_classes) \ - + img_shape[:2] - masks.append(det_bboxes[0].new_zeros(out_shape)) - mask_pred = det_bboxes[0].new_zeros((0, 80, 28, 28)) - mask_results = dict( - masks=masks, mask_pred=mask_pred, mask_feats=None) - return mask_results - - _bboxes = [det_bboxes[i][:, :4] for i in range(len(det_bboxes))] - if rescale: - if not isinstance(scale_factors[0], float): - scale_factors = [ - det_bboxes[0].new_tensor(scale_factor) - for scale_factor in scale_factors - ] - _bboxes = [ - _bboxes[i] * scale_factors[i] for i in range(len(_bboxes)) - ] - - mask_rois = bbox2roi(_bboxes) - mask_results = self.roi_head._mask_forward(x, mask_rois) - mask_pred = mask_results['mask_pred'] - # split batch mask prediction back to each image - num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes] - mask_preds = mask_pred.split(num_mask_roi_per_img, 0) - - # resize the mask_preds to (K, H, W) - masks = [] - for i in range(len(_bboxes)): - det_bbox = det_bboxes[i][:, :4] - det_label = det_labels[i] - - mask_pred = mask_preds[i].sigmoid() - - box_inds = torch.arange(mask_pred.shape[0]) - mask_pred = mask_pred[box_inds, det_label][:, None] - - img_h, img_w, _ = img_shapes[i] - mask_pred, _ = _do_paste_mask( - mask_pred, det_bbox, img_h, img_w, skip_empty=False) - masks.append(mask_pred) - - mask_results['masks'] = masks - - return mask_results - - def simple_test(self, img, img_metas, proposals=None, rescale=False): - """Test without Augmentation.""" - x = self.extract_feat(img) - - if proposals is None: - proposal_list = self.rpn_head.simple_test_rpn(x, img_metas) - else: - proposal_list = proposals - - bboxes, scores = self.roi_head.simple_test_bboxes( - x, img_metas, proposal_list, None, rescale=rescale) - - pan_cfg = self.test_cfg.panoptic - # class-wise predictions - det_bboxes = [] - det_labels = [] - for bboxe, score in zip(bboxes, scores): - det_bbox, det_label = multiclass_nms(bboxe, score, - pan_cfg.score_thr, - pan_cfg.nms, - pan_cfg.max_per_img) - det_bboxes.append(det_bbox) - det_labels.append(det_label) - - 
mask_results = self.simple_test_mask( - x, img_metas, det_bboxes, det_labels, rescale=rescale) - masks = mask_results['masks'] - - seg_preds = self.semantic_head.simple_test(x, img_metas, rescale) - - results = [] - for i in range(len(det_bboxes)): - pan_results = self.panoptic_fusion_head.simple_test( - det_bboxes[i], det_labels[i], masks[i], seg_preds[i]) - pan_results = pan_results.int().detach().cpu().numpy() - result = dict(pan_results=pan_results) - results.append(result) - return results - - def show_result(self, - img, - result, - score_thr=0.3, - bbox_color=(72, 101, 241), - text_color=(72, 101, 241), - mask_color=None, - thickness=2, - font_size=13, - win_name='', - show=False, - wait_time=0, - out_file=None): - """Draw `result` over `img`. - - Args: - img (str or Tensor): The image to be displayed. - result (dict): The results. - - score_thr (float, optional): Minimum score of bboxes to be shown. - Default: 0.3. - bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. - The tuple of color should be in BGR order. Default: 'green'. - text_color (str or tuple(int) or :obj:`Color`):Color of texts. - The tuple of color should be in BGR order. Default: 'green'. - mask_color (None or str or tuple(int) or :obj:`Color`): - Color of masks. The tuple of color should be in BGR order. - Default: None. - thickness (int): Thickness of lines. Default: 2. - font_size (int): Font size of texts. Default: 13. - win_name (str): The window name. Default: ''. - wait_time (float): Value of waitKey param. - Default: 0. - show (bool): Whether to show the image. - Default: False. - out_file (str or None): The filename to write the image. - Default: None. - - Returns: - img (Tensor): Only if not `show` or `out_file`. - """ - img = mmcv.imread(img) - img = img.copy() - pan_results = result['pan_results'] - # keep objects ahead - ids = np.unique(pan_results)[::-1] - legal_indices = ids != self.num_classes # for VOID label - ids = ids[legal_indices] - labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64) - segms = (pan_results[None] == ids[:, None, None]) - - # if out_file specified, do not show image in window - if out_file is not None: - show = False - # draw bounding boxes - img = imshow_det_bboxes( - img, - segms=segms, - labels=labels, - class_names=self.CLASSES, - bbox_color=bbox_color, - text_color=text_color, - mask_color=mask_color, - thickness=thickness, - font_size=font_size, - win_name=win_name, - show=show, - wait_time=wait_time, - out_file=out_file) - - if not (show or out_file): - return img diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/point_rend.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/point_rend.py deleted file mode 100644 index 90eb4d40eb179e41cfe0cc2772c9120c093b3d93..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/point_rend.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class PointRend(TwoStageDetector): - """PointRend: Image Segmentation as Rendering - - This detector is the implementation of - `PointRend `_. 
- - """ - - def __init__(self, - backbone, - rpn_head, - roi_head, - train_cfg, - test_cfg, - neck=None, - pretrained=None, - init_cfg=None): - super(PointRend, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/queryinst.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/queryinst.py deleted file mode 100644 index 5fc216c47340fc79344c8eae908b1ec45da2b2b2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/queryinst.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .sparse_rcnn import SparseRCNN - - -@DETECTORS.register_module() -class QueryInst(SparseRCNN): - r"""Implementation of - `Instances as Queries `_""" - - def __init__(self, - backbone, - rpn_head, - roi_head, - train_cfg, - test_cfg, - neck=None, - pretrained=None, - init_cfg=None): - super(QueryInst, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/reppoints_detector.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/reppoints_detector.py deleted file mode 100644 index f1986cdccf3da96cd179f6bfe9f4f16ff54c411e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/reppoints_detector.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class RepPointsDetector(SingleStageDetector): - """RepPoints: Point Set Representation for Object Detection. - - This detector is the implementation of: - - RepPoints detector (https://arxiv.org/pdf/1904.11490) - """ - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(RepPointsDetector, - self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, - pretrained, init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/retinanet.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/retinanet.py deleted file mode 100644 index c28545abb011fa838c56d04fc2583428d61a42f8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/retinanet.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class RetinaNet(SingleStageDetector): - """Implementation of `RetinaNet `_""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/rpn.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/rpn.py deleted file mode 100644 index 6ec326b751ad26bd7247e9ba8605d785389b91af..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/rpn.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import warnings - -import mmcv -import torch -from mmcv.image import tensor2imgs - -from mmdet.core import bbox_mapping -from ..builder import DETECTORS, build_backbone, build_head, build_neck -from .base import BaseDetector - - -@DETECTORS.register_module() -class RPN(BaseDetector): - """Implementation of Region Proposal Network.""" - - def __init__(self, - backbone, - neck, - rpn_head, - train_cfg, - test_cfg, - pretrained=None, - init_cfg=None): - super(RPN, self).__init__(init_cfg) - if pretrained: - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - backbone.pretrained = pretrained - self.backbone = build_backbone(backbone) - self.neck = build_neck(neck) if neck is not None else None - rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None - rpn_head.update(train_cfg=rpn_train_cfg) - rpn_head.update(test_cfg=test_cfg.rpn) - self.rpn_head = build_head(rpn_head) - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - def extract_feat(self, img): - """Extract features. - - Args: - img (torch.Tensor): Image tensor with shape (n, c, h ,w). - - Returns: - list[torch.Tensor]: Multi-level features that may have - different resolutions. - """ - x = self.backbone(img) - if self.with_neck: - x = self.neck(x) - return x - - def forward_dummy(self, img): - """Dummy forward function.""" - x = self.extract_feat(img) - rpn_outs = self.rpn_head(x) - return rpn_outs - - def forward_train(self, - img, - img_metas, - gt_bboxes=None, - gt_bboxes_ignore=None): - """ - Args: - img (Tensor): Input images of shape (N, C, H, W). - Typically these should be mean centered and std scaled. - img_metas (list[dict]): A List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - :class:`mmdet.datasets.pipelines.Collect`. - gt_bboxes (list[Tensor]): Each item are the truth boxes for each - image in [tl_x, tl_y, br_x, br_y] format. - gt_bboxes_ignore (None | list[Tensor]): Specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - if (isinstance(self.train_cfg.rpn, dict) - and self.train_cfg.rpn.get('debug', False)): - self.rpn_head.debug_imgs = tensor2imgs(img) - - x = self.extract_feat(img) - losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None, - gt_bboxes_ignore) - return losses - - def simple_test(self, img, img_metas, rescale=False): - """Test function without test time augmentation. - - Args: - imgs (list[torch.Tensor]): List of multiple images - img_metas (list[dict]): List of image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[np.ndarray]: proposals - """ - x = self.extract_feat(img) - # get origin input shape to onnx dynamic input shape - if torch.onnx.is_in_onnx_export(): - img_shape = torch._shape_as_tensor(img)[2:] - img_metas[0]['img_shape_for_onnx'] = img_shape - proposal_list = self.rpn_head.simple_test_rpn(x, img_metas) - if rescale: - for proposals, meta in zip(proposal_list, img_metas): - proposals[:, :4] /= proposals.new_tensor(meta['scale_factor']) - if torch.onnx.is_in_onnx_export(): - return proposal_list - - return [proposal.cpu().numpy() for proposal in proposal_list] - - def aug_test(self, imgs, img_metas, rescale=False): - """Test function with test time augmentation. 
- - Args: - imgs (list[torch.Tensor]): List of multiple images - img_metas (list[dict]): List of image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[np.ndarray]: proposals - """ - proposal_list = self.rpn_head.aug_test_rpn( - self.extract_feats(imgs), img_metas) - if not rescale: - for proposals, img_meta in zip(proposal_list, img_metas[0]): - img_shape = img_meta['img_shape'] - scale_factor = img_meta['scale_factor'] - flip = img_meta['flip'] - flip_direction = img_meta['flip_direction'] - proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape, - scale_factor, flip, - flip_direction) - return [proposal.cpu().numpy() for proposal in proposal_list] - - def show_result(self, data, result, top_k=20, **kwargs): - """Show RPN proposals on the image. - - Args: - data (str or np.ndarray): Image filename or loaded image. - result (Tensor or tuple): The results to draw over `img` - bbox_result or (bbox_result, segm_result). - top_k (int): Plot the first k bboxes only - if set positive. Default: 20 - - Returns: - np.ndarray: The image with bboxes drawn on it. - """ - if kwargs is not None: - kwargs.pop('score_thr', None) - kwargs.pop('text_color', None) - kwargs['colors'] = kwargs.pop('bbox_color', 'green') - mmcv.imshow_bboxes(data, result, top_k=top_k, **kwargs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/scnet.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/scnet.py deleted file mode 100644 index a361d81c3aa62de0ff98b303cb5e0b838b8045fa..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/scnet.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .cascade_rcnn import CascadeRCNN - - -@DETECTORS.register_module() -class SCNet(CascadeRCNN): - """Implementation of `SCNet `_""" - - def __init__(self, **kwargs): - super(SCNet, self).__init__(**kwargs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/single_stage.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/single_stage.py deleted file mode 100644 index c375c72d69d21cade02f0b4bff8cb035e56f0d65..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/single_stage.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch - -from mmdet.core import bbox2result -from ..builder import DETECTORS, build_backbone, build_head, build_neck -from .base import BaseDetector - - -@DETECTORS.register_module() -class SingleStageDetector(BaseDetector): - """Base class for single-stage detectors. - - Single-stage detectors directly and densely predict bounding boxes on the - output features of the backbone+neck. 
- """ - - def __init__(self, - backbone, - neck=None, - bbox_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(SingleStageDetector, self).__init__(init_cfg) - if pretrained: - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - backbone.pretrained = pretrained - self.backbone = build_backbone(backbone) - if neck is not None: - self.neck = build_neck(neck) - bbox_head.update(train_cfg=train_cfg) - bbox_head.update(test_cfg=test_cfg) - self.bbox_head = build_head(bbox_head) - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - def extract_feat(self, img): - """Directly extract features from the backbone+neck.""" - x = self.backbone(img) - if self.with_neck: - x = self.neck(x) - return x - - def forward_dummy(self, img): - """Used for computing network flops. - - See `mmdetection/tools/analysis_tools/get_flops.py` - """ - x = self.extract_feat(img) - outs = self.bbox_head(x) - return outs - - def forward_train(self, - img, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None): - """ - Args: - img (Tensor): Input images of shape (N, C, H, W). - Typically these should be mean centered and std scaled. - img_metas (list[dict]): A List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - :class:`mmdet.datasets.pipelines.Collect`. - gt_bboxes (list[Tensor]): Each item are the truth boxes for each - image in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): Class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor]): Specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - super(SingleStageDetector, self).forward_train(img, img_metas) - x = self.extract_feat(img) - losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes, - gt_labels, gt_bboxes_ignore) - return losses - - def simple_test(self, img, img_metas, rescale=False): - """Test function without test-time augmentation. - - Args: - img (torch.Tensor): Images with shape (N, C, H, W). - img_metas (list[dict]): List of image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[list[np.ndarray]]: BBox results of each image and classes. - The outer list corresponds to each image. The inner list - corresponds to each class. - """ - feat = self.extract_feat(img) - results_list = self.bbox_head.simple_test( - feat, img_metas, rescale=rescale) - bbox_results = [ - bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) - for det_bboxes, det_labels in results_list - ] - return bbox_results - - def aug_test(self, imgs, img_metas, rescale=False): - """Test function with test time augmentation. - - Args: - imgs (list[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains all images in the batch. - img_metas (list[list[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. each dict has image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[list[np.ndarray]]: BBox results of each image and classes. - The outer list corresponds to each image. The inner list - corresponds to each class. 
- """ - assert hasattr(self.bbox_head, 'aug_test'), \ - f'{self.bbox_head.__class__.__name__}' \ - ' does not support test-time augmentation' - - feats = self.extract_feats(imgs) - results_list = self.bbox_head.aug_test( - feats, img_metas, rescale=rescale) - bbox_results = [ - bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) - for det_bboxes, det_labels in results_list - ] - return bbox_results - - def onnx_export(self, img, img_metas, with_nms=True): - """Test function without test time augmentation. - - Args: - img (torch.Tensor): input images. - img_metas (list[dict]): List of image information. - - Returns: - tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] - and class labels of shape [N, num_det]. - """ - x = self.extract_feat(img) - outs = self.bbox_head(x) - # get origin input shape to support onnx dynamic shape - - # get shape as tensor - img_shape = torch._shape_as_tensor(img)[2:] - img_metas[0]['img_shape_for_onnx'] = img_shape - # get pad input shape to support onnx dynamic shape for exporting - # `CornerNet` and `CentripetalNet`, which 'pad_shape' is used - # for inference - img_metas[0]['pad_shape_for_onnx'] = img_shape - - if len(outs) == 2: - # add dummy score_factor - outs = (*outs, None) - # TODO Can we change to `get_bboxes` when `onnx_export` fail - det_bboxes, det_labels = self.bbox_head.onnx_export( - *outs, img_metas, with_nms=with_nms) - - return det_bboxes, det_labels diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/single_stage_instance_seg.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/single_stage_instance_seg.py deleted file mode 100644 index 239b669975239f9b1eebb6efa131db7978266704..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/single_stage_instance_seg.py +++ /dev/null @@ -1,363 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import copy -import warnings - -import mmcv -import numpy as np -import torch - -from mmdet.core.visualization.image import imshow_det_bboxes -from ..builder import DETECTORS, build_backbone, build_head, build_neck -from .base import BaseDetector - -INF = 1e8 - - -@DETECTORS.register_module() -class SingleStageInstanceSegmentor(BaseDetector): - """Base class for single-stage instance segmentors.""" - - def __init__(self, - backbone, - neck=None, - bbox_head=None, - mask_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - - if pretrained: - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - backbone.pretrained = pretrained - super(SingleStageInstanceSegmentor, self).__init__(init_cfg=init_cfg) - self.backbone = build_backbone(backbone) - if neck is not None: - self.neck = build_neck(neck) - else: - self.neck = None - if bbox_head is not None: - bbox_head.update(train_cfg=copy.deepcopy(train_cfg)) - bbox_head.update(test_cfg=copy.deepcopy(test_cfg)) - self.bbox_head = build_head(bbox_head) - else: - self.bbox_head = None - - assert mask_head, f'`mask_head` must ' \ - f'be implemented in {self.__class__.__name__}' - mask_head.update(train_cfg=copy.deepcopy(train_cfg)) - mask_head.update(test_cfg=copy.deepcopy(test_cfg)) - self.mask_head = build_head(mask_head) - - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - def extract_feat(self, img): - """Directly extract features from the backbone and neck.""" - x = self.backbone(img) - if self.with_neck: - x = self.neck(x) - return x - - def forward_dummy(self, img): - """Used for computing network flops. - - See `mmdetection/tools/analysis_tools/get_flops.py` - """ - raise NotImplementedError( - f'`forward_dummy` is not implemented in {self.__class__.__name__}') - - def forward_train(self, - img, - img_metas, - gt_masks, - gt_labels, - gt_bboxes=None, - gt_bboxes_ignore=None, - **kwargs): - """ - Args: - img (Tensor): Input images of shape (B, C, H, W). - Typically these should be mean centered and std scaled. - img_metas (list[dict]): A List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - :class:`mmdet.datasets.pipelines.Collect`. - gt_masks (list[:obj:`BitmapMasks`] | None) : The segmentation - masks for each box. - gt_labels (list[Tensor]): Class indices corresponding to each box - gt_bboxes (list[Tensor]): Each item is the truth boxes - of each image in [tl_x, tl_y, br_x, br_y] format. - Default: None. - gt_bboxes_ignore (list[Tensor] | None): Specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
- """ - - gt_masks = [ - gt_mask.to_tensor(dtype=torch.bool, device=img.device) - for gt_mask in gt_masks - ] - x = self.extract_feat(img) - losses = dict() - - # CondInst and YOLACT have bbox_head - if self.bbox_head: - # bbox_head_preds is a tuple - bbox_head_preds = self.bbox_head(x) - # positive_infos is a list of obj:`InstanceData` - # It contains the information about the positive samples - # CondInst, YOLACT - det_losses, positive_infos = self.bbox_head.loss( - *bbox_head_preds, - gt_bboxes=gt_bboxes, - gt_labels=gt_labels, - gt_masks=gt_masks, - img_metas=img_metas, - gt_bboxes_ignore=gt_bboxes_ignore, - **kwargs) - losses.update(det_losses) - else: - positive_infos = None - - mask_loss = self.mask_head.forward_train( - x, - gt_labels, - gt_masks, - img_metas, - positive_infos=positive_infos, - gt_bboxes=gt_bboxes, - gt_bboxes_ignore=gt_bboxes_ignore, - **kwargs) - # avoid loss override - assert not set(mask_loss.keys()) & set(losses.keys()) - - losses.update(mask_loss) - return losses - - def simple_test(self, img, img_metas, rescale=False): - """Test function without test-time augmentation. - - Args: - img (torch.Tensor): Images with shape (B, C, H, W). - img_metas (list[dict]): List of image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list(tuple): Formatted bbox and mask results of multiple \ - images. The outer list corresponds to each image. \ - Each tuple contains two type of results of single image: - - - bbox_results (list[np.ndarray]): BBox results of - single image. The list corresponds to each class. - each ndarray has a shape (N, 5), N is the number of - bboxes with this category, and last dimension - 5 arrange as (x1, y1, x2, y2, scores). - - mask_results (list[np.ndarray]): Mask results of - single image. The list corresponds to each class. - each ndarray has a shape (N, img_h, img_w), N - is the number of masks with this category. - """ - feat = self.extract_feat(img) - if self.bbox_head: - outs = self.bbox_head(feat) - # results_list is list[obj:`InstanceData`] - results_list = self.bbox_head.get_results( - *outs, img_metas=img_metas, cfg=self.test_cfg, rescale=rescale) - else: - results_list = None - - results_list = self.mask_head.simple_test( - feat, img_metas, rescale=rescale, instances_list=results_list) - - format_results_list = [] - for results in results_list: - format_results_list.append(self.format_results(results)) - - return format_results_list - - def format_results(self, results): - """Format the model predictions according to the interface with - dataset. - - Args: - results (:obj:`InstanceData`): Processed - results of single images. Usually contains - following keys. - - - scores (Tensor): Classification scores, has shape - (num_instance,) - - labels (Tensor): Has shape (num_instances,). - - masks (Tensor): Processed mask results, has - shape (num_instances, h, w). - - Returns: - tuple: Formatted bbox and mask results.. It contains two items: - - - bbox_results (list[np.ndarray]): BBox results of - single image. The list corresponds to each class. - each ndarray has a shape (N, 5), N is the number of - bboxes with this category, and last dimension - 5 arrange as (x1, y1, x2, y2, scores). - - mask_results (list[np.ndarray]): Mask results of - single image. The list corresponds to each class. - each ndarray has shape (N, img_h, img_w), N - is the number of masks with this category. 
- """ - data_keys = results.keys() - assert 'scores' in data_keys - assert 'labels' in data_keys - - assert 'masks' in data_keys, \ - 'results should contain ' \ - 'masks when format the results ' - mask_results = [[] for _ in range(self.mask_head.num_classes)] - - num_masks = len(results) - - if num_masks == 0: - bbox_results = [ - np.zeros((0, 5), dtype=np.float32) - for _ in range(self.mask_head.num_classes) - ] - return bbox_results, mask_results - - labels = results.labels.detach().cpu().numpy() - - if 'bboxes' not in results: - # create dummy bbox results to store the scores - results.bboxes = results.scores.new_zeros(len(results), 4) - - det_bboxes = torch.cat([results.bboxes, results.scores[:, None]], - dim=-1) - det_bboxes = det_bboxes.detach().cpu().numpy() - bbox_results = [ - det_bboxes[labels == i, :] - for i in range(self.mask_head.num_classes) - ] - - masks = results.masks.detach().cpu().numpy() - - for idx in range(num_masks): - mask = masks[idx] - mask_results[labels[idx]].append(mask) - - return bbox_results, mask_results - - def aug_test(self, imgs, img_metas, rescale=False): - raise NotImplementedError - - def show_result(self, - img, - result, - score_thr=0.3, - bbox_color=(72, 101, 241), - text_color=(72, 101, 241), - mask_color=None, - thickness=2, - font_size=13, - win_name='', - show=False, - wait_time=0, - out_file=None): - """Draw `result` over `img`. - - Args: - img (str or Tensor): The image to be displayed. - result (tuple): Format bbox and mask results. - It contains two items: - - - bbox_results (list[np.ndarray]): BBox results of - single image. The list corresponds to each class. - each ndarray has a shape (N, 5), N is the number of - bboxes with this category, and last dimension - 5 arrange as (x1, y1, x2, y2, scores). - - mask_results (list[np.ndarray]): Mask results of - single image. The list corresponds to each class. - each ndarray has shape (N, img_h, img_w), N - is the number of masks with this category. - - score_thr (float, optional): Minimum score of bboxes to be shown. - Default: 0.3. - bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. - The tuple of color should be in BGR order. Default: 'green' - text_color (str or tuple(int) or :obj:`Color`):Color of texts. - The tuple of color should be in BGR order. Default: 'green' - mask_color (None or str or tuple(int) or :obj:`Color`): - Color of masks. The tuple of color should be in BGR order. - Default: None - thickness (int): Thickness of lines. Default: 2 - font_size (int): Font size of texts. Default: 13 - win_name (str): The window name. Default: '' - wait_time (float): Value of waitKey param. - Default: 0. - show (bool): Whether to show the image. - Default: False. - out_file (str or None): The filename to write the image. - Default: None. 
- - Returns: - img (Tensor): Only if not `show` or `out_file` - """ - - assert isinstance(result, tuple) - bbox_result, mask_result = result - bboxes = np.vstack(bbox_result) - img = mmcv.imread(img) - img = img.copy() - labels = [ - np.full(bbox.shape[0], i, dtype=np.int32) - for i, bbox in enumerate(bbox_result) - ] - labels = np.concatenate(labels) - if len(labels) == 0: - bboxes = np.zeros([0, 5]) - masks = np.zeros([0, 0, 0]) - # draw segmentation masks - else: - masks = mmcv.concat_list(mask_result) - - if isinstance(masks[0], torch.Tensor): - masks = torch.stack(masks, dim=0).detach().cpu().numpy() - else: - masks = np.stack(masks, axis=0) - # dummy bboxes - if bboxes[:, :4].sum() == 0: - num_masks = len(bboxes) - x_any = masks.any(axis=1) - y_any = masks.any(axis=2) - for idx in range(num_masks): - x = np.where(x_any[idx, :])[0] - y = np.where(y_any[idx, :])[0] - if len(x) > 0 and len(y) > 0: - bboxes[idx, :4] = np.array( - [x[0], y[0], x[-1] + 1, y[-1] + 1], - dtype=np.float32) - # if out_file specified, do not show image in window - if out_file is not None: - show = False - # draw bounding boxes - img = imshow_det_bboxes( - img, - bboxes, - labels, - masks, - class_names=self.CLASSES, - score_thr=score_thr, - bbox_color=bbox_color, - text_color=text_color, - mask_color=mask_color, - thickness=thickness, - font_size=font_size, - win_name=win_name, - show=show, - wait_time=wait_time, - out_file=out_file) - - if not (show or out_file): - return img diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/solo.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/solo.py deleted file mode 100644 index df6f6de0162ef145ab36c645872337ec7ca4861b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/solo.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .single_stage_instance_seg import SingleStageInstanceSegmentor - - -@DETECTORS.register_module() -class SOLO(SingleStageInstanceSegmentor): - """`SOLO: Segmenting Objects by Locations - `_ - - """ - - def __init__(self, - backbone, - neck=None, - bbox_head=None, - mask_head=None, - train_cfg=None, - test_cfg=None, - init_cfg=None, - pretrained=None): - super().__init__( - backbone=backbone, - neck=neck, - bbox_head=bbox_head, - mask_head=mask_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - init_cfg=init_cfg, - pretrained=pretrained) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/solov2.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/solov2.py deleted file mode 100644 index 711fcb495da6738c27e4cbe018104c559e83b0ad..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/solov2.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
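# Editor's note: when only masks are available, the drawing code above recovers
# a tight box for each instance by projecting the binary mask onto the two axes
# and taking the first/last occupied row and column. A standalone sketch of the
# same idea on a single 2-D mask:
import numpy as np

mask = np.zeros((8, 8), dtype=bool)
mask[2:5, 3:7] = True                       # a 3x4 blob

cols_any = mask.any(axis=0)                 # which columns contain the mask
rows_any = mask.any(axis=1)                 # which rows contain the mask
xs = np.where(cols_any)[0]
ys = np.where(rows_any)[0]
bbox = [xs[0], ys[0], xs[-1] + 1, ys[-1] + 1]   # [x1, y1, x2, y2), exclusive end
print(bbox)  # [3, 2, 7, 5]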
-from ..builder import DETECTORS -from .single_stage_instance_seg import SingleStageInstanceSegmentor - - -@DETECTORS.register_module() -class SOLOv2(SingleStageInstanceSegmentor): - """`SOLOv2: Dynamic and Fast Instance Segmentation - `_ - - """ - - def __init__(self, - backbone, - neck=None, - bbox_head=None, - mask_head=None, - train_cfg=None, - test_cfg=None, - init_cfg=None, - pretrained=None): - super().__init__( - backbone=backbone, - neck=neck, - bbox_head=bbox_head, - mask_head=mask_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - init_cfg=init_cfg, - pretrained=pretrained) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/sparse_rcnn.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/sparse_rcnn.py deleted file mode 100644 index e90c2a5aba5a538e024d27aa8d150b4a3982f6fe..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/sparse_rcnn.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class SparseRCNN(TwoStageDetector): - r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with - Learnable Proposals `_""" - - def __init__(self, *args, **kwargs): - super(SparseRCNN, self).__init__(*args, **kwargs) - assert self.with_rpn, 'Sparse R-CNN and QueryInst ' \ - 'do not support external proposals' - - def forward_train(self, - img, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None, - proposals=None, - **kwargs): - """Forward function of SparseR-CNN and QueryInst in train stage. - - Args: - img (Tensor): of shape (N, C, H, W) encoding input images. - Typically these should be mean centered and std scaled. - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - :class:`mmdet.datasets.pipelines.Collect`. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor): specify which bounding - boxes can be ignored when computing the loss. - gt_masks (List[Tensor], optional) : Segmentation masks for - each box. This is required to train QueryInst. - proposals (List[Tensor], optional): override rpn proposals with - custom proposals. Use when `with_rpn` is False. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - - assert proposals is None, 'Sparse R-CNN and QueryInst ' \ - 'do not support external proposals' - - x = self.extract_feat(img) - proposal_boxes, proposal_features, imgs_whwh = \ - self.rpn_head.forward_train(x, img_metas) - roi_losses = self.roi_head.forward_train( - x, - proposal_boxes, - proposal_features, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=gt_bboxes_ignore, - gt_masks=gt_masks, - imgs_whwh=imgs_whwh) - return roi_losses - - def simple_test(self, img, img_metas, rescale=False): - """Test function without test time augmentation. - - Args: - imgs (list[torch.Tensor]): List of multiple images - img_metas (list[dict]): List of image information. - rescale (bool): Whether to rescale the results. - Defaults to False. - - Returns: - list[list[np.ndarray]]: BBox results of each image and classes. - The outer list corresponds to each image. 
The inner list - corresponds to each class. - """ - x = self.extract_feat(img) - proposal_boxes, proposal_features, imgs_whwh = \ - self.rpn_head.simple_test_rpn(x, img_metas) - results = self.roi_head.simple_test( - x, - proposal_boxes, - proposal_features, - img_metas, - imgs_whwh=imgs_whwh, - rescale=rescale) - return results - - def forward_dummy(self, img): - """Used for computing network flops. - - See `mmdetection/tools/analysis_tools/get_flops.py` - """ - # backbone - x = self.extract_feat(img) - # rpn - num_imgs = len(img) - dummy_img_metas = [ - dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs) - ] - proposal_boxes, proposal_features, imgs_whwh = \ - self.rpn_head.simple_test_rpn(x, dummy_img_metas) - # roi_head - roi_outs = self.roi_head.forward_dummy(x, proposal_boxes, - proposal_features, - dummy_img_metas) - return roi_outs diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/tood.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/tood.py deleted file mode 100644 index 7dd18c3c96abd0fb4d4eac5a6fb708b242be0571..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/tood.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class TOOD(SingleStageDetector): - r"""Implementation of `TOOD: Task-aligned One-stage Object Detection. - `_.""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(TOOD, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) - - def set_epoch(self, epoch): - self.bbox_head.epoch = epoch diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/trident_faster_rcnn.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/trident_faster_rcnn.py deleted file mode 100644 index fb26168ca382c2330fefe8065b654dc183d42a74..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/trident_faster_rcnn.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .faster_rcnn import FasterRCNN - - -@DETECTORS.register_module() -class TridentFasterRCNN(FasterRCNN): - """Implementation of `TridentNet `_""" - - def __init__(self, - backbone, - rpn_head, - roi_head, - train_cfg, - test_cfg, - neck=None, - pretrained=None, - init_cfg=None): - - super(TridentFasterRCNN, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg) - assert self.backbone.num_branch == self.roi_head.num_branch - assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx - self.num_branch = self.backbone.num_branch - self.test_branch_idx = self.backbone.test_branch_idx - - def simple_test(self, img, img_metas, proposals=None, rescale=False): - """Test without augmentation.""" - assert self.with_bbox, 'Bbox head must be implemented.' 
- x = self.extract_feat(img) - if proposals is None: - num_branch = (self.num_branch if self.test_branch_idx == -1 else 1) - trident_img_metas = img_metas * num_branch - proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas) - else: - proposal_list = proposals - # TODO: Fix trident_img_metas undefined errors - # when proposals is specified - return self.roi_head.simple_test( - x, proposal_list, trident_img_metas, rescale=rescale) - - def aug_test(self, imgs, img_metas, rescale=False): - """Test with augmentations. - - If rescale is False, then returned bboxes and masks will fit the scale - of imgs[0]. - """ - x = self.extract_feats(imgs) - num_branch = (self.num_branch if self.test_branch_idx == -1 else 1) - trident_img_metas = [img_metas * num_branch for img_metas in img_metas] - proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas) - return self.roi_head.aug_test( - x, proposal_list, img_metas, rescale=rescale) - - def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs): - """make copies of img and gts to fit multi-branch.""" - trident_gt_bboxes = tuple(gt_bboxes * self.num_branch) - trident_gt_labels = tuple(gt_labels * self.num_branch) - trident_img_metas = tuple(img_metas * self.num_branch) - - return super(TridentFasterRCNN, - self).forward_train(img, trident_img_metas, - trident_gt_bboxes, trident_gt_labels) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/two_stage.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/two_stage.py deleted file mode 100644 index 870e2b8477f3c08de2029802a2a567592d9f7541..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/two_stage.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch - -from ..builder import DETECTORS, build_backbone, build_head, build_neck -from .base import BaseDetector - - -@DETECTORS.register_module() -class TwoStageDetector(BaseDetector): - """Base class for two-stage detectors. - - Two-stage detectors typically consisting of a region proposal network and a - task-specific regression head. 
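# Editor's note: TridentNet-style training above feeds the same batch through
# several parallel branches by repeating the per-image ground-truth lists
# num_branch times. Python list multiplication repeats references rather than
# copying the tensors, which is exactly what is wanted here:
import torch

num_branch = 3
gt_bboxes = [torch.tensor([[0., 0., 10., 10.]]), torch.tensor([[2., 2., 8., 8.]])]

trident_gt_bboxes = tuple(gt_bboxes * num_branch)
print(len(trident_gt_bboxes))                          # 6 entries for 2 images
print(trident_gt_bboxes[0] is trident_gt_bboxes[2])    # True: shared, not copied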
- """ - - def __init__(self, - backbone, - neck=None, - rpn_head=None, - roi_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(TwoStageDetector, self).__init__(init_cfg) - if pretrained: - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - backbone.pretrained = pretrained - self.backbone = build_backbone(backbone) - - if neck is not None: - self.neck = build_neck(neck) - - if rpn_head is not None: - rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None - rpn_head_ = rpn_head.copy() - rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn) - self.rpn_head = build_head(rpn_head_) - - if roi_head is not None: - # update train and test cfg here for now - # TODO: refactor assigner & sampler - rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None - roi_head.update(train_cfg=rcnn_train_cfg) - roi_head.update(test_cfg=test_cfg.rcnn) - roi_head.pretrained = pretrained - self.roi_head = build_head(roi_head) - - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - @property - def with_rpn(self): - """bool: whether the detector has RPN""" - return hasattr(self, 'rpn_head') and self.rpn_head is not None - - @property - def with_roi_head(self): - """bool: whether the detector has a RoI head""" - return hasattr(self, 'roi_head') and self.roi_head is not None - - def extract_feat(self, img): - """Directly extract features from the backbone+neck.""" - x = self.backbone(img) - if self.with_neck: - x = self.neck(x) - return x - - def forward_dummy(self, img): - """Used for computing network flops. - - See `mmdetection/tools/analysis_tools/get_flops.py` - """ - outs = () - # backbone - x = self.extract_feat(img) - # rpn - if self.with_rpn: - rpn_outs = self.rpn_head(x) - outs = outs + (rpn_outs, ) - proposals = torch.randn(1000, 4).to(img.device) - # roi_head - roi_outs = self.roi_head.forward_dummy(x, proposals) - outs = outs + (roi_outs, ) - return outs - - def forward_train(self, - img, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None, - proposals=None, - **kwargs): - """ - Args: - img (Tensor): of shape (N, C, H, W) encoding input images. - Typically these should be mean centered and std scaled. - - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - - gt_labels (list[Tensor]): class indices corresponding to each box - - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - gt_masks (None | Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. - - proposals : override rpn proposals with custom proposals. Use when - `with_rpn` is False. 
- - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - x = self.extract_feat(img) - - losses = dict() - - # RPN forward and loss - if self.with_rpn: - proposal_cfg = self.train_cfg.get('rpn_proposal', - self.test_cfg.rpn) - rpn_losses, proposal_list = self.rpn_head.forward_train( - x, - img_metas, - gt_bboxes, - gt_labels=None, - gt_bboxes_ignore=gt_bboxes_ignore, - proposal_cfg=proposal_cfg, - **kwargs) - losses.update(rpn_losses) - else: - proposal_list = proposals - - roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list, - gt_bboxes, gt_labels, - gt_bboxes_ignore, gt_masks, - **kwargs) - losses.update(roi_losses) - - return losses - - async def async_simple_test(self, - img, - img_meta, - proposals=None, - rescale=False): - """Async test without augmentation.""" - assert self.with_bbox, 'Bbox head must be implemented.' - x = self.extract_feat(img) - - if proposals is None: - proposal_list = await self.rpn_head.async_simple_test_rpn( - x, img_meta) - else: - proposal_list = proposals - - return await self.roi_head.async_simple_test( - x, proposal_list, img_meta, rescale=rescale) - - def simple_test(self, img, img_metas, proposals=None, rescale=False): - """Test without augmentation.""" - - assert self.with_bbox, 'Bbox head must be implemented.' - x = self.extract_feat(img) - if proposals is None: - proposal_list = self.rpn_head.simple_test_rpn(x, img_metas) - else: - proposal_list = proposals - - return self.roi_head.simple_test( - x, proposal_list, img_metas, rescale=rescale) - - def aug_test(self, imgs, img_metas, rescale=False): - """Test with augmentations. - - If rescale is False, then returned bboxes and masks will fit the scale - of imgs[0]. - """ - x = self.extract_feats(imgs) - proposal_list = self.rpn_head.aug_test_rpn(x, img_metas) - return self.roi_head.aug_test( - x, proposal_list, img_metas, rescale=rescale) - - def onnx_export(self, img, img_metas): - - img_shape = torch._shape_as_tensor(img)[2:] - img_metas[0]['img_shape_for_onnx'] = img_shape - x = self.extract_feat(img) - proposals = self.rpn_head.onnx_export(x, img_metas) - if hasattr(self.roi_head, 'onnx_export'): - return self.roi_head.onnx_export(x, proposals, img_metas) - else: - raise NotImplementedError( - f'{self.__class__.__name__} can not ' - f'be exported to ONNX. Please refer to the ' - f'list of supported models,' - f'https://mmdetection.readthedocs.io/en/latest/tutorials/pytorch2onnx.html#list-of-supported-models-exportable-to-onnx' # noqa E501 - ) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/vfnet.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/vfnet.py deleted file mode 100644 index 38ddcdabd47d8ce886c31f89db7fcb0842a8c35f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/vfnet.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class VFNet(SingleStageDetector): - """Implementation of `VarifocalNet - (VFNet).`_""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/yolact.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/yolact.py deleted file mode 100644 index 4ddea0b229df9d661286257e41c37b9028a0fc8f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/yolact.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from mmdet.core import bbox2result -from ..builder import DETECTORS, build_head -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class YOLACT(SingleStageDetector): - """Implementation of `YOLACT `_""" - - def __init__(self, - backbone, - neck, - bbox_head, - segm_head, - mask_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) - self.segm_head = build_head(segm_head) - self.mask_head = build_head(mask_head) - - def forward_dummy(self, img): - """Used for computing network flops. - - See `mmdetection/tools/analysis_tools/get_flops.py` - """ - feat = self.extract_feat(img) - bbox_outs = self.bbox_head(feat) - prototypes = self.mask_head.forward_dummy(feat[0]) - return (bbox_outs, prototypes) - - def forward_train(self, - img, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None): - """ - Args: - img (Tensor): of shape (N, C, H, W) encoding input images. - Typically these should be mean centered and std scaled. - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - gt_masks (None | Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. 
- - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - # convert Bitmap mask or Polygon Mask to Tensor here - gt_masks = [ - gt_mask.to_tensor(dtype=torch.uint8, device=img.device) - for gt_mask in gt_masks - ] - - x = self.extract_feat(img) - - cls_score, bbox_pred, coeff_pred = self.bbox_head(x) - bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels, - img_metas) - losses, sampling_results = self.bbox_head.loss( - *bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) - - segm_head_outs = self.segm_head(x[0]) - loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels) - losses.update(loss_segm) - - mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas, - sampling_results) - loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes, - img_metas, sampling_results) - losses.update(loss_mask) - - # check NaN and Inf - for loss_name in losses.keys(): - assert torch.isfinite(torch.stack(losses[loss_name]))\ - .all().item(), '{} becomes infinite or NaN!'\ - .format(loss_name) - - return losses - - def simple_test(self, img, img_metas, rescale=False): - """Test function without test-time augmentation.""" - feat = self.extract_feat(img) - det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test( - feat, img_metas, rescale=rescale) - bbox_results = [ - bbox2result(det_bbox, det_label, self.bbox_head.num_classes) - for det_bbox, det_label in zip(det_bboxes, det_labels) - ] - - segm_results = self.mask_head.simple_test( - feat, - det_bboxes, - det_labels, - det_coeffs, - img_metas, - rescale=rescale) - - return list(zip(bbox_results, segm_results)) - - def aug_test(self, imgs, img_metas, rescale=False): - """Test with augmentations.""" - raise NotImplementedError( - 'YOLACT does not support test-time augmentation') diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/yolo.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/yolo.py deleted file mode 100644 index 0ccd41777a190e48308b390e9c96d60085096d13..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/yolo.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# Copyright (c) 2019 Western Digital Corporation or its affiliates. -import torch - -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class YOLOV3(SingleStageDetector): - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) - - def onnx_export(self, img, img_metas): - """Test function for exporting to ONNX, without test time augmentation. - - Args: - img (torch.Tensor): input images. - img_metas (list[dict]): List of image information. - - Returns: - tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] - and class labels of shape [N, num_det]. 
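# Editor's note: YOLACT's training step (below) fails fast if any loss term
# diverges. Each entry in its loss dict is a list of per-level tensors, so they
# are stacked before the finiteness check. A standalone version of that guard:
import torch

losses = {
    'loss_cls': [torch.tensor(0.7), torch.tensor(0.5)],
    'loss_bbox': [torch.tensor(0.3), torch.tensor(float('nan'))],  # simulated blow-up
}
for name, value in losses.items():
    finite = torch.isfinite(torch.stack(value)).all().item()
    print(name, 'finite' if finite else 'became infinite or NaN')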
- """ - x = self.extract_feat(img) - outs = self.bbox_head.forward(x) - # get shape as tensor - img_shape = torch._shape_as_tensor(img)[2:] - img_metas[0]['img_shape_for_onnx'] = img_shape - - det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas) - - return det_bboxes, det_labels diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/yolof.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/yolof.py deleted file mode 100644 index 6d08d16dccc75c2f3aaa36a45058a1711d903655..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/yolof.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class YOLOF(SingleStageDetector): - r"""Implementation of `You Only Look One-level Feature - `_""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None): - super(YOLOF, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/detectors/yolox.py b/cv/detection/co-detr/pytorch/mmdet/models/detectors/yolox.py deleted file mode 100644 index 34d51b1482fa55d39ec26e0bcbbe40a4efa661bb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/detectors/yolox.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import random - -import torch -import torch.distributed as dist -import torch.nn.functional as F -from mmcv.runner import get_dist_info - -from ...utils import log_img_scale -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class YOLOX(SingleStageDetector): - r"""Implementation of `YOLOX: Exceeding YOLO Series in 2021 - `_ - - Note: Considering the trade-off between training speed and accuracy, - multi-scale training is temporarily kept. More elegant implementation - will be adopted in the future. - - Args: - backbone (nn.Module): The backbone module. - neck (nn.Module): The neck module. - bbox_head (nn.Module): The bbox head module. - train_cfg (obj:`ConfigDict`, optional): The training config - of YOLOX. Default: None. - test_cfg (obj:`ConfigDict`, optional): The testing config - of YOLOX. Default: None. - pretrained (str, optional): model pretrained path. - Default: None. - input_size (tuple): The model default input image size. The shape - order should be (height, width). Default: (640, 640). - size_multiplier (int): Image size multiplication factor. - Default: 32. - random_size_range (tuple): The multi-scale random range during - multi-scale training. The real training image size will - be multiplied by size_multiplier. Default: (15, 25). - random_size_interval (int): The iter interval of change - image size. Default: 10. - init_cfg (dict, optional): Initialization config dict. - Default: None. 
- """ - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - input_size=(640, 640), - size_multiplier=32, - random_size_range=(15, 25), - random_size_interval=10, - init_cfg=None): - super(YOLOX, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) - log_img_scale(input_size, skip_square=True) - self.rank, self.world_size = get_dist_info() - self._default_input_size = input_size - self._input_size = input_size - self._random_size_range = random_size_range - self._random_size_interval = random_size_interval - self._size_multiplier = size_multiplier - self._progress_in_iter = 0 - - def forward_train(self, - img, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None): - """ - Args: - img (Tensor): Input images of shape (N, C, H, W). - Typically these should be mean centered and std scaled. - img_metas (list[dict]): A List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - :class:`mmdet.datasets.pipelines.Collect`. - gt_bboxes (list[Tensor]): Each item are the truth boxes for each - image in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): Class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor]): Specify which bounding - boxes can be ignored when computing the loss. - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - # Multi-scale training - img, gt_bboxes = self._preprocess(img, gt_bboxes) - - losses = super(YOLOX, self).forward_train(img, img_metas, gt_bboxes, - gt_labels, gt_bboxes_ignore) - - # random resizing - if (self._progress_in_iter + 1) % self._random_size_interval == 0: - self._input_size = self._random_resize(device=img.device) - self._progress_in_iter += 1 - - return losses - - def _preprocess(self, img, gt_bboxes): - scale_y = self._input_size[0] / self._default_input_size[0] - scale_x = self._input_size[1] / self._default_input_size[1] - if scale_x != 1 or scale_y != 1: - img = F.interpolate( - img, - size=self._input_size, - mode='bilinear', - align_corners=False) - for gt_bbox in gt_bboxes: - gt_bbox[..., 0::2] = gt_bbox[..., 0::2] * scale_x - gt_bbox[..., 1::2] = gt_bbox[..., 1::2] * scale_y - return img, gt_bboxes - - def _random_resize(self, device): - tensor = torch.LongTensor(2).to(device) - - if self.rank == 0: - size = random.randint(*self._random_size_range) - aspect_ratio = float( - self._default_input_size[1]) / self._default_input_size[0] - size = (self._size_multiplier * size, - self._size_multiplier * int(aspect_ratio * size)) - tensor[0] = size[0] - tensor[1] = size[1] - - if self.world_size > 1: - dist.barrier() - dist.broadcast(tensor, 0) - - input_size = (tensor[0].item(), tensor[1].item()) - return input_size diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/__init__.py deleted file mode 100644 index 068a54d651b0c8fd13380a67a216e1e7c3629bd7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .accuracy import Accuracy, accuracy -from .ae_loss import AssociativeEmbeddingLoss -from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss -from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, - cross_entropy, mask_cross_entropy) -from .dice_loss import DiceLoss -from .focal_loss import FocalLoss, sigmoid_focal_loss -from .gaussian_focal_loss import GaussianFocalLoss -from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss -from .ghm_loss import GHMC, GHMR -from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss, - bounded_iou_loss, iou_loss) -from .kd_loss import KnowledgeDistillationKLDivLoss -from .mse_loss import MSELoss, mse_loss -from .pisa_loss import carl_loss, isr_p -from .seesaw_loss import SeesawLoss -from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss -from .utils import reduce_loss, weight_reduce_loss, weighted_loss -from .varifocal_loss import VarifocalLoss - -__all__ = [ - 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy', - 'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss', - 'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss', - 'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss', - 'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss', 'GHMC', - 'GHMR', 'reduce_loss', 'weight_reduce_loss', 'weighted_loss', 'L1Loss', - 'l1_loss', 'isr_p', 'carl_loss', 'AssociativeEmbeddingLoss', - 'GaussianFocalLoss', 'QualityFocalLoss', 'DistributionFocalLoss', - 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'DiceLoss' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/accuracy.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/accuracy.py deleted file mode 100644 index fe765a39f2578bbe3387a087f9f9de9c78f6226f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/accuracy.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import torch.nn as nn - - -@mmcv.jit(coderize=True) -def accuracy(pred, target, topk=1, thresh=None): - """Calculate accuracy according to the prediction and target. - - Args: - pred (torch.Tensor): The model prediction, shape (N, num_class) - target (torch.Tensor): The target of each prediction, shape (N, ) - topk (int | tuple[int], optional): If the predictions in ``topk`` - matches the target, the predictions will be regarded as - correct ones. Defaults to 1. - thresh (float, optional): If not None, predictions with scores under - this threshold are considered incorrect. Default to None. - - Returns: - float | tuple[float]: If the input ``topk`` is a single integer, - the function will return a single float as accuracy. If - ``topk`` is a tuple containing multiple integers, the - function will return a tuple containing accuracies of - each ``topk`` number. - """ - assert isinstance(topk, (int, tuple)) - if isinstance(topk, int): - topk = (topk, ) - return_single = True - else: - return_single = False - - maxk = max(topk) - if pred.size(0) == 0: - accu = [pred.new_tensor(0.) 
for i in range(len(topk))] - return accu[0] if return_single else accu - assert pred.ndim == 2 and target.ndim == 1 - assert pred.size(0) == target.size(0) - assert maxk <= pred.size(1), \ - f'maxk {maxk} exceeds pred dimension {pred.size(1)}' - pred_value, pred_label = pred.topk(maxk, dim=1) - pred_label = pred_label.t() # transpose to shape (maxk, N) - correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) - if thresh is not None: - # Only prediction values larger than thresh are counted as correct - correct = correct & (pred_value > thresh).t() - res = [] - for k in topk: - correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) - res.append(correct_k.mul_(100.0 / pred.size(0))) - return res[0] if return_single else res - - -class Accuracy(nn.Module): - - def __init__(self, topk=(1, ), thresh=None): - """Module to calculate the accuracy. - - Args: - topk (tuple, optional): The criterion used to calculate the - accuracy. Defaults to (1,). - thresh (float, optional): If not None, predictions with scores - under this threshold are considered incorrect. Default to None. - """ - super().__init__() - self.topk = topk - self.thresh = thresh - - def forward(self, pred, target): - """Forward function to calculate accuracy. - - Args: - pred (torch.Tensor): Prediction of models. - target (torch.Tensor): Target for each prediction. - - Returns: - tuple[float]: The accuracies under different topk criterions. - """ - return accuracy(pred, target, self.topk, self.thresh) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/ae_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/ae_loss.py deleted file mode 100644 index 5c6da22a9ec6ca057359bfb9f1cee6e4bcecfdc1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/ae_loss.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..builder import LOSSES - - -@mmcv.jit(derivate=True, coderize=True) -def ae_loss_per_image(tl_preds, br_preds, match): - """Associative Embedding Loss in one image. - - Associative Embedding Loss including two parts: pull loss and push loss. - Pull loss makes embedding vectors from same object closer to each other. - Push loss distinguish embedding vector from different objects, and makes - the gap between them is large enough. - - During computing, usually there are 3 cases: - - no object in image: both pull loss and push loss will be 0. - - one object in image: push loss will be 0 and pull loss is computed - by the two corner of the only object. - - more than one objects in image: pull loss is computed by corner pairs - from each object, push loss is computed by each object with all - other objects. We use confusion matrix with 0 in diagonal to - compute the push loss. - - Args: - tl_preds (tensor): Embedding feature map of left-top corner. - br_preds (tensor): Embedding feature map of bottim-right corner. - match (list): Downsampled coordinates pair of each ground truth box. - """ - - tl_list, br_list, me_list = [], [], [] - if len(match) == 0: # no object in image - pull_loss = tl_preds.sum() * 0. - push_loss = tl_preds.sum() * 0. 
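# Editor's note: a tiny worked example of the top-k accuracy computed above.
# With 3 samples and 4 classes, sample 0 is correct at top-1, sample 1 only
# enters at top-2, and sample 2 is wrong even at top-2.
import torch

pred = torch.tensor([[0.1, 0.7, 0.1, 0.1],    # argmax = 1
                     [0.5, 0.3, 0.1, 0.1],    # top-2 = {0, 1}
                     [0.4, 0.3, 0.2, 0.1]])   # top-2 = {0, 1}
target = torch.tensor([1, 1, 3])

top1 = (pred.argmax(dim=1) == target).float().mean() * 100
top2_hits = (pred.topk(2, dim=1).indices == target[:, None]).any(dim=1)
top2 = top2_hits.float().mean() * 100
print(top1.item(), top2.item())   # ~33.3, ~66.7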
- else: - for m in match: - [tl_y, tl_x], [br_y, br_x] = m - tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1) - br_e = br_preds[:, br_y, br_x].view(-1, 1) - tl_list.append(tl_e) - br_list.append(br_e) - me_list.append((tl_e + br_e) / 2.0) - - tl_list = torch.cat(tl_list) - br_list = torch.cat(br_list) - me_list = torch.cat(me_list) - - assert tl_list.size() == br_list.size() - - # N is object number in image, M is dimension of embedding vector - N, M = tl_list.size() - - pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2) - pull_loss = pull_loss.sum() / N - - margin = 1 # exp setting of CornerNet, details in section 3.3 of paper - - # confusion matrix of push loss - conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list - conf_weight = 1 - torch.eye(N).type_as(me_list) - conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs()) - - if N > 1: # more than one object in current image - push_loss = F.relu(conf_mat).sum() / (N * (N - 1)) - else: - push_loss = tl_preds.sum() * 0. - - return pull_loss, push_loss - - -@LOSSES.register_module() -class AssociativeEmbeddingLoss(nn.Module): - """Associative Embedding Loss. - - More details can be found in - `Associative Embedding `_ and - `CornerNet `_ . - Code is modified from `kp_utils.py `_ # noqa: E501 - - Args: - pull_weight (float): Loss weight for corners from same object. - push_weight (float): Loss weight for corners from different object. - """ - - def __init__(self, pull_weight=0.25, push_weight=0.25): - super(AssociativeEmbeddingLoss, self).__init__() - self.pull_weight = pull_weight - self.push_weight = push_weight - - def forward(self, pred, target, match): - """Forward function.""" - batch = pred.size(0) - pull_all, push_all = 0.0, 0.0 - for i in range(batch): - pull, push = ae_loss_per_image(pred[i], target[i], match[i]) - - pull_all += self.pull_weight * pull - push_all += self.push_weight * push - - return pull_all, push_all diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/balanced_l1_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/balanced_l1_loss.py deleted file mode 100644 index 8500345f0e41e8d98f75c4616c70eee8bce4473f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/balanced_l1_loss.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import numpy as np -import torch -import torch.nn as nn - -from ..builder import LOSSES -from .utils import weighted_loss - - -@mmcv.jit(derivate=True, coderize=True) -@weighted_loss -def balanced_l1_loss(pred, - target, - beta=1.0, - alpha=0.5, - gamma=1.5, - reduction='mean'): - """Calculate balanced L1 loss. - - Please see the `Libra R-CNN `_ - - Args: - pred (torch.Tensor): The prediction with shape (N, 4). - target (torch.Tensor): The learning target of the prediction with - shape (N, 4). - beta (float): The loss is a piecewise function of prediction and target - and ``beta`` serves as a threshold for the difference between the - prediction and target. Defaults to 1.0. - alpha (float): The denominator ``alpha`` in the balanced L1 loss. - Defaults to 0.5. - gamma (float): The ``gamma`` in the balanced L1 loss. - Defaults to 1.5. - reduction (str, optional): The method that reduces the loss to a - scalar. Options are "none", "mean" and "sum". 
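# Editor's note: a numerical illustration of the pull/push terms defined above,
# with two objects and 1-D embeddings. Pull drags each object's two corner
# embeddings toward their mean; push (margin 1) only fires when the means of
# different objects come closer than the margin.
import torch
import torch.nn.functional as F

tl = torch.tensor([1.0, 3.0])          # top-left embedding per object
br = torch.tensor([1.2, 2.8])          # bottom-right embedding per object
me = (tl + br) / 2                     # per-object mean embedding
N = tl.numel()

pull = ((tl - me) ** 2 + (br - me) ** 2).sum() / N
margin = 1.0
diff = (me[:, None] - me[None, :]).abs()            # pairwise |mean_i - mean_j|
off_diag = 1 - torch.eye(N)
push = F.relu(off_diag * (margin - diff)).sum() / (N * (N - 1))
print(pull.item(), push.item())        # ~0.02, 0.0 (means are 1.8 apart > margin)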
- - Returns: - torch.Tensor: The calculated loss - """ - assert beta > 0 - if target.numel() == 0: - return pred.sum() * 0 - - assert pred.size() == target.size() - - diff = torch.abs(pred - target) - b = np.e**(gamma / alpha) - 1 - loss = torch.where( - diff < beta, alpha / b * - (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff, - gamma * diff + gamma / b - alpha * beta) - - return loss - - -@LOSSES.register_module() -class BalancedL1Loss(nn.Module): - """Balanced L1 Loss. - - arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) - - Args: - alpha (float): The denominator ``alpha`` in the balanced L1 loss. - Defaults to 0.5. - gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5. - beta (float, optional): The loss is a piecewise function of prediction - and target. ``beta`` serves as a threshold for the difference - between the prediction and target. Defaults to 1.0. - reduction (str, optional): The method that reduces the loss to a - scalar. Options are "none", "mean" and "sum". - loss_weight (float, optional): The weight of the loss. Defaults to 1.0 - """ - - def __init__(self, - alpha=0.5, - gamma=1.5, - beta=1.0, - reduction='mean', - loss_weight=1.0): - super(BalancedL1Loss, self).__init__() - self.alpha = alpha - self.gamma = gamma - self.beta = beta - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None, - **kwargs): - """Forward function of loss. - - Args: - pred (torch.Tensor): The prediction with shape (N, 4). - target (torch.Tensor): The learning target of the prediction with - shape (N, 4). - weight (torch.Tensor, optional): Sample-wise loss weight with - shape (N, ). - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Options are "none", "mean" and "sum". - - Returns: - torch.Tensor: The calculated loss - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - loss_bbox = self.loss_weight * balanced_l1_loss( - pred, - target, - weight, - alpha=self.alpha, - gamma=self.gamma, - beta=self.beta, - reduction=reduction, - avg_factor=avg_factor, - **kwargs) - return loss_bbox diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/cross_entropy_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/cross_entropy_loss.py deleted file mode 100644 index 41411fc5456970d1aad9c11a58c2e4988a5a7440..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/cross_entropy_loss.py +++ /dev/null @@ -1,301 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..builder import LOSSES -from .utils import weight_reduce_loss - - -def cross_entropy(pred, - label, - weight=None, - reduction='mean', - avg_factor=None, - class_weight=None, - ignore_index=-100, - avg_non_ignore=False): - """Calculate the CrossEntropy loss. - - Args: - pred (torch.Tensor): The prediction with shape (N, C), C is the number - of classes. - label (torch.Tensor): The learning label of the prediction. - weight (torch.Tensor, optional): Sample-wise loss weight. - reduction (str, optional): The method used to reduce the loss. 
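# Editor's note: sanity check on the piecewise form of the balanced L1 loss
# above. With b = e^(gamma/alpha) - 1, the two branches agree at diff == beta,
# so the loss is continuous where it switches from the log-shaped part to the
# linear part.
import numpy as np

alpha, gamma, beta = 0.5, 1.5, 1.0
b = np.e ** (gamma / alpha) - 1

diff = beta
inner = alpha / b * (b * diff + 1) * np.log(b * diff / beta + 1) - alpha * diff
outer = gamma * diff + gamma / b - alpha * beta
print(inner, outer)   # both ~1.0786, i.e. the branches meet at diff = beta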
- avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - class_weight (list[float], optional): The weight for each class. - ignore_index (int | None): The label index to be ignored. - If None, it will be set to default value. Default: -100. - avg_non_ignore (bool): The flag decides to whether the loss is - only averaged over non-ignored targets. Default: False. - - Returns: - torch.Tensor: The calculated loss - """ - # The default value of ignore_index is the same as F.cross_entropy - ignore_index = -100 if ignore_index is None else ignore_index - # element-wise losses - loss = F.cross_entropy( - pred, - label, - weight=class_weight, - reduction='none', - ignore_index=ignore_index) - - # average loss over non-ignored elements - # pytorch's official cross_entropy average loss over non-ignored elements - # refer to https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660 # noqa - if (avg_factor is None) and avg_non_ignore and reduction == 'mean': - avg_factor = label.numel() - (label == ignore_index).sum().item() - - # apply weights and do the reduction - if weight is not None: - weight = weight.float() - loss = weight_reduce_loss( - loss, weight=weight, reduction=reduction, avg_factor=avg_factor) - - return loss - - -def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index): - """Expand onehot labels to match the size of prediction.""" - bin_labels = labels.new_full((labels.size(0), label_channels), 0) - valid_mask = (labels >= 0) & (labels != ignore_index) - inds = torch.nonzero( - valid_mask & (labels < label_channels), as_tuple=False) - - if inds.numel() > 0: - bin_labels[inds, labels[inds]] = 1 - - valid_mask = valid_mask.view(-1, 1).expand(labels.size(0), - label_channels).float() - if label_weights is None: - bin_label_weights = valid_mask - else: - bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels) - bin_label_weights *= valid_mask - - return bin_labels, bin_label_weights, valid_mask - - -def binary_cross_entropy(pred, - label, - weight=None, - reduction='mean', - avg_factor=None, - class_weight=None, - ignore_index=-100, - avg_non_ignore=False): - """Calculate the binary CrossEntropy loss. - - Args: - pred (torch.Tensor): The prediction with shape (N, 1) or (N, ). - When the shape of pred is (N, 1), label will be expanded to - one-hot format, and when the shape of pred is (N, ), label - will not be expanded to one-hot format. - label (torch.Tensor): The learning label of the prediction, - with shape (N, ). - weight (torch.Tensor, optional): Sample-wise loss weight. - reduction (str, optional): The method used to reduce the loss. - Options are "none", "mean" and "sum". - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - class_weight (list[float], optional): The weight for each class. - ignore_index (int | None): The label index to be ignored. - If None, it will be set to default value. Default: -100. - avg_non_ignore (bool): The flag decides to whether the loss is - only averaged over non-ignored targets. Default: False. - - Returns: - torch.Tensor: The calculated loss. 
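# Editor's note: illustration of the avg_non_ignore flag handled above. With
# reduction='mean', PyTorch's F.cross_entropy averages only over non-ignored
# targets, while a naive sum / numel average would also count the ignored ones
# in the denominator.
import torch
import torch.nn.functional as F

pred = torch.randn(4, 3)
label = torch.tensor([0, 2, -100, 1])          # one target carries ignore_index

per_elem = F.cross_entropy(pred, label, reduction='none', ignore_index=-100)
avg_all = per_elem.sum() / label.numel()                           # divides by 4
n_valid = label.numel() - (label == -100).sum().item()
avg_non_ignore = per_elem.sum() / n_valid                          # divides by 3
print(avg_all.item(), avg_non_ignore.item(),
      F.cross_entropy(pred, label, ignore_index=-100).item())      # matches the latter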
- """ - # The default value of ignore_index is the same as F.cross_entropy - ignore_index = -100 if ignore_index is None else ignore_index - - if pred.dim() != label.dim(): - label, weight, valid_mask = _expand_onehot_labels( - label, weight, pred.size(-1), ignore_index) - else: - # should mask out the ignored elements - valid_mask = ((label >= 0) & (label != ignore_index)).float() - if weight is not None: - # The inplace writing method will have a mismatched broadcast - # shape error if the weight and valid_mask dimensions - # are inconsistent such as (B,N,1) and (B,N,C). - weight = weight * valid_mask - else: - weight = valid_mask - - # average loss over non-ignored elements - if (avg_factor is None) and avg_non_ignore and reduction == 'mean': - avg_factor = valid_mask.sum().item() - - # weighted element-wise losses - weight = weight.float() - loss = F.binary_cross_entropy_with_logits( - pred, label.float(), pos_weight=class_weight, reduction='none') - # do the reduction for the weighted loss - loss = weight_reduce_loss( - loss, weight, reduction=reduction, avg_factor=avg_factor) - - return loss - - -def mask_cross_entropy(pred, - target, - label, - reduction='mean', - avg_factor=None, - class_weight=None, - ignore_index=None, - **kwargs): - """Calculate the CrossEntropy loss for masks. - - Args: - pred (torch.Tensor): The prediction with shape (N, C, *), C is the - number of classes. The trailing * indicates arbitrary shape. - target (torch.Tensor): The learning label of the prediction. - label (torch.Tensor): ``label`` indicates the class label of the mask - corresponding object. This will be used to select the mask in the - of the class which the object belongs to when the mask prediction - if not class-agnostic. - reduction (str, optional): The method used to reduce the loss. - Options are "none", "mean" and "sum". - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - class_weight (list[float], optional): The weight for each class. - ignore_index (None): Placeholder, to be consistent with other loss. - Default: None. - - Returns: - torch.Tensor: The calculated loss - - Example: - >>> N, C = 3, 11 - >>> H, W = 2, 2 - >>> pred = torch.randn(N, C, H, W) * 1000 - >>> target = torch.rand(N, H, W) - >>> label = torch.randint(0, C, size=(N,)) - >>> reduction = 'mean' - >>> avg_factor = None - >>> class_weights = None - >>> loss = mask_cross_entropy(pred, target, label, reduction, - >>> avg_factor, class_weights) - >>> assert loss.shape == (1,) - """ - assert ignore_index is None, 'BCE loss does not support ignore_index' - # TODO: handle these two reserved arguments - assert reduction == 'mean' and avg_factor is None - num_rois = pred.size()[0] - inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) - pred_slice = pred[inds, label].squeeze(1) - return F.binary_cross_entropy_with_logits( - pred_slice, target, weight=class_weight, reduction='mean')[None] - - -@LOSSES.register_module() -class CrossEntropyLoss(nn.Module): - - def __init__(self, - use_sigmoid=False, - use_mask=False, - reduction='mean', - class_weight=None, - ignore_index=None, - loss_weight=1.0, - avg_non_ignore=False): - """CrossEntropyLoss. - - Args: - use_sigmoid (bool, optional): Whether the prediction uses sigmoid - of softmax. Defaults to False. - use_mask (bool, optional): Whether to use mask cross entropy loss. - Defaults to False. - reduction (str, optional): . Defaults to 'mean'. - Options are "none", "mean" and "sum". 
- class_weight (list[float], optional): Weight of each class. - Defaults to None. - ignore_index (int | None): The label index to be ignored. - Defaults to None. - loss_weight (float, optional): Weight of the loss. Defaults to 1.0. - avg_non_ignore (bool): The flag decides to whether the loss is - only averaged over non-ignored targets. Default: False. - """ - super(CrossEntropyLoss, self).__init__() - assert (use_sigmoid is False) or (use_mask is False) - self.use_sigmoid = use_sigmoid - self.use_mask = use_mask - self.reduction = reduction - self.loss_weight = loss_weight - self.class_weight = class_weight - self.ignore_index = ignore_index - self.avg_non_ignore = avg_non_ignore - if ((ignore_index is not None) and not self.avg_non_ignore - and self.reduction == 'mean'): - warnings.warn( - 'Default ``avg_non_ignore`` is False, if you would like to ' - 'ignore the certain label and average loss over non-ignore ' - 'labels, which is the same with PyTorch official ' - 'cross_entropy, set ``avg_non_ignore=True``.') - - if self.use_sigmoid: - self.cls_criterion = binary_cross_entropy - elif self.use_mask: - self.cls_criterion = mask_cross_entropy - else: - self.cls_criterion = cross_entropy - - def extra_repr(self): - """Extra repr.""" - s = f'avg_non_ignore={self.avg_non_ignore}' - return s - - def forward(self, - cls_score, - label, - weight=None, - avg_factor=None, - reduction_override=None, - ignore_index=None, - **kwargs): - """Forward function. - - Args: - cls_score (torch.Tensor): The prediction. - label (torch.Tensor): The learning label of the prediction. - weight (torch.Tensor, optional): Sample-wise loss weight. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The method used to reduce the - loss. Options are "none", "mean" and "sum". - ignore_index (int | None): The label index to be ignored. - If not None, it will override the default value. Default: None. - Returns: - torch.Tensor: The calculated loss. - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - if ignore_index is None: - ignore_index = self.ignore_index - - if self.class_weight is not None: - class_weight = cls_score.new_tensor( - self.class_weight, device=cls_score.device) - else: - class_weight = None - loss_cls = self.loss_weight * self.cls_criterion( - cls_score, - label, - weight, - class_weight=class_weight, - reduction=reduction, - avg_factor=avg_factor, - ignore_index=ignore_index, - avg_non_ignore=self.avg_non_ignore, - **kwargs) - return loss_cls diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/dice_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/dice_loss.py deleted file mode 100644 index 585beeaf1c6bb86205f40c73a54e2826edc1fe5d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/dice_loss.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn - -from ..builder import LOSSES -from .utils import weight_reduce_loss - - -def dice_loss(pred, - target, - weight=None, - eps=1e-3, - reduction='mean', - naive_dice=False, - avg_factor=None): - """Calculate dice loss, there are two forms of dice loss is supported: - - - the one proposed in `V-Net: Fully Convolutional Neural - Networks for Volumetric Medical Image Segmentation - `_. 
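Assuming this repo keeps the upstream mmdet package layout and exports, the class above can be exercised directly. The snippet below is only illustrative; it shows the effect of `ignore_index` together with `avg_non_ignore=True`, where the mean is taken over the non-ignored samples only.

```
import torch
from mmdet.models.losses import CrossEntropyLoss  # assumes upstream-style exports

loss_cls = CrossEntropyLoss(
    use_sigmoid=False, loss_weight=1.0, ignore_index=255, avg_non_ignore=True)

cls_score = torch.randn(4, 10)            # 4 samples, 10 class logits
label = torch.tensor([1, 255, 3, 255])    # two samples carry the ignored label
print(loss_cls(cls_score, label))         # averaged over the 2 non-ignored samples only
```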
- - the dice loss in which the power of the number in the - denominator is the first power instead of the second - power. - - Args: - pred (torch.Tensor): The prediction, has a shape (n, *) - target (torch.Tensor): The learning label of the prediction, - shape (n, *), same shape of pred. - weight (torch.Tensor, optional): The weight of loss for each - prediction, has a shape (n,). Defaults to None. - eps (float): Avoid dividing by zero. Default: 1e-3. - reduction (str, optional): The method used to reduce the loss into - a scalar. Defaults to 'mean'. - Options are "none", "mean" and "sum". - naive_dice (bool, optional): If false, use the dice - loss defined in the V-Net paper, otherwise, use the - naive dice loss in which the power of the number in the - denominator is the first power instead of the second - power.Defaults to False. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - """ - - input = pred.flatten(1) - target = target.flatten(1).float() - - a = torch.sum(input * target, 1) - if naive_dice: - b = torch.sum(input, 1) - c = torch.sum(target, 1) - d = (2 * a + eps) / (b + c + eps) - else: - b = torch.sum(input * input, 1) + eps - c = torch.sum(target * target, 1) + eps - d = (2 * a) / (b + c) - - loss = 1 - d - if weight is not None: - assert weight.ndim == loss.ndim - assert len(weight) == len(pred) - loss = weight_reduce_loss(loss, weight, reduction, avg_factor) - return loss - - -@LOSSES.register_module() -class DiceLoss(nn.Module): - - def __init__(self, - use_sigmoid=True, - activate=True, - reduction='mean', - naive_dice=False, - loss_weight=1.0, - eps=1e-3): - """Compute dice loss. - - Args: - use_sigmoid (bool, optional): Whether to the prediction is - used for sigmoid or softmax. Defaults to True. - activate (bool): Whether to activate the predictions inside, - this will disable the inside sigmoid operation. - Defaults to True. - reduction (str, optional): The method used - to reduce the loss. Options are "none", - "mean" and "sum". Defaults to 'mean'. - naive_dice (bool, optional): If false, use the dice - loss defined in the V-Net paper, otherwise, use the - naive dice loss in which the power of the number in the - denominator is the first power instead of the second - power. Defaults to False. - loss_weight (float, optional): Weight of loss. Defaults to 1.0. - eps (float): Avoid dividing by zero. Defaults to 1e-3. - """ - - super(DiceLoss, self).__init__() - self.use_sigmoid = use_sigmoid - self.reduction = reduction - self.naive_dice = naive_dice - self.loss_weight = loss_weight - self.eps = eps - self.activate = activate - - def forward(self, - pred, - target, - weight=None, - reduction_override=None, - avg_factor=None): - """Forward function. - - Args: - pred (torch.Tensor): The prediction, has a shape (n, *). - target (torch.Tensor): The label of the prediction, - shape (n, *), same shape of pred. - weight (torch.Tensor, optional): The weight of loss for each - prediction, has a shape (n,). Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Options are "none", "mean" and "sum". 
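A toy comparison of the two denominators supported by `dice_loss` (the V-Net form with squared terms versus the naive first-power form) may help. The tensors below are illustrative masks already flattened to shape (n, *).

```
import torch

pred = torch.tensor([[0.9, 0.8, 0.1, 0.2]])    # predicted mask probabilities
target = torch.tensor([[1.0, 1.0, 0.0, 0.0]])
eps = 1e-3

inter = (pred * target).sum(1)

# V-Net form: squared terms in the denominator
vnet = 1 - 2 * inter / ((pred * pred).sum(1) + (target * target).sum(1) + 2 * eps)

# "naive" form: first-power terms in the denominator
naive = 1 - (2 * inter + eps) / (pred.sum(1) + target.sum(1) + eps)

print(vnet, naive)
```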
- - Returns: - torch.Tensor: The calculated loss - """ - - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - - if self.activate: - if self.use_sigmoid: - pred = pred.sigmoid() - else: - raise NotImplementedError - - loss = self.loss_weight * dice_loss( - pred, - target, - weight, - eps=self.eps, - reduction=reduction, - naive_dice=self.naive_dice, - avg_factor=avg_factor) - - return loss diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/focal_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/focal_loss.py deleted file mode 100644 index 2858c198101c75942d6cc9d18e275dbd6ab359dd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/focal_loss.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss - -from ..builder import LOSSES -from .utils import weight_reduce_loss - - -# This method is only for debugging -def py_sigmoid_focal_loss(pred, - target, - weight=None, - gamma=2.0, - alpha=0.25, - reduction='mean', - avg_factor=None): - """PyTorch version of `Focal Loss `_. - - Args: - pred (torch.Tensor): The prediction with shape (N, C), C is the - number of classes - target (torch.Tensor): The learning label of the prediction. - weight (torch.Tensor, optional): Sample-wise loss weight. - gamma (float, optional): The gamma for calculating the modulating - factor. Defaults to 2.0. - alpha (float, optional): A balanced form for Focal Loss. - Defaults to 0.25. - reduction (str, optional): The method used to reduce the loss into - a scalar. Defaults to 'mean'. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - """ - pred_sigmoid = pred.sigmoid() - target = target.type_as(pred) - pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) - focal_weight = (alpha * target + (1 - alpha) * - (1 - target)) * pt.pow(gamma) - loss = F.binary_cross_entropy_with_logits( - pred, target, reduction='none') * focal_weight - if weight is not None: - if weight.shape != loss.shape: - if weight.size(0) == loss.size(0): - # For most cases, weight is of shape (num_priors, ), - # which means it does not have the second axis num_class - weight = weight.view(-1, 1) - else: - # Sometimes, weight per anchor per class is also needed. e.g. - # in FSAF. But it may be flattened of shape - # (num_priors x num_class, ), while loss is still of shape - # (num_priors, num_class). - assert weight.numel() == loss.numel() - weight = weight.view(loss.size(0), -1) - assert weight.ndim == loss.ndim - loss = weight_reduce_loss(loss, weight, reduction, avg_factor) - return loss - - -def py_focal_loss_with_prob(pred, - target, - weight=None, - gamma=2.0, - alpha=0.25, - reduction='mean', - avg_factor=None): - """PyTorch version of `Focal Loss `_. - Different from `py_sigmoid_focal_loss`, this function accepts probability - as input. - - Args: - pred (torch.Tensor): The prediction probability with shape (N, C), - C is the number of classes. - target (torch.Tensor): The learning label of the prediction. - weight (torch.Tensor, optional): Sample-wise loss weight. - gamma (float, optional): The gamma for calculating the modulating - factor. Defaults to 2.0. - alpha (float, optional): A balanced form for Focal Loss. - Defaults to 0.25. 
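For readers who prefer to see the modulating factor in isolation, here is a self-contained sketch of the element-wise computation performed by `py_sigmoid_focal_loss` on toy logits; confident, correct predictions are strongly down-weighted.

```
import torch
import torch.nn.functional as F

pred = torch.tensor([[2.0, -1.0], [-3.0, 0.5]])   # logits, shape (N, C)
target = torch.tensor([[1.0, 0.0], [0.0, 1.0]])   # binary targets per class
gamma, alpha = 2.0, 0.25

p = pred.sigmoid()
pt = (1 - p) * target + p * (1 - target)          # "how wrong" each prediction is
focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none') * focal_weight
print(loss)   # confident-correct entries contribute almost nothing
```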
- reduction (str, optional): The method used to reduce the loss into - a scalar. Defaults to 'mean'. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - """ - num_classes = pred.size(1) - target = F.one_hot(target, num_classes=num_classes + 1) - target = target[:, :num_classes] - - target = target.type_as(pred) - pt = (1 - pred) * target + pred * (1 - target) - focal_weight = (alpha * target + (1 - alpha) * - (1 - target)) * pt.pow(gamma) - loss = F.binary_cross_entropy( - pred, target, reduction='none') * focal_weight - if weight is not None: - if weight.shape != loss.shape: - if weight.size(0) == loss.size(0): - # For most cases, weight is of shape (num_priors, ), - # which means it does not have the second axis num_class - weight = weight.view(-1, 1) - else: - # Sometimes, weight per anchor per class is also needed. e.g. - # in FSAF. But it may be flattened of shape - # (num_priors x num_class, ), while loss is still of shape - # (num_priors, num_class). - assert weight.numel() == loss.numel() - weight = weight.view(loss.size(0), -1) - assert weight.ndim == loss.ndim - loss = weight_reduce_loss(loss, weight, reduction, avg_factor) - return loss - - -def sigmoid_focal_loss(pred, - target, - weight=None, - gamma=2.0, - alpha=0.25, - reduction='mean', - avg_factor=None): - r"""A wrapper of cuda version `Focal Loss - `_. - - Args: - pred (torch.Tensor): The prediction with shape (N, C), C is the number - of classes. - target (torch.Tensor): The learning label of the prediction. - weight (torch.Tensor, optional): Sample-wise loss weight. - gamma (float, optional): The gamma for calculating the modulating - factor. Defaults to 2.0. - alpha (float, optional): A balanced form for Focal Loss. - Defaults to 0.25. - reduction (str, optional): The method used to reduce the loss into - a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - """ - # Function.apply does not accept keyword arguments, so the decorator - # "weighted_loss" is not applicable - loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), gamma, - alpha, None, 'none') - if weight is not None: - if weight.shape != loss.shape: - if weight.size(0) == loss.size(0): - # For most cases, weight is of shape (num_priors, ), - # which means it does not have the second axis num_class - weight = weight.view(-1, 1) - else: - # Sometimes, weight per anchor per class is also needed. e.g. - # in FSAF. But it may be flattened of shape - # (num_priors x num_class, ), while loss is still of shape - # (num_priors, num_class). - assert weight.numel() == loss.numel() - weight = weight.view(loss.size(0), -1) - assert weight.ndim == loss.ndim - loss = weight_reduce_loss(loss, weight, reduction, avg_factor) - return loss - - -@LOSSES.register_module() -class FocalLoss(nn.Module): - - def __init__(self, - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - reduction='mean', - loss_weight=1.0, - activated=False): - """`Focal Loss `_ - - Args: - use_sigmoid (bool, optional): Whether to the prediction is - used for sigmoid or softmax. Defaults to True. - gamma (float, optional): The gamma for calculating the modulating - factor. Defaults to 2.0. - alpha (float, optional): A balanced form for Focal Loss. - Defaults to 0.25. - reduction (str, optional): The method used to reduce the loss into - a scalar. Defaults to 'mean'. Options are "none", "mean" and - "sum". 
- loss_weight (float, optional): Weight of loss. Defaults to 1.0. - activated (bool, optional): Whether the input is activated. - If True, it means the input has been activated and can be - treated as probabilities. Else, it should be treated as logits. - Defaults to False. - """ - super(FocalLoss, self).__init__() - assert use_sigmoid is True, 'Only sigmoid focal loss supported now.' - self.use_sigmoid = use_sigmoid - self.gamma = gamma - self.alpha = alpha - self.reduction = reduction - self.loss_weight = loss_weight - self.activated = activated - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None): - """Forward function. - - Args: - pred (torch.Tensor): The prediction. - target (torch.Tensor): The learning label of the prediction. - weight (torch.Tensor, optional): The weight of loss for each - prediction. Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Options are "none", "mean" and "sum". - - Returns: - torch.Tensor: The calculated loss - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - if self.use_sigmoid: - if self.activated: - calculate_loss_func = py_focal_loss_with_prob - else: - if torch.cuda.is_available() and pred.is_cuda: - calculate_loss_func = sigmoid_focal_loss - else: - num_classes = pred.size(1) - target = F.one_hot(target, num_classes=num_classes + 1) - target = target[:, :num_classes] - calculate_loss_func = py_sigmoid_focal_loss - - loss_cls = self.loss_weight * calculate_loss_func( - pred, - target, - weight, - gamma=self.gamma, - alpha=self.alpha, - reduction=reduction, - avg_factor=avg_factor) - - else: - raise NotImplementedError - return loss_cls diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/gaussian_focal_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/gaussian_focal_loss.py deleted file mode 100644 index 7abcb691acbfbb300597a72fcce67ca3b5e9f2f2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/gaussian_focal_loss.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import torch.nn as nn - -from ..builder import LOSSES -from .utils import weighted_loss - - -@mmcv.jit(derivate=True, coderize=True) -@weighted_loss -def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0): - """`Focal Loss `_ for targets in gaussian - distribution. - - Args: - pred (torch.Tensor): The prediction. - gaussian_target (torch.Tensor): The learning target of the prediction - in gaussian distribution. - alpha (float, optional): A balanced form for Focal Loss. - Defaults to 2.0. - gamma (float, optional): The gamma for calculating the modulating - factor. Defaults to 4.0. - """ - eps = 1e-12 - pos_weights = gaussian_target.eq(1) - neg_weights = (1 - gaussian_target).pow(gamma) - pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights - neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights - return pos_loss + neg_loss - - -@LOSSES.register_module() -class GaussianFocalLoss(nn.Module): - """GaussianFocalLoss is a variant of focal loss. 
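Since `gaussian_focal_loss` treats only pixels whose target equals exactly 1 as positives, a tiny 1-D heatmap makes the weighting easy to inspect; the numbers below are illustrative.

```
import torch

# Toy 1x5 "heatmap": the centre pixel is the positive, neighbours carry a gaussian target
pred = torch.tensor([0.1, 0.4, 0.8, 0.4, 0.1])
gaussian_target = torch.tensor([0.1, 0.6, 1.0, 0.6, 0.1])
alpha, gamma, eps = 2.0, 4.0, 1e-12

pos_weights = gaussian_target.eq(1)                  # only exact 1s are positives
neg_weights = (1 - gaussian_target).pow(gamma)       # negatives near the peak are down-weighted
pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
print(pos_loss + neg_loss)
```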
- - More details can be found in the `paper - `_ - Code is modified from `kp_utils.py - `_ # noqa: E501 - Please notice that the target in GaussianFocalLoss is a gaussian heatmap, - not 0/1 binary target. - - Args: - alpha (float): Power of prediction. - gamma (float): Power of target for negative samples. - reduction (str): Options are "none", "mean" and "sum". - loss_weight (float): Loss weight of current loss. - """ - - def __init__(self, - alpha=2.0, - gamma=4.0, - reduction='mean', - loss_weight=1.0): - super(GaussianFocalLoss, self).__init__() - self.alpha = alpha - self.gamma = gamma - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None): - """Forward function. - - Args: - pred (torch.Tensor): The prediction. - target (torch.Tensor): The learning target of the prediction - in gaussian distribution. - weight (torch.Tensor, optional): The weight of loss for each - prediction. Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Defaults to None. - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - loss_reg = self.loss_weight * gaussian_focal_loss( - pred, - target, - weight, - alpha=self.alpha, - gamma=self.gamma, - reduction=reduction, - avg_factor=avg_factor) - return loss_reg diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/gfocal_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/gfocal_loss.py deleted file mode 100644 index 0e8d26373f83f35ad032322d96cdbac995be2749..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/gfocal_loss.py +++ /dev/null @@ -1,245 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import torch.nn as nn -import torch.nn.functional as F - -from ..builder import LOSSES -from .utils import weighted_loss - - -@mmcv.jit(derivate=True, coderize=True) -@weighted_loss -def quality_focal_loss(pred, target, beta=2.0): - r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning - Qualified and Distributed Bounding Boxes for Dense Object Detection - `_. - - Args: - pred (torch.Tensor): Predicted joint representation of classification - and quality (IoU) estimation with shape (N, C), C is the number of - classes. - target (tuple([torch.Tensor])): Target category label with shape (N,) - and target quality label with shape (N,). - beta (float): The beta parameter for calculating the modulating factor. - Defaults to 2.0. - - Returns: - torch.Tensor: Loss tensor with shape (N,). 
- """ - assert len(target) == 2, """target for QFL must be a tuple of two elements, - including category label and quality label, respectively""" - # label denotes the category id, score denotes the quality score - label, score = target - - # negatives are supervised by 0 quality score - pred_sigmoid = pred.sigmoid() - scale_factor = pred_sigmoid - zerolabel = scale_factor.new_zeros(pred.shape) - loss = F.binary_cross_entropy_with_logits( - pred, zerolabel, reduction='none') * scale_factor.pow(beta) - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - bg_class_ind = pred.size(1) - pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1) - pos_label = label[pos].long() - # positives are supervised by bbox quality (IoU) score - scale_factor = score[pos] - pred_sigmoid[pos, pos_label] - loss[pos, pos_label] = F.binary_cross_entropy_with_logits( - pred[pos, pos_label], score[pos], - reduction='none') * scale_factor.abs().pow(beta) - - loss = loss.sum(dim=1, keepdim=False) - return loss - - -@weighted_loss -def quality_focal_loss_with_prob(pred, target, beta=2.0): - r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning - Qualified and Distributed Bounding Boxes for Dense Object Detection - `_. - Different from `quality_focal_loss`, this function accepts probability - as input. - - Args: - pred (torch.Tensor): Predicted joint representation of classification - and quality (IoU) estimation with shape (N, C), C is the number of - classes. - target (tuple([torch.Tensor])): Target category label with shape (N,) - and target quality label with shape (N,). - beta (float): The beta parameter for calculating the modulating factor. - Defaults to 2.0. - - Returns: - torch.Tensor: Loss tensor with shape (N,). - """ - assert len(target) == 2, """target for QFL must be a tuple of two elements, - including category label and quality label, respectively""" - # label denotes the category id, score denotes the quality score - label, score = target - - # negatives are supervised by 0 quality score - pred_sigmoid = pred - scale_factor = pred_sigmoid - zerolabel = scale_factor.new_zeros(pred.shape) - loss = F.binary_cross_entropy( - pred, zerolabel, reduction='none') * scale_factor.pow(beta) - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - bg_class_ind = pred.size(1) - pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1) - pos_label = label[pos].long() - # positives are supervised by bbox quality (IoU) score - scale_factor = score[pos] - pred_sigmoid[pos, pos_label] - loss[pos, pos_label] = F.binary_cross_entropy( - pred[pos, pos_label], score[pos], - reduction='none') * scale_factor.abs().pow(beta) - - loss = loss.sum(dim=1, keepdim=False) - return loss - - -@mmcv.jit(derivate=True, coderize=True) -@weighted_loss -def distribution_focal_loss(pred, label): - r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning - Qualified and Distributed Bounding Boxes for Dense Object Detection - `_. - - Args: - pred (torch.Tensor): Predicted general distribution of bounding boxes - (before softmax) with shape (N, n+1), n is the max value of the - integral set `{0, ..., n}` in paper. - label (torch.Tensor): Target distance label for bounding boxes with - shape (N,). - - Returns: - torch.Tensor: Loss tensor with shape (N,). 
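Because QFL takes a tuple target, its calling convention differs from the other classification losses here. The following sketch mirrors the function above on toy data, where `label == num_classes` marks background and `score` holds the IoU of each positive.

```
import torch
import torch.nn.functional as F

N, num_classes, beta = 4, 3, 2.0
pred = torch.randn(N, num_classes)            # joint cls-quality logits
label = torch.tensor([0, 2, 3, 1])            # 3 == num_classes means background
score = torch.tensor([0.8, 0.6, 0.0, 0.9])    # IoU of each positive with its GT

pred_sigmoid = pred.sigmoid()
# all entries start as negatives supervised towards 0
loss = F.binary_cross_entropy_with_logits(
    pred, torch.zeros_like(pred), reduction='none') * pred_sigmoid.pow(beta)

# positives are instead supervised towards their quality (IoU) score
pos = ((label >= 0) & (label < num_classes)).nonzero().squeeze(1)
pos_label = label[pos]
scale = (score[pos] - pred_sigmoid[pos, pos_label]).abs().pow(beta)
loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
    pred[pos, pos_label], score[pos], reduction='none') * scale

print(loss.sum(dim=1))    # per-sample QFL, as in the function above
```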
- """ - dis_left = label.long() - dis_right = dis_left + 1 - weight_left = dis_right.float() - label - weight_right = label - dis_left.float() - loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \ - + F.cross_entropy(pred, dis_right, reduction='none') * weight_right - return loss - - -@LOSSES.register_module() -class QualityFocalLoss(nn.Module): - r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss: - Learning Qualified and Distributed Bounding Boxes for Dense Object - Detection `_. - - Args: - use_sigmoid (bool): Whether sigmoid operation is conducted in QFL. - Defaults to True. - beta (float): The beta parameter for calculating the modulating factor. - Defaults to 2.0. - reduction (str): Options are "none", "mean" and "sum". - loss_weight (float): Loss weight of current loss. - activated (bool, optional): Whether the input is activated. - If True, it means the input has been activated and can be - treated as probabilities. Else, it should be treated as logits. - Defaults to False. - """ - - def __init__(self, - use_sigmoid=True, - beta=2.0, - reduction='mean', - loss_weight=1.0, - activated=False): - super(QualityFocalLoss, self).__init__() - assert use_sigmoid is True, 'Only sigmoid in QFL supported now.' - self.use_sigmoid = use_sigmoid - self.beta = beta - self.reduction = reduction - self.loss_weight = loss_weight - self.activated = activated - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None): - """Forward function. - - Args: - pred (torch.Tensor): Predicted joint representation of - classification and quality (IoU) estimation with shape (N, C), - C is the number of classes. - target (tuple([torch.Tensor])): Target category label with shape - (N,) and target quality label with shape (N,). - weight (torch.Tensor, optional): The weight of loss for each - prediction. Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Defaults to None. - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - if self.use_sigmoid: - if self.activated: - calculate_loss_func = quality_focal_loss_with_prob - else: - calculate_loss_func = quality_focal_loss - loss_cls = self.loss_weight * calculate_loss_func( - pred, - target, - weight, - beta=self.beta, - reduction=reduction, - avg_factor=avg_factor) - else: - raise NotImplementedError - return loss_cls - - -@LOSSES.register_module() -class DistributionFocalLoss(nn.Module): - r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss: - Learning Qualified and Distributed Bounding Boxes for Dense Object - Detection `_. - - Args: - reduction (str): Options are `'none'`, `'mean'` and `'sum'`. - loss_weight (float): Loss weight of current loss. - """ - - def __init__(self, reduction='mean', loss_weight=1.0): - super(DistributionFocalLoss, self).__init__() - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None): - """Forward function. - - Args: - pred (torch.Tensor): Predicted general distribution of bounding - boxes (before softmax) with shape (N, n+1), n is the max value - of the integral set `{0, ..., n}` in paper. 
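The distribution focal loss supervises a discrete distribution with a continuous target by splitting each target between its two neighbouring integers. A short sketch with toy values (n = 4 here is arbitrary) shows the left/right weighting.

```
import torch
import torch.nn.functional as F

# Continuous regression targets in the integral range {0, ..., n}, here n = 4
label = torch.tensor([2.3, 0.7])
pred = torch.randn(2, 5)                      # distribution logits over {0, ..., 4}

dis_left = label.long()                       # 2 and 0
dis_right = dis_left + 1                      # 3 and 1
weight_left = dis_right.float() - label       # 0.7 and 0.3
weight_right = label - dis_left.float()       # 0.3 and 0.7

# The target 2.3 is represented as 0.7 * CE(pred, 2) + 0.3 * CE(pred, 3)
loss = (F.cross_entropy(pred, dis_left, reduction='none') * weight_left
        + F.cross_entropy(pred, dis_right, reduction='none') * weight_right)
print(loss)
```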
- target (torch.Tensor): Target distance label for bounding boxes - with shape (N,). - weight (torch.Tensor, optional): The weight of loss for each - prediction. Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Defaults to None. - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - loss_cls = self.loss_weight * distribution_focal_loss( - pred, target, weight, reduction=reduction, avg_factor=avg_factor) - return loss_cls diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/ghm_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/ghm_loss.py deleted file mode 100644 index a4df9fe8e17c9f8aea75f4e995db491e929bd206..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/ghm_loss.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..builder import LOSSES -from .utils import weight_reduce_loss - - -def _expand_onehot_labels(labels, label_weights, label_channels): - bin_labels = labels.new_full((labels.size(0), label_channels), 0) - inds = torch.nonzero( - (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze() - if inds.numel() > 0: - bin_labels[inds, labels[inds]] = 1 - bin_label_weights = label_weights.view(-1, 1).expand( - label_weights.size(0), label_channels) - return bin_labels, bin_label_weights - - -# TODO: code refactoring to make it consistent with other losses -@LOSSES.register_module() -class GHMC(nn.Module): - """GHM Classification Loss. - - Details of the theorem can be viewed in the paper - `Gradient Harmonized Single-stage Detector - `_. - - Args: - bins (int): Number of the unit regions for distribution calculation. - momentum (float): The parameter for moving average. - use_sigmoid (bool): Can only be true for BCE based loss now. - loss_weight (float): The weight of the total GHM-C loss. - reduction (str): Options are "none", "mean" and "sum". - Defaults to "mean" - """ - - def __init__(self, - bins=10, - momentum=0, - use_sigmoid=True, - loss_weight=1.0, - reduction='mean'): - super(GHMC, self).__init__() - self.bins = bins - self.momentum = momentum - edges = torch.arange(bins + 1).float() / bins - self.register_buffer('edges', edges) - self.edges[-1] += 1e-6 - if momentum > 0: - acc_sum = torch.zeros(bins) - self.register_buffer('acc_sum', acc_sum) - self.use_sigmoid = use_sigmoid - if not self.use_sigmoid: - raise NotImplementedError - self.loss_weight = loss_weight - self.reduction = reduction - - def forward(self, - pred, - target, - label_weight, - reduction_override=None, - **kwargs): - """Calculate the GHM-C loss. - - Args: - pred (float tensor of size [batch_num, class_num]): - The direct prediction of classification fc layer. - target (float tensor of size [batch_num, class_num]): - Binary class target for each sample. - label_weight (float tensor of size [batch_num, class_num]): - the value is 1 if the sample is valid and 0 if ignored. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Defaults to None. - Returns: - The gradient harmonized loss. 
- """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - # the target should be binary class label - if pred.dim() != target.dim(): - target, label_weight = _expand_onehot_labels( - target, label_weight, pred.size(-1)) - target, label_weight = target.float(), label_weight.float() - edges = self.edges - mmt = self.momentum - weights = torch.zeros_like(pred) - - # gradient length - g = torch.abs(pred.sigmoid().detach() - target) - - valid = label_weight > 0 - tot = max(valid.float().sum().item(), 1.0) - n = 0 # n valid bins - for i in range(self.bins): - inds = (g >= edges[i]) & (g < edges[i + 1]) & valid - num_in_bin = inds.sum().item() - if num_in_bin > 0: - if mmt > 0: - self.acc_sum[i] = mmt * self.acc_sum[i] \ - + (1 - mmt) * num_in_bin - weights[inds] = tot / self.acc_sum[i] - else: - weights[inds] = tot / num_in_bin - n += 1 - if n > 0: - weights = weights / n - - loss = F.binary_cross_entropy_with_logits( - pred, target, reduction='none') - loss = weight_reduce_loss( - loss, weights, reduction=reduction, avg_factor=tot) - return loss * self.loss_weight - - -# TODO: code refactoring to make it consistent with other losses -@LOSSES.register_module() -class GHMR(nn.Module): - """GHM Regression Loss. - - Details of the theorem can be viewed in the paper - `Gradient Harmonized Single-stage Detector - `_. - - Args: - mu (float): The parameter for the Authentic Smooth L1 loss. - bins (int): Number of the unit regions for distribution calculation. - momentum (float): The parameter for moving average. - loss_weight (float): The weight of the total GHM-R loss. - reduction (str): Options are "none", "mean" and "sum". - Defaults to "mean" - """ - - def __init__(self, - mu=0.02, - bins=10, - momentum=0, - loss_weight=1.0, - reduction='mean'): - super(GHMR, self).__init__() - self.mu = mu - self.bins = bins - edges = torch.arange(bins + 1).float() / bins - self.register_buffer('edges', edges) - self.edges[-1] = 1e3 - self.momentum = momentum - if momentum > 0: - acc_sum = torch.zeros(bins) - self.register_buffer('acc_sum', acc_sum) - self.loss_weight = loss_weight - self.reduction = reduction - - # TODO: support reduction parameter - def forward(self, - pred, - target, - label_weight, - avg_factor=None, - reduction_override=None): - """Calculate the GHM-R loss. - - Args: - pred (float tensor of size [batch_num, 4 (* class_num)]): - The prediction of box regression layer. Channel number can be 4 - or 4 * class_num depending on whether it is class-agnostic. - target (float tensor of size [batch_num, 4 (* class_num)]): - The target regression values with the same size of pred. - label_weight (float tensor of size [batch_num, 4 (* class_num)]): - The weight of each sample, 0 if ignored. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Defaults to None. - Returns: - The gradient harmonized loss. 
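The core of GHM-C is binning the gradient norm and applying inverse-density weights. The loop below is a stripped-down sketch of that step (no momentum) on random toy data.

```
import torch

bins = 5
pred = torch.randn(8, 1)                       # logits
target = torch.randint(0, 2, (8, 1)).float()   # binary targets
valid = torch.ones_like(target)

edges = torch.arange(bins + 1).float() / bins
edges[-1] += 1e-6

g = (pred.sigmoid() - target).abs()            # gradient norm of BCE w.r.t. the logit
weights = torch.zeros_like(pred)
tot = valid.sum()
n = 0
for i in range(bins):
    inds = (g >= edges[i]) & (g < edges[i + 1]) & (valid > 0)
    num = inds.sum().item()
    if num > 0:
        weights[inds] = tot / num              # samples in crowded bins get small weights
        n += 1
if n > 0:
    weights = weights / n
print(weights.squeeze(1))
```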
- """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - mu = self.mu - edges = self.edges - mmt = self.momentum - - # ASL1 loss - diff = pred - target - loss = torch.sqrt(diff * diff + mu * mu) - mu - - # gradient length - g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach() - weights = torch.zeros_like(g) - - valid = label_weight > 0 - tot = max(label_weight.float().sum().item(), 1.0) - n = 0 # n: valid bins - for i in range(self.bins): - inds = (g >= edges[i]) & (g < edges[i + 1]) & valid - num_in_bin = inds.sum().item() - if num_in_bin > 0: - n += 1 - if mmt > 0: - self.acc_sum[i] = mmt * self.acc_sum[i] \ - + (1 - mmt) * num_in_bin - weights[inds] = tot / self.acc_sum[i] - else: - weights[inds] = tot / num_in_bin - if n > 0: - weights /= n - loss = weight_reduce_loss( - loss, weights, reduction=reduction, avg_factor=tot) - return loss * self.loss_weight diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/iou_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/iou_loss.py deleted file mode 100644 index bf1ed04e1903d19ee339bd131b897df5b51d311a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/iou_loss.py +++ /dev/null @@ -1,474 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math -import warnings - -import mmcv -import torch -import torch.nn as nn - -from mmdet.core import bbox_overlaps -from ..builder import LOSSES -from .utils import weighted_loss - - -@mmcv.jit(derivate=True, coderize=True) -@weighted_loss -def iou_loss(pred, target, linear=False, mode='log', eps=1e-6): - """IoU loss. - - Computing the IoU loss between a set of predicted bboxes and target bboxes. - The loss is calculated as negative log of IoU. - - Args: - pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), - shape (n, 4). - target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). - linear (bool, optional): If True, use linear scale of loss instead of - log scale. Default: False. - mode (str): Loss scaling mode, including "linear", "square", and "log". - Default: 'log' - eps (float): Eps to avoid log(0). - - Return: - torch.Tensor: Loss tensor. - """ - assert mode in ['linear', 'square', 'log'] - if linear: - mode = 'linear' - warnings.warn('DeprecationWarning: Setting "linear=True" in ' - 'iou_loss is deprecated, please use "mode=`linear`" ' - 'instead.') - ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps) - if mode == 'linear': - loss = 1 - ious - elif mode == 'square': - loss = 1 - ious**2 - elif mode == 'log': - loss = -ious.log() - else: - raise NotImplementedError - return loss - - -@mmcv.jit(derivate=True, coderize=True) -@weighted_loss -def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3): - """BIoULoss. - - This is an implementation of paper - `Improving Object Localization with Fitness NMS and Bounded IoU Loss. - `_. - - Args: - pred (torch.Tensor): Predicted bboxes. - target (torch.Tensor): Target bboxes. - beta (float): beta parameter in smoothl1. - eps (float): eps to avoid NaN. 
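To see how the `mode` argument of `iou_loss` changes the penalty, the sketch below computes the aligned IoU for one toy box pair by hand (avoiding the `bbox_overlaps` dependency) and prints the three scalings.

```
import torch

pred = torch.tensor([[0., 0., 10., 10.]])
target = torch.tensor([[2., 2., 12., 12.]])

# aligned IoU for axis-aligned (x1, y1, x2, y2) boxes
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
area_p = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
area_t = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
ious = (overlap / (area_p + area_t - overlap)).clamp(min=1e-6)

print(1 - ious)        # mode='linear'
print(1 - ious ** 2)   # mode='square'
print(-ious.log())     # mode='log' (default): steeper penalty for poor overlap
```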
- """ - pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5 - pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5 - pred_w = pred[:, 2] - pred[:, 0] - pred_h = pred[:, 3] - pred[:, 1] - with torch.no_grad(): - target_ctrx = (target[:, 0] + target[:, 2]) * 0.5 - target_ctry = (target[:, 1] + target[:, 3]) * 0.5 - target_w = target[:, 2] - target[:, 0] - target_h = target[:, 3] - target[:, 1] - - dx = target_ctrx - pred_ctrx - dy = target_ctry - pred_ctry - - loss_dx = 1 - torch.max( - (target_w - 2 * dx.abs()) / - (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx)) - loss_dy = 1 - torch.max( - (target_h - 2 * dy.abs()) / - (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy)) - loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w / - (target_w + eps)) - loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h / - (target_h + eps)) - # view(..., -1) does not work for empty tensor - loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh], - dim=-1).flatten(1) - - loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta, - loss_comb - 0.5 * beta) - return loss - - -@mmcv.jit(derivate=True, coderize=True) -@weighted_loss -def giou_loss(pred, target, eps=1e-7): - r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding - Box Regression `_. - - Args: - pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), - shape (n, 4). - target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). - eps (float): Eps to avoid log(0). - - Return: - Tensor: Loss tensor. - """ - gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps) - loss = 1 - gious - return loss - - -@mmcv.jit(derivate=True, coderize=True) -@weighted_loss -def diou_loss(pred, target, eps=1e-7): - r"""`Implementation of Distance-IoU Loss: Faster and Better - Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_. - - Code is modified from https://github.com/Zzh-tju/DIoU. - - Args: - pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), - shape (n, 4). - target (Tensor): Corresponding gt bboxes, shape (n, 4). - eps (float): Eps to avoid log(0). - Return: - Tensor: Loss tensor. - """ - # overlap - lt = torch.max(pred[:, :2], target[:, :2]) - rb = torch.min(pred[:, 2:], target[:, 2:]) - wh = (rb - lt).clamp(min=0) - overlap = wh[:, 0] * wh[:, 1] - - # union - ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1]) - ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1]) - union = ap + ag - overlap + eps - - # IoU - ious = overlap / union - - # enclose area - enclose_x1y1 = torch.min(pred[:, :2], target[:, :2]) - enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:]) - enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) - - cw = enclose_wh[:, 0] - ch = enclose_wh[:, 1] - - c2 = cw**2 + ch**2 + eps - - b1_x1, b1_y1 = pred[:, 0], pred[:, 1] - b1_x2, b1_y2 = pred[:, 2], pred[:, 3] - b2_x1, b2_y1 = target[:, 0], target[:, 1] - b2_x2, b2_y2 = target[:, 2], target[:, 3] - - left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4 - right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4 - rho2 = left + right - - # DIoU - dious = ious - rho2 / c2 - loss = 1 - dious - return loss - - -@mmcv.jit(derivate=True, coderize=True) -@weighted_loss -def ciou_loss(pred, target, eps=1e-7): - r"""`Implementation of paper `Enhancing Geometric Factors into - Model Learning and Inference for Object Detection and Instance - Segmentation `_. - - Code is modified from https://github.com/Zzh-tju/CIoU. 
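The extra term that DIoU adds on top of plain IoU is the normalised centre distance. For two non-overlapping toy boxes, the sketch below shows that this penalty keeps the loss informative even when the IoU itself is zero.

```
import torch

pred = torch.tensor([[0., 0., 4., 4.]])
target = torch.tensor([[6., 6., 10., 10.]])    # same size, no overlap

# centre distance (rho^2) and enclosing-box diagonal (c^2)
p_ctr = (pred[:, :2] + pred[:, 2:]) / 2
t_ctr = (target[:, :2] + target[:, 2:]) / 2
rho2 = ((t_ctr - p_ctr) ** 2).sum(1)

enc_lt = torch.min(pred[:, :2], target[:, :2])
enc_rb = torch.max(pred[:, 2:], target[:, 2:])
c2 = ((enc_rb - enc_lt) ** 2).sum(1) + 1e-7

iou = torch.zeros(1)                           # these boxes do not overlap
print(1 - (iou - rho2 / c2))                   # DIoU loss > 1: penalises centre distance
```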
- - Args: - pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), - shape (n, 4). - target (Tensor): Corresponding gt bboxes, shape (n, 4). - eps (float): Eps to avoid log(0). - Return: - Tensor: Loss tensor. - """ - # overlap - lt = torch.max(pred[:, :2], target[:, :2]) - rb = torch.min(pred[:, 2:], target[:, 2:]) - wh = (rb - lt).clamp(min=0) - overlap = wh[:, 0] * wh[:, 1] - - # union - ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1]) - ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1]) - union = ap + ag - overlap + eps - - # IoU - ious = overlap / union - - # enclose area - enclose_x1y1 = torch.min(pred[:, :2], target[:, :2]) - enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:]) - enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) - - cw = enclose_wh[:, 0] - ch = enclose_wh[:, 1] - - c2 = cw**2 + ch**2 + eps - - b1_x1, b1_y1 = pred[:, 0], pred[:, 1] - b1_x2, b1_y2 = pred[:, 2], pred[:, 3] - b2_x1, b2_y1 = target[:, 0], target[:, 1] - b2_x2, b2_y2 = target[:, 2], target[:, 3] - - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps - - left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4 - right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4 - rho2 = left + right - - factor = 4 / math.pi**2 - v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) - - with torch.no_grad(): - alpha = (ious > 0.5).float() * v / (1 - ious + v) - - # CIoU - cious = ious - (rho2 / c2 + alpha * v) - loss = 1 - cious.clamp(min=-1.0, max=1.0) - return loss - - -@LOSSES.register_module() -class IoULoss(nn.Module): - """IoULoss. - - Computing the IoU loss between a set of predicted bboxes and target bboxes. - - Args: - linear (bool): If True, use linear scale of loss else determined - by mode. Default: False. - eps (float): Eps to avoid log(0). - reduction (str): Options are "none", "mean" and "sum". - loss_weight (float): Weight of loss. - mode (str): Loss scaling mode, including "linear", "square", and "log". - Default: 'log' - """ - - def __init__(self, - linear=False, - eps=1e-6, - reduction='mean', - loss_weight=1.0, - mode='log'): - super(IoULoss, self).__init__() - assert mode in ['linear', 'square', 'log'] - if linear: - mode = 'linear' - warnings.warn('DeprecationWarning: Setting "linear=True" in ' - 'IOULoss is deprecated, please use "mode=`linear`" ' - 'instead.') - self.mode = mode - self.linear = linear - self.eps = eps - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None, - **kwargs): - """Forward function. - - Args: - pred (torch.Tensor): The prediction. - target (torch.Tensor): The learning target of the prediction. - weight (torch.Tensor, optional): The weight of loss for each - prediction. Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Defaults to None. Options are "none", "mean" and "sum". 
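CIoU further adds an aspect-ratio consistency term `v`; a tiny sketch with illustrative widths and heights shows its scale.

```
import math
import torch

w1, h1 = torch.tensor(4.0), torch.tensor(2.0)   # predicted box width/height
w2, h2 = torch.tensor(2.0), torch.tensor(4.0)   # ground-truth width/height

# aspect-ratio consistency term used by CIoU on top of the DIoU penalty
v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)) ** 2
print(v)   # ~0.17: large when the aspect ratios disagree, 0 when they match
```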
- """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - if (weight is not None) and (not torch.any(weight > 0)) and ( - reduction != 'none'): - if pred.dim() == weight.dim() + 1: - weight = weight.unsqueeze(1) - return (pred * weight).sum() # 0 - if weight is not None and weight.dim() > 1: - # TODO: remove this in the future - # reduce the weight of shape (n, 4) to (n,) to match the - # iou_loss of shape (n,) - assert weight.shape == pred.shape - weight = weight.mean(-1) - loss = self.loss_weight * iou_loss( - pred, - target, - weight, - mode=self.mode, - eps=self.eps, - reduction=reduction, - avg_factor=avg_factor, - **kwargs) - return loss - - -@LOSSES.register_module() -class BoundedIoULoss(nn.Module): - - def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0): - super(BoundedIoULoss, self).__init__() - self.beta = beta - self.eps = eps - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None, - **kwargs): - if weight is not None and not torch.any(weight > 0): - if pred.dim() == weight.dim() + 1: - weight = weight.unsqueeze(1) - return (pred * weight).sum() # 0 - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - loss = self.loss_weight * bounded_iou_loss( - pred, - target, - weight, - beta=self.beta, - eps=self.eps, - reduction=reduction, - avg_factor=avg_factor, - **kwargs) - return loss - - -@LOSSES.register_module() -class GIoULoss(nn.Module): - - def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): - super(GIoULoss, self).__init__() - self.eps = eps - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None, - **kwargs): - if weight is not None and not torch.any(weight > 0): - if pred.dim() == weight.dim() + 1: - weight = weight.unsqueeze(1) - return (pred * weight).sum() # 0 - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - if weight is not None and weight.dim() > 1: - # TODO: remove this in the future - # reduce the weight of shape (n, 4) to (n,) to match the - # giou_loss of shape (n,) - assert weight.shape == pred.shape - weight = weight.mean(-1) - loss = self.loss_weight * giou_loss( - pred, - target, - weight, - eps=self.eps, - reduction=reduction, - avg_factor=avg_factor, - **kwargs) - return loss - - -@LOSSES.register_module() -class DIoULoss(nn.Module): - - def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): - super(DIoULoss, self).__init__() - self.eps = eps - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None, - **kwargs): - if weight is not None and not torch.any(weight > 0): - if pred.dim() == weight.dim() + 1: - weight = weight.unsqueeze(1) - return (pred * weight).sum() # 0 - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - if weight is not None and weight.dim() > 1: - # TODO: remove this in the future - # reduce the weight of shape (n, 4) to (n,) to match the - # giou_loss of shape (n,) - assert weight.shape == pred.shape - weight = weight.mean(-1) - 
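In MMDetection-style configs these registered losses are normally built from dicts. Assuming `build_loss` is available as in upstream mmdet, the snippet below (with illustrative values) builds and calls `GIoULoss` directly.

```
import torch
from mmdet.models import build_loss   # assumes the upstream mmdet 2.x layout

loss_bbox = build_loss(dict(type='GIoULoss', loss_weight=2.0))

pred = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
target = torch.tensor([[1., 1., 11., 11.], [5., 5., 15., 15.]])
print(loss_bbox(pred, target))   # mean GIoU loss over both pairs, scaled by loss_weight
```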
loss = self.loss_weight * diou_loss( - pred, - target, - weight, - eps=self.eps, - reduction=reduction, - avg_factor=avg_factor, - **kwargs) - return loss - - -@LOSSES.register_module() -class CIoULoss(nn.Module): - - def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): - super(CIoULoss, self).__init__() - self.eps = eps - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None, - **kwargs): - if weight is not None and not torch.any(weight > 0): - if pred.dim() == weight.dim() + 1: - weight = weight.unsqueeze(1) - return (pred * weight).sum() # 0 - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - if weight is not None and weight.dim() > 1: - # TODO: remove this in the future - # reduce the weight of shape (n, 4) to (n,) to match the - # giou_loss of shape (n,) - assert weight.shape == pred.shape - weight = weight.mean(-1) - loss = self.loss_weight * ciou_loss( - pred, - target, - weight, - eps=self.eps, - reduction=reduction, - avg_factor=avg_factor, - **kwargs) - return loss diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/kd_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/kd_loss.py deleted file mode 100644 index 75c19355fee4e20c03e553f2794e5d63446ad69b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/kd_loss.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import torch.nn as nn -import torch.nn.functional as F - -from ..builder import LOSSES -from .utils import weighted_loss - - -@mmcv.jit(derivate=True, coderize=True) -@weighted_loss -def knowledge_distillation_kl_div_loss(pred, - soft_label, - T, - detach_target=True): - r"""Loss function for knowledge distilling using KL divergence. - - Args: - pred (Tensor): Predicted logits with shape (N, n + 1). - soft_label (Tensor): Target logits with shape (N, N + 1). - T (int): Temperature for distillation. - detach_target (bool): Remove soft_label from automatic differentiation - - Returns: - torch.Tensor: Loss tensor with shape (N,). - """ - assert pred.size() == soft_label.size() - target = F.softmax(soft_label / T, dim=1) - if detach_target: - target = target.detach() - - kd_loss = F.kl_div( - F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * ( - T * T) - - return kd_loss - - -@LOSSES.register_module() -class KnowledgeDistillationKLDivLoss(nn.Module): - """Loss function for knowledge distilling using KL divergence. - - Args: - reduction (str): Options are `'none'`, `'mean'` and `'sum'`. - loss_weight (float): Loss weight of current loss. - T (int): Temperature for distillation. - """ - - def __init__(self, reduction='mean', loss_weight=1.0, T=10): - super(KnowledgeDistillationKLDivLoss, self).__init__() - assert T >= 1 - self.reduction = reduction - self.loss_weight = loss_weight - self.T = T - - def forward(self, - pred, - soft_label, - weight=None, - avg_factor=None, - reduction_override=None): - """Forward function. - - Args: - pred (Tensor): Predicted logits with shape (N, n + 1). - soft_label (Tensor): Target logits with shape (N, N + 1). - weight (torch.Tensor, optional): The weight of loss for each - prediction. Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. 
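The temperature-scaled KL divergence above is easy to reproduce standalone. The sketch below mirrors `knowledge_distillation_kl_div_loss` on random toy logits and shows where the `T * T` rescaling enters.

```
import torch
import torch.nn.functional as F

T = 10
pred = torch.randn(4, 81)          # student logits
soft_label = torch.randn(4, 81)    # teacher logits

target = F.softmax(soft_label / T, dim=1).detach()
kd = F.kl_div(F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (T * T)
print(kd)   # per-sample KD loss; T*T keeps gradient magnitudes comparable across temperatures
```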
- reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Defaults to None. - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - - reduction = ( - reduction_override if reduction_override else self.reduction) - - loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss( - pred, - soft_label, - weight, - reduction=reduction, - avg_factor=avg_factor, - T=self.T) - - return loss_kd diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/mse_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/mse_loss.py deleted file mode 100644 index 2ebd161f007a8cc6dea7b5cba1aac38ec342e3d2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/mse_loss.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.nn.functional as F - -from ..builder import LOSSES -from .utils import weighted_loss - - -@weighted_loss -def mse_loss(pred, target): - """Wrapper of mse loss.""" - return F.mse_loss(pred, target, reduction='none') - - -@LOSSES.register_module() -class MSELoss(nn.Module): - """MSELoss. - - Args: - reduction (str, optional): The method that reduces the loss to a - scalar. Options are "none", "mean" and "sum". - loss_weight (float, optional): The weight of the loss. Defaults to 1.0 - """ - - def __init__(self, reduction='mean', loss_weight=1.0): - super().__init__() - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None): - """Forward function of loss. - - Args: - pred (torch.Tensor): The prediction. - target (torch.Tensor): The learning target of the prediction. - weight (torch.Tensor, optional): Weight of the loss for each - prediction. Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Defaults to None. - - Returns: - torch.Tensor: The calculated loss - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - loss = self.loss_weight * mse_loss( - pred, target, weight, reduction=reduction, avg_factor=avg_factor) - return loss diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/pisa_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/pisa_loss.py deleted file mode 100644 index 6afea0e5d27ad5ca122a4d16e3fb627a92460772..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/pisa_loss.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import torch - -from mmdet.core import bbox_overlaps - - -@mmcv.jit(derivate=True, coderize=True) -def isr_p(cls_score, - bbox_pred, - bbox_targets, - rois, - sampling_results, - loss_cls, - bbox_coder, - k=2, - bias=0, - num_class=80): - """Importance-based Sample Reweighting (ISR_P), positive part. - - Args: - cls_score (Tensor): Predicted classification scores. - bbox_pred (Tensor): Predicted bbox deltas. - bbox_targets (tuple[Tensor]): A tuple of bbox targets, the are - labels, label_weights, bbox_targets, bbox_weights, respectively. - rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs - (two_stage) in shape (n, 5). - sampling_results (obj): Sampling results. 
- loss_cls (func): Classification loss func of the head. - bbox_coder (obj): BBox coder of the head. - k (float): Power of the non-linear mapping. - bias (float): Shift of the non-linear mapping. - num_class (int): Number of classes, default: 80. - - Return: - tuple([Tensor]): labels, imp_based_label_weights, bbox_targets, - bbox_target_weights - """ - - labels, label_weights, bbox_targets, bbox_weights = bbox_targets - pos_label_inds = ((labels >= 0) & - (labels < num_class)).nonzero().reshape(-1) - pos_labels = labels[pos_label_inds] - - # if no positive samples, return the original targets - num_pos = float(pos_label_inds.size(0)) - if num_pos == 0: - return labels, label_weights, bbox_targets, bbox_weights - - # merge pos_assigned_gt_inds of per image to a single tensor - gts = list() - last_max_gt = 0 - for i in range(len(sampling_results)): - gt_i = sampling_results[i].pos_assigned_gt_inds - gts.append(gt_i + last_max_gt) - if len(gt_i) != 0: - last_max_gt = gt_i.max() + 1 - gts = torch.cat(gts) - assert len(gts) == num_pos - - cls_score = cls_score.detach() - bbox_pred = bbox_pred.detach() - - # For single stage detectors, rois here indicate anchors, in shape (N, 4) - # For two stage detectors, rois are in shape (N, 5) - if rois.size(-1) == 5: - pos_rois = rois[pos_label_inds][:, 1:] - else: - pos_rois = rois[pos_label_inds] - - if bbox_pred.size(-1) > 4: - bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4) - pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4) - else: - pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4) - - # compute iou of the predicted bbox and the corresponding GT - pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4) - pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred) - target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target) - ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True) - - pos_imp_weights = label_weights[pos_label_inds] - # Two steps to compute IoU-HLR. 
Samples are first sorted by IoU locally, - # then sorted again within the same-rank group - max_l_num = pos_labels.bincount().max() - for label in pos_labels.unique(): - l_inds = (pos_labels == label).nonzero().view(-1) - l_gts = gts[l_inds] - for t in l_gts.unique(): - t_inds = l_inds[l_gts == t] - t_ious = ious[t_inds] - _, t_iou_rank_idx = t_ious.sort(descending=True) - _, t_iou_rank = t_iou_rank_idx.sort() - ious[t_inds] += max_l_num - t_iou_rank.float() - l_ious = ious[l_inds] - _, l_iou_rank_idx = l_ious.sort(descending=True) - _, l_iou_rank = l_iou_rank_idx.sort() # IoU-HLR - # linearly map HLR to label weights - pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num - - pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k) - - # normalize to make the new weighted loss value equal to the original loss - pos_loss_cls = loss_cls( - cls_score[pos_label_inds], pos_labels, reduction_override='none') - if pos_loss_cls.dim() > 1: - ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:, - None] - new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None] - else: - ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds] - new_pos_loss_cls = pos_loss_cls * pos_imp_weights - pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum() - pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio - label_weights[pos_label_inds] = pos_imp_weights - - bbox_targets = labels, label_weights, bbox_targets, bbox_weights - return bbox_targets - - -@mmcv.jit(derivate=True, coderize=True) -def carl_loss(cls_score, - labels, - bbox_pred, - bbox_targets, - loss_bbox, - k=1, - bias=0.2, - avg_factor=None, - sigmoid=False, - num_class=80): - """Classification-Aware Regression Loss (CARL). - - Args: - cls_score (Tensor): Predicted classification scores. - labels (Tensor): Targets of classification. - bbox_pred (Tensor): Predicted bbox deltas. - bbox_targets (Tensor): Target of bbox regression. - loss_bbox (func): Regression loss func of the head. - bbox_coder (obj): BBox coder of the head. - k (float): Power of the non-linear mapping. - bias (float): Shift of the non-linear mapping. - avg_factor (int): Average factor used in regression loss. - sigmoid (bool): Activation of the classification score. - num_class (int): Number of classes, default: 80. - - Return: - dict: CARL loss dict. - """ - pos_label_inds = ((labels >= 0) & - (labels < num_class)).nonzero().reshape(-1) - if pos_label_inds.numel() == 0: - return dict(loss_carl=cls_score.sum()[None] * 0.) 
- pos_labels = labels[pos_label_inds] - - # multiply pos_cls_score with the corresponding bbox weight - # and remain gradient - if sigmoid: - pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels] - else: - pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels] - carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k) - - # normalize carl_loss_weight to make its sum equal to num positive - num_pos = float(pos_cls_score.size(0)) - weight_ratio = num_pos / carl_loss_weights.sum() - carl_loss_weights *= weight_ratio - - if avg_factor is None: - avg_factor = bbox_targets.size(0) - # if is class agnostic, bbox pred is in shape (N, 4) - # otherwise, bbox pred is in shape (N, #classes, 4) - if bbox_pred.size(-1) > 4: - bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4) - pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels] - else: - pos_bbox_preds = bbox_pred[pos_label_inds] - ori_loss_reg = loss_bbox( - pos_bbox_preds, - bbox_targets[pos_label_inds], - reduction_override='none') / avg_factor - loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum() - return dict(loss_carl=loss_carl[None]) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/seesaw_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/seesaw_loss.py deleted file mode 100644 index 01040472d85a79fbb1f78fecb403057c40703f0c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/seesaw_loss.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..builder import LOSSES -from .accuracy import accuracy -from .cross_entropy_loss import cross_entropy -from .utils import weight_reduce_loss - - -def seesaw_ce_loss(cls_score, - labels, - label_weights, - cum_samples, - num_classes, - p, - q, - eps, - reduction='mean', - avg_factor=None): - """Calculate the Seesaw CrossEntropy loss. - - Args: - cls_score (torch.Tensor): The prediction with shape (N, C), - C is the number of classes. - labels (torch.Tensor): The learning label of the prediction. - label_weights (torch.Tensor): Sample-wise loss weight. - cum_samples (torch.Tensor): Cumulative samples for each category. - num_classes (int): The number of classes. - p (float): The ``p`` in the mitigation factor. - q (float): The ``q`` in the compenstation factor. - eps (float): The minimal value of divisor to smooth - the computation of compensation factor - reduction (str, optional): The method used to reduce the loss. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. 
- - Returns: - torch.Tensor: The calculated loss - """ - assert cls_score.size(-1) == num_classes - assert len(cum_samples) == num_classes - - onehot_labels = F.one_hot(labels, num_classes) - seesaw_weights = cls_score.new_ones(onehot_labels.size()) - - # mitigation factor - if p > 0: - sample_ratio_matrix = cum_samples[None, :].clamp( - min=1) / cum_samples[:, None].clamp(min=1) - index = (sample_ratio_matrix < 1.0).float() - sample_weights = sample_ratio_matrix.pow(p) * index + (1 - index) - mitigation_factor = sample_weights[labels.long(), :] - seesaw_weights = seesaw_weights * mitigation_factor - - # compensation factor - if q > 0: - scores = F.softmax(cls_score.detach(), dim=1) - self_scores = scores[ - torch.arange(0, len(scores)).to(scores.device).long(), - labels.long()] - score_matrix = scores / self_scores[:, None].clamp(min=eps) - index = (score_matrix > 1.0).float() - compensation_factor = score_matrix.pow(q) * index + (1 - index) - seesaw_weights = seesaw_weights * compensation_factor - - cls_score = cls_score + (seesaw_weights.log() * (1 - onehot_labels)) - - loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none') - - if label_weights is not None: - label_weights = label_weights.float() - loss = weight_reduce_loss( - loss, weight=label_weights, reduction=reduction, avg_factor=avg_factor) - return loss - - -@LOSSES.register_module() -class SeesawLoss(nn.Module): - """ - Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021) - arXiv: https://arxiv.org/abs/2008.10032 - - Args: - use_sigmoid (bool, optional): Whether the prediction uses sigmoid - of softmax. Only False is supported. - p (float, optional): The ``p`` in the mitigation factor. - Defaults to 0.8. - q (float, optional): The ``q`` in the compenstation factor. - Defaults to 2.0. - num_classes (int, optional): The number of classes. - Default to 1203 for LVIS v1 dataset. - eps (float, optional): The minimal value of divisor to smooth - the computation of compensation factor - reduction (str, optional): The method that reduces the loss to a - scalar. Options are "none", "mean" and "sum". - loss_weight (float, optional): The weight of the loss. Defaults to 1.0 - return_dict (bool, optional): Whether return the losses as a dict. - Default to True. - """ - - def __init__(self, - use_sigmoid=False, - p=0.8, - q=2.0, - num_classes=1203, - eps=1e-2, - reduction='mean', - loss_weight=1.0, - return_dict=True): - super(SeesawLoss, self).__init__() - assert not use_sigmoid - self.use_sigmoid = False - self.p = p - self.q = q - self.num_classes = num_classes - self.eps = eps - self.reduction = reduction - self.loss_weight = loss_weight - self.return_dict = return_dict - - # 0 for pos, 1 for neg - self.cls_criterion = seesaw_ce_loss - - # cumulative samples for each category - self.register_buffer( - 'cum_samples', - torch.zeros(self.num_classes + 1, dtype=torch.float)) - - # custom output channels of the classifier - self.custom_cls_channels = True - # custom activation of cls_score - self.custom_activation = True - # custom accuracy of the classsifier - self.custom_accuracy = True - - def _split_cls_score(self, cls_score): - # split cls_score to cls_score_classes and cls_score_objectness - assert cls_score.size(-1) == self.num_classes + 2 - cls_score_classes = cls_score[..., :-2] - cls_score_objectness = cls_score[..., -2:] - return cls_score_classes, cls_score_objectness - - def get_cls_channels(self, num_classes): - """Get custom classification channels. 
- - Args: - num_classes (int): The number of classes. - - Returns: - int: The custom classification channels. - """ - assert num_classes == self.num_classes - return num_classes + 2 - - def get_activation(self, cls_score): - """Get custom activation of cls_score. - - Args: - cls_score (torch.Tensor): The prediction with shape (N, C + 2). - - Returns: - torch.Tensor: The custom activation of cls_score with shape - (N, C + 1). - """ - cls_score_classes, cls_score_objectness = self._split_cls_score( - cls_score) - score_classes = F.softmax(cls_score_classes, dim=-1) - score_objectness = F.softmax(cls_score_objectness, dim=-1) - score_pos = score_objectness[..., [0]] - score_neg = score_objectness[..., [1]] - score_classes = score_classes * score_pos - scores = torch.cat([score_classes, score_neg], dim=-1) - return scores - - def get_accuracy(self, cls_score, labels): - """Get custom accuracy w.r.t. cls_score and labels. - - Args: - cls_score (torch.Tensor): The prediction with shape (N, C + 2). - labels (torch.Tensor): The learning label of the prediction. - - Returns: - Dict [str, torch.Tensor]: The accuracy for objectness and classes, - respectively. - """ - pos_inds = labels < self.num_classes - obj_labels = (labels == self.num_classes).long() - cls_score_classes, cls_score_objectness = self._split_cls_score( - cls_score) - acc_objectness = accuracy(cls_score_objectness, obj_labels) - acc_classes = accuracy(cls_score_classes[pos_inds], labels[pos_inds]) - acc = dict() - acc['acc_objectness'] = acc_objectness - acc['acc_classes'] = acc_classes - return acc - - def forward(self, - cls_score, - labels, - label_weights=None, - avg_factor=None, - reduction_override=None): - """Forward function. - - Args: - cls_score (torch.Tensor): The prediction with shape (N, C + 2). - labels (torch.Tensor): The learning label of the prediction. - label_weights (torch.Tensor, optional): Sample-wise loss weight. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction (str, optional): The method used to reduce the loss. - Options are "none", "mean" and "sum". - Returns: - torch.Tensor | Dict [str, torch.Tensor]: - if return_dict == False: The calculated loss | - if return_dict == True: The dict of calculated losses - for objectness and classes, respectively. 
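A rough illustration of the mitigation factor inside `seesaw_ce_loss` above: with the invented per-class counts below, gradients pushed from a label of class i onto a rarer class j are scaled by (N_j / N_i)^p.

import torch

cum_samples = torch.tensor([1000., 100., 10.])   # toy cumulative per-class counts
p = 0.8
ratio = cum_samples[None, :].clamp(min=1) / cum_samples[:, None].clamp(min=1)
index = (ratio < 1.0).float()                    # 1 where class j is rarer than class i
mitigation = ratio.pow(p) * index + (1 - index)  # row i = factors used when the label is i
print(mitigation)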
- """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - assert cls_score.size(-1) == self.num_classes + 2 - pos_inds = labels < self.num_classes - # 0 for pos, 1 for neg - obj_labels = (labels == self.num_classes).long() - - # accumulate the samples for each category - unique_labels = labels.unique() - for u_l in unique_labels: - inds_ = labels == u_l.item() - self.cum_samples[u_l] += inds_.sum() - - if label_weights is not None: - label_weights = label_weights.float() - else: - label_weights = labels.new_ones(labels.size(), dtype=torch.float) - - cls_score_classes, cls_score_objectness = self._split_cls_score( - cls_score) - # calculate loss_cls_classes (only need pos samples) - if pos_inds.sum() > 0: - loss_cls_classes = self.loss_weight * self.cls_criterion( - cls_score_classes[pos_inds], labels[pos_inds], - label_weights[pos_inds], self.cum_samples[:self.num_classes], - self.num_classes, self.p, self.q, self.eps, reduction, - avg_factor) - else: - loss_cls_classes = cls_score_classes[pos_inds].sum() - # calculate loss_cls_objectness - loss_cls_objectness = self.loss_weight * cross_entropy( - cls_score_objectness, obj_labels, label_weights, reduction, - avg_factor) - - if self.return_dict: - loss_cls = dict() - loss_cls['loss_cls_objectness'] = loss_cls_objectness - loss_cls['loss_cls_classes'] = loss_cls_classes - else: - loss_cls = loss_cls_classes + loss_cls_objectness - return loss_cls diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/smooth_l1_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/smooth_l1_loss.py deleted file mode 100644 index 551174672933cb0d23c93cbe22053e3910a9dcfb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/smooth_l1_loss.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import torch -import torch.nn as nn - -from ..builder import LOSSES -from .utils import weighted_loss - - -@mmcv.jit(derivate=True, coderize=True) -@weighted_loss -def smooth_l1_loss(pred, target, beta=1.0): - """Smooth L1 loss. - - Args: - pred (torch.Tensor): The prediction. - target (torch.Tensor): The learning target of the prediction. - beta (float, optional): The threshold in the piecewise function. - Defaults to 1.0. - - Returns: - torch.Tensor: Calculated loss - """ - assert beta > 0 - if target.numel() == 0: - return pred.sum() * 0 - - assert pred.size() == target.size() - diff = torch.abs(pred - target) - loss = torch.where(diff < beta, 0.5 * diff * diff / beta, - diff - 0.5 * beta) - return loss - - -@mmcv.jit(derivate=True, coderize=True) -@weighted_loss -def l1_loss(pred, target): - """L1 loss. - - Args: - pred (torch.Tensor): The prediction. - target (torch.Tensor): The learning target of the prediction. - - Returns: - torch.Tensor: Calculated loss - """ - if target.numel() == 0: - return pred.sum() * 0 - - assert pred.size() == target.size() - loss = torch.abs(pred - target) - return loss - - -@LOSSES.register_module() -class SmoothL1Loss(nn.Module): - """Smooth L1 loss. - - Args: - beta (float, optional): The threshold in the piecewise function. - Defaults to 1.0. - reduction (str, optional): The method to reduce the loss. - Options are "none", "mean" and "sum". Defaults to "mean". - loss_weight (float, optional): The weight of loss. 
- """ - - def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0): - super(SmoothL1Loss, self).__init__() - self.beta = beta - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None, - **kwargs): - """Forward function. - - Args: - pred (torch.Tensor): The prediction. - target (torch.Tensor): The learning target of the prediction. - weight (torch.Tensor, optional): The weight of loss for each - prediction. Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Defaults to None. - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - loss_bbox = self.loss_weight * smooth_l1_loss( - pred, - target, - weight, - beta=self.beta, - reduction=reduction, - avg_factor=avg_factor, - **kwargs) - return loss_bbox - - -@LOSSES.register_module() -class L1Loss(nn.Module): - """L1 loss. - - Args: - reduction (str, optional): The method to reduce the loss. - Options are "none", "mean" and "sum". - loss_weight (float, optional): The weight of loss. - """ - - def __init__(self, reduction='mean', loss_weight=1.0): - super(L1Loss, self).__init__() - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None): - """Forward function. - - Args: - pred (torch.Tensor): The prediction. - target (torch.Tensor): The learning target of the prediction. - weight (torch.Tensor, optional): The weight of loss for each - prediction. Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Defaults to None. - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - loss_bbox = self.loss_weight * l1_loss( - pred, target, weight, reduction=reduction, avg_factor=avg_factor) - return loss_bbox diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/utils.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/utils.py deleted file mode 100644 index 778237ebfd57160a3533d6d82b3d8fd7a36bf481..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/utils.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import functools - -import mmcv -import torch -import torch.nn.functional as F - - -def reduce_loss(loss, reduction): - """Reduce loss as specified. - - Args: - loss (Tensor): Elementwise loss tensor. - reduction (str): Options are "none", "mean" and "sum". - - Return: - Tensor: Reduced loss tensor. - """ - reduction_enum = F._Reduction.get_enum(reduction) - # none: 0, elementwise_mean:1, sum: 2 - if reduction_enum == 0: - return loss - elif reduction_enum == 1: - return loss.mean() - elif reduction_enum == 2: - return loss.sum() - - -@mmcv.jit(derivate=True, coderize=True) -def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): - """Apply element-wise weight and reduce loss. - - Args: - loss (Tensor): Element-wise loss. - weight (Tensor): Element-wise weights. 
- reduction (str): Same as built-in losses of PyTorch. - avg_factor (float): Average factor when computing the mean of losses. - - Returns: - Tensor: Processed loss values. - """ - # if weight is specified, apply element-wise weight - if weight is not None: - loss = loss * weight - - # if avg_factor is not specified, just reduce the loss - if avg_factor is None: - loss = reduce_loss(loss, reduction) - else: - # if reduction is mean, then average the loss by avg_factor - if reduction == 'mean': - # Avoid causing ZeroDivisionError when avg_factor is 0.0, - # i.e., all labels of an image belong to ignore index. - eps = torch.finfo(torch.float32).eps - loss = loss.sum() / (avg_factor + eps) - # if reduction is 'none', then do nothing, otherwise raise an error - elif reduction != 'none': - raise ValueError('avg_factor can not be used with reduction="sum"') - return loss - - -def weighted_loss(loss_func): - """Create a weighted version of a given loss function. - - To use this decorator, the loss function must have the signature like - `loss_func(pred, target, **kwargs)`. The function only needs to compute - element-wise loss without any reduction. This decorator will add weight - and reduction arguments to the function. The decorated function will have - the signature like `loss_func(pred, target, weight=None, reduction='mean', - avg_factor=None, **kwargs)`. - - :Example: - - >>> import torch - >>> @weighted_loss - >>> def l1_loss(pred, target): - >>> return (pred - target).abs() - - >>> pred = torch.Tensor([0, 2, 3]) - >>> target = torch.Tensor([1, 1, 1]) - >>> weight = torch.Tensor([1, 0, 1]) - - >>> l1_loss(pred, target) - tensor(1.3333) - >>> l1_loss(pred, target, weight) - tensor(1.) - >>> l1_loss(pred, target, reduction='none') - tensor([1., 1., 2.]) - >>> l1_loss(pred, target, weight, avg_factor=2) - tensor(1.5000) - """ - - @functools.wraps(loss_func) - def wrapper(pred, - target, - weight=None, - reduction='mean', - avg_factor=None, - **kwargs): - # get element-wise loss - loss = loss_func(pred, target, **kwargs) - loss = weight_reduce_loss(loss, weight, reduction, avg_factor) - return loss - - return wrapper diff --git a/cv/detection/co-detr/pytorch/mmdet/models/losses/varifocal_loss.py b/cv/detection/co-detr/pytorch/mmdet/models/losses/varifocal_loss.py deleted file mode 100644 index 42f0eef9c62e2a66b97914cf8b43a25112c4e79f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/losses/varifocal_loss.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import torch.nn as nn -import torch.nn.functional as F - -from ..builder import LOSSES -from .utils import weight_reduce_loss - - -@mmcv.jit(derivate=True, coderize=True) -def varifocal_loss(pred, - target, - weight=None, - alpha=0.75, - gamma=2.0, - iou_weighted=True, - reduction='mean', - avg_factor=None): - """`Varifocal Loss `_ - - Args: - pred (torch.Tensor): The prediction with shape (N, C), C is the - number of classes - target (torch.Tensor): The learning target of the iou-aware - classification score with shape (N, C), C is the number of classes. - weight (torch.Tensor, optional): The weight of loss for each - prediction. Defaults to None. - alpha (float, optional): A balance factor for the negative part of - Varifocal Loss, which is different from the alpha of Focal Loss. - Defaults to 0.75. - gamma (float, optional): The gamma for calculating the modulating - factor. Defaults to 2.0. 
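The decorator doctest above can be reproduced by hand: assuming the same toy tensors, the `avg_factor` branch of `weight_reduce_loss` simply divides the weighted sum by `avg_factor` (plus a tiny eps).

import torch

elementwise = torch.tensor([1., 1., 2.])   # |pred - target| from the doctest
weight = torch.tensor([1., 0., 1.])
avg_factor = 2
eps = torch.finfo(torch.float32).eps
print((elementwise * weight).sum() / (avg_factor + eps))  # tensor(1.5000)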
- iou_weighted (bool, optional): Whether to weight the loss of the - positive example with the iou target. Defaults to True. - reduction (str, optional): The method used to reduce the loss into - a scalar. Defaults to 'mean'. Options are "none", "mean" and - "sum". - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - """ - # pred and target should be of the same size - assert pred.size() == target.size() - pred_sigmoid = pred.sigmoid() - target = target.type_as(pred) - if iou_weighted: - focal_weight = target * (target > 0.0).float() + \ - alpha * (pred_sigmoid - target).abs().pow(gamma) * \ - (target <= 0.0).float() - else: - focal_weight = (target > 0.0).float() + \ - alpha * (pred_sigmoid - target).abs().pow(gamma) * \ - (target <= 0.0).float() - loss = F.binary_cross_entropy_with_logits( - pred, target, reduction='none') * focal_weight - loss = weight_reduce_loss(loss, weight, reduction, avg_factor) - return loss - - -@LOSSES.register_module() -class VarifocalLoss(nn.Module): - - def __init__(self, - use_sigmoid=True, - alpha=0.75, - gamma=2.0, - iou_weighted=True, - reduction='mean', - loss_weight=1.0): - """`Varifocal Loss `_ - - Args: - use_sigmoid (bool, optional): Whether the prediction is - used for sigmoid or softmax. Defaults to True. - alpha (float, optional): A balance factor for the negative part of - Varifocal Loss, which is different from the alpha of Focal - Loss. Defaults to 0.75. - gamma (float, optional): The gamma for calculating the modulating - factor. Defaults to 2.0. - iou_weighted (bool, optional): Whether to weight the loss of the - positive examples with the iou target. Defaults to True. - reduction (str, optional): The method used to reduce the loss into - a scalar. Defaults to 'mean'. Options are "none", "mean" and - "sum". - loss_weight (float, optional): Weight of loss. Defaults to 1.0. - """ - super(VarifocalLoss, self).__init__() - assert use_sigmoid is True, \ - 'Only sigmoid varifocal loss supported now.' - assert alpha >= 0.0 - self.use_sigmoid = use_sigmoid - self.alpha = alpha - self.gamma = gamma - self.iou_weighted = iou_weighted - self.reduction = reduction - self.loss_weight = loss_weight - - def forward(self, - pred, - target, - weight=None, - avg_factor=None, - reduction_override=None): - """Forward function. - - Args: - pred (torch.Tensor): The prediction. - target (torch.Tensor): The learning target of the prediction. - weight (torch.Tensor, optional): The weight of loss for each - prediction. Defaults to None. - avg_factor (int, optional): Average factor that is used to average - the loss. Defaults to None. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Options are "none", "mean" and "sum". 
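A toy walk-through of the weighting inside `varifocal_loss` above: positives are weighted by their IoU target, negatives by alpha * |p - target|^gamma. All values below are made up.

import torch
import torch.nn.functional as F

pred = torch.tensor([[2.0, -1.0]])     # logits for one sample, two classes (toy)
target = torch.tensor([[0.9, 0.0]])    # IoU-aware target, 0 for the negative class
alpha, gamma = 0.75, 2.0
p = pred.sigmoid()
focal_weight = target * (target > 0.0).float() + \
    alpha * (p - target).abs().pow(gamma) * (target <= 0.0).float()
loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none') * focal_weight
print(loss)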
- - Returns: - torch.Tensor: The calculated loss - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - if self.use_sigmoid: - loss_cls = self.loss_weight * varifocal_loss( - pred, - target, - weight, - alpha=self.alpha, - gamma=self.gamma, - iou_weighted=self.iou_weighted, - reduction=reduction, - avg_factor=avg_factor) - else: - raise NotImplementedError - return loss_cls diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/__init__.py deleted file mode 100644 index 6f2fa823fb35fdd90c07065cc93238d08385ce8b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .bfp import BFP -from .channel_mapper import ChannelMapper -from .ct_resnet_neck import CTResNetNeck -from .dilated_encoder import DilatedEncoder -from .dyhead import DyHead -from .fpg import FPG -from .fpn import FPN -from .fpn_carafe import FPN_CARAFE -from .hrfpn import HRFPN -from .nas_fpn import NASFPN -from .nasfcos_fpn import NASFCOS_FPN -from .pafpn import PAFPN -from .rfp import RFP -from .ssd_neck import SSDNeck -from .yolo_neck import YOLOV3Neck -from .yolox_pafpn import YOLOXPAFPN - -__all__ = [ - 'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN', - 'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder', - 'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/bfp.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/bfp.py deleted file mode 100644 index 9fdfa036ddf693bbb7fbf77fe2089c2f98a2bb93..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/bfp.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.cnn.bricks import NonLocal2d -from mmcv.runner import BaseModule - -from ..builder import NECKS - - -@NECKS.register_module() -class BFP(BaseModule): - """BFP (Balanced Feature Pyramids) - - BFP takes multi-level features as inputs and gather them into a single one, - then refine the gathered feature and scatter the refined results to - multi-level features. This module is used in Libra R-CNN (CVPR 2019), see - the paper `Libra R-CNN: Towards Balanced Learning for Object Detection - `_ for details. - - Args: - in_channels (int): Number of input channels (feature maps of all levels - should have the same channels). - num_levels (int): Number of input feature levels. - conv_cfg (dict): The config dict for convolution layers. - norm_cfg (dict): The config dict for normalization layers. - refine_level (int): Index of integration and refine level of BSF in - multi-level features from bottom to top. - refine_type (str): Type of the refine op, currently support - [None, 'conv', 'non_local']. - init_cfg (dict or list[dict], optional): Initialization config dict. 
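Using plain tensors with invented sizes, the gather step described in the BFP docstring above resizes every level to the `refine_level` resolution and averages them before refinement.

import torch
import torch.nn.functional as F

feats = [torch.rand(1, 8, s, s) for s in (64, 32, 16, 8)]  # toy multi-level features
refine_level = 2
gather_size = feats[refine_level].shape[2:]
gathered = [
    F.adaptive_max_pool2d(f, output_size=gather_size) if i < refine_level
    else F.interpolate(f, size=gather_size, mode='nearest')
    for i, f in enumerate(feats)
]
bsf = sum(gathered) / len(gathered)
print(bsf.shape)  # torch.Size([1, 8, 16, 16])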
- """ - - def __init__(self, - in_channels, - num_levels, - refine_level=2, - refine_type=None, - conv_cfg=None, - norm_cfg=None, - init_cfg=dict( - type='Xavier', layer='Conv2d', distribution='uniform')): - super(BFP, self).__init__(init_cfg) - assert refine_type in [None, 'conv', 'non_local'] - - self.in_channels = in_channels - self.num_levels = num_levels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - self.refine_level = refine_level - self.refine_type = refine_type - assert 0 <= self.refine_level < self.num_levels - - if self.refine_type == 'conv': - self.refine = ConvModule( - self.in_channels, - self.in_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) - elif self.refine_type == 'non_local': - self.refine = NonLocal2d( - self.in_channels, - reduction=1, - use_scale=False, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) - - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == self.num_levels - - # step 1: gather multi-level features by resize and average - feats = [] - gather_size = inputs[self.refine_level].size()[2:] - for i in range(self.num_levels): - if i < self.refine_level: - gathered = F.adaptive_max_pool2d( - inputs[i], output_size=gather_size) - else: - gathered = F.interpolate( - inputs[i], size=gather_size, mode='nearest') - feats.append(gathered) - - bsf = sum(feats) / len(feats) - - # step 2: refine gathered features - if self.refine_type is not None: - bsf = self.refine(bsf) - - # step 3: scatter refined features to multi-levels by a residual path - outs = [] - for i in range(self.num_levels): - out_size = inputs[i].size()[2:] - if i < self.refine_level: - residual = F.interpolate(bsf, size=out_size, mode='nearest') - else: - residual = F.adaptive_max_pool2d(bsf, output_size=out_size) - outs.append(residual + inputs[i]) - - return tuple(outs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/channel_mapper.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/channel_mapper.py deleted file mode 100644 index 774bdb1d7a522583df462fc09177a6a6ee899f17..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/channel_mapper.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule - -from ..builder import NECKS - - -@NECKS.register_module() -class ChannelMapper(BaseModule): - r"""Channel Mapper to reduce/increase channels of backbone features. - - This is used to reduce/increase channels of backbone features. - - Args: - in_channels (List[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale). - kernel_size (int, optional): kernel_size for reducing channels (used - at each scale). Default: 3. - conv_cfg (dict, optional): Config dict for convolution layer. - Default: None. - norm_cfg (dict, optional): Config dict for normalization layer. - Default: None. - act_cfg (dict, optional): Config dict for activation layer in - ConvModule. Default: dict(type='ReLU'). - num_outs (int, optional): Number of output feature maps. There - would be extra_convs when num_outs larger than the length - of in_channels. - init_cfg (dict or list[dict], optional): Initialization config dict. - Example: - >>> import torch - >>> in_channels = [2, 3, 5, 7] - >>> scales = [340, 170, 84, 43] - >>> inputs = [torch.rand(1, c, s, s) - ... 
for c, s in zip(in_channels, scales)] - >>> self = ChannelMapper(in_channels, 11, 3).eval() - >>> outputs = self.forward(inputs) - >>> for i in range(len(outputs)): - ... print(f'outputs[{i}].shape = {outputs[i].shape}') - outputs[0].shape = torch.Size([1, 11, 340, 340]) - outputs[1].shape = torch.Size([1, 11, 170, 170]) - outputs[2].shape = torch.Size([1, 11, 84, 84]) - outputs[3].shape = torch.Size([1, 11, 43, 43]) - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size=3, - conv_cfg=None, - norm_cfg=None, - act_cfg=dict(type='ReLU'), - num_outs=None, - init_cfg=dict( - type='Xavier', layer='Conv2d', distribution='uniform')): - super(ChannelMapper, self).__init__(init_cfg) - assert isinstance(in_channels, list) - self.extra_convs = None - if num_outs is None: - num_outs = len(in_channels) - self.convs = nn.ModuleList() - for in_channel in in_channels: - self.convs.append( - ConvModule( - in_channel, - out_channels, - kernel_size, - padding=(kernel_size - 1) // 2, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - if num_outs > len(in_channels): - self.extra_convs = nn.ModuleList() - for i in range(len(in_channels), num_outs): - if i == len(in_channels): - in_channel = in_channels[-1] - else: - in_channel = out_channels - self.extra_convs.append( - ConvModule( - in_channel, - out_channels, - 3, - stride=2, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == len(self.convs) - outs = [self.convs[i](inputs[i]) for i in range(len(inputs))] - if self.extra_convs: - for i in range(len(self.extra_convs)): - if i == 0: - outs.append(self.extra_convs[0](inputs[-1])) - else: - outs.append(self.extra_convs[i](outs[-1])) - return tuple(outs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/ct_resnet_neck.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/ct_resnet_neck.py deleted file mode 100644 index 40eb2685767fbf0f365529eefc160e735608bab5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/ct_resnet_neck.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule, auto_fp16 - -from mmdet.models.builder import NECKS - - -@NECKS.register_module() -class CTResNetNeck(BaseModule): - """The neck used in `CenterNet `_ for - object classification and box regression. - - Args: - in_channel (int): Number of input channels. - num_deconv_filters (tuple[int]): Number of filters per stage. - num_deconv_kernels (tuple[int]): Number of kernels per stage. - use_dcn (bool): If True, use DCNv2. Default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - in_channel, - num_deconv_filters, - num_deconv_kernels, - use_dcn=True, - init_cfg=None): - super(CTResNetNeck, self).__init__(init_cfg) - assert len(num_deconv_filters) == len(num_deconv_kernels) - self.fp16_enabled = False - self.use_dcn = use_dcn - self.in_channel = in_channel - self.deconv_layers = self._make_deconv_layer(num_deconv_filters, - num_deconv_kernels) - - def _make_deconv_layer(self, num_deconv_filters, num_deconv_kernels): - """use deconv layers to upsample backbone's output.""" - layers = [] - for i in range(len(num_deconv_filters)): - feat_channel = num_deconv_filters[i] - conv_module = ConvModule( - self.in_channel, - feat_channel, - 3, - padding=1, - conv_cfg=dict(type='DCNv2') if self.use_dcn else None, - norm_cfg=dict(type='BN')) - layers.append(conv_module) - upsample_module = ConvModule( - feat_channel, - feat_channel, - num_deconv_kernels[i], - stride=2, - padding=1, - conv_cfg=dict(type='deconv'), - norm_cfg=dict(type='BN')) - layers.append(upsample_module) - self.in_channel = feat_channel - - return nn.Sequential(*layers) - - def init_weights(self): - for m in self.modules(): - if isinstance(m, nn.ConvTranspose2d): - # In order to be consistent with the source code, - # reset the ConvTranspose2d initialization parameters - m.reset_parameters() - # Simulated bilinear upsampling kernel - w = m.weight.data - f = math.ceil(w.size(2) / 2) - c = (2 * f - 1 - f % 2) / (2. * f) - for i in range(w.size(2)): - for j in range(w.size(3)): - w[0, 0, i, j] = \ - (1 - math.fabs(i / f - c)) * ( - 1 - math.fabs(j / f - c)) - for c in range(1, w.size(0)): - w[c, 0, :, :] = w[0, 0, :, :] - elif isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - # self.use_dcn is False - elif not self.use_dcn and isinstance(m, nn.Conv2d): - # In order to be consistent with the source code, - # reset the Conv2d initialization parameters - m.reset_parameters() - - @auto_fp16() - def forward(self, inputs): - assert isinstance(inputs, (list, tuple)) - outs = self.deconv_layers(inputs[-1]) - return outs, diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/dilated_encoder.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/dilated_encoder.py deleted file mode 100644 index 79a8f4bb31b3387154a75c5c915df6bc59fc3638..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/dilated_encoder.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -from mmcv.cnn import (ConvModule, caffe2_xavier_init, constant_init, is_norm, - normal_init) -from torch.nn import BatchNorm2d - -from ..builder import NECKS - - -class Bottleneck(nn.Module): - """Bottleneck block for DilatedEncoder used in `YOLOF. - - `. - - The Bottleneck contains three ConvLayers and one residual connection. - - Args: - in_channels (int): The number of input channels. - mid_channels (int): The number of middle output channels. - dilation (int): Dilation rate. - norm_cfg (dict): Dictionary to construct and config norm layer. 
- """ - - def __init__(self, - in_channels, - mid_channels, - dilation, - norm_cfg=dict(type='BN', requires_grad=True)): - super(Bottleneck, self).__init__() - self.conv1 = ConvModule( - in_channels, mid_channels, 1, norm_cfg=norm_cfg) - self.conv2 = ConvModule( - mid_channels, - mid_channels, - 3, - padding=dilation, - dilation=dilation, - norm_cfg=norm_cfg) - self.conv3 = ConvModule( - mid_channels, in_channels, 1, norm_cfg=norm_cfg) - - def forward(self, x): - identity = x - out = self.conv1(x) - out = self.conv2(out) - out = self.conv3(out) - out = out + identity - return out - - -@NECKS.register_module() -class DilatedEncoder(nn.Module): - """Dilated Encoder for YOLOF `. - - This module contains two types of components: - - the original FPN lateral convolution layer and fpn convolution layer, - which are 1x1 conv + 3x3 conv - - the dilated residual block - - Args: - in_channels (int): The number of input channels. - out_channels (int): The number of output channels. - block_mid_channels (int): The number of middle block output channels - num_residual_blocks (int): The number of residual blocks. - block_dilations (list): The list of residual blocks dilation. - """ - - def __init__(self, in_channels, out_channels, block_mid_channels, - num_residual_blocks, block_dilations): - super(DilatedEncoder, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.block_mid_channels = block_mid_channels - self.num_residual_blocks = num_residual_blocks - self.block_dilations = block_dilations - self._init_layers() - - def _init_layers(self): - self.lateral_conv = nn.Conv2d( - self.in_channels, self.out_channels, kernel_size=1) - self.lateral_norm = BatchNorm2d(self.out_channels) - self.fpn_conv = nn.Conv2d( - self.out_channels, self.out_channels, kernel_size=3, padding=1) - self.fpn_norm = BatchNorm2d(self.out_channels) - encoder_blocks = [] - for i in range(self.num_residual_blocks): - dilation = self.block_dilations[i] - encoder_blocks.append( - Bottleneck( - self.out_channels, - self.block_mid_channels, - dilation=dilation)) - self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks) - - def init_weights(self): - caffe2_xavier_init(self.lateral_conv) - caffe2_xavier_init(self.fpn_conv) - for m in [self.lateral_norm, self.fpn_norm]: - constant_init(m, 1) - for m in self.dilated_encoder_blocks.modules(): - if isinstance(m, nn.Conv2d): - normal_init(m, mean=0, std=0.01) - if is_norm(m): - constant_init(m, 1) - - def forward(self, feature): - out = self.lateral_norm(self.lateral_conv(feature[-1])) - out = self.fpn_norm(self.fpn_conv(out)) - return self.dilated_encoder_blocks(out), diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/dyhead.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/dyhead.py deleted file mode 100644 index 649bb4ca2f46e1e7ec9324083d5f7e7d7ec1ab3f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/dyhead.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import (build_activation_layer, build_norm_layer, constant_init, - normal_init) -from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d -from mmcv.runner import BaseModule - -from ..builder import NECKS -from ..utils import DyReLU - -# Reference: -# https://github.com/microsoft/DynamicHead -# https://github.com/jshilong/SEPC - - -class DyDCNv2(nn.Module): - """ModulatedDeformConv2d with normalization layer used in DyHead. 
- - This module cannot be configured with `conv_cfg=dict(type='DCNv2')` - because DyHead calculates offset and mask from middle-level feature. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - stride (int | tuple[int], optional): Stride of the convolution. - Default: 1. - norm_cfg (dict, optional): Config dict for normalization layer. - Default: dict(type='GN', num_groups=16, requires_grad=True). - """ - - def __init__(self, - in_channels, - out_channels, - stride=1, - norm_cfg=dict(type='GN', num_groups=16, requires_grad=True)): - super().__init__() - self.with_norm = norm_cfg is not None - bias = not self.with_norm - self.conv = ModulatedDeformConv2d( - in_channels, out_channels, 3, stride=stride, padding=1, bias=bias) - if self.with_norm: - self.norm = build_norm_layer(norm_cfg, out_channels)[1] - - def forward(self, x, offset, mask): - """Forward function.""" - x = self.conv(x.contiguous(), offset.contiguous(), mask) - if self.with_norm: - x = self.norm(x) - return x - - -class DyHeadBlock(nn.Module): - """DyHead Block with three types of attention. - - HSigmoid arguments in default act_cfg follow official code, not paper. - https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - zero_init_offset (bool, optional): Whether to use zero init for - `spatial_conv_offset`. Default: True. - act_cfg (dict, optional): Config dict for the last activation layer of - scale-aware attention. Default: dict(type='HSigmoid', bias=3.0, - divisor=6.0). - """ - - def __init__(self, - in_channels, - out_channels, - zero_init_offset=True, - act_cfg=dict(type='HSigmoid', bias=3.0, divisor=6.0)): - super().__init__() - self.zero_init_offset = zero_init_offset - # (offset_x, offset_y, mask) * kernel_size_y * kernel_size_x - self.offset_and_mask_dim = 3 * 3 * 3 - self.offset_dim = 2 * 3 * 3 - - self.spatial_conv_high = DyDCNv2(in_channels, out_channels) - self.spatial_conv_mid = DyDCNv2(in_channels, out_channels) - self.spatial_conv_low = DyDCNv2(in_channels, out_channels, stride=2) - self.spatial_conv_offset = nn.Conv2d( - in_channels, self.offset_and_mask_dim, 3, padding=1) - self.scale_attn_module = nn.Sequential( - nn.AdaptiveAvgPool2d(1), nn.Conv2d(out_channels, 1, 1), - nn.ReLU(inplace=True), build_activation_layer(act_cfg)) - self.task_attn_module = DyReLU(out_channels) - self._init_weights() - - def _init_weights(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - normal_init(m, 0, 0.01) - if self.zero_init_offset: - constant_init(self.spatial_conv_offset, 0) - - def forward(self, x): - """Forward function.""" - outs = [] - for level in range(len(x)): - # calculate offset and mask of DCNv2 from middle-level feature - offset_and_mask = self.spatial_conv_offset(x[level]) - offset = offset_and_mask[:, :self.offset_dim, :, :] - mask = offset_and_mask[:, self.offset_dim:, :, :].sigmoid() - - mid_feat = self.spatial_conv_mid(x[level], offset, mask) - sum_feat = mid_feat * self.scale_attn_module(mid_feat) - summed_levels = 1 - if level > 0: - low_feat = self.spatial_conv_low(x[level - 1], offset, mask) - sum_feat = sum_feat + \ - low_feat * self.scale_attn_module(low_feat) - summed_levels += 1 - if level < len(x) - 1: - # this upsample order is weird, but faster than natural order - # https://github.com/microsoft/DynamicHead/issues/25 - high_feat = F.interpolate( - self.spatial_conv_high(x[level + 1], offset, mask), - 
size=x[level].shape[-2:], - mode='bilinear', - align_corners=True) - sum_feat = sum_feat + high_feat * \ - self.scale_attn_module(high_feat) - summed_levels += 1 - outs.append(self.task_attn_module(sum_feat / summed_levels)) - - return outs - - -@NECKS.register_module() -class DyHead(BaseModule): - """DyHead neck consisting of multiple DyHead Blocks. - - See `Dynamic Head: Unifying Object Detection Heads with Attentions - `_ for details. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - num_blocks (int, optional): Number of DyHead Blocks. Default: 6. - zero_init_offset (bool, optional): Whether to use zero init for - `spatial_conv_offset`. Default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. - """ - - def __init__(self, - in_channels, - out_channels, - num_blocks=6, - zero_init_offset=True, - init_cfg=None): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - super().__init__(init_cfg=init_cfg) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_blocks = num_blocks - self.zero_init_offset = zero_init_offset - - dyhead_blocks = [] - for i in range(num_blocks): - in_channels = self.in_channels if i == 0 else self.out_channels - dyhead_blocks.append( - DyHeadBlock( - in_channels, - self.out_channels, - zero_init_offset=zero_init_offset)) - self.dyhead_blocks = nn.Sequential(*dyhead_blocks) - - def forward(self, inputs): - """Forward function.""" - assert isinstance(inputs, (tuple, list)) - outs = self.dyhead_blocks(inputs) - return tuple(outs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/fpg.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/fpg.py deleted file mode 100644 index a6a2a12ed415bbb517b056d01172a83f6e30833d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/fpg.py +++ /dev/null @@ -1,406 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule - -from ..builder import NECKS - - -class Transition(BaseModule): - """Base class for transition. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - """ - - def __init__(self, in_channels, out_channels, init_cfg=None): - super().__init__(init_cfg) - self.in_channels = in_channels - self.out_channels = out_channels - - def forward(x): - pass - - -class UpInterpolationConv(Transition): - """A transition used for up-sampling. - - Up-sample the input by interpolation then refines the feature by - a convolution layer. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - scale_factor (int): Up-sampling factor. Default: 2. - mode (int): Interpolation mode. Default: nearest. - align_corners (bool): Whether align corners when interpolation. - Default: None. - kernel_size (int): Kernel size for the conv. Default: 3. 
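A shape-only sketch of how `DyHeadBlock` above splits the offset/mask prediction for its 3x3 modulated deformable convolution; the feature map is a dummy tensor.

import torch

offset_and_mask = torch.randn(1, 3 * 3 * 3, 20, 20)     # 27 = (dx, dy, mask) * 3 * 3
offset_dim = 2 * 3 * 3
offset = offset_and_mask[:, :offset_dim, :, :]           # 18 offset channels
mask = offset_and_mask[:, offset_dim:, :, :].sigmoid()   # 9 mask channels in (0, 1)
print(offset.shape, mask.shape)  # torch.Size([1, 18, 20, 20]) torch.Size([1, 9, 20, 20])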
- """ - - def __init__(self, - in_channels, - out_channels, - scale_factor=2, - mode='nearest', - align_corners=None, - kernel_size=3, - init_cfg=None, - **kwargs): - super().__init__(in_channels, out_channels, init_cfg) - self.mode = mode - self.scale_factor = scale_factor - self.align_corners = align_corners - self.conv = ConvModule( - in_channels, - out_channels, - kernel_size, - padding=(kernel_size - 1) // 2, - **kwargs) - - def forward(self, x): - x = F.interpolate( - x, - scale_factor=self.scale_factor, - mode=self.mode, - align_corners=self.align_corners) - x = self.conv(x) - return x - - -class LastConv(Transition): - """A transition used for refining the output of the last stage. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - num_inputs (int): Number of inputs of the FPN features. - kernel_size (int): Kernel size for the conv. Default: 3. - """ - - def __init__(self, - in_channels, - out_channels, - num_inputs, - kernel_size=3, - init_cfg=None, - **kwargs): - super().__init__(in_channels, out_channels, init_cfg) - self.num_inputs = num_inputs - self.conv_out = ConvModule( - in_channels, - out_channels, - kernel_size, - padding=(kernel_size - 1) // 2, - **kwargs) - - def forward(self, inputs): - assert len(inputs) == self.num_inputs - return self.conv_out(inputs[-1]) - - -@NECKS.register_module() -class FPG(BaseModule): - """FPG. - - Implementation of `Feature Pyramid Grids (FPG) - `_. - This implementation only gives the basic structure stated in the paper. - But users can implement different type of transitions to fully explore the - the potential power of the structure of FPG. - - Args: - in_channels (int): Number of input channels (feature maps of all levels - should have the same channels). - out_channels (int): Number of output channels (used at each scale) - num_outs (int): Number of output scales. - stack_times (int): The number of times the pyramid architecture will - be stacked. - paths (list[str]): Specify the path order of each stack level. - Each element in the list should be either 'bu' (bottom-up) or - 'td' (top-down). - inter_channels (int): Number of inter channels. - same_up_trans (dict): Transition that goes down at the same stage. - same_down_trans (dict): Transition that goes up at the same stage. - across_lateral_trans (dict): Across-pathway same-stage - across_down_trans (dict): Across-pathway bottom-up connection. - across_up_trans (dict): Across-pathway top-down connection. - across_skip_trans (dict): Across-pathway skip connection. - output_trans (dict): Transition that trans the output of the - last stage. - start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. - add_extra_convs (bool): It decides whether to add conv - layers on top of the original feature maps. Default to False. - If True, its actual mode is specified by `extra_convs_on_inputs`. - norm_cfg (dict): Config dict for normalization layer. Default: None. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - transition_types = { - 'conv': ConvModule, - 'interpolation_conv': UpInterpolationConv, - 'last_conv': LastConv, - } - - def __init__(self, - in_channels, - out_channels, - num_outs, - stack_times, - paths, - inter_channels=None, - same_down_trans=None, - same_up_trans=dict( - type='conv', kernel_size=3, stride=2, padding=1), - across_lateral_trans=dict(type='conv', kernel_size=1), - across_down_trans=dict(type='conv', kernel_size=3), - across_up_trans=None, - across_skip_trans=dict(type='identity'), - output_trans=dict(type='last_conv', kernel_size=3), - start_level=0, - end_level=-1, - add_extra_convs=False, - norm_cfg=None, - skip_inds=None, - init_cfg=[ - dict(type='Caffe2Xavier', layer='Conv2d'), - dict( - type='Constant', - layer=[ - '_BatchNorm', '_InstanceNorm', 'GroupNorm', - 'LayerNorm' - ], - val=1.0) - ]): - super(FPG, self).__init__(init_cfg) - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) - self.num_outs = num_outs - if inter_channels is None: - self.inter_channels = [out_channels for _ in range(num_outs)] - elif isinstance(inter_channels, int): - self.inter_channels = [inter_channels for _ in range(num_outs)] - else: - assert isinstance(inter_channels, list) - assert len(inter_channels) == num_outs - self.inter_channels = inter_channels - self.stack_times = stack_times - self.paths = paths - assert isinstance(paths, list) and len(paths) == stack_times - for d in paths: - assert d in ('bu', 'td') - - self.same_down_trans = same_down_trans - self.same_up_trans = same_up_trans - self.across_lateral_trans = across_lateral_trans - self.across_down_trans = across_down_trans - self.across_up_trans = across_up_trans - self.output_trans = output_trans - self.across_skip_trans = across_skip_trans - - self.with_bias = norm_cfg is None - # skip inds must be specified if across skip trans is not None - if self.across_skip_trans is not None: - skip_inds is not None - self.skip_inds = skip_inds - assert len(self.skip_inds[0]) <= self.stack_times - - if end_level == -1 or end_level == self.num_ins - 1: - self.backbone_end_level = self.num_ins - assert num_outs >= self.num_ins - start_level - else: - # if end_level is not the last level, no extra level is allowed - self.backbone_end_level = end_level + 1 - assert end_level < self.num_ins - assert num_outs == end_level - start_level + 1 - self.start_level = start_level - self.end_level = end_level - self.add_extra_convs = add_extra_convs - - # build lateral 1x1 convs to reduce channels - self.lateral_convs = nn.ModuleList() - for i in range(self.start_level, self.backbone_end_level): - l_conv = nn.Conv2d(self.in_channels[i], - self.inter_channels[i - self.start_level], 1) - self.lateral_convs.append(l_conv) - - extra_levels = num_outs - self.backbone_end_level + self.start_level - self.extra_downsamples = nn.ModuleList() - for i in range(extra_levels): - if self.add_extra_convs: - fpn_idx = self.backbone_end_level - self.start_level + i - extra_conv = nn.Conv2d( - self.inter_channels[fpn_idx - 1], - self.inter_channels[fpn_idx], - 3, - stride=2, - padding=1) - self.extra_downsamples.append(extra_conv) - else: - self.extra_downsamples.append(nn.MaxPool2d(1, stride=2)) - - self.fpn_transitions = nn.ModuleList() # stack times - for s in range(self.stack_times): - stage_trans = nn.ModuleList() # num of feature levels - for i in range(self.num_outs): - # same, across_lateral, across_down, across_up - trans = nn.ModuleDict() - if s in 
self.skip_inds[i]: - stage_trans.append(trans) - continue - # build same-stage down trans (used in bottom-up paths) - if i == 0 or self.same_up_trans is None: - same_up_trans = None - else: - same_up_trans = self.build_trans( - self.same_up_trans, self.inter_channels[i - 1], - self.inter_channels[i]) - trans['same_up'] = same_up_trans - # build same-stage up trans (used in top-down paths) - if i == self.num_outs - 1 or self.same_down_trans is None: - same_down_trans = None - else: - same_down_trans = self.build_trans( - self.same_down_trans, self.inter_channels[i + 1], - self.inter_channels[i]) - trans['same_down'] = same_down_trans - # build across lateral trans - across_lateral_trans = self.build_trans( - self.across_lateral_trans, self.inter_channels[i], - self.inter_channels[i]) - trans['across_lateral'] = across_lateral_trans - # build across down trans - if i == self.num_outs - 1 or self.across_down_trans is None: - across_down_trans = None - else: - across_down_trans = self.build_trans( - self.across_down_trans, self.inter_channels[i + 1], - self.inter_channels[i]) - trans['across_down'] = across_down_trans - # build across up trans - if i == 0 or self.across_up_trans is None: - across_up_trans = None - else: - across_up_trans = self.build_trans( - self.across_up_trans, self.inter_channels[i - 1], - self.inter_channels[i]) - trans['across_up'] = across_up_trans - if self.across_skip_trans is None: - across_skip_trans = None - else: - across_skip_trans = self.build_trans( - self.across_skip_trans, self.inter_channels[i - 1], - self.inter_channels[i]) - trans['across_skip'] = across_skip_trans - # build across_skip trans - stage_trans.append(trans) - self.fpn_transitions.append(stage_trans) - - self.output_transition = nn.ModuleList() # output levels - for i in range(self.num_outs): - trans = self.build_trans( - self.output_trans, - self.inter_channels[i], - self.out_channels, - num_inputs=self.stack_times + 1) - self.output_transition.append(trans) - - self.relu = nn.ReLU(inplace=True) - - def build_trans(self, cfg, in_channels, out_channels, **extra_args): - cfg_ = cfg.copy() - trans_type = cfg_.pop('type') - trans_cls = self.transition_types[trans_type] - return trans_cls(in_channels, out_channels, **cfg_, **extra_args) - - def fuse(self, fuse_dict): - out = None - for item in fuse_dict.values(): - if item is not None: - if out is None: - out = item - else: - out = out + item - return out - - def forward(self, inputs): - assert len(inputs) == len(self.in_channels) - - # build all levels from original feature maps - feats = [ - lateral_conv(inputs[i + self.start_level]) - for i, lateral_conv in enumerate(self.lateral_convs) - ] - for downsample in self.extra_downsamples: - feats.append(downsample(feats[-1])) - - outs = [feats] - - for i in range(self.stack_times): - current_outs = outs[-1] - next_outs = [] - direction = self.paths[i] - for j in range(self.num_outs): - if i in self.skip_inds[j]: - next_outs.append(outs[-1][j]) - continue - # feature level - if direction == 'td': - lvl = self.num_outs - j - 1 - else: - lvl = j - # get transitions - if direction == 'td': - same_trans = self.fpn_transitions[i][lvl]['same_down'] - else: - same_trans = self.fpn_transitions[i][lvl]['same_up'] - across_lateral_trans = self.fpn_transitions[i][lvl][ - 'across_lateral'] - across_down_trans = self.fpn_transitions[i][lvl]['across_down'] - across_up_trans = self.fpn_transitions[i][lvl]['across_up'] - across_skip_trans = self.fpn_transitions[i][lvl]['across_skip'] - # init output - to_fuse = dict( 
- same=None, lateral=None, across_up=None, across_down=None) - # same downsample/upsample - if same_trans is not None: - to_fuse['same'] = same_trans(next_outs[-1]) - # across lateral - if across_lateral_trans is not None: - to_fuse['lateral'] = across_lateral_trans( - current_outs[lvl]) - # across downsample - if lvl > 0 and across_up_trans is not None: - to_fuse['across_up'] = across_up_trans(current_outs[lvl - - 1]) - # across upsample - if (lvl < self.num_outs - 1 and across_down_trans is not None): - to_fuse['across_down'] = across_down_trans( - current_outs[lvl + 1]) - if across_skip_trans is not None: - to_fuse['across_skip'] = across_skip_trans(outs[0][lvl]) - x = self.fuse(to_fuse) - next_outs.append(x) - - if direction == 'td': - outs.append(next_outs[::-1]) - else: - outs.append(next_outs) - - # output trans - final_outs = [] - for i in range(self.num_outs): - lvl_out_list = [] - for s in range(len(outs)): - lvl_out_list.append(outs[s][i]) - lvl_out = self.output_transition[i](lvl_out_list) - final_outs.append(lvl_out) - - return final_outs diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/fpn.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/fpn.py deleted file mode 100644 index 4bdb5b22156b579dc262894fd0c4a141f4479854..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/fpn.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule, auto_fp16 - -from ..builder import NECKS - - -@NECKS.register_module() -class FPN(BaseModule): - r"""Feature Pyramid Network. - - This is an implementation of paper `Feature Pyramid Networks for Object - Detection `_. - - Args: - in_channels (list[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale). - num_outs (int): Number of output scales. - start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. - add_extra_convs (bool | str): If bool, it decides whether to add conv - layers on top of the original feature maps. Default to False. - If True, it is equivalent to `add_extra_convs='on_input'`. - If str, it specifies the source feature map of the extra convs. - Only the following options are allowed - - - 'on_input': Last feat map of neck inputs (i.e. backbone feature). - - 'on_lateral': Last feature map after lateral convs. - - 'on_output': The last output feature map after fpn convs. - relu_before_extra_convs (bool): Whether to apply relu before the extra - conv. Default: False. - no_norm_on_lateral (bool): Whether to apply norm on lateral. - Default: False. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Config dict for normalization layer. Default: None. - act_cfg (dict): Config dict for activation layer in ConvModule. - Default: None. - upsample_cfg (dict): Config dict for interpolate layer. - Default: dict(mode='nearest'). - init_cfg (dict or list[dict], optional): Initialization config dict. - - Example: - >>> import torch - >>> in_channels = [2, 3, 5, 7] - >>> scales = [340, 170, 84, 43] - >>> inputs = [torch.rand(1, c, s, s) - ... 
for c, s in zip(in_channels, scales)] - >>> self = FPN(in_channels, 11, len(in_channels)).eval() - >>> outputs = self.forward(inputs) - >>> for i in range(len(outputs)): - ... print(f'outputs[{i}].shape = {outputs[i].shape}') - outputs[0].shape = torch.Size([1, 11, 340, 340]) - outputs[1].shape = torch.Size([1, 11, 170, 170]) - outputs[2].shape = torch.Size([1, 11, 84, 84]) - outputs[3].shape = torch.Size([1, 11, 43, 43]) - """ - - def __init__(self, - in_channels, - out_channels, - num_outs, - start_level=0, - end_level=-1, - add_extra_convs=False, - relu_before_extra_convs=False, - no_norm_on_lateral=False, - conv_cfg=None, - norm_cfg=None, - act_cfg=None, - upsample_cfg=dict(mode='nearest'), - init_cfg=dict( - type='Xavier', layer='Conv2d', distribution='uniform')): - super(FPN, self).__init__(init_cfg) - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) - self.num_outs = num_outs - self.relu_before_extra_convs = relu_before_extra_convs - self.no_norm_on_lateral = no_norm_on_lateral - self.fp16_enabled = False - self.upsample_cfg = upsample_cfg.copy() - - if end_level == -1 or end_level == self.num_ins - 1: - self.backbone_end_level = self.num_ins - assert num_outs >= self.num_ins - start_level - else: - # if end_level is not the last level, no extra level is allowed - self.backbone_end_level = end_level + 1 - assert end_level < self.num_ins - assert num_outs == end_level - start_level + 1 - self.start_level = start_level - self.end_level = end_level - self.add_extra_convs = add_extra_convs - assert isinstance(add_extra_convs, (str, bool)) - if isinstance(add_extra_convs, str): - # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' - assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') - elif add_extra_convs: # True - self.add_extra_convs = 'on_input' - - self.lateral_convs = nn.ModuleList() - self.fpn_convs = nn.ModuleList() - - for i in range(self.start_level, self.backbone_end_level): - l_conv = ConvModule( - in_channels[i], - out_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, - act_cfg=act_cfg, - inplace=False) - fpn_conv = ConvModule( - out_channels, - out_channels, - 3, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - - self.lateral_convs.append(l_conv) - self.fpn_convs.append(fpn_conv) - - # add extra conv layers (e.g., RetinaNet) - extra_levels = num_outs - self.backbone_end_level + self.start_level - if self.add_extra_convs and extra_levels >= 1: - for i in range(extra_levels): - if i == 0 and self.add_extra_convs == 'on_input': - in_channels = self.in_channels[self.backbone_end_level - 1] - else: - in_channels = out_channels - extra_fpn_conv = ConvModule( - in_channels, - out_channels, - 3, - stride=2, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - self.fpn_convs.append(extra_fpn_conv) - - @auto_fp16() - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == len(self.in_channels) - - # build laterals - laterals = [ - lateral_conv(inputs[i + self.start_level]) - for i, lateral_conv in enumerate(self.lateral_convs) - ] - - # build top-down path - used_backbone_levels = len(laterals) - for i in range(used_backbone_levels - 1, 0, -1): - # In some cases, fixing `scale factor` (e.g. 2) is preferred, but - # it cannot co-exist with `size` in `F.interpolate`. 
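Editor's note (not part of the diff): the top-down merge the deleted `FPN.forward` performs here is easy to miss inside the full file, so below is a minimal standalone sketch of that one step, upsampling the coarser lateral to the finer lateral's spatial size and adding out of place. All shapes and channel counts are made up for illustration.

```python
# Minimal sketch of the FPN top-down merge (illustrative shapes only).
import torch
import torch.nn.functional as F

laterals = [
    torch.rand(1, 256, 64, 64),  # finest level
    torch.rand(1, 256, 32, 32),
    torch.rand(1, 256, 16, 16),  # coarsest level
]

for i in range(len(laterals) - 1, 0, -1):
    prev_shape = laterals[i - 1].shape[2:]
    # `size` and `scale_factor` are mutually exclusive in F.interpolate,
    # so the finer level's size is passed explicitly here.
    upsampled = F.interpolate(laterals[i], size=prev_shape, mode='nearest')
    # out-of-place addition, mirroring the `+=` workaround noted for PyTorch 1.10
    laterals[i - 1] = laterals[i - 1] + upsampled

print([feat.shape for feat in laterals])
```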
- if 'scale_factor' in self.upsample_cfg: - # fix runtime error of "+=" inplace operation in PyTorch 1.10 - laterals[i - 1] = laterals[i - 1] + F.interpolate( - laterals[i], **self.upsample_cfg) - else: - prev_shape = laterals[i - 1].shape[2:] - laterals[i - 1] = laterals[i - 1] + F.interpolate( - laterals[i], size=prev_shape, **self.upsample_cfg) - - # build outputs - # part 1: from original levels - outs = [ - self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) - ] - # part 2: add extra levels - if self.num_outs > len(outs): - # use max pool to get more levels on top of outputs - # (e.g., Faster R-CNN, Mask R-CNN) - if not self.add_extra_convs: - for i in range(self.num_outs - used_backbone_levels): - outs.append(F.max_pool2d(outs[-1], 1, stride=2)) - # add conv layers on top of original feature maps (RetinaNet) - else: - if self.add_extra_convs == 'on_input': - extra_source = inputs[self.backbone_end_level - 1] - elif self.add_extra_convs == 'on_lateral': - extra_source = laterals[-1] - elif self.add_extra_convs == 'on_output': - extra_source = outs[-1] - else: - raise NotImplementedError - outs.append(self.fpn_convs[used_backbone_levels](extra_source)) - for i in range(used_backbone_levels + 1, self.num_outs): - if self.relu_before_extra_convs: - outs.append(self.fpn_convs[i](F.relu(outs[-1]))) - else: - outs.append(self.fpn_convs[i](outs[-1])) - return tuple(outs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/fpn_carafe.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/fpn_carafe.py deleted file mode 100644 index fdd91f34c94129eefb477451dd7c1f7a7854135e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/fpn_carafe.py +++ /dev/null @@ -1,275 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -from mmcv.cnn import ConvModule, build_upsample_layer, xavier_init -from mmcv.ops.carafe import CARAFEPack -from mmcv.runner import BaseModule, ModuleList - -from ..builder import NECKS - - -@NECKS.register_module() -class FPN_CARAFE(BaseModule): - """FPN_CARAFE is a more flexible implementation of FPN. It allows more - choice for upsample methods during the top-down pathway. - - It can reproduce the performance of ICCV 2019 paper - CARAFE: Content-Aware ReAssembly of FEatures - Please refer to https://arxiv.org/abs/1905.02188 for more details. - - Args: - in_channels (list[int]): Number of channels for each input feature map. - out_channels (int): Output channels of feature pyramids. - num_outs (int): Number of output stages. - start_level (int): Start level of feature pyramids. - (Default: 0) - end_level (int): End level of feature pyramids. - (Default: -1 indicates the last level). - norm_cfg (dict): Dictionary to construct and config norm layer. - activate (str): Type of activation function in ConvModule - (Default: None indicates w/o activation). - order (dict): Order of components in ConvModule. - upsample (str): Type of upsample layer. - upsample_cfg (dict): Dictionary to construct and config upsample layer. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - in_channels, - out_channels, - num_outs, - start_level=0, - end_level=-1, - norm_cfg=None, - act_cfg=None, - order=('conv', 'norm', 'act'), - upsample_cfg=dict( - type='carafe', - up_kernel=5, - up_group=1, - encoder_kernel=3, - encoder_dilation=1), - init_cfg=None): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - super(FPN_CARAFE, self).__init__(init_cfg) - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) - self.num_outs = num_outs - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.with_bias = norm_cfg is None - self.upsample_cfg = upsample_cfg.copy() - self.upsample = self.upsample_cfg.get('type') - self.relu = nn.ReLU(inplace=False) - - self.order = order - assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')] - - assert self.upsample in [ - 'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None - ] - if self.upsample in ['deconv', 'pixel_shuffle']: - assert hasattr( - self.upsample_cfg, - 'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0 - self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel') - - if end_level == -1 or end_level == self.num_ins - 1: - self.backbone_end_level = self.num_ins - assert num_outs >= self.num_ins - start_level - else: - # if end_level is not the last level, no extra level is allowed - self.backbone_end_level = end_level + 1 - assert end_level < self.num_ins - assert num_outs == end_level - start_level + 1 - self.start_level = start_level - self.end_level = end_level - - self.lateral_convs = ModuleList() - self.fpn_convs = ModuleList() - self.upsample_modules = ModuleList() - - for i in range(self.start_level, self.backbone_end_level): - l_conv = ConvModule( - in_channels[i], - out_channels, - 1, - norm_cfg=norm_cfg, - bias=self.with_bias, - act_cfg=act_cfg, - inplace=False, - order=self.order) - fpn_conv = ConvModule( - out_channels, - out_channels, - 3, - padding=1, - norm_cfg=self.norm_cfg, - bias=self.with_bias, - act_cfg=act_cfg, - inplace=False, - order=self.order) - if i != self.backbone_end_level - 1: - upsample_cfg_ = self.upsample_cfg.copy() - if self.upsample == 'deconv': - upsample_cfg_.update( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=self.upsample_kernel, - stride=2, - padding=(self.upsample_kernel - 1) // 2, - output_padding=(self.upsample_kernel - 1) // 2) - elif self.upsample == 'pixel_shuffle': - upsample_cfg_.update( - in_channels=out_channels, - out_channels=out_channels, - scale_factor=2, - upsample_kernel=self.upsample_kernel) - elif self.upsample == 'carafe': - upsample_cfg_.update(channels=out_channels, scale_factor=2) - else: - # suppress warnings - align_corners = (None - if self.upsample == 'nearest' else False) - upsample_cfg_.update( - scale_factor=2, - mode=self.upsample, - align_corners=align_corners) - upsample_module = build_upsample_layer(upsample_cfg_) - self.upsample_modules.append(upsample_module) - self.lateral_convs.append(l_conv) - self.fpn_convs.append(fpn_conv) - - # add extra conv layers (e.g., RetinaNet) - extra_out_levels = ( - num_outs - self.backbone_end_level + self.start_level) - if extra_out_levels >= 1: - for i in range(extra_out_levels): - in_channels = ( - self.in_channels[self.backbone_end_level - - 1] if i == 0 else out_channels) - extra_l_conv = ConvModule( - in_channels, - out_channels, - 3, - stride=2, - padding=1, - 
norm_cfg=norm_cfg, - bias=self.with_bias, - act_cfg=act_cfg, - inplace=False, - order=self.order) - if self.upsample == 'deconv': - upsampler_cfg_ = dict( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=self.upsample_kernel, - stride=2, - padding=(self.upsample_kernel - 1) // 2, - output_padding=(self.upsample_kernel - 1) // 2) - elif self.upsample == 'pixel_shuffle': - upsampler_cfg_ = dict( - in_channels=out_channels, - out_channels=out_channels, - scale_factor=2, - upsample_kernel=self.upsample_kernel) - elif self.upsample == 'carafe': - upsampler_cfg_ = dict( - channels=out_channels, - scale_factor=2, - **self.upsample_cfg) - else: - # suppress warnings - align_corners = (None - if self.upsample == 'nearest' else False) - upsampler_cfg_ = dict( - scale_factor=2, - mode=self.upsample, - align_corners=align_corners) - upsampler_cfg_['type'] = self.upsample - upsample_module = build_upsample_layer(upsampler_cfg_) - extra_fpn_conv = ConvModule( - out_channels, - out_channels, - 3, - padding=1, - norm_cfg=self.norm_cfg, - bias=self.with_bias, - act_cfg=act_cfg, - inplace=False, - order=self.order) - self.upsample_modules.append(upsample_module) - self.fpn_convs.append(extra_fpn_conv) - self.lateral_convs.append(extra_l_conv) - - # default init_weights for conv(msra) and norm in ConvModule - def init_weights(self): - """Initialize the weights of module.""" - super(FPN_CARAFE, self).init_weights() - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - xavier_init(m, distribution='uniform') - for m in self.modules(): - if isinstance(m, CARAFEPack): - m.init_weights() - - def slice_as(self, src, dst): - """Slice ``src`` as ``dst`` - - Note: - ``src`` should have the same or larger size than ``dst``. - - Args: - src (torch.Tensor): Tensors to be sliced. - dst (torch.Tensor): ``src`` will be sliced to have the same - size as ``dst``. - - Returns: - torch.Tensor: Sliced tensor. 
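Editor's note (not part of the diff): `slice_as` / `tensor_add`, defined just below, exist to reconcile off-by-one spatial sizes between an upsampled map and its lateral before summation. The following standalone sketch mirrors that behavior with hypothetical helper functions and toy shapes.

```python
# Illustrative sketch of the size reconciliation used by FPN_CARAFE.
import torch

def slice_as(src, dst):
    # crop `src` down to `dst`'s height/width (src must be >= dst spatially)
    return src[:, :, :dst.size(2), :dst.size(3)]

def tensor_add(a, b):
    return a + b if a.size() == b.size() else a + slice_as(b, a)

lateral = torch.rand(1, 256, 25, 25)      # odd-sized lateral feature
upsampled = torch.rand(1, 256, 26, 26)    # 2x upsample of a 13x13 map
fused = tensor_add(lateral, upsampled)
print(fused.shape)                         # torch.Size([1, 256, 25, 25])
```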
- """ - assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3)) - if src.size(2) == dst.size(2) and src.size(3) == dst.size(3): - return src - else: - return src[:, :, :dst.size(2), :dst.size(3)] - - def tensor_add(self, a, b): - """Add tensors ``a`` and ``b`` that might have different sizes.""" - if a.size() == b.size(): - c = a + b - else: - c = a + self.slice_as(b, a) - return c - - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == len(self.in_channels) - - # build laterals - laterals = [] - for i, lateral_conv in enumerate(self.lateral_convs): - if i <= self.backbone_end_level - self.start_level: - input = inputs[min(i + self.start_level, len(inputs) - 1)] - else: - input = laterals[-1] - lateral = lateral_conv(input) - laterals.append(lateral) - - # build top-down path - for i in range(len(laterals) - 1, 0, -1): - if self.upsample is not None: - upsample_feat = self.upsample_modules[i - 1](laterals[i]) - else: - upsample_feat = laterals[i] - laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat) - - # build outputs - num_conv_outs = len(self.fpn_convs) - outs = [] - for i in range(num_conv_outs): - out = self.fpn_convs[i](laterals[i]) - outs.append(out) - return tuple(outs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/hrfpn.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/hrfpn.py deleted file mode 100644 index ca15be6b29877b1023fdd9f93226690f816504bf..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/hrfpn.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule -from torch.utils.checkpoint import checkpoint - -from ..builder import NECKS - - -@NECKS.register_module() -class HRFPN(BaseModule): - """HRFPN (High Resolution Feature Pyramids) - - paper: `High-Resolution Representations for Labeling Pixels and Regions - `_. - - Args: - in_channels (list): number of channels for each branch. - out_channels (int): output channels of feature pyramids. - num_outs (int): number of output stages. - pooling_type (str): pooling for generating feature pyramids - from {MAX, AVG}. - conv_cfg (dict): dictionary to construct and config conv layer. - norm_cfg (dict): dictionary to construct and config norm layer. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - stride (int): stride of 3x3 convolutional layers - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - in_channels, - out_channels, - num_outs=5, - pooling_type='AVG', - conv_cfg=None, - norm_cfg=None, - with_cp=False, - stride=1, - init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')): - super(HRFPN, self).__init__(init_cfg) - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) - self.num_outs = num_outs - self.with_cp = with_cp - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - self.reduction_conv = ConvModule( - sum(in_channels), - out_channels, - kernel_size=1, - conv_cfg=self.conv_cfg, - act_cfg=None) - - self.fpn_convs = nn.ModuleList() - for i in range(self.num_outs): - self.fpn_convs.append( - ConvModule( - out_channels, - out_channels, - kernel_size=3, - padding=1, - stride=stride, - conv_cfg=self.conv_cfg, - act_cfg=None)) - - if pooling_type == 'MAX': - self.pooling = F.max_pool2d - else: - self.pooling = F.avg_pool2d - - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == self.num_ins - outs = [inputs[0]] - for i in range(1, self.num_ins): - outs.append( - F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear')) - out = torch.cat(outs, dim=1) - if out.requires_grad and self.with_cp: - out = checkpoint(self.reduction_conv, out) - else: - out = self.reduction_conv(out) - outs = [out] - for i in range(1, self.num_outs): - outs.append(self.pooling(out, kernel_size=2**i, stride=2**i)) - outputs = [] - - for i in range(self.num_outs): - if outs[i].requires_grad and self.with_cp: - tmp_out = checkpoint(self.fpn_convs[i], outs[i]) - else: - tmp_out = self.fpn_convs[i](outs[i]) - outputs.append(tmp_out) - return tuple(outputs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/nas_fpn.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/nas_fpn.py deleted file mode 100644 index 710592eccb4f483e64d4bc09b3d8669170dc8f0f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/nas_fpn.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.ops.merge_cells import GlobalPoolingCell, SumCell -from mmcv.runner import BaseModule, ModuleList - -from ..builder import NECKS - - -@NECKS.register_module() -class NASFPN(BaseModule): - """NAS-FPN. - - Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture - for Object Detection `_ - - Args: - in_channels (List[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale) - num_outs (int): Number of output scales. - stack_times (int): The number of times the pyramid architecture will - be stacked. - start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. - add_extra_convs (bool): It decides whether to add conv - layers on top of the original feature maps. Default to False. - If True, its actual mode is specified by `extra_convs_on_inputs`. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - in_channels, - out_channels, - num_outs, - stack_times, - start_level=0, - end_level=-1, - add_extra_convs=False, - norm_cfg=None, - init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')): - super(NASFPN, self).__init__(init_cfg) - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) # num of input feature levels - self.num_outs = num_outs # num of output feature levels - self.stack_times = stack_times - self.norm_cfg = norm_cfg - - if end_level == -1 or end_level == self.num_ins - 1: - self.backbone_end_level = self.num_ins - assert num_outs >= self.num_ins - start_level - else: - # if end_level is not the last level, no extra level is allowed - self.backbone_end_level = end_level + 1 - assert end_level < self.num_ins - assert num_outs == end_level - start_level + 1 - self.start_level = start_level - self.end_level = end_level - self.add_extra_convs = add_extra_convs - - # add lateral connections - self.lateral_convs = nn.ModuleList() - for i in range(self.start_level, self.backbone_end_level): - l_conv = ConvModule( - in_channels[i], - out_channels, - 1, - norm_cfg=norm_cfg, - act_cfg=None) - self.lateral_convs.append(l_conv) - - # add extra downsample layers (stride-2 pooling or conv) - extra_levels = num_outs - self.backbone_end_level + self.start_level - self.extra_downsamples = nn.ModuleList() - for i in range(extra_levels): - extra_conv = ConvModule( - out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None) - self.extra_downsamples.append( - nn.Sequential(extra_conv, nn.MaxPool2d(2, 2))) - - # add NAS FPN connections - self.fpn_stages = ModuleList() - for _ in range(self.stack_times): - stage = nn.ModuleDict() - # gp(p6, p4) -> p4_1 - stage['gp_64_4'] = GlobalPoolingCell( - in_channels=out_channels, - out_channels=out_channels, - out_norm_cfg=norm_cfg) - # sum(p4_1, p4) -> p4_2 - stage['sum_44_4'] = SumCell( - in_channels=out_channels, - out_channels=out_channels, - out_norm_cfg=norm_cfg) - # sum(p4_2, p3) -> p3_out - stage['sum_43_3'] = SumCell( - in_channels=out_channels, - out_channels=out_channels, - out_norm_cfg=norm_cfg) - # sum(p3_out, p4_2) -> p4_out - stage['sum_34_4'] = SumCell( - in_channels=out_channels, - out_channels=out_channels, - out_norm_cfg=norm_cfg) - # sum(p5, gp(p4_out, p3_out)) -> p5_out - stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False) - stage['sum_55_5'] = SumCell( - in_channels=out_channels, - out_channels=out_channels, - out_norm_cfg=norm_cfg) - # sum(p7, gp(p5_out, p4_2)) -> p7_out - stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False) - stage['sum_77_7'] = SumCell( - in_channels=out_channels, - out_channels=out_channels, - out_norm_cfg=norm_cfg) - # gp(p7_out, p5_out) -> p6_out - stage['gp_75_6'] = GlobalPoolingCell( - in_channels=out_channels, - out_channels=out_channels, - out_norm_cfg=norm_cfg) - self.fpn_stages.append(stage) - - def forward(self, inputs): - """Forward function.""" - # build P3-P5 - feats = [ - lateral_conv(inputs[i + self.start_level]) - for i, lateral_conv in enumerate(self.lateral_convs) - ] - # build P6-P7 on top of P5 - for downsample in self.extra_downsamples: - feats.append(downsample(feats[-1])) - - p3, p4, p5, p6, p7 = feats - - for stage in self.fpn_stages: - # gp(p6, p4) -> p4_1 - p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:]) - # sum(p4_1, p4) -> p4_2 - p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:]) - # sum(p4_2, p3) -> p3_out - p3 = stage['sum_43_3'](p4_2, p3, 
out_size=p3.shape[-2:]) - # sum(p3_out, p4_2) -> p4_out - p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:]) - # sum(p5, gp(p4_out, p3_out)) -> p5_out - p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:]) - p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:]) - # sum(p7, gp(p5_out, p4_2)) -> p7_out - p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:]) - p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:]) - # gp(p7_out, p5_out) -> p6_out - p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:]) - - return p3, p4, p5, p6, p7 diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/nasfcos_fpn.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/nasfcos_fpn.py deleted file mode 100644 index c4abfe7bde8a69c1219e7532669761c3e9e64e15..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/nasfcos_fpn.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, caffe2_xavier_init -from mmcv.ops.merge_cells import ConcatCell -from mmcv.runner import BaseModule - -from ..builder import NECKS - - -@NECKS.register_module() -class NASFCOS_FPN(BaseModule): - """FPN structure in NASFPN. - - Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for - Object Detection `_ - - Args: - in_channels (List[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale) - num_outs (int): Number of output scales. - start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. - add_extra_convs (bool): It decides whether to add conv - layers on top of the original feature maps. Default to False. - If True, its actual mode is specified by `extra_convs_on_inputs`. - conv_cfg (dict): dictionary to construct and config conv layer. - norm_cfg (dict): dictionary to construct and config norm layer. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - in_channels, - out_channels, - num_outs, - start_level=1, - end_level=-1, - add_extra_convs=False, - conv_cfg=None, - norm_cfg=None, - init_cfg=None): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - super(NASFCOS_FPN, self).__init__(init_cfg) - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) - self.num_outs = num_outs - self.norm_cfg = norm_cfg - self.conv_cfg = conv_cfg - - if end_level == -1 or end_level == self.num_ins - 1: - self.backbone_end_level = self.num_ins - assert num_outs >= self.num_ins - start_level - else: - # if end_level is not the last level, no extra level is allowed - self.backbone_end_level = end_level + 1 - assert end_level < self.num_ins - assert num_outs == end_level - start_level + 1 - self.start_level = start_level - self.end_level = end_level - self.add_extra_convs = add_extra_convs - - self.adapt_convs = nn.ModuleList() - for i in range(self.start_level, self.backbone_end_level): - adapt_conv = ConvModule( - in_channels[i], - out_channels, - 1, - stride=1, - padding=0, - bias=False, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU', inplace=False)) - self.adapt_convs.append(adapt_conv) - - # C2 is omitted according to the paper - extra_levels = num_outs - self.backbone_end_level + self.start_level - - def build_concat_cell(with_input1_conv, with_input2_conv): - cell_conv_cfg = dict( - kernel_size=1, padding=0, bias=False, groups=out_channels) - return ConcatCell( - in_channels=out_channels, - out_channels=out_channels, - with_out_conv=True, - out_conv_cfg=cell_conv_cfg, - out_norm_cfg=dict(type='BN'), - out_conv_order=('norm', 'act', 'conv'), - with_input1_conv=with_input1_conv, - with_input2_conv=with_input2_conv, - input_conv_cfg=conv_cfg, - input_norm_cfg=norm_cfg, - upsample_mode='nearest') - - # Denote c3=f0, c4=f1, c5=f2 for convince - self.fpn = nn.ModuleDict() - self.fpn['c22_1'] = build_concat_cell(True, True) - self.fpn['c22_2'] = build_concat_cell(True, True) - self.fpn['c32'] = build_concat_cell(True, False) - self.fpn['c02'] = build_concat_cell(True, False) - self.fpn['c42'] = build_concat_cell(True, True) - self.fpn['c36'] = build_concat_cell(True, True) - self.fpn['c61'] = build_concat_cell(True, True) # f9 - self.extra_downsamples = nn.ModuleList() - for i in range(extra_levels): - extra_act_cfg = None if i == 0 \ - else dict(type='ReLU', inplace=False) - self.extra_downsamples.append( - ConvModule( - out_channels, - out_channels, - 3, - stride=2, - padding=1, - act_cfg=extra_act_cfg, - order=('act', 'norm', 'conv'))) - - def forward(self, inputs): - """Forward function.""" - feats = [ - adapt_conv(inputs[i + self.start_level]) - for i, adapt_conv in enumerate(self.adapt_convs) - ] - - for (i, module_name) in enumerate(self.fpn): - idx_1, idx_2 = int(module_name[1]), int(module_name[2]) - res = self.fpn[module_name](feats[idx_1], feats[idx_2]) - feats.append(res) - - ret = [] - for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]): # add P3, P4, P5 - feats1, feats2 = feats[idx], feats[5] - feats2_resize = F.interpolate( - feats2, - size=feats1.size()[2:], - mode='bilinear', - align_corners=False) - - feats_sum = feats1 + feats2_resize - ret.append( - F.interpolate( - feats_sum, - size=inputs[input_idx].size()[2:], - mode='bilinear', - align_corners=False)) - - for submodule in self.extra_downsamples: - ret.append(submodule(ret[-1])) - - 
return tuple(ret) - - def init_weights(self): - """Initialize the weights of module.""" - super(NASFCOS_FPN, self).init_weights() - for module in self.fpn.values(): - if hasattr(module, 'conv_out'): - caffe2_xavier_init(module.out_conv.conv) - - for modules in [ - self.adapt_convs.modules(), - self.extra_downsamples.modules() - ]: - for module in modules: - if isinstance(module, nn.Conv2d): - caffe2_xavier_init(module) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/pafpn.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/pafpn.py deleted file mode 100644 index 2edd34879425891a16b8e93b92fe2d653af07022..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/pafpn.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import auto_fp16 - -from ..builder import NECKS -from .fpn import FPN - - -@NECKS.register_module() -class PAFPN(FPN): - """Path Aggregation Network for Instance Segmentation. - - This is an implementation of the `PAFPN in Path Aggregation Network - `_. - - Args: - in_channels (List[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale) - num_outs (int): Number of output scales. - start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. - add_extra_convs (bool | str): If bool, it decides whether to add conv - layers on top of the original feature maps. Default to False. - If True, it is equivalent to `add_extra_convs='on_input'`. - If str, it specifies the source feature map of the extra convs. - Only the following options are allowed - - - 'on_input': Last feat map of neck inputs (i.e. backbone feature). - - 'on_lateral': Last feature map after lateral convs. - - 'on_output': The last output feature map after fpn convs. - relu_before_extra_convs (bool): Whether to apply relu before the extra - conv. Default: False. - no_norm_on_lateral (bool): Whether to apply norm on lateral. - Default: False. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Config dict for normalization layer. Default: None. - act_cfg (str): Config dict for activation layer in ConvModule. - Default: None. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - in_channels, - out_channels, - num_outs, - start_level=0, - end_level=-1, - add_extra_convs=False, - relu_before_extra_convs=False, - no_norm_on_lateral=False, - conv_cfg=None, - norm_cfg=None, - act_cfg=None, - init_cfg=dict( - type='Xavier', layer='Conv2d', distribution='uniform')): - super(PAFPN, self).__init__( - in_channels, - out_channels, - num_outs, - start_level, - end_level, - add_extra_convs, - relu_before_extra_convs, - no_norm_on_lateral, - conv_cfg, - norm_cfg, - act_cfg, - init_cfg=init_cfg) - # add extra bottom up pathway - self.downsample_convs = nn.ModuleList() - self.pafpn_convs = nn.ModuleList() - for i in range(self.start_level + 1, self.backbone_end_level): - d_conv = ConvModule( - out_channels, - out_channels, - 3, - stride=2, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - pafpn_conv = ConvModule( - out_channels, - out_channels, - 3, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - self.downsample_convs.append(d_conv) - self.pafpn_convs.append(pafpn_conv) - - @auto_fp16() - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == len(self.in_channels) - - # build laterals - laterals = [ - lateral_conv(inputs[i + self.start_level]) - for i, lateral_conv in enumerate(self.lateral_convs) - ] - - # build top-down path - used_backbone_levels = len(laterals) - for i in range(used_backbone_levels - 1, 0, -1): - prev_shape = laterals[i - 1].shape[2:] - # fix runtime error of "+=" inplace operation in PyTorch 1.10 - laterals[i - 1] = laterals[i - 1] + F.interpolate( - laterals[i], size=prev_shape, mode='nearest') - - # build outputs - # part 1: from original levels - inter_outs = [ - self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) - ] - - # part 2: add bottom-up path - for i in range(0, used_backbone_levels - 1): - inter_outs[i + 1] += self.downsample_convs[i](inter_outs[i]) - - outs = [] - outs.append(inter_outs[0]) - outs.extend([ - self.pafpn_convs[i - 1](inter_outs[i]) - for i in range(1, used_backbone_levels) - ]) - - # part 3: add extra levels - if self.num_outs > len(outs): - # use max pool to get more levels on top of outputs - # (e.g., Faster R-CNN, Mask R-CNN) - if not self.add_extra_convs: - for i in range(self.num_outs - used_backbone_levels): - outs.append(F.max_pool2d(outs[-1], 1, stride=2)) - # add conv layers on top of original feature maps (RetinaNet) - else: - if self.add_extra_convs == 'on_input': - orig = inputs[self.backbone_end_level - 1] - outs.append(self.fpn_convs[used_backbone_levels](orig)) - elif self.add_extra_convs == 'on_lateral': - outs.append(self.fpn_convs[used_backbone_levels]( - laterals[-1])) - elif self.add_extra_convs == 'on_output': - outs.append(self.fpn_convs[used_backbone_levels](outs[-1])) - else: - raise NotImplementedError - for i in range(used_backbone_levels + 1, self.num_outs): - if self.relu_before_extra_convs: - outs.append(self.fpn_convs[i](F.relu(outs[-1]))) - else: - outs.append(self.fpn_convs[i](outs[-1])) - return tuple(outs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/rfp.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/rfp.py deleted file mode 100644 index 6976f4daf25a04f63f7570ec7ca7633c50fc725d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/rfp.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import constant_init, xavier_init -from mmcv.runner import BaseModule, ModuleList - -from ..builder import NECKS, build_backbone -from .fpn import FPN - - -class ASPP(BaseModule): - """ASPP (Atrous Spatial Pyramid Pooling) - - This is an implementation of the ASPP module used in DetectoRS - (https://arxiv.org/pdf/2006.02334.pdf) - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of channels produced by this module - dilations (tuple[int]): Dilations of the four branches. - Default: (1, 3, 6, 1) - init_cfg (dict or list[dict], optional): Initialization config dict. - """ - - def __init__(self, - in_channels, - out_channels, - dilations=(1, 3, 6, 1), - init_cfg=dict(type='Kaiming', layer='Conv2d')): - super().__init__(init_cfg) - assert dilations[-1] == 1 - self.aspp = nn.ModuleList() - for dilation in dilations: - kernel_size = 3 if dilation > 1 else 1 - padding = dilation if dilation > 1 else 0 - conv = nn.Conv2d( - in_channels, - out_channels, - kernel_size=kernel_size, - stride=1, - dilation=dilation, - padding=padding, - bias=True) - self.aspp.append(conv) - self.gap = nn.AdaptiveAvgPool2d(1) - - def forward(self, x): - avg_x = self.gap(x) - out = [] - for aspp_idx in range(len(self.aspp)): - inp = avg_x if (aspp_idx == len(self.aspp) - 1) else x - out.append(F.relu_(self.aspp[aspp_idx](inp))) - out[-1] = out[-1].expand_as(out[-2]) - out = torch.cat(out, dim=1) - return out - - -@NECKS.register_module() -class RFP(FPN): - """RFP (Recursive Feature Pyramid) - - This is an implementation of RFP in `DetectoRS - `_. Different from standard FPN, the - input of RFP should be multi level features along with origin input image - of backbone. - - Args: - rfp_steps (int): Number of unrolled steps of RFP. - rfp_backbone (dict): Configuration of the backbone for RFP. - aspp_out_channels (int): Number of output channels of ASPP module. - aspp_dilations (tuple[int]): Dilation rates of four branches. - Default: (1, 3, 6, 1) - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - rfp_steps, - rfp_backbone, - aspp_out_channels, - aspp_dilations=(1, 3, 6, 1), - init_cfg=None, - **kwargs): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - super().__init__(init_cfg=init_cfg, **kwargs) - self.rfp_steps = rfp_steps - # Be careful! Pretrained weights cannot be loaded when use - # nn.ModuleList - self.rfp_modules = ModuleList() - for rfp_idx in range(1, rfp_steps): - rfp_module = build_backbone(rfp_backbone) - self.rfp_modules.append(rfp_module) - self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels, - aspp_dilations) - self.rfp_weight = nn.Conv2d( - self.out_channels, - 1, - kernel_size=1, - stride=1, - padding=0, - bias=True) - - def init_weights(self): - # Avoid using super().init_weights(), which may alter the default - # initialization of the modules in self.rfp_modules that have missing - # keys in the pretrained checkpoint. 
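Editor's note (not part of the diff): the ASPP module deleted above runs parallel dilated convolutions plus a global-average-pooling branch and concatenates them. Below is a standalone sketch of that pattern with invented channel counts; it is an illustration, not the module's API.

```python
# Sketch of the ASPP pattern: dilated branches plus a pooled branch.
import torch
import torch.nn as nn
import torch.nn.functional as F

in_ch, out_ch = 256, 64
dilations = (1, 3, 6, 1)  # last branch (dilation 1) consumes the pooled map
branches = nn.ModuleList([
    nn.Conv2d(in_ch, out_ch,
              kernel_size=3 if d > 1 else 1,
              dilation=d,
              padding=d if d > 1 else 0)
    for d in dilations
])
gap = nn.AdaptiveAvgPool2d(1)

x = torch.rand(1, in_ch, 32, 32)
outs = []
for i, conv in enumerate(branches):
    inp = gap(x) if i == len(branches) - 1 else x  # last branch sees the 1x1 pooled map
    outs.append(F.relu(conv(inp)))
outs[-1] = outs[-1].expand_as(outs[-2])            # broadcast pooled branch back to HxW
print(torch.cat(outs, dim=1).shape)                # (1, 4 * out_ch, 32, 32)
```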
- for convs in [self.lateral_convs, self.fpn_convs]: - for m in convs.modules(): - if isinstance(m, nn.Conv2d): - xavier_init(m, distribution='uniform') - for rfp_idx in range(self.rfp_steps - 1): - self.rfp_modules[rfp_idx].init_weights() - constant_init(self.rfp_weight, 0) - - def forward(self, inputs): - inputs = list(inputs) - assert len(inputs) == len(self.in_channels) + 1 # +1 for input image - img = inputs.pop(0) - # FPN forward - x = super().forward(tuple(inputs)) - for rfp_idx in range(self.rfp_steps - 1): - rfp_feats = [x[0]] + list( - self.rfp_aspp(x[i]) for i in range(1, len(x))) - x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats) - # FPN forward - x_idx = super().forward(x_idx) - x_new = [] - for ft_idx in range(len(x_idx)): - add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx])) - x_new.append(add_weight * x_idx[ft_idx] + - (1 - add_weight) * x[ft_idx]) - x = x_new - return x diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/ssd_neck.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/ssd_neck.py deleted file mode 100644 index 179d575e172ef93dd42aecc9a55f216029db4aec..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/ssd_neck.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule -from mmcv.runner import BaseModule - -from ..builder import NECKS - - -@NECKS.register_module() -class SSDNeck(BaseModule): - """Extra layers of SSD backbone to generate multi-scale feature maps. - - Args: - in_channels (Sequence[int]): Number of input channels per scale. - out_channels (Sequence[int]): Number of output channels per scale. - level_strides (Sequence[int]): Stride of 3x3 conv per level. - level_paddings (Sequence[int]): Padding size of 3x3 conv per level. - l2_norm_scale (float|None): L2 normalization layer init scale. - If None, not use L2 normalization on the first input feature. - last_kernel_size (int): Kernel size of the last conv layer. - Default: 3. - use_depthwise (bool): Whether to use DepthwiseSeparableConv. - Default: False. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: None. - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU'). - init_cfg (dict or list[dict], optional): Initialization config dict. 
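Editor's note (not part of the diff): the RFP forward pass deleted above ends each unrolled step with a sigmoid-gated blend of the new and previous pyramid features. The snippet below sketches only that blending step with toy tensors; initializing the gate conv to zero is an assumption mirroring `constant_init(self.rfp_weight, 0)` above.

```python
# Sketch of RFP's gated fusion: a 1x1 conv predicts a per-pixel blend weight.
import torch
import torch.nn as nn

out_channels = 256
rfp_weight = nn.Conv2d(out_channels, 1, kernel_size=1)
nn.init.constant_(rfp_weight.weight, 0)   # start close to an even blend
nn.init.constant_(rfp_weight.bias, 0)

prev_feat = torch.rand(1, out_channels, 32, 32)  # output of previous step
new_feat = torch.rand(1, out_channels, 32, 32)   # output of current step

w = torch.sigmoid(rfp_weight(new_feat))          # (1, 1, 32, 32), broadcast over channels
fused = w * new_feat + (1 - w) * prev_feat
print(fused.shape)
```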
- """ - - def __init__(self, - in_channels, - out_channels, - level_strides, - level_paddings, - l2_norm_scale=20., - last_kernel_size=3, - use_depthwise=False, - conv_cfg=None, - norm_cfg=None, - act_cfg=dict(type='ReLU'), - init_cfg=[ - dict( - type='Xavier', distribution='uniform', - layer='Conv2d'), - dict(type='Constant', val=1, layer='BatchNorm2d'), - ]): - super(SSDNeck, self).__init__(init_cfg) - assert len(out_channels) > len(in_channels) - assert len(out_channels) - len(in_channels) == len(level_strides) - assert len(level_strides) == len(level_paddings) - assert in_channels == out_channels[:len(in_channels)] - - if l2_norm_scale: - self.l2_norm = L2Norm(in_channels[0], l2_norm_scale) - self.init_cfg += [ - dict( - type='Constant', - val=self.l2_norm.scale, - override=dict(name='l2_norm')) - ] - - self.extra_layers = nn.ModuleList() - extra_layer_channels = out_channels[len(in_channels):] - second_conv = DepthwiseSeparableConvModule if \ - use_depthwise else ConvModule - - for i, (out_channel, stride, padding) in enumerate( - zip(extra_layer_channels, level_strides, level_paddings)): - kernel_size = last_kernel_size \ - if i == len(extra_layer_channels) - 1 else 3 - per_lvl_convs = nn.Sequential( - ConvModule( - out_channels[len(in_channels) - 1 + i], - out_channel // 2, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg), - second_conv( - out_channel // 2, - out_channel, - kernel_size, - stride=stride, - padding=padding, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - self.extra_layers.append(per_lvl_convs) - - def forward(self, inputs): - """Forward function.""" - outs = [feat for feat in inputs] - if hasattr(self, 'l2_norm'): - outs[0] = self.l2_norm(outs[0]) - - feat = outs[-1] - for layer in self.extra_layers: - feat = layer(feat) - outs.append(feat) - return tuple(outs) - - -class L2Norm(nn.Module): - - def __init__(self, n_dims, scale=20., eps=1e-10): - """L2 normalization layer. - - Args: - n_dims (int): Number of dimensions to be normalized - scale (float, optional): Defaults to 20.. - eps (float, optional): Used to avoid division by zero. - Defaults to 1e-10. - """ - super(L2Norm, self).__init__() - self.n_dims = n_dims - self.weight = nn.Parameter(torch.Tensor(self.n_dims)) - self.eps = eps - self.scale = scale - - def forward(self, x): - """Forward function.""" - # normalization layer convert to FP32 in FP16 training - x_float = x.float() - norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps - return (self.weight[None, :, None, None].float().expand_as(x_float) * - x_float / norm).type_as(x) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/yolo_neck.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/yolo_neck.py deleted file mode 100644 index c8eeb5737cdf871fa415c1a207956ea7753c304e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/yolo_neck.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# Copyright (c) 2019 Western Digital Corporation or its affiliates. - -import torch -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule - -from ..builder import NECKS - - -class DetectionBlock(BaseModule): - """Detection block in YOLO neck. - - Let out_channels = n, the DetectionBlock contains: - Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer. - The first 6 ConvLayers are formed the following way: - 1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n. - The Conv2D layer is 1x1x255. 
- Some block will have branch after the fifth ConvLayer. - The input channel is arbitrary (in_channels) - - Args: - in_channels (int): The number of input channels. - out_channels (int): The number of output channels. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN', requires_grad=True) - act_cfg (dict): Config dict for activation layer. - Default: dict(type='LeakyReLU', negative_slope=0.1). - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - out_channels, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - init_cfg=None): - super(DetectionBlock, self).__init__(init_cfg) - double_out_channels = out_channels * 2 - - # shortcut - cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) - self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg) - self.conv2 = ConvModule( - out_channels, double_out_channels, 3, padding=1, **cfg) - self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg) - self.conv4 = ConvModule( - out_channels, double_out_channels, 3, padding=1, **cfg) - self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg) - - def forward(self, x): - tmp = self.conv1(x) - tmp = self.conv2(tmp) - tmp = self.conv3(tmp) - tmp = self.conv4(tmp) - out = self.conv5(tmp) - return out - - -@NECKS.register_module() -class YOLOV3Neck(BaseModule): - """The neck of YOLOV3. - - It can be treated as a simplified version of FPN. It - will take the result from Darknet backbone and do some upsampling and - concatenation. It will finally output the detection result. - - Note: - The input feats should be from top to bottom. - i.e., from high-lvl to low-lvl - But YOLOV3Neck will process them in reversed order. - i.e., from bottom (high-lvl) to top (low-lvl) - - Args: - num_scales (int): The number of scales / stages. - in_channels (List[int]): The number of input channels per scale. - out_channels (List[int]): The number of output channels per scale. - conv_cfg (dict, optional): Config dict for convolution layer. - Default: None. - norm_cfg (dict, optional): Dictionary to construct and config norm - layer. Default: dict(type='BN', requires_grad=True) - act_cfg (dict, optional): Config dict for activation layer. - Default: dict(type='LeakyReLU', negative_slope=0.1). - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - num_scales, - in_channels, - out_channels, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - init_cfg=None): - super(YOLOV3Neck, self).__init__(init_cfg) - assert (num_scales == len(in_channels) == len(out_channels)) - self.num_scales = num_scales - self.in_channels = in_channels - self.out_channels = out_channels - - # shortcut - cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) - - # To support arbitrary scales, the code looks awful, but it works. - # Better solution is welcomed. 
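Editor's note (not part of the diff): the YOLOV3Neck code just below supports an arbitrary number of scales by registering layers under generated names with `add_module` and retrieving them with `getattr` in `forward`. A tiny self-contained sketch of that pattern follows; the layer types and channel counts are placeholders, not the neck's real configuration.

```python
# Sketch of the add_module / getattr pattern for a variable number of stages.
import torch
import torch.nn as nn

class TinyNeck(nn.Module):
    def __init__(self, num_scales=3, channels=8):
        super().__init__()
        self.num_scales = num_scales
        for i in range(1, num_scales):
            # registered as a proper submodule, so its parameters are tracked
            self.add_module(f'conv{i}', nn.Conv2d(channels, channels, 1))

    def forward(self, x):
        for i in range(1, self.num_scales):
            x = getattr(self, f'conv{i}')(x)  # looked up by the generated name
        return x

print(TinyNeck()(torch.rand(1, 8, 16, 16)).shape)
```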
- self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg) - for i in range(1, self.num_scales): - in_c, out_c = self.in_channels[i], self.out_channels[i] - inter_c = out_channels[i - 1] - self.add_module(f'conv{i}', ConvModule(inter_c, out_c, 1, **cfg)) - # in_c + out_c : High-lvl feats will be cat with low-lvl feats - self.add_module(f'detect{i+1}', - DetectionBlock(in_c + out_c, out_c, **cfg)) - - def forward(self, feats): - assert len(feats) == self.num_scales - - # processed from bottom (high-lvl) to top (low-lvl) - outs = [] - out = self.detect1(feats[-1]) - outs.append(out) - - for i, x in enumerate(reversed(feats[:-1])): - conv = getattr(self, f'conv{i+1}') - tmp = conv(out) - - # Cat with low-lvl feats - tmp = F.interpolate(tmp, scale_factor=2) - tmp = torch.cat((tmp, x), 1) - - detect = getattr(self, f'detect{i+2}') - out = detect(tmp) - outs.append(out) - - return tuple(outs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/necks/yolox_pafpn.py b/cv/detection/co-detr/pytorch/mmdet/models/necks/yolox_pafpn.py deleted file mode 100644 index b0f6f7068645b6ce722556fa29c7e3d349934e74..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/necks/yolox_pafpn.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule -from mmcv.runner import BaseModule - -from ..builder import NECKS -from ..utils import CSPLayer - - -@NECKS.register_module() -class YOLOXPAFPN(BaseModule): - """Path Aggregation Network used in YOLOX. - - Args: - in_channels (List[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale) - num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 3 - use_depthwise (bool): Whether to depthwise separable convolution in - blocks. Default: False - upsample_cfg (dict): Config dict for interpolate layer. - Default: `dict(scale_factor=2, mode='nearest')` - conv_cfg (dict, optional): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN') - act_cfg (dict): Config dict for activation layer. - Default: dict(type='Swish') - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. 
- """ - - def __init__(self, - in_channels, - out_channels, - num_csp_blocks=3, - use_depthwise=False, - upsample_cfg=dict(scale_factor=2, mode='nearest'), - conv_cfg=None, - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='Swish'), - init_cfg=dict( - type='Kaiming', - layer='Conv2d', - a=math.sqrt(5), - distribution='uniform', - mode='fan_in', - nonlinearity='leaky_relu')): - super(YOLOXPAFPN, self).__init__(init_cfg) - self.in_channels = in_channels - self.out_channels = out_channels - - conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule - - # build top-down blocks - self.upsample = nn.Upsample(**upsample_cfg) - self.reduce_layers = nn.ModuleList() - self.top_down_blocks = nn.ModuleList() - for idx in range(len(in_channels) - 1, 0, -1): - self.reduce_layers.append( - ConvModule( - in_channels[idx], - in_channels[idx - 1], - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - self.top_down_blocks.append( - CSPLayer( - in_channels[idx - 1] * 2, - in_channels[idx - 1], - num_blocks=num_csp_blocks, - add_identity=False, - use_depthwise=use_depthwise, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - - # build bottom-up blocks - self.downsamples = nn.ModuleList() - self.bottom_up_blocks = nn.ModuleList() - for idx in range(len(in_channels) - 1): - self.downsamples.append( - conv( - in_channels[idx], - in_channels[idx], - 3, - stride=2, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - self.bottom_up_blocks.append( - CSPLayer( - in_channels[idx] * 2, - in_channels[idx + 1], - num_blocks=num_csp_blocks, - add_identity=False, - use_depthwise=use_depthwise, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - - self.out_convs = nn.ModuleList() - for i in range(len(in_channels)): - self.out_convs.append( - ConvModule( - in_channels[i], - out_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - - def forward(self, inputs): - """ - Args: - inputs (tuple[Tensor]): input features. - - Returns: - tuple[Tensor]: YOLOXPAFPN features. - """ - assert len(inputs) == len(self.in_channels) - - # top-down path - inner_outs = [inputs[-1]] - for idx in range(len(self.in_channels) - 1, 0, -1): - feat_heigh = inner_outs[0] - feat_low = inputs[idx - 1] - feat_heigh = self.reduce_layers[len(self.in_channels) - 1 - idx]( - feat_heigh) - inner_outs[0] = feat_heigh - - upsample_feat = self.upsample(feat_heigh) - - inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx]( - torch.cat([upsample_feat, feat_low], 1)) - inner_outs.insert(0, inner_out) - - # bottom-up path - outs = [inner_outs[0]] - for idx in range(len(self.in_channels) - 1): - feat_low = outs[-1] - feat_height = inner_outs[idx + 1] - downsample_feat = self.downsamples[idx](feat_low) - out = self.bottom_up_blocks[idx]( - torch.cat([downsample_feat, feat_height], 1)) - outs.append(out) - - # out convs - for idx, conv in enumerate(self.out_convs): - outs[idx] = conv(outs[idx]) - - return tuple(outs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/plugins/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/plugins/__init__.py deleted file mode 100644 index a455c07bb99b9393e68b44d747cb5710b47c56fd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/plugins/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .dropblock import DropBlock -from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder -from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder - -__all__ = [ - 'DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder', - 'MSDeformAttnPixelDecoder' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/plugins/dropblock.py b/cv/detection/co-detr/pytorch/mmdet/models/plugins/dropblock.py deleted file mode 100644 index bb00ade7384fadd086ad900d90c255b23f17a7da..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/plugins/dropblock.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import PLUGIN_LAYERS - -eps = 1e-6 - - -@PLUGIN_LAYERS.register_module() -class DropBlock(nn.Module): - """Randomly drop some regions of feature maps. - - Please refer to the method proposed in `DropBlock - `_ for details. - - Args: - drop_prob (float): The probability of dropping each block. - block_size (int): The size of dropped blocks. - warmup_iters (int): The drop probability will linearly increase - from `0` to `drop_prob` during the first `warmup_iters` iterations. - Default: 2000. - """ - - def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs): - super(DropBlock, self).__init__() - assert block_size % 2 == 1 - assert 0 < drop_prob <= 1 - assert warmup_iters >= 0 - self.drop_prob = drop_prob - self.block_size = block_size - self.warmup_iters = warmup_iters - self.iter_cnt = 0 - - def forward(self, x): - """ - Args: - x (Tensor): Input feature map on which some areas will be randomly - dropped. - - Returns: - Tensor: The tensor after DropBlock layer. - """ - if not self.training: - return x - self.iter_cnt += 1 - N, C, H, W = list(x.shape) - gamma = self._compute_gamma((H, W)) - mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1) - mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device)) - - mask = F.pad(mask, [self.block_size // 2] * 4, value=0) - mask = F.max_pool2d( - input=mask, - stride=(1, 1), - kernel_size=(self.block_size, self.block_size), - padding=self.block_size // 2) - mask = 1 - mask - x = x * mask * mask.numel() / (eps + mask.sum()) - return x - - def _compute_gamma(self, feat_size): - """Compute the value of gamma according to paper. gamma is the - parameter of bernoulli distribution, which controls the number of - features to drop. - - gamma = (drop_prob * fm_area) / (drop_area * keep_area) - - Args: - feat_size (tuple[int, int]): The height and width of feature map. - - Returns: - float: The value of gamma. 
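Editor's note (not part of the diff): the DropBlock module deleted below documents its Bernoulli parameter as gamma = (drop_prob * fm_area) / (drop_area * keep_area), linearly warmed up over `warmup_iters`. A worked numeric example of that formula, with arbitrary feature-map and block sizes chosen only for illustration:

```python
# Worked example of DropBlock's gamma formula and warmup scaling.
drop_prob, block_size = 0.1, 3
H, W = 32, 32

fm_area = H * W                                            # 1024
keep_area = (H - block_size + 1) * (W - block_size + 1)    # 30 * 30 = 900
drop_area = block_size ** 2                                # 9

gamma = drop_prob * fm_area / (keep_area * drop_area)
print(gamma)                                   # ~0.01264, per-seed drop probability

# during warmup (e.g. iteration 500 of 2000) the probability is scaled down
iter_cnt, warmup_iters = 500, 2000
print(gamma * iter_cnt / warmup_iters)         # ~0.00316
```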
- """ - gamma = (self.drop_prob * feat_size[0] * feat_size[1]) - gamma /= ((feat_size[0] - self.block_size + 1) * - (feat_size[1] - self.block_size + 1)) - gamma /= (self.block_size**2) - factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt / - self.warmup_iters) - return gamma * factor - - def extra_repr(self): - return (f'drop_prob={self.drop_prob}, block_size={self.block_size}, ' - f'warmup_iters={self.warmup_iters}') diff --git a/cv/detection/co-detr/pytorch/mmdet/models/plugins/msdeformattn_pixel_decoder.py b/cv/detection/co-detr/pytorch/mmdet/models/plugins/msdeformattn_pixel_decoder.py deleted file mode 100644 index d553582baefc898da4f07089ba034d21dbbfb6d7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/plugins/msdeformattn_pixel_decoder.py +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import (PLUGIN_LAYERS, Conv2d, ConvModule, caffe2_xavier_init, - normal_init, xavier_init) -from mmcv.cnn.bricks.transformer import (build_positional_encoding, - build_transformer_layer_sequence) -from mmcv.runner import BaseModule, ModuleList - -from mmdet.core.anchor import MlvlPointGenerator -from mmdet.models.utils.transformer import MultiScaleDeformableAttention - - -@PLUGIN_LAYERS.register_module() -class MSDeformAttnPixelDecoder(BaseModule): - """Pixel decoder with multi-scale deformable attention. - - Args: - in_channels (list[int] | tuple[int]): Number of channels in the - input feature maps. - strides (list[int] | tuple[int]): Output strides of feature from - backbone. - feat_channels (int): Number of channels for feature. - out_channels (int): Number of channels for output. - num_outs (int): Number of output scales. - norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization. - Defaults to dict(type='GN', num_groups=32). - act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation. - Defaults to dict(type='ReLU'). - encoder (:obj:`mmcv.ConfigDict` | dict): Config for transformer - encoder. Defaults to `DetrTransformerEncoder`. - positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for - transformer encoder position encoding. Defaults to - dict(type='SinePositionalEncoding', num_feats=128, - normalize=True). - init_cfg (:obj:`mmcv.ConfigDict` | dict): Initialization config dict. 
- """ - - def __init__(self, - in_channels=[256, 512, 1024, 2048], - strides=[4, 8, 16, 32], - feat_channels=256, - out_channels=256, - num_outs=3, - norm_cfg=dict(type='GN', num_groups=32), - act_cfg=dict(type='ReLU'), - encoder=dict( - type='DetrTransformerEncoder', - num_layers=6, - transformerlayers=dict( - type='BaseTransformerLayer', - attn_cfgs=dict( - type='MultiScaleDeformableAttention', - embed_dims=256, - num_heads=8, - num_levels=3, - num_points=4, - im2col_step=64, - dropout=0.0, - batch_first=False, - norm_cfg=None, - init_cfg=None), - feedforward_channels=1024, - ffn_dropout=0.0, - operation_order=('self_attn', 'norm', 'ffn', 'norm')), - init_cfg=None), - positional_encoding=dict( - type='SinePositionalEncoding', - num_feats=128, - normalize=True), - init_cfg=None): - super().__init__(init_cfg=init_cfg) - self.strides = strides - self.num_input_levels = len(in_channels) - self.num_encoder_levels = \ - encoder.transformerlayers.attn_cfgs.num_levels - assert self.num_encoder_levels >= 1, \ - 'num_levels in attn_cfgs must be at least one' - input_conv_list = [] - # from top to down (low to high resolution) - for i in range(self.num_input_levels - 1, - self.num_input_levels - self.num_encoder_levels - 1, - -1): - input_conv = ConvModule( - in_channels[i], - feat_channels, - kernel_size=1, - norm_cfg=norm_cfg, - act_cfg=None, - bias=True) - input_conv_list.append(input_conv) - self.input_convs = ModuleList(input_conv_list) - - self.encoder = build_transformer_layer_sequence(encoder) - self.postional_encoding = build_positional_encoding( - positional_encoding) - # high resolution to low resolution - self.level_encoding = nn.Embedding(self.num_encoder_levels, - feat_channels) - - # fpn-like structure - self.lateral_convs = ModuleList() - self.output_convs = ModuleList() - self.use_bias = norm_cfg is None - # from top to down (low to high resolution) - # fpn for the rest features that didn't pass in encoder - for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1, - -1): - lateral_conv = ConvModule( - in_channels[i], - feat_channels, - kernel_size=1, - bias=self.use_bias, - norm_cfg=norm_cfg, - act_cfg=None) - output_conv = ConvModule( - feat_channels, - feat_channels, - kernel_size=3, - stride=1, - padding=1, - bias=self.use_bias, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.lateral_convs.append(lateral_conv) - self.output_convs.append(output_conv) - - self.mask_feature = Conv2d( - feat_channels, out_channels, kernel_size=1, stride=1, padding=0) - - self.num_outs = num_outs - self.point_generator = MlvlPointGenerator(strides) - - def init_weights(self): - """Initialize weights.""" - for i in range(0, self.num_encoder_levels): - xavier_init( - self.input_convs[i].conv, - gain=1, - bias=0, - distribution='uniform') - - for i in range(0, self.num_input_levels - self.num_encoder_levels): - caffe2_xavier_init(self.lateral_convs[i].conv, bias=0) - caffe2_xavier_init(self.output_convs[i].conv, bias=0) - - caffe2_xavier_init(self.mask_feature, bias=0) - - normal_init(self.level_encoding, mean=0, std=1) - for p in self.encoder.parameters(): - if p.dim() > 1: - nn.init.xavier_normal_(p) - - # init_weights defined in MultiScaleDeformableAttention - for layer in self.encoder.layers: - for attn in layer.attentions: - if isinstance(attn, MultiScaleDeformableAttention): - attn.init_weights() - - def forward(self, feats): - """ - Args: - feats (list[Tensor]): Feature maps of each level. Each has - shape of (batch_size, c, h, w). 
- - Returns: - tuple: A tuple containing the following: - - - mask_feature (Tensor): shape (batch_size, c, h, w). - - multi_scale_features (list[Tensor]): Multi scale \ - features, each in shape (batch_size, c, h, w). - """ - # generate padding mask for each level, for each image - batch_size = feats[0].shape[0] - encoder_input_list = [] - padding_mask_list = [] - level_positional_encoding_list = [] - spatial_shapes = [] - reference_points_list = [] - for i in range(self.num_encoder_levels): - level_idx = self.num_input_levels - i - 1 - feat = feats[level_idx] - feat_projected = self.input_convs[i](feat) - h, w = feat.shape[-2:] - - # no padding - padding_mask_resized = feat.new_zeros( - (batch_size, ) + feat.shape[-2:], dtype=torch.bool) - pos_embed = self.postional_encoding(padding_mask_resized) - level_embed = self.level_encoding.weight[i] - level_pos_embed = level_embed.view(1, -1, 1, 1) + pos_embed - # (h_i * w_i, 2) - reference_points = self.point_generator.single_level_grid_priors( - feat.shape[-2:], level_idx, device=feat.device) - # normalize - factor = feat.new_tensor([[w, h]]) * self.strides[level_idx] - reference_points = reference_points / factor - - # shape (batch_size, c, h_i, w_i) -> (h_i * w_i, batch_size, c) - feat_projected = feat_projected.flatten(2).permute(2, 0, 1) - level_pos_embed = level_pos_embed.flatten(2).permute(2, 0, 1) - padding_mask_resized = padding_mask_resized.flatten(1) - - encoder_input_list.append(feat_projected) - padding_mask_list.append(padding_mask_resized) - level_positional_encoding_list.append(level_pos_embed) - spatial_shapes.append(feat.shape[-2:]) - reference_points_list.append(reference_points) - # shape (batch_size, total_num_query), - # total_num_query=sum([., h_i * w_i,.]) - padding_masks = torch.cat(padding_mask_list, dim=1) - # shape (total_num_query, batch_size, c) - encoder_inputs = torch.cat(encoder_input_list, dim=0) - level_positional_encodings = torch.cat( - level_positional_encoding_list, dim=0) - device = encoder_inputs.device - # shape (num_encoder_levels, 2), from low - # resolution to high resolution - spatial_shapes = torch.as_tensor( - spatial_shapes, dtype=torch.long, device=device) - # shape (0, h_0*w_0, h_0*w_0+h_1*w_1, ...) 
- level_start_index = torch.cat((spatial_shapes.new_zeros( - (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) - reference_points = torch.cat(reference_points_list, dim=0) - reference_points = reference_points[None, :, None].repeat( - batch_size, 1, self.num_encoder_levels, 1) - valid_radios = reference_points.new_ones( - (batch_size, self.num_encoder_levels, 2)) - # shape (num_total_query, batch_size, c) - memory = self.encoder( - query=encoder_inputs, - key=None, - value=None, - query_pos=level_positional_encodings, - key_pos=None, - attn_masks=None, - key_padding_mask=None, - query_key_padding_mask=padding_masks, - spatial_shapes=spatial_shapes, - reference_points=reference_points, - level_start_index=level_start_index, - valid_radios=valid_radios) - # (num_total_query, batch_size, c) -> (batch_size, c, num_total_query) - memory = memory.permute(1, 2, 0) - - # from low resolution to high resolution - num_query_per_level = [e[0] * e[1] for e in spatial_shapes] - outs = torch.split(memory, num_query_per_level, dim=-1) - outs = [ - x.reshape(batch_size, -1, spatial_shapes[i][0], - spatial_shapes[i][1]) for i, x in enumerate(outs) - ] - - for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1, - -1): - x = feats[i] - cur_feat = self.lateral_convs[i](x) - y = cur_feat + F.interpolate( - outs[-1], - size=cur_feat.shape[-2:], - mode='bilinear', - align_corners=False) - y = self.output_convs[i](y) - outs.append(y) - multi_scale_features = outs[:self.num_outs] - - mask_feature = self.mask_feature(outs[-1]) - return mask_feature, multi_scale_features diff --git a/cv/detection/co-detr/pytorch/mmdet/models/plugins/pixel_decoder.py b/cv/detection/co-detr/pytorch/mmdet/models/plugins/pixel_decoder.py deleted file mode 100644 index 537a187dc5c53279afff377c548e224ac092de69..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/plugins/pixel_decoder.py +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import PLUGIN_LAYERS, Conv2d, ConvModule, caffe2_xavier_init -from mmcv.cnn.bricks.transformer import (build_positional_encoding, - build_transformer_layer_sequence) -from mmcv.runner import BaseModule, ModuleList - - -@PLUGIN_LAYERS.register_module() -class PixelDecoder(BaseModule): - """Pixel decoder with a structure like fpn. - - Args: - in_channels (list[int] | tuple[int]): Number of channels in the - input feature maps. - feat_channels (int): Number channels for feature. - out_channels (int): Number channels for output. - norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization. - Defaults to dict(type='GN', num_groups=32). - act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation. - Defaults to dict(type='ReLU'). - encoder (:obj:`mmcv.ConfigDict` | dict): Config for transorformer - encoder.Defaults to None. - positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for - transformer encoder position encoding. Defaults to - dict(type='SinePositionalEncoding', num_feats=128, - normalize=True). - init_cfg (:obj:`mmcv.ConfigDict` | dict): Initialization config dict. 
- Default: None - """ - - def __init__(self, - in_channels, - feat_channels, - out_channels, - norm_cfg=dict(type='GN', num_groups=32), - act_cfg=dict(type='ReLU'), - init_cfg=None): - super().__init__(init_cfg=init_cfg) - self.in_channels = in_channels - self.num_inputs = len(in_channels) - self.lateral_convs = ModuleList() - self.output_convs = ModuleList() - self.use_bias = norm_cfg is None - for i in range(0, self.num_inputs - 1): - lateral_conv = ConvModule( - in_channels[i], - feat_channels, - kernel_size=1, - bias=self.use_bias, - norm_cfg=norm_cfg, - act_cfg=None) - output_conv = ConvModule( - feat_channels, - feat_channels, - kernel_size=3, - stride=1, - padding=1, - bias=self.use_bias, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.lateral_convs.append(lateral_conv) - self.output_convs.append(output_conv) - - self.last_feat_conv = ConvModule( - in_channels[-1], - feat_channels, - kernel_size=3, - padding=1, - stride=1, - bias=self.use_bias, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.mask_feature = Conv2d( - feat_channels, out_channels, kernel_size=3, stride=1, padding=1) - - def init_weights(self): - """Initialize weights.""" - for i in range(0, self.num_inputs - 2): - caffe2_xavier_init(self.lateral_convs[i].conv, bias=0) - caffe2_xavier_init(self.output_convs[i].conv, bias=0) - - caffe2_xavier_init(self.mask_feature, bias=0) - caffe2_xavier_init(self.last_feat_conv, bias=0) - - def forward(self, feats, img_metas): - """ - Args: - feats (list[Tensor]): Feature maps of each level. Each has - shape of (batch_size, c, h, w). - img_metas (list[dict]): List of image information. Pass in - for creating more accurate padding mask. Not used here. - - Returns: - tuple: a tuple containing the following: - - mask_feature (Tensor): Shape (batch_size, c, h, w). - - memory (Tensor): Output of last stage of backbone.\ - Shape (batch_size, c, h, w). - """ - y = self.last_feat_conv(feats[-1]) - for i in range(self.num_inputs - 2, -1, -1): - x = feats[i] - cur_feat = self.lateral_convs[i](x) - y = cur_feat + \ - F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest') - y = self.output_convs[i](y) - - mask_feature = self.mask_feature(y) - memory = feats[-1] - return mask_feature, memory - - -@PLUGIN_LAYERS.register_module() -class TransformerEncoderPixelDecoder(PixelDecoder): - """Pixel decoder with transormer encoder inside. - - Args: - in_channels (list[int] | tuple[int]): Number of channels in the - input feature maps. - feat_channels (int): Number channels for feature. - out_channels (int): Number channels for output. - norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization. - Defaults to dict(type='GN', num_groups=32). - act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation. - Defaults to dict(type='ReLU'). - encoder (:obj:`mmcv.ConfigDict` | dict): Config for transorformer - encoder.Defaults to None. - positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for - transformer encoder position encoding. Defaults to - dict(type='SinePositionalEncoding', num_feats=128, - normalize=True). - init_cfg (:obj:`mmcv.ConfigDict` | dict): Initialization config dict. 
- Default: None - """ - - def __init__(self, - in_channels, - feat_channels, - out_channels, - norm_cfg=dict(type='GN', num_groups=32), - act_cfg=dict(type='ReLU'), - encoder=None, - positional_encoding=dict( - type='SinePositionalEncoding', - num_feats=128, - normalize=True), - init_cfg=None): - super(TransformerEncoderPixelDecoder, self).__init__( - in_channels, - feat_channels, - out_channels, - norm_cfg, - act_cfg, - init_cfg=init_cfg) - self.last_feat_conv = None - - self.encoder = build_transformer_layer_sequence(encoder) - self.encoder_embed_dims = self.encoder.embed_dims - assert self.encoder_embed_dims == feat_channels, 'embed_dims({}) of ' \ - 'tranformer encoder must equal to feat_channels({})'.format( - feat_channels, self.encoder_embed_dims) - self.positional_encoding = build_positional_encoding( - positional_encoding) - self.encoder_in_proj = Conv2d( - in_channels[-1], feat_channels, kernel_size=1) - self.encoder_out_proj = ConvModule( - feat_channels, - feat_channels, - kernel_size=3, - stride=1, - padding=1, - bias=self.use_bias, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - def init_weights(self): - """Initialize weights.""" - for i in range(0, self.num_inputs - 2): - caffe2_xavier_init(self.lateral_convs[i].conv, bias=0) - caffe2_xavier_init(self.output_convs[i].conv, bias=0) - - caffe2_xavier_init(self.mask_feature, bias=0) - caffe2_xavier_init(self.encoder_in_proj, bias=0) - caffe2_xavier_init(self.encoder_out_proj.conv, bias=0) - - for p in self.encoder.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def forward(self, feats, img_metas): - """ - Args: - feats (list[Tensor]): Feature maps of each level. Each has - shape of (batch_size, c, h, w). - img_metas (list[dict]): List of image information. Pass in - for creating more accurate padding mask. - - Returns: - tuple: a tuple containing the following: - - mask_feature (Tensor): shape (batch_size, c, h, w). - - memory (Tensor): shape (batch_size, c, h, w). 
- """ - feat_last = feats[-1] - bs, c, h, w = feat_last.shape - input_img_h, input_img_w = img_metas[0]['batch_input_shape'] - padding_mask = feat_last.new_ones((bs, input_img_h, input_img_w), - dtype=torch.float32) - for i in range(bs): - img_h, img_w, _ = img_metas[i]['img_shape'] - padding_mask[i, :img_h, :img_w] = 0 - padding_mask = F.interpolate( - padding_mask.unsqueeze(1), - size=feat_last.shape[-2:], - mode='nearest').to(torch.bool).squeeze(1) - - pos_embed = self.positional_encoding(padding_mask) - feat_last = self.encoder_in_proj(feat_last) - # (batch_size, c, h, w) -> (num_queries, batch_size, c) - feat_last = feat_last.flatten(2).permute(2, 0, 1) - pos_embed = pos_embed.flatten(2).permute(2, 0, 1) - # (batch_size, h, w) -> (batch_size, h*w) - padding_mask = padding_mask.flatten(1) - memory = self.encoder( - query=feat_last, - key=None, - value=None, - query_pos=pos_embed, - query_key_padding_mask=padding_mask) - # (num_queries, batch_size, c) -> (batch_size, c, h, w) - memory = memory.permute(1, 2, 0).view(bs, self.encoder_embed_dims, h, - w) - y = self.encoder_out_proj(memory) - for i in range(self.num_inputs - 2, -1, -1): - x = feats[i] - cur_feat = self.lateral_convs[i](x) - y = cur_feat + \ - F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest') - y = self.output_convs[i](y) - - mask_feature = self.mask_feature(y) - return mask_feature, memory diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/__init__.py deleted file mode 100644 index baae2a0535327ae3289398ff8e2df020a55aab93..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .base_roi_head import BaseRoIHead -from .bbox_heads import (BBoxHead, ConvFCBBoxHead, DIIHead, - DoubleConvFCBBoxHead, SABLHead, SCNetBBoxHead, - Shared2FCBBoxHead, Shared4Conv1FCBBoxHead) -from .cascade_roi_head import CascadeRoIHead -from .double_roi_head import DoubleHeadRoIHead -from .dynamic_roi_head import DynamicRoIHead -from .grid_roi_head import GridRoIHead -from .htc_roi_head import HybridTaskCascadeRoIHead -from .mask_heads import (CoarseMaskHead, FCNMaskHead, FeatureRelayHead, - FusedSemanticHead, GlobalContextHead, GridHead, - HTCMaskHead, MaskIoUHead, MaskPointHead, - SCNetMaskHead, SCNetSemanticHead) -from .mask_scoring_roi_head import MaskScoringRoIHead -from .pisa_roi_head import PISARoIHead -from .point_rend_roi_head import PointRendRoIHead -from .roi_extractors import (BaseRoIExtractor, GenericRoIExtractor, - SingleRoIExtractor) -from .scnet_roi_head import SCNetRoIHead -from .shared_heads import ResLayer -from .sparse_roi_head import SparseRoIHead -from .standard_roi_head import StandardRoIHead -from .trident_roi_head import TridentRoIHead - -__all__ = [ - 'BaseRoIHead', 'CascadeRoIHead', 'DoubleHeadRoIHead', 'MaskScoringRoIHead', - 'HybridTaskCascadeRoIHead', 'GridRoIHead', 'ResLayer', 'BBoxHead', - 'ConvFCBBoxHead', 'DIIHead', 'SABLHead', 'Shared2FCBBoxHead', - 'StandardRoIHead', 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', - 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', - 'MaskIoUHead', 'BaseRoIExtractor', 'GenericRoIExtractor', - 'SingleRoIExtractor', 'PISARoIHead', 'PointRendRoIHead', 'MaskPointHead', - 'CoarseMaskHead', 'DynamicRoIHead', 'SparseRoIHead', 'TridentRoIHead', - 'SCNetRoIHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'SCNetBBoxHead', - 'FeatureRelayHead', 'GlobalContextHead' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/base_roi_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/base_roi_head.py deleted file mode 100644 index 4adbdef8f2f9ffb9a75c23c45481fc9bb3de9246..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/base_roi_head.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from abc import ABCMeta, abstractmethod - -from mmcv.runner import BaseModule - -from ..builder import build_shared_head - - -class BaseRoIHead(BaseModule, metaclass=ABCMeta): - """Base class for RoIHeads.""" - - def __init__(self, - bbox_roi_extractor=None, - bbox_head=None, - mask_roi_extractor=None, - mask_head=None, - shared_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(BaseRoIHead, self).__init__(init_cfg) - self.train_cfg = train_cfg - self.test_cfg = test_cfg - if shared_head is not None: - shared_head.pretrained = pretrained - self.shared_head = build_shared_head(shared_head) - - if bbox_head is not None: - self.init_bbox_head(bbox_roi_extractor, bbox_head) - - if mask_head is not None: - self.init_mask_head(mask_roi_extractor, mask_head) - - self.init_assigner_sampler() - - @property - def with_bbox(self): - """bool: whether the RoI head contains a `bbox_head`""" - return hasattr(self, 'bbox_head') and self.bbox_head is not None - - @property - def with_mask(self): - """bool: whether the RoI head contains a `mask_head`""" - return hasattr(self, 'mask_head') and self.mask_head is not None - - @property - def with_shared_head(self): - """bool: whether the RoI head contains a `shared_head`""" - return hasattr(self, 'shared_head') and self.shared_head is not None - - @abstractmethod - def init_bbox_head(self): - """Initialize ``bbox_head``""" - pass - - @abstractmethod - def init_mask_head(self): - """Initialize ``mask_head``""" - pass - - @abstractmethod - def init_assigner_sampler(self): - """Initialize assigner and sampler.""" - pass - - @abstractmethod - def forward_train(self, - x, - img_meta, - proposal_list, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None, - **kwargs): - """Forward function during training.""" - - async def async_simple_test(self, - x, - proposal_list, - img_metas, - proposals=None, - rescale=False, - **kwargs): - """Asynchronized test function.""" - raise NotImplementedError - - def simple_test(self, - x, - proposal_list, - img_meta, - proposals=None, - rescale=False, - **kwargs): - """Test without augmentation.""" - - def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs): - """Test with augmentations. - - If rescale is False, then returned bboxes and masks will fit the scale - of imgs[0]. - """ diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/__init__.py deleted file mode 100644 index d1207dbeead6fedc24e6b497fb98558998a14396..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
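`BaseRoIHead` above is an abstract interface: concrete heads must implement `init_bbox_head`, `init_mask_head`, `init_assigner_sampler` and `forward_train`, while the `with_*` properties simply report which sub-modules were attached. A hedged, minimal subclass sketch; the class name and placeholder bodies are invented for illustration and are not a real mmdet head:

```
class DummyRoIHead(BaseRoIHead):
    """Placeholder subclass showing which hooks the base class expects."""

    def init_bbox_head(self, bbox_roi_extractor, bbox_head):
        # real heads build the RoI extractor and bbox head modules here
        self.bbox_roi_extractor = None
        self.bbox_head = None

    def init_mask_head(self, mask_roi_extractor, mask_head):
        self.mask_roi_extractor = None
        self.mask_head = None

    def init_assigner_sampler(self):
        self.bbox_assigner = None
        self.bbox_sampler = None

    def forward_train(self, x, img_meta, proposal_list, gt_bboxes, gt_labels,
                      gt_bboxes_ignore=None, gt_masks=None, **kwargs):
        return dict()  # real heads return a dict of losses


head = DummyRoIHead()
print(head.with_bbox, head.with_mask, head.with_shared_head)  # False False False
```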
-from .bbox_head import BBoxHead -from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead, - Shared4Conv1FCBBoxHead) -from .dii_head import DIIHead -from .double_bbox_head import DoubleConvFCBBoxHead -from .sabl_head import SABLHead -from .scnet_bbox_head import SCNetBBoxHead - -__all__ = [ - 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead', - 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead', - 'SCNetBBoxHead' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/bbox_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/bbox_head.py deleted file mode 100644 index 461b18b7fe4a408a2c01baf213ffa6170b7acc3a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/bbox_head.py +++ /dev/null @@ -1,594 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.runner import BaseModule, auto_fp16, force_fp32 -from torch.nn.modules.utils import _pair - -from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms -from mmdet.models.builder import HEADS, build_loss -from mmdet.models.losses import accuracy -from mmdet.models.utils import build_linear_layer - - -@HEADS.register_module() -class BBoxHead(BaseModule): - """Simplest RoI head, with only two fc layers for classification and - regression respectively.""" - - def __init__(self, - with_avg_pool=False, - with_cls=True, - with_reg=True, - roi_feat_size=7, - in_channels=256, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - clip_border=True, - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - reg_decoded_bbox=False, - reg_predictor_cfg=dict(type='Linear'), - cls_predictor_cfg=dict(type='Linear'), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict( - type='SmoothL1Loss', beta=1.0, loss_weight=1.0), - init_cfg=None): - super(BBoxHead, self).__init__(init_cfg) - assert with_cls or with_reg - self.with_avg_pool = with_avg_pool - self.with_cls = with_cls - self.with_reg = with_reg - self.roi_feat_size = _pair(roi_feat_size) - self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1] - self.in_channels = in_channels - self.num_classes = num_classes - self.reg_class_agnostic = reg_class_agnostic - self.reg_decoded_bbox = reg_decoded_bbox - self.reg_predictor_cfg = reg_predictor_cfg - self.cls_predictor_cfg = cls_predictor_cfg - self.fp16_enabled = False - - self.bbox_coder = build_bbox_coder(bbox_coder) - self.loss_cls = build_loss(loss_cls) - self.loss_bbox = build_loss(loss_bbox) - - in_channels = self.in_channels - if self.with_avg_pool: - self.avg_pool = nn.AvgPool2d(self.roi_feat_size) - else: - in_channels *= self.roi_feat_area - if self.with_cls: - # need to add background class - if self.custom_cls_channels: - cls_channels = self.loss_cls.get_cls_channels(self.num_classes) - else: - cls_channels = num_classes + 1 - self.fc_cls = build_linear_layer( - self.cls_predictor_cfg, - in_features=in_channels, - out_features=cls_channels) - if self.with_reg: - out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes - self.fc_reg = build_linear_layer( - self.reg_predictor_cfg, - in_features=in_channels, - out_features=out_dim_reg) - self.debug_imgs = None - if init_cfg is None: - self.init_cfg = [] - if self.with_cls: - self.init_cfg += [ - dict( - type='Normal', std=0.01, override=dict(name='fc_cls')) - ] - if 
self.with_reg: - self.init_cfg += [ - dict( - type='Normal', std=0.001, override=dict(name='fc_reg')) - ] - - @property - def custom_cls_channels(self): - return getattr(self.loss_cls, 'custom_cls_channels', False) - - @property - def custom_activation(self): - return getattr(self.loss_cls, 'custom_activation', False) - - @property - def custom_accuracy(self): - return getattr(self.loss_cls, 'custom_accuracy', False) - - @auto_fp16() - def forward(self, x): - if self.with_avg_pool: - if x.numel() > 0: - x = self.avg_pool(x) - x = x.view(x.size(0), -1) - else: - # avg_pool does not support empty tensor, - # so use torch.mean instead it - x = torch.mean(x, dim=(-1, -2)) - cls_score = self.fc_cls(x) if self.with_cls else None - bbox_pred = self.fc_reg(x) if self.with_reg else None - return cls_score, bbox_pred - - def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes, - pos_gt_labels, cfg): - """Calculate the ground truth for proposals in the single image - according to the sampling results. - - Args: - pos_bboxes (Tensor): Contains all the positive boxes, - has shape (num_pos, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - neg_bboxes (Tensor): Contains all the negative boxes, - has shape (num_neg, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - pos_gt_bboxes (Tensor): Contains gt_boxes for - all positive samples, has shape (num_pos, 4), - the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - pos_gt_labels (Tensor): Contains gt_labels for - all positive samples, has shape (num_pos, ). - cfg (obj:`ConfigDict`): `train_cfg` of R-CNN. - - Returns: - Tuple[Tensor]: Ground truth for proposals - in a single image. Containing the following Tensors: - - - labels(Tensor): Gt_labels for all proposals, has - shape (num_proposals,). - - label_weights(Tensor): Labels_weights for all - proposals, has shape (num_proposals,). - - bbox_targets(Tensor):Regression target for all - proposals, has shape (num_proposals, 4), the - last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - - bbox_weights(Tensor):Regression weights for all - proposals, has shape (num_proposals, 4). - """ - num_pos = pos_bboxes.size(0) - num_neg = neg_bboxes.size(0) - num_samples = num_pos + num_neg - - # original implementation uses new_zeros since BG are set to be 0 - # now use empty & fill because BG cat_id = num_classes, - # FG cat_id = [0, num_classes-1] - labels = pos_bboxes.new_full((num_samples, ), - self.num_classes, - dtype=torch.long) - label_weights = pos_bboxes.new_zeros(num_samples) - bbox_targets = pos_bboxes.new_zeros(num_samples, 4) - bbox_weights = pos_bboxes.new_zeros(num_samples, 4) - if num_pos > 0: - labels[:num_pos] = pos_gt_labels - pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight - label_weights[:num_pos] = pos_weight - if not self.reg_decoded_bbox: - pos_bbox_targets = self.bbox_coder.encode( - pos_bboxes, pos_gt_bboxes) - else: - # When the regression loss (e.g. `IouLoss`, `GIouLoss`) - # is applied directly on the decoded bounding boxes, both - # the predicted boxes and regression targets should be with - # absolute coordinate format. 
- pos_bbox_targets = pos_gt_bboxes - bbox_targets[:num_pos, :] = pos_bbox_targets - bbox_weights[:num_pos, :] = 1 - if num_neg > 0: - label_weights[-num_neg:] = 1.0 - - return labels, label_weights, bbox_targets, bbox_weights - - def get_targets(self, - sampling_results, - gt_bboxes, - gt_labels, - rcnn_train_cfg, - concat=True): - """Calculate the ground truth for all samples in a batch according to - the sampling_results. - - Almost the same as the implementation in bbox_head, we passed - additional parameters pos_inds_list and neg_inds_list to - `_get_target_single` function. - - Args: - sampling_results (List[obj:SamplingResults]): Assign results of - all images in a batch after sampling. - gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch, - each tensor has shape (num_gt, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - gt_labels (list[Tensor]): Gt_labels of all images in a batch, - each tensor has shape (num_gt,). - rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. - concat (bool): Whether to concatenate the results of all - the images in a single batch. - - Returns: - Tuple[Tensor]: Ground truth for proposals in a single image. - Containing the following list of Tensors: - - - labels (list[Tensor],Tensor): Gt_labels for all - proposals in a batch, each tensor in list has - shape (num_proposals,) when `concat=False`, otherwise - just a single tensor has shape (num_all_proposals,). - - label_weights (list[Tensor]): Labels_weights for - all proposals in a batch, each tensor in list has - shape (num_proposals,) when `concat=False`, otherwise - just a single tensor has shape (num_all_proposals,). - - bbox_targets (list[Tensor],Tensor): Regression target - for all proposals in a batch, each tensor in list - has shape (num_proposals, 4) when `concat=False`, - otherwise just a single tensor has shape - (num_all_proposals, 4), the last dimension 4 represents - [tl_x, tl_y, br_x, br_y]. - - bbox_weights (list[tensor],Tensor): Regression weights for - all proposals in a batch, each tensor in list has shape - (num_proposals, 4) when `concat=False`, otherwise just a - single tensor has shape (num_all_proposals, 4). - """ - pos_bboxes_list = [res.pos_bboxes for res in sampling_results] - neg_bboxes_list = [res.neg_bboxes for res in sampling_results] - pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] - pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results] - labels, label_weights, bbox_targets, bbox_weights = multi_apply( - self._get_target_single, - pos_bboxes_list, - neg_bboxes_list, - pos_gt_bboxes_list, - pos_gt_labels_list, - cfg=rcnn_train_cfg) - - if concat: - labels = torch.cat(labels, 0) - label_weights = torch.cat(label_weights, 0) - bbox_targets = torch.cat(bbox_targets, 0) - bbox_weights = torch.cat(bbox_weights, 0) - return labels, label_weights, bbox_targets, bbox_weights - - @force_fp32(apply_to=('cls_score', 'bbox_pred')) - def loss(self, - cls_score, - bbox_pred, - rois, - labels, - label_weights, - bbox_targets, - bbox_weights, - reduction_override=None): - losses = dict() - if cls_score is not None: - avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) 
- if cls_score.numel() > 0: - loss_cls_ = self.loss_cls( - cls_score, - labels, - label_weights, - avg_factor=avg_factor, - reduction_override=reduction_override) - if isinstance(loss_cls_, dict): - losses.update(loss_cls_) - else: - losses['loss_cls'] = loss_cls_ - if self.custom_activation: - acc_ = self.loss_cls.get_accuracy(cls_score, labels) - losses.update(acc_) - else: - losses['acc'] = accuracy(cls_score, labels) - if bbox_pred is not None: - bg_class_ind = self.num_classes - # 0~self.num_classes-1 are FG, self.num_classes is BG - pos_inds = (labels >= 0) & (labels < bg_class_ind) - # do not perform bounding box regression for BG anymore. - if pos_inds.any(): - if self.reg_decoded_bbox: - # When the regression loss (e.g. `IouLoss`, - # `GIouLoss`, `DIouLoss`) is applied directly on - # the decoded bounding boxes, it decodes the - # already encoded coordinates to absolute format. - bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred) - if self.reg_class_agnostic: - pos_bbox_pred = bbox_pred.view( - bbox_pred.size(0), 4)[pos_inds.type(torch.bool)] - else: - pos_bbox_pred = bbox_pred.view( - bbox_pred.size(0), -1, - 4)[pos_inds.type(torch.bool), - labels[pos_inds.type(torch.bool)]] - losses['loss_bbox'] = self.loss_bbox( - pos_bbox_pred, - bbox_targets[pos_inds.type(torch.bool)], - bbox_weights[pos_inds.type(torch.bool)], - avg_factor=bbox_targets.size(0), - reduction_override=reduction_override) - else: - losses['loss_bbox'] = bbox_pred[pos_inds].sum() - return losses - - @force_fp32(apply_to=('cls_score', 'bbox_pred')) - def get_bboxes(self, - rois, - cls_score, - bbox_pred, - img_shape, - scale_factor, - rescale=False, - cfg=None): - """Transform network output for a batch into bbox predictions. - - Args: - rois (Tensor): Boxes to be transformed. Has shape (num_boxes, 5). - last dimension 5 arrange as (batch_index, x1, y1, x2, y2). - cls_score (Tensor): Box scores, has shape - (num_boxes, num_classes + 1). - bbox_pred (Tensor, optional): Box energies / deltas. - has shape (num_boxes, num_classes * 4). - img_shape (Sequence[int], optional): Maximum bounds for boxes, - specifies (H, W, C) or (H, W). - scale_factor (ndarray): Scale factor of the - image arrange as (w_scale, h_scale, w_scale, h_scale). - rescale (bool): If True, return boxes in original image space. - Default: False. - cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None - - Returns: - tuple[Tensor, Tensor]: - First tensor is `det_bboxes`, has the shape - (num_boxes, 5) and last - dimension 5 represent (tl_x, tl_y, br_x, br_y, score). - Second tensor is the labels with shape (num_boxes, ). - """ - - # some loss (Seesaw loss..) may have custom activation - if self.custom_cls_channels: - scores = self.loss_cls.get_activation(cls_score) - else: - scores = F.softmax( - cls_score, dim=-1) if cls_score is not None else None - # bbox_pred would be None in some detector when with_reg is False, - # e.g. Grid R-CNN. 
- if bbox_pred is not None: - bboxes = self.bbox_coder.decode( - rois[..., 1:], bbox_pred, max_shape=img_shape) - else: - bboxes = rois[:, 1:].clone() - if img_shape is not None: - bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1]) - bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0]) - - if rescale and bboxes.size(0) > 0: - scale_factor = bboxes.new_tensor(scale_factor) - bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( - bboxes.size()[0], -1) - - if cfg is None: - return bboxes, scores - else: - det_bboxes, det_labels = multiclass_nms(bboxes, scores, - cfg.score_thr, cfg.nms, - cfg.max_per_img) - - return det_bboxes, det_labels - - @force_fp32(apply_to=('bbox_preds', )) - def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): - """Refine bboxes during training. - - Args: - rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, - and bs is the sampled RoIs per image. The first column is - the image id and the next 4 columns are x1, y1, x2, y2. - labels (Tensor): Shape (n*bs, ). - bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class). - pos_is_gts (list[Tensor]): Flags indicating if each positive bbox - is a gt bbox. - img_metas (list[dict]): Meta info of each image. - - Returns: - list[Tensor]: Refined bboxes of each image in a mini-batch. - - Example: - >>> # xdoctest: +REQUIRES(module:kwarray) - >>> import kwarray - >>> import numpy as np - >>> from mmdet.core.bbox.demodata import random_boxes - >>> self = BBoxHead(reg_class_agnostic=True) - >>> n_roi = 2 - >>> n_img = 4 - >>> scale = 512 - >>> rng = np.random.RandomState(0) - >>> img_metas = [{'img_shape': (scale, scale)} - ... for _ in range(n_img)] - >>> # Create rois in the expected format - >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng) - >>> img_ids = torch.randint(0, n_img, (n_roi,)) - >>> img_ids = img_ids.float() - >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1) - >>> # Create other args - >>> labels = torch.randint(0, 2, (n_roi,)).long() - >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng) - >>> # For each image, pretend random positive boxes are gts - >>> is_label_pos = (labels.numpy() > 0).astype(np.int) - >>> lbl_per_img = kwarray.group_items(is_label_pos, - ... img_ids.numpy()) - >>> pos_per_img = [sum(lbl_per_img.get(gid, [])) - ... for gid in range(n_img)] - >>> pos_is_gts = [ - >>> torch.randint(0, 2, (npos,)).byte().sort( - >>> descending=True)[0] - >>> for npos in pos_per_img - >>> ] - >>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds, - >>> pos_is_gts, img_metas) - >>> print(bboxes_list) - """ - img_ids = rois[:, 0].long().unique(sorted=True) - assert img_ids.numel() <= len(img_metas) - - bboxes_list = [] - for i in range(len(img_metas)): - inds = torch.nonzero( - rois[:, 0] == i, as_tuple=False).squeeze(dim=1) - num_rois = inds.numel() - - bboxes_ = rois[inds, 1:] - label_ = labels[inds] - bbox_pred_ = bbox_preds[inds] - img_meta_ = img_metas[i] - pos_is_gts_ = pos_is_gts[i] - - bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, - img_meta_) - - # filter gt bboxes - pos_keep = 1 - pos_is_gts_ - keep_inds = pos_is_gts_.new_ones(num_rois) - keep_inds[:len(pos_is_gts_)] = pos_keep - - bboxes_list.append(bboxes[keep_inds.type(torch.bool)]) - - return bboxes_list - - @force_fp32(apply_to=('bbox_pred', )) - def regress_by_class(self, rois, label, bbox_pred, img_meta): - """Regress the bbox for the predicted class. Used in Cascade R-CNN. 
- - Args: - rois (Tensor): Rois from `rpn_head` or last stage - `bbox_head`, has shape (num_proposals, 4) or - (num_proposals, 5). - label (Tensor): Only used when `self.reg_class_agnostic` - is False, has shape (num_proposals, ). - bbox_pred (Tensor): Regression prediction of - current stage `bbox_head`. When `self.reg_class_agnostic` - is False, it has shape (n, num_classes * 4), otherwise - it has shape (n, 4). - img_meta (dict): Image meta info. - - Returns: - Tensor: Regressed bboxes, the same shape as input rois. - """ - - assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape) - - if not self.reg_class_agnostic: - label = label * 4 - inds = torch.stack((label, label + 1, label + 2, label + 3), 1) - bbox_pred = torch.gather(bbox_pred, 1, inds) - assert bbox_pred.size(1) == 4 - - max_shape = img_meta['img_shape'] - - if rois.size(1) == 4: - new_rois = self.bbox_coder.decode( - rois, bbox_pred, max_shape=max_shape) - else: - bboxes = self.bbox_coder.decode( - rois[:, 1:], bbox_pred, max_shape=max_shape) - new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) - - return new_rois - - def onnx_export(self, - rois, - cls_score, - bbox_pred, - img_shape, - cfg=None, - **kwargs): - """Transform network output for a batch into bbox predictions. - - Args: - rois (Tensor): Boxes to be transformed. - Has shape (B, num_boxes, 5) - cls_score (Tensor): Box scores. has shape - (B, num_boxes, num_classes + 1), 1 represent the background. - bbox_pred (Tensor, optional): Box energies / deltas for, - has shape (B, num_boxes, num_classes * 4) when. - img_shape (torch.Tensor): Shape of image. - cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None - - Returns: - tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] - and class labels of shape [N, num_det]. - """ - - assert rois.ndim == 3, 'Only support export two stage ' \ - 'model to ONNX ' \ - 'with batch dimension. ' - if self.custom_cls_channels: - scores = self.loss_cls.get_activation(cls_score) - else: - scores = F.softmax( - cls_score, dim=-1) if cls_score is not None else None - - if bbox_pred is not None: - bboxes = self.bbox_coder.decode( - rois[..., 1:], bbox_pred, max_shape=img_shape) - else: - bboxes = rois[..., 1:].clone() - if img_shape is not None: - max_shape = bboxes.new_tensor(img_shape)[..., :2] - min_xy = bboxes.new_tensor(0) - max_xy = torch.cat( - [max_shape] * 2, dim=-1).flip(-1).unsqueeze(-2) - bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) - bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) - - # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment - from mmdet.core.export import add_dummy_nms_for_onnx - max_output_boxes_per_class = cfg.nms.get('max_output_boxes_per_class', - cfg.max_per_img) - iou_threshold = cfg.nms.get('iou_threshold', 0.5) - score_threshold = cfg.score_thr - nms_pre = cfg.get('deploy_nms_pre', -1) - - scores = scores[..., :self.num_classes] - if self.reg_class_agnostic: - return add_dummy_nms_for_onnx( - bboxes, - scores, - max_output_boxes_per_class, - iou_threshold, - score_threshold, - pre_top_k=nms_pre, - after_top_k=cfg.max_per_img) - else: - batch_size = scores.shape[0] - labels = torch.arange( - self.num_classes, dtype=torch.long).to(scores.device) - labels = labels.view(1, 1, -1).expand_as(scores) - labels = labels.reshape(batch_size, -1) - scores = scores.reshape(batch_size, -1) - bboxes = bboxes.reshape(batch_size, -1, 4) - - max_size = torch.max(img_shape) - # Offset bboxes of each class so that bboxes of different labels - # do not overlap. 
- offsets = (labels * max_size + 1).unsqueeze(2) - bboxes_for_nms = bboxes + offsets - - batch_dets, labels = add_dummy_nms_for_onnx( - bboxes_for_nms, - scores.unsqueeze(2), - max_output_boxes_per_class, - iou_threshold, - score_threshold, - pre_top_k=nms_pre, - after_top_k=cfg.max_per_img, - labels=labels) - # Offset the bboxes back after dummy nms. - offsets = (labels * max_size + 1).unsqueeze(2) - # Indexing + inplace operation fails with dynamic shape in ONNX - # original style: batch_dets[..., :4] -= offsets - bboxes, scores = batch_dets[..., 0:4], batch_dets[..., 4:5] - bboxes -= offsets - batch_dets = torch.cat([bboxes, scores], dim=2) - return batch_dets, labels diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py deleted file mode 100644 index 21124b9c9f266d404a8dbbcf72630601d1376beb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -from mmcv.cnn import ConvModule - -from mmdet.models.builder import HEADS -from mmdet.models.utils import build_linear_layer -from .bbox_head import BBoxHead - - -@HEADS.register_module() -class ConvFCBBoxHead(BBoxHead): - r"""More general bbox head, with shared conv and fc layers and two optional - separated branches. - - .. code-block:: none - - /-> cls convs -> cls fcs -> cls - shared convs -> shared fcs - \-> reg convs -> reg fcs -> reg - """ # noqa: W605 - - def __init__(self, - num_shared_convs=0, - num_shared_fcs=0, - num_cls_convs=0, - num_cls_fcs=0, - num_reg_convs=0, - num_reg_fcs=0, - conv_out_channels=256, - fc_out_channels=1024, - conv_cfg=None, - norm_cfg=None, - init_cfg=None, - *args, - **kwargs): - super(ConvFCBBoxHead, self).__init__( - *args, init_cfg=init_cfg, **kwargs) - assert (num_shared_convs + num_shared_fcs + num_cls_convs + - num_cls_fcs + num_reg_convs + num_reg_fcs > 0) - if num_cls_convs > 0 or num_reg_convs > 0: - assert num_shared_fcs == 0 - if not self.with_cls: - assert num_cls_convs == 0 and num_cls_fcs == 0 - if not self.with_reg: - assert num_reg_convs == 0 and num_reg_fcs == 0 - self.num_shared_convs = num_shared_convs - self.num_shared_fcs = num_shared_fcs - self.num_cls_convs = num_cls_convs - self.num_cls_fcs = num_cls_fcs - self.num_reg_convs = num_reg_convs - self.num_reg_fcs = num_reg_fcs - self.conv_out_channels = conv_out_channels - self.fc_out_channels = fc_out_channels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - # add shared convs and fcs - self.shared_convs, self.shared_fcs, last_layer_dim = \ - self._add_conv_fc_branch( - self.num_shared_convs, self.num_shared_fcs, self.in_channels, - True) - self.shared_out_channels = last_layer_dim - - # add cls specific branch - self.cls_convs, self.cls_fcs, self.cls_last_dim = \ - self._add_conv_fc_branch( - self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) - - # add reg specific branch - self.reg_convs, self.reg_fcs, self.reg_last_dim = \ - self._add_conv_fc_branch( - self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) - - if self.num_shared_fcs == 0 and not self.with_avg_pool: - if self.num_cls_fcs == 0: - self.cls_last_dim *= self.roi_feat_area - if self.num_reg_fcs == 0: - self.reg_last_dim *= self.roi_feat_area - - self.relu = nn.ReLU(inplace=True) - # reconstruct fc_cls and fc_reg since input channels are 
changed - if self.with_cls: - if self.custom_cls_channels: - cls_channels = self.loss_cls.get_cls_channels(self.num_classes) - else: - cls_channels = self.num_classes + 1 - self.fc_cls = build_linear_layer( - self.cls_predictor_cfg, - in_features=self.cls_last_dim, - out_features=cls_channels) - if self.with_reg: - out_dim_reg = (4 if self.reg_class_agnostic else 4 * - self.num_classes) - self.fc_reg = build_linear_layer( - self.reg_predictor_cfg, - in_features=self.reg_last_dim, - out_features=out_dim_reg) - - if init_cfg is None: - # when init_cfg is None, - # It has been set to - # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))], - # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))] - # after `super(ConvFCBBoxHead, self).__init__()` - # we only need to append additional configuration - # for `shared_fcs`, `cls_fcs` and `reg_fcs` - self.init_cfg += [ - dict( - type='Xavier', - distribution='uniform', - override=[ - dict(name='shared_fcs'), - dict(name='cls_fcs'), - dict(name='reg_fcs') - ]) - ] - - def _add_conv_fc_branch(self, - num_branch_convs, - num_branch_fcs, - in_channels, - is_shared=False): - """Add shared or separable branch. - - convs -> avg pool (optional) -> fcs - """ - last_layer_dim = in_channels - # add branch specific conv layers - branch_convs = nn.ModuleList() - if num_branch_convs > 0: - for i in range(num_branch_convs): - conv_in_channels = ( - last_layer_dim if i == 0 else self.conv_out_channels) - branch_convs.append( - ConvModule( - conv_in_channels, - self.conv_out_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - last_layer_dim = self.conv_out_channels - # add branch specific fc layers - branch_fcs = nn.ModuleList() - if num_branch_fcs > 0: - # for shared branch, only consider self.with_avg_pool - # for separated branches, also consider self.num_shared_fcs - if (is_shared - or self.num_shared_fcs == 0) and not self.with_avg_pool: - last_layer_dim *= self.roi_feat_area - for i in range(num_branch_fcs): - fc_in_channels = ( - last_layer_dim if i == 0 else self.fc_out_channels) - branch_fcs.append( - nn.Linear(fc_in_channels, self.fc_out_channels)) - last_layer_dim = self.fc_out_channels - return branch_convs, branch_fcs, last_layer_dim - - def forward(self, x): - # shared part - if self.num_shared_convs > 0: - for conv in self.shared_convs: - x = conv(x) - - if self.num_shared_fcs > 0: - if self.with_avg_pool: - x = self.avg_pool(x) - - x = x.flatten(1) - - for fc in self.shared_fcs: - x = self.relu(fc(x)) - # separate branches - x_cls = x - x_reg = x - - for conv in self.cls_convs: - x_cls = conv(x_cls) - if x_cls.dim() > 2: - if self.with_avg_pool: - x_cls = self.avg_pool(x_cls) - x_cls = x_cls.flatten(1) - for fc in self.cls_fcs: - x_cls = self.relu(fc(x_cls)) - - for conv in self.reg_convs: - x_reg = conv(x_reg) - if x_reg.dim() > 2: - if self.with_avg_pool: - x_reg = self.avg_pool(x_reg) - x_reg = x_reg.flatten(1) - for fc in self.reg_fcs: - x_reg = self.relu(fc(x_reg)) - - cls_score = self.fc_cls(x_cls) if self.with_cls else None - bbox_pred = self.fc_reg(x_reg) if self.with_reg else None - return cls_score, bbox_pred - - -@HEADS.register_module() -class Shared2FCBBoxHead(ConvFCBBoxHead): - - def __init__(self, fc_out_channels=1024, *args, **kwargs): - super(Shared2FCBBoxHead, self).__init__( - num_shared_convs=0, - num_shared_fcs=2, - num_cls_convs=0, - num_cls_fcs=0, - num_reg_convs=0, - num_reg_fcs=0, - fc_out_channels=fc_out_channels, - *args, - **kwargs) - - -@HEADS.register_module() -class 
Shared4Conv1FCBBoxHead(ConvFCBBoxHead): - - def __init__(self, fc_out_channels=1024, *args, **kwargs): - super(Shared4Conv1FCBBoxHead, self).__init__( - num_shared_convs=4, - num_shared_fcs=1, - num_cls_convs=0, - num_cls_fcs=0, - num_reg_convs=0, - num_reg_fcs=0, - fc_out_channels=fc_out_channels, - *args, - **kwargs) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/dii_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/dii_head.py deleted file mode 100644 index 3777f52be4a9580662e6e7f5338229aedd310c7c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/dii_head.py +++ /dev/null @@ -1,426 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -from mmcv.cnn import (bias_init_with_prob, build_activation_layer, - build_norm_layer) -from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention -from mmcv.runner import auto_fp16, force_fp32 - -from mmdet.core import multi_apply -from mmdet.models.builder import HEADS, build_loss -from mmdet.models.dense_heads.atss_head import reduce_mean -from mmdet.models.losses import accuracy -from mmdet.models.utils import build_transformer -from .bbox_head import BBoxHead - - -@HEADS.register_module() -class DIIHead(BBoxHead): - r"""Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End Object - Detection with Learnable Proposals `_ - - Args: - num_classes (int): Number of class in dataset. - Defaults to 80. - num_ffn_fcs (int): The number of fully-connected - layers in FFNs. Defaults to 2. - num_heads (int): The hidden dimension of FFNs. - Defaults to 8. - num_cls_fcs (int): The number of fully-connected - layers in classification subnet. Defaults to 1. - num_reg_fcs (int): The number of fully-connected - layers in regression subnet. Defaults to 3. - feedforward_channels (int): The hidden dimension - of FFNs. Defaults to 2048 - in_channels (int): Hidden_channels of MultiheadAttention. - Defaults to 256. - dropout (float): Probability of drop the channel. - Defaults to 0.0 - ffn_act_cfg (dict): The activation config for FFNs. - dynamic_conv_cfg (dict): The convolution config - for DynamicConv. - loss_iou (dict): The config for iou or giou loss. 
- - """ - - def __init__(self, - num_classes=80, - num_ffn_fcs=2, - num_heads=8, - num_cls_fcs=1, - num_reg_fcs=3, - feedforward_channels=2048, - in_channels=256, - dropout=0.0, - ffn_act_cfg=dict(type='ReLU', inplace=True), - dynamic_conv_cfg=dict( - type='DynamicConv', - in_channels=256, - feat_channels=64, - out_channels=256, - input_feat_shape=7, - act_cfg=dict(type='ReLU', inplace=True), - norm_cfg=dict(type='LN')), - loss_iou=dict(type='GIoULoss', loss_weight=2.0), - init_cfg=None, - **kwargs): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - super(DIIHead, self).__init__( - num_classes=num_classes, - reg_decoded_bbox=True, - reg_class_agnostic=True, - init_cfg=init_cfg, - **kwargs) - self.loss_iou = build_loss(loss_iou) - self.in_channels = in_channels - self.fp16_enabled = False - self.attention = MultiheadAttention(in_channels, num_heads, dropout) - self.attention_norm = build_norm_layer(dict(type='LN'), in_channels)[1] - - self.instance_interactive_conv = build_transformer(dynamic_conv_cfg) - self.instance_interactive_conv_dropout = nn.Dropout(dropout) - self.instance_interactive_conv_norm = build_norm_layer( - dict(type='LN'), in_channels)[1] - - self.ffn = FFN( - in_channels, - feedforward_channels, - num_ffn_fcs, - act_cfg=ffn_act_cfg, - dropout=dropout) - self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1] - - self.cls_fcs = nn.ModuleList() - for _ in range(num_cls_fcs): - self.cls_fcs.append( - nn.Linear(in_channels, in_channels, bias=False)) - self.cls_fcs.append( - build_norm_layer(dict(type='LN'), in_channels)[1]) - self.cls_fcs.append( - build_activation_layer(dict(type='ReLU', inplace=True))) - - # over load the self.fc_cls in BBoxHead - if self.loss_cls.use_sigmoid: - self.fc_cls = nn.Linear(in_channels, self.num_classes) - else: - self.fc_cls = nn.Linear(in_channels, self.num_classes + 1) - - self.reg_fcs = nn.ModuleList() - for _ in range(num_reg_fcs): - self.reg_fcs.append( - nn.Linear(in_channels, in_channels, bias=False)) - self.reg_fcs.append( - build_norm_layer(dict(type='LN'), in_channels)[1]) - self.reg_fcs.append( - build_activation_layer(dict(type='ReLU', inplace=True))) - # over load the self.fc_cls in BBoxHead - self.fc_reg = nn.Linear(in_channels, 4) - - assert self.reg_class_agnostic, 'DIIHead only ' \ - 'suppport `reg_class_agnostic=True` ' - assert self.reg_decoded_bbox, 'DIIHead only ' \ - 'suppport `reg_decoded_bbox=True`' - - def init_weights(self): - """Use xavier initialization for all weight parameter and set - classification head bias as a specific value when use focal loss.""" - super(DIIHead, self).init_weights() - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - else: - # adopt the default initialization for - # the weight and bias of the layer norm - pass - if self.loss_cls.use_sigmoid: - bias_init = bias_init_with_prob(0.01) - nn.init.constant_(self.fc_cls.bias, bias_init) - - @auto_fp16() - def forward(self, roi_feat, proposal_feat): - """Forward function of Dynamic Instance Interactive Head. - - Args: - roi_feat (Tensor): Roi-pooling features with shape - (batch_size*num_proposals, feature_dimensions, - pooling_h , pooling_w). - proposal_feat (Tensor): Intermediate feature get from - diihead in last stage, has shape - (batch_size, num_proposals, feature_dimensions) - - Returns: - tuple[Tensor]: Usually a tuple of classification scores - and bbox prediction and a intermediate feature. 
- - - cls_scores (Tensor): Classification scores for - all proposals, has shape - (batch_size, num_proposals, num_classes). - - bbox_preds (Tensor): Box energies / deltas for - all proposals, has shape - (batch_size, num_proposals, 4). - - obj_feat (Tensor): Object feature before classification - and regression subnet, has shape - (batch_size, num_proposal, feature_dimensions). - """ - N, num_proposals = proposal_feat.shape[:2] - - # Self attention - proposal_feat = proposal_feat.permute(1, 0, 2) - proposal_feat = self.attention_norm(self.attention(proposal_feat)) - attn_feats = proposal_feat.permute(1, 0, 2) - - # instance interactive - proposal_feat = attn_feats.reshape(-1, self.in_channels) - proposal_feat_iic = self.instance_interactive_conv( - proposal_feat, roi_feat) - proposal_feat = proposal_feat + self.instance_interactive_conv_dropout( - proposal_feat_iic) - obj_feat = self.instance_interactive_conv_norm(proposal_feat) - - # FFN - obj_feat = self.ffn_norm(self.ffn(obj_feat)) - - cls_feat = obj_feat - reg_feat = obj_feat - - for cls_layer in self.cls_fcs: - cls_feat = cls_layer(cls_feat) - for reg_layer in self.reg_fcs: - reg_feat = reg_layer(reg_feat) - - cls_score = self.fc_cls(cls_feat).view( - N, num_proposals, self.num_classes - if self.loss_cls.use_sigmoid else self.num_classes + 1) - bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, 4) - - return cls_score, bbox_delta, obj_feat.view( - N, num_proposals, self.in_channels), attn_feats - - @force_fp32(apply_to=('cls_score', 'bbox_pred')) - def loss(self, - cls_score, - bbox_pred, - labels, - label_weights, - bbox_targets, - bbox_weights, - imgs_whwh=None, - reduction_override=None, - **kwargs): - """"Loss function of DIIHead, get loss of all images. - - Args: - cls_score (Tensor): Classification prediction - results of all class, has shape - (batch_size * num_proposals_single_image, num_classes) - bbox_pred (Tensor): Regression prediction results, - has shape - (batch_size * num_proposals_single_image, 4), the last - dimension 4 represents [tl_x, tl_y, br_x, br_y]. - labels (Tensor): Label of each proposals, has shape - (batch_size * num_proposals_single_image - label_weights (Tensor): Classification loss - weight of each proposals, has shape - (batch_size * num_proposals_single_image - bbox_targets (Tensor): Regression targets of each - proposals, has shape - (batch_size * num_proposals_single_image, 4), - the last dimension 4 represents - [tl_x, tl_y, br_x, br_y]. - bbox_weights (Tensor): Regression loss weight of each - proposals's coordinate, has shape - (batch_size * num_proposals_single_image, 4), - imgs_whwh (Tensor): imgs_whwh (Tensor): Tensor with\ - shape (batch_size, num_proposals, 4), the last - dimension means - [img_width,img_height, img_width, img_height]. - reduction_override (str, optional): The reduction - method used to override the original reduction - method of the loss. Options are "none", - "mean" and "sum". 
Defaults to None, - - Returns: - dict[str, Tensor]: Dictionary of loss components - """ - losses = dict() - bg_class_ind = self.num_classes - # note in spare rcnn num_gt == num_pos - pos_inds = (labels >= 0) & (labels < bg_class_ind) - num_pos = pos_inds.sum().float() - avg_factor = reduce_mean(num_pos) - if cls_score is not None: - if cls_score.numel() > 0: - losses['loss_cls'] = self.loss_cls( - cls_score, - labels, - label_weights, - avg_factor=avg_factor, - reduction_override=reduction_override) - losses['pos_acc'] = accuracy(cls_score[pos_inds], - labels[pos_inds]) - if bbox_pred is not None: - # 0~self.num_classes-1 are FG, self.num_classes is BG - # do not perform bounding box regression for BG anymore. - if pos_inds.any(): - pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0), - 4)[pos_inds.type(torch.bool)] - imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0), - 4)[pos_inds.type(torch.bool)] - losses['loss_bbox'] = self.loss_bbox( - pos_bbox_pred / imgs_whwh, - bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh, - bbox_weights[pos_inds.type(torch.bool)], - avg_factor=avg_factor) - losses['loss_iou'] = self.loss_iou( - pos_bbox_pred, - bbox_targets[pos_inds.type(torch.bool)], - bbox_weights[pos_inds.type(torch.bool)], - avg_factor=avg_factor) - else: - losses['loss_bbox'] = bbox_pred.sum() * 0 - losses['loss_iou'] = bbox_pred.sum() * 0 - return losses - - def _get_target_single(self, pos_inds, neg_inds, pos_bboxes, neg_bboxes, - pos_gt_bboxes, pos_gt_labels, cfg): - """Calculate the ground truth for proposals in the single image - according to the sampling results. - - Almost the same as the implementation in `bbox_head`, - we add pos_inds and neg_inds to select positive and - negative samples instead of selecting the first num_pos - as positive samples. - - Args: - pos_inds (Tensor): The length is equal to the - positive sample numbers contain all index - of the positive sample in the origin proposal set. - neg_inds (Tensor): The length is equal to the - negative sample numbers contain all index - of the negative sample in the origin proposal set. - pos_bboxes (Tensor): Contains all the positive boxes, - has shape (num_pos, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - neg_bboxes (Tensor): Contains all the negative boxes, - has shape (num_neg, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - pos_gt_bboxes (Tensor): Contains gt_boxes for - all positive samples, has shape (num_pos, 4), - the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - pos_gt_labels (Tensor): Contains gt_labels for - all positive samples, has shape (num_pos, ). - cfg (obj:`ConfigDict`): `train_cfg` of R-CNN. - - Returns: - Tuple[Tensor]: Ground truth for proposals in a single image. - Containing the following Tensors: - - - labels(Tensor): Gt_labels for all proposals, has - shape (num_proposals,). - - label_weights(Tensor): Labels_weights for all proposals, has - shape (num_proposals,). - - bbox_targets(Tensor):Regression target for all proposals, has - shape (num_proposals, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - - bbox_weights(Tensor):Regression weights for all proposals, - has shape (num_proposals, 4). 
- """ - num_pos = pos_bboxes.size(0) - num_neg = neg_bboxes.size(0) - num_samples = num_pos + num_neg - - # original implementation uses new_zeros since BG are set to be 0 - # now use empty & fill because BG cat_id = num_classes, - # FG cat_id = [0, num_classes-1] - labels = pos_bboxes.new_full((num_samples, ), - self.num_classes, - dtype=torch.long) - label_weights = pos_bboxes.new_zeros(num_samples) - bbox_targets = pos_bboxes.new_zeros(num_samples, 4) - bbox_weights = pos_bboxes.new_zeros(num_samples, 4) - if num_pos > 0: - labels[pos_inds] = pos_gt_labels - pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight - label_weights[pos_inds] = pos_weight - if not self.reg_decoded_bbox: - pos_bbox_targets = self.bbox_coder.encode( - pos_bboxes, pos_gt_bboxes) - else: - pos_bbox_targets = pos_gt_bboxes - bbox_targets[pos_inds, :] = pos_bbox_targets - bbox_weights[pos_inds, :] = 1 - if num_neg > 0: - label_weights[neg_inds] = 1.0 - - return labels, label_weights, bbox_targets, bbox_weights - - def get_targets(self, - sampling_results, - gt_bboxes, - gt_labels, - rcnn_train_cfg, - concat=True): - """Calculate the ground truth for all samples in a batch according to - the sampling_results. - - Almost the same as the implementation in bbox_head, we passed - additional parameters pos_inds_list and neg_inds_list to - `_get_target_single` function. - - Args: - sampling_results (List[obj:SamplingResults]): Assign results of - all images in a batch after sampling. - gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch, - each tensor has shape (num_gt, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - gt_labels (list[Tensor]): Gt_labels of all images in a batch, - each tensor has shape (num_gt,). - rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN. - concat (bool): Whether to concatenate the results of all - the images in a single batch. - - Returns: - Tuple[Tensor]: Ground truth for proposals in a single image. - Containing the following list of Tensors: - - - labels (list[Tensor],Tensor): Gt_labels for all - proposals in a batch, each tensor in list has - shape (num_proposals,) when `concat=False`, otherwise just - a single tensor has shape (num_all_proposals,). - - label_weights (list[Tensor]): Labels_weights for - all proposals in a batch, each tensor in list has shape - (num_proposals,) when `concat=False`, otherwise just a - single tensor has shape (num_all_proposals,). - - bbox_targets (list[Tensor],Tensor): Regression target - for all proposals in a batch, each tensor in list has - shape (num_proposals, 4) when `concat=False`, otherwise - just a single tensor has shape (num_all_proposals, 4), - the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - - bbox_weights (list[tensor],Tensor): Regression weights for - all proposals in a batch, each tensor in list has shape - (num_proposals, 4) when `concat=False`, otherwise just a - single tensor has shape (num_all_proposals, 4). 
- """ - pos_inds_list = [res.pos_inds for res in sampling_results] - neg_inds_list = [res.neg_inds for res in sampling_results] - pos_bboxes_list = [res.pos_bboxes for res in sampling_results] - neg_bboxes_list = [res.neg_bboxes for res in sampling_results] - pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] - pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results] - labels, label_weights, bbox_targets, bbox_weights = multi_apply( - self._get_target_single, - pos_inds_list, - neg_inds_list, - pos_bboxes_list, - neg_bboxes_list, - pos_gt_bboxes_list, - pos_gt_labels_list, - cfg=rcnn_train_cfg) - if concat: - labels = torch.cat(labels, 0) - label_weights = torch.cat(label_weights, 0) - bbox_targets = torch.cat(bbox_targets, 0) - bbox_weights = torch.cat(bbox_weights, 0) - return labels, label_weights, bbox_targets, bbox_weights diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py deleted file mode 100644 index 2a38d591f8c8c44a93985762a8d7c7389f448ec1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule, ModuleList - -from mmdet.models.backbones.resnet import Bottleneck -from mmdet.models.builder import HEADS -from .bbox_head import BBoxHead - - -class BasicResBlock(BaseModule): - """Basic residual block. - - This block is a little different from the block in the ResNet backbone. - The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock. - - Args: - in_channels (int): Channels of the input feature map. - out_channels (int): Channels of the output feature map. - conv_cfg (dict): The config dict for convolution layers. - norm_cfg (dict): The config dict for normalization layers. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - in_channels, - out_channels, - conv_cfg=None, - norm_cfg=dict(type='BN'), - init_cfg=None): - super(BasicResBlock, self).__init__(init_cfg) - - # main path - self.conv1 = ConvModule( - in_channels, - in_channels, - kernel_size=3, - padding=1, - bias=False, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg) - self.conv2 = ConvModule( - in_channels, - out_channels, - kernel_size=1, - bias=False, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - # identity path - self.conv_identity = ConvModule( - in_channels, - out_channels, - kernel_size=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - identity = x - - x = self.conv1(x) - x = self.conv2(x) - - identity = self.conv_identity(identity) - out = x + identity - - out = self.relu(out) - return out - - -@HEADS.register_module() -class DoubleConvFCBBoxHead(BBoxHead): - r"""Bbox head used in Double-Head R-CNN - - .. 
code-block:: none - - /-> cls - /-> shared convs -> - \-> reg - roi features - /-> cls - \-> shared fc -> - \-> reg - """ # noqa: W605 - - def __init__(self, - num_convs=0, - num_fcs=0, - conv_out_channels=1024, - fc_out_channels=1024, - conv_cfg=None, - norm_cfg=dict(type='BN'), - init_cfg=dict( - type='Normal', - override=[ - dict(type='Normal', name='fc_cls', std=0.01), - dict(type='Normal', name='fc_reg', std=0.001), - dict( - type='Xavier', - name='fc_branch', - distribution='uniform') - ]), - **kwargs): - kwargs.setdefault('with_avg_pool', True) - super(DoubleConvFCBBoxHead, self).__init__(init_cfg=init_cfg, **kwargs) - assert self.with_avg_pool - assert num_convs > 0 - assert num_fcs > 0 - self.num_convs = num_convs - self.num_fcs = num_fcs - self.conv_out_channels = conv_out_channels - self.fc_out_channels = fc_out_channels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - # increase the channel of input features - self.res_block = BasicResBlock(self.in_channels, - self.conv_out_channels) - - # add conv heads - self.conv_branch = self._add_conv_branch() - # add fc heads - self.fc_branch = self._add_fc_branch() - - out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes - self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg) - - self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1) - self.relu = nn.ReLU(inplace=True) - - def _add_conv_branch(self): - """Add the fc branch which consists of a sequential of conv layers.""" - branch_convs = ModuleList() - for i in range(self.num_convs): - branch_convs.append( - Bottleneck( - inplanes=self.conv_out_channels, - planes=self.conv_out_channels // 4, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - return branch_convs - - def _add_fc_branch(self): - """Add the fc branch which consists of a sequential of fc layers.""" - branch_fcs = ModuleList() - for i in range(self.num_fcs): - fc_in_channels = ( - self.in_channels * - self.roi_feat_area if i == 0 else self.fc_out_channels) - branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels)) - return branch_fcs - - def forward(self, x_cls, x_reg): - # conv head - x_conv = self.res_block(x_reg) - - for conv in self.conv_branch: - x_conv = conv(x_conv) - - if self.with_avg_pool: - x_conv = self.avg_pool(x_conv) - - x_conv = x_conv.view(x_conv.size(0), -1) - bbox_pred = self.fc_reg(x_conv) - - # fc head - x_fc = x_cls.view(x_cls.size(0), -1) - for fc in self.fc_branch: - x_fc = self.relu(fc(x_fc)) - - cls_score = self.fc_cls(x_fc) - - return cls_score, bbox_pred diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/sabl_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/sabl_head.py deleted file mode 100644 index 0ce986b9a29ed2264e48ac4df89b407dfc66eeca..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/sabl_head.py +++ /dev/null @@ -1,596 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule, force_fp32 - -from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms -from mmdet.models.builder import HEADS, build_loss -from mmdet.models.losses import accuracy - - -@HEADS.register_module() -class SABLHead(BaseModule): - """Side-Aware Boundary Localization (SABL) for RoI-Head. - - Side-Aware features are extracted by conv layers - with an attention mechanism. 
- Boundary Localization with Bucketing and Bucketing Guided Rescoring - are implemented in BucketingBBoxCoder. - - Please refer to https://arxiv.org/abs/1912.04260 for more details. - - Args: - cls_in_channels (int): Input channels of cls RoI feature. \ - Defaults to 256. - reg_in_channels (int): Input channels of reg RoI feature. \ - Defaults to 256. - roi_feat_size (int): Size of RoI features. Defaults to 7. - reg_feat_up_ratio (int): Upsample ratio of reg features. \ - Defaults to 2. - reg_pre_kernel (int): Kernel of 2D conv layers before \ - attention pooling. Defaults to 3. - reg_post_kernel (int): Kernel of 1D conv layers after \ - attention pooling. Defaults to 3. - reg_pre_num (int): Number of pre convs. Defaults to 2. - reg_post_num (int): Number of post convs. Defaults to 1. - num_classes (int): Number of classes in dataset. Defaults to 80. - cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024. - reg_offset_out_channels (int): Hidden and output channel \ - of reg offset branch. Defaults to 256. - reg_cls_out_channels (int): Hidden and output channel \ - of reg cls branch. Defaults to 256. - num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1. - num_reg_fcs (int): Number of fcs for reg branch.. Defaults to 0. - reg_class_agnostic (bool): Class agnostic regression or not. \ - Defaults to True. - norm_cfg (dict): Config of norm layers. Defaults to None. - bbox_coder (dict): Config of bbox coder. Defaults 'BucketingBBoxCoder'. - loss_cls (dict): Config of classification loss. - loss_bbox_cls (dict): Config of classification loss for bbox branch. - loss_bbox_reg (dict): Config of regression loss for bbox branch. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - num_classes, - cls_in_channels=256, - reg_in_channels=256, - roi_feat_size=7, - reg_feat_up_ratio=2, - reg_pre_kernel=3, - reg_post_kernel=3, - reg_pre_num=2, - reg_post_num=1, - cls_out_channels=1024, - reg_offset_out_channels=256, - reg_cls_out_channels=256, - num_cls_fcs=1, - num_reg_fcs=0, - reg_class_agnostic=True, - norm_cfg=None, - bbox_coder=dict( - type='BucketingBBoxCoder', - num_buckets=14, - scale_factor=1.7), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - loss_bbox_reg=dict( - type='SmoothL1Loss', beta=0.1, loss_weight=1.0), - init_cfg=None): - super(SABLHead, self).__init__(init_cfg) - self.cls_in_channels = cls_in_channels - self.reg_in_channels = reg_in_channels - self.roi_feat_size = roi_feat_size - self.reg_feat_up_ratio = int(reg_feat_up_ratio) - self.num_buckets = bbox_coder['num_buckets'] - assert self.reg_feat_up_ratio // 2 >= 1 - self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio - assert self.up_reg_feat_size == bbox_coder['num_buckets'] - self.reg_pre_kernel = reg_pre_kernel - self.reg_post_kernel = reg_post_kernel - self.reg_pre_num = reg_pre_num - self.reg_post_num = reg_post_num - self.num_classes = num_classes - self.cls_out_channels = cls_out_channels - self.reg_offset_out_channels = reg_offset_out_channels - self.reg_cls_out_channels = reg_cls_out_channels - self.num_cls_fcs = num_cls_fcs - self.num_reg_fcs = num_reg_fcs - self.reg_class_agnostic = reg_class_agnostic - assert self.reg_class_agnostic - self.norm_cfg = norm_cfg - - self.bbox_coder = build_bbox_coder(bbox_coder) - self.loss_cls = build_loss(loss_cls) - self.loss_bbox_cls = build_loss(loss_bbox_cls) - 
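
The assertions in `SABLHead.__init__` above tie the upsampled side-aware feature length (`roi_feat_size * reg_feat_up_ratio`) to the bucket count of the `BucketingBBoxCoder`. As a minimal standalone sketch of that consistency check (plain Python, no mmdet imports; `check_sabl_cfg` is a hypothetical helper, not part of the repository):

```
# Sketch of the SABL configuration constraint enforced above: the upsampled
# regression feature size must equal the coder's bucket count.
def check_sabl_cfg(roi_feat_size=7, reg_feat_up_ratio=2, num_buckets=14):
    assert reg_feat_up_ratio // 2 >= 1, 'upsample ratio must be >= 2'
    up_reg_feat_size = roi_feat_size * reg_feat_up_ratio
    assert up_reg_feat_size == num_buckets, (
        f'{roi_feat_size} * {reg_feat_up_ratio} != {num_buckets}')
    return up_reg_feat_size

# Default SABLHead settings: 7 * 2 == 14 buckets.
print(check_sabl_cfg())  # -> 14
```
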
self.loss_bbox_reg = build_loss(loss_bbox_reg) - - self.cls_fcs = self._add_fc_branch(self.num_cls_fcs, - self.cls_in_channels, - self.roi_feat_size, - self.cls_out_channels) - - self.side_num = int(np.ceil(self.num_buckets / 2)) - - if self.reg_feat_up_ratio > 1: - self.upsample_x = nn.ConvTranspose1d( - reg_in_channels, - reg_in_channels, - self.reg_feat_up_ratio, - stride=self.reg_feat_up_ratio) - self.upsample_y = nn.ConvTranspose1d( - reg_in_channels, - reg_in_channels, - self.reg_feat_up_ratio, - stride=self.reg_feat_up_ratio) - - self.reg_pre_convs = nn.ModuleList() - for i in range(self.reg_pre_num): - reg_pre_conv = ConvModule( - reg_in_channels, - reg_in_channels, - kernel_size=reg_pre_kernel, - padding=reg_pre_kernel // 2, - norm_cfg=norm_cfg, - act_cfg=dict(type='ReLU')) - self.reg_pre_convs.append(reg_pre_conv) - - self.reg_post_conv_xs = nn.ModuleList() - for i in range(self.reg_post_num): - reg_post_conv_x = ConvModule( - reg_in_channels, - reg_in_channels, - kernel_size=(1, reg_post_kernel), - padding=(0, reg_post_kernel // 2), - norm_cfg=norm_cfg, - act_cfg=dict(type='ReLU')) - self.reg_post_conv_xs.append(reg_post_conv_x) - self.reg_post_conv_ys = nn.ModuleList() - for i in range(self.reg_post_num): - reg_post_conv_y = ConvModule( - reg_in_channels, - reg_in_channels, - kernel_size=(reg_post_kernel, 1), - padding=(reg_post_kernel // 2, 0), - norm_cfg=norm_cfg, - act_cfg=dict(type='ReLU')) - self.reg_post_conv_ys.append(reg_post_conv_y) - - self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1) - self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1) - - self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1) - self.relu = nn.ReLU(inplace=True) - - self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs, - self.reg_in_channels, 1, - self.reg_cls_out_channels) - self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs, - self.reg_in_channels, 1, - self.reg_offset_out_channels) - self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1) - self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1) - - if init_cfg is None: - self.init_cfg = [ - dict( - type='Xavier', - layer='Linear', - distribution='uniform', - override=[ - dict(type='Normal', name='reg_conv_att_x', std=0.01), - dict(type='Normal', name='reg_conv_att_y', std=0.01), - dict(type='Normal', name='fc_reg_cls', std=0.01), - dict(type='Normal', name='fc_cls', std=0.01), - dict(type='Normal', name='fc_reg_offset', std=0.001) - ]) - ] - if self.reg_feat_up_ratio > 1: - self.init_cfg += [ - dict( - type='Kaiming', - distribution='normal', - override=[ - dict(name='upsample_x'), - dict(name='upsample_y') - ]) - ] - - @property - def custom_cls_channels(self): - return getattr(self.loss_cls, 'custom_cls_channels', False) - - @property - def custom_activation(self): - return getattr(self.loss_cls, 'custom_activation', False) - - @property - def custom_accuracy(self): - return getattr(self.loss_cls, 'custom_accuracy', False) - - def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size, - fc_out_channels): - in_channels = in_channels * roi_feat_size * roi_feat_size - branch_fcs = nn.ModuleList() - for i in range(num_branch_fcs): - fc_in_channels = (in_channels if i == 0 else fc_out_channels) - branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels)) - return branch_fcs - - def cls_forward(self, cls_x): - cls_x = cls_x.view(cls_x.size(0), -1) - for fc in self.cls_fcs: - cls_x = self.relu(fc(cls_x)) - cls_score = self.fc_cls(cls_x) - return cls_score - - def attention_pool(self, reg_x): - 
"""Extract direction-specific features fx and fy with attention - methanism.""" - reg_fx = reg_x - reg_fy = reg_x - reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid() - reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid() - reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2) - reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3) - reg_fx = (reg_fx * reg_fx_att).sum(dim=2) - reg_fy = (reg_fy * reg_fy_att).sum(dim=3) - return reg_fx, reg_fy - - def side_aware_feature_extractor(self, reg_x): - """Refine and extract side-aware features without split them.""" - for reg_pre_conv in self.reg_pre_convs: - reg_x = reg_pre_conv(reg_x) - reg_fx, reg_fy = self.attention_pool(reg_x) - - if self.reg_post_num > 0: - reg_fx = reg_fx.unsqueeze(2) - reg_fy = reg_fy.unsqueeze(3) - for i in range(self.reg_post_num): - reg_fx = self.reg_post_conv_xs[i](reg_fx) - reg_fy = self.reg_post_conv_ys[i](reg_fy) - reg_fx = reg_fx.squeeze(2) - reg_fy = reg_fy.squeeze(3) - if self.reg_feat_up_ratio > 1: - reg_fx = self.relu(self.upsample_x(reg_fx)) - reg_fy = self.relu(self.upsample_y(reg_fy)) - reg_fx = torch.transpose(reg_fx, 1, 2) - reg_fy = torch.transpose(reg_fy, 1, 2) - return reg_fx.contiguous(), reg_fy.contiguous() - - def reg_pred(self, x, offset_fcs, cls_fcs): - """Predict bucketing estimation (cls_pred) and fine regression (offset - pred) with side-aware features.""" - x_offset = x.view(-1, self.reg_in_channels) - x_cls = x.view(-1, self.reg_in_channels) - - for fc in offset_fcs: - x_offset = self.relu(fc(x_offset)) - for fc in cls_fcs: - x_cls = self.relu(fc(x_cls)) - offset_pred = self.fc_reg_offset(x_offset) - cls_pred = self.fc_reg_cls(x_cls) - - offset_pred = offset_pred.view(x.size(0), -1) - cls_pred = cls_pred.view(x.size(0), -1) - - return offset_pred, cls_pred - - def side_aware_split(self, feat): - """Split side-aware features aligned with orders of bucketing - targets.""" - l_end = int(np.ceil(self.up_reg_feat_size / 2)) - r_start = int(np.floor(self.up_reg_feat_size / 2)) - feat_fl = feat[:, :l_end] - feat_fr = feat[:, r_start:].flip(dims=(1, )) - feat_fl = feat_fl.contiguous() - feat_fr = feat_fr.contiguous() - feat = torch.cat([feat_fl, feat_fr], dim=-1) - return feat - - def bbox_pred_split(self, bbox_pred, num_proposals_per_img): - """Split batch bbox prediction back to each image.""" - bucket_cls_preds, bucket_offset_preds = bbox_pred - bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0) - bucket_offset_preds = bucket_offset_preds.split( - num_proposals_per_img, 0) - bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds)) - return bbox_pred - - def reg_forward(self, reg_x): - outs = self.side_aware_feature_extractor(reg_x) - edge_offset_preds = [] - edge_cls_preds = [] - reg_fx = outs[0] - reg_fy = outs[1] - offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs, - self.reg_cls_fcs) - offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs, - self.reg_cls_fcs) - offset_pred_x = self.side_aware_split(offset_pred_x) - offset_pred_y = self.side_aware_split(offset_pred_y) - cls_pred_x = self.side_aware_split(cls_pred_x) - cls_pred_y = self.side_aware_split(cls_pred_y) - edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1) - edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1) - - return (edge_cls_preds, edge_offset_preds) - - def forward(self, x): - - bbox_pred = self.reg_forward(x) - cls_score = self.cls_forward(x) - - return cls_score, bbox_pred - - def get_targets(self, sampling_results, gt_bboxes, 
gt_labels, - rcnn_train_cfg): - pos_proposals = [res.pos_bboxes for res in sampling_results] - neg_proposals = [res.neg_bboxes for res in sampling_results] - pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results] - pos_gt_labels = [res.pos_gt_labels for res in sampling_results] - cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals, - pos_gt_bboxes, pos_gt_labels, - rcnn_train_cfg) - (labels, label_weights, bucket_cls_targets, bucket_cls_weights, - bucket_offset_targets, bucket_offset_weights) = cls_reg_targets - return (labels, label_weights, (bucket_cls_targets, - bucket_offset_targets), - (bucket_cls_weights, bucket_offset_weights)) - - def bucket_target(self, - pos_proposals_list, - neg_proposals_list, - pos_gt_bboxes_list, - pos_gt_labels_list, - rcnn_train_cfg, - concat=True): - (labels, label_weights, bucket_cls_targets, bucket_cls_weights, - bucket_offset_targets, bucket_offset_weights) = multi_apply( - self._bucket_target_single, - pos_proposals_list, - neg_proposals_list, - pos_gt_bboxes_list, - pos_gt_labels_list, - cfg=rcnn_train_cfg) - - if concat: - labels = torch.cat(labels, 0) - label_weights = torch.cat(label_weights, 0) - bucket_cls_targets = torch.cat(bucket_cls_targets, 0) - bucket_cls_weights = torch.cat(bucket_cls_weights, 0) - bucket_offset_targets = torch.cat(bucket_offset_targets, 0) - bucket_offset_weights = torch.cat(bucket_offset_weights, 0) - return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, - bucket_offset_targets, bucket_offset_weights) - - def _bucket_target_single(self, pos_proposals, neg_proposals, - pos_gt_bboxes, pos_gt_labels, cfg): - """Compute bucketing estimation targets and fine regression targets for - a single image. - - Args: - pos_proposals (Tensor): positive proposals of a single image, - Shape (n_pos, 4) - neg_proposals (Tensor): negative proposals of a single image, - Shape (n_neg, 4). - pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals - of a single image, Shape (n_pos, 4). - pos_gt_labels (Tensor): gt labels assigned to positive proposals - of a single image, Shape (n_pos, ). - cfg (dict): Config of calculating targets - - Returns: - tuple: - - - labels (Tensor): Labels in a single image. \ - Shape (n,). - - label_weights (Tensor): Label weights in a single image.\ - Shape (n,) - - bucket_cls_targets (Tensor): Bucket cls targets in \ - a single image. Shape (n, num_buckets*2). - - bucket_cls_weights (Tensor): Bucket cls weights in \ - a single image. Shape (n, num_buckets*2). - - bucket_offset_targets (Tensor): Bucket offset targets \ - in a single image. Shape (n, num_buckets*2). - - bucket_offset_targets (Tensor): Bucket offset weights \ - in a single image. Shape (n, num_buckets*2). 
- """ - num_pos = pos_proposals.size(0) - num_neg = neg_proposals.size(0) - num_samples = num_pos + num_neg - labels = pos_gt_bboxes.new_full((num_samples, ), - self.num_classes, - dtype=torch.long) - label_weights = pos_proposals.new_zeros(num_samples) - bucket_cls_targets = pos_proposals.new_zeros(num_samples, - 4 * self.side_num) - bucket_cls_weights = pos_proposals.new_zeros(num_samples, - 4 * self.side_num) - bucket_offset_targets = pos_proposals.new_zeros( - num_samples, 4 * self.side_num) - bucket_offset_weights = pos_proposals.new_zeros( - num_samples, 4 * self.side_num) - if num_pos > 0: - labels[:num_pos] = pos_gt_labels - label_weights[:num_pos] = 1.0 - (pos_bucket_offset_targets, pos_bucket_offset_weights, - pos_bucket_cls_targets, - pos_bucket_cls_weights) = self.bbox_coder.encode( - pos_proposals, pos_gt_bboxes) - bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets - bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights - bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets - bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights - if num_neg > 0: - label_weights[-num_neg:] = 1.0 - return (labels, label_weights, bucket_cls_targets, bucket_cls_weights, - bucket_offset_targets, bucket_offset_weights) - - def loss(self, - cls_score, - bbox_pred, - rois, - labels, - label_weights, - bbox_targets, - bbox_weights, - reduction_override=None): - losses = dict() - if cls_score is not None: - avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) - losses['loss_cls'] = self.loss_cls( - cls_score, - labels, - label_weights, - avg_factor=avg_factor, - reduction_override=reduction_override) - losses['acc'] = accuracy(cls_score, labels) - - if bbox_pred is not None: - bucket_cls_preds, bucket_offset_preds = bbox_pred - bucket_cls_targets, bucket_offset_targets = bbox_targets - bucket_cls_weights, bucket_offset_weights = bbox_weights - # edge cls - bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num) - bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num) - bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num) - losses['loss_bbox_cls'] = self.loss_bbox_cls( - bucket_cls_preds, - bucket_cls_targets, - bucket_cls_weights, - avg_factor=bucket_cls_targets.size(0), - reduction_override=reduction_override) - - losses['loss_bbox_reg'] = self.loss_bbox_reg( - bucket_offset_preds, - bucket_offset_targets, - bucket_offset_weights, - avg_factor=bucket_offset_targets.size(0), - reduction_override=reduction_override) - - return losses - - @force_fp32(apply_to=('cls_score', 'bbox_pred')) - def get_bboxes(self, - rois, - cls_score, - bbox_pred, - img_shape, - scale_factor, - rescale=False, - cfg=None): - if isinstance(cls_score, list): - cls_score = sum(cls_score) / float(len(cls_score)) - scores = F.softmax(cls_score, dim=1) if cls_score is not None else None - - if bbox_pred is not None: - bboxes, confidences = self.bbox_coder.decode( - rois[:, 1:], bbox_pred, img_shape) - else: - bboxes = rois[:, 1:].clone() - confidences = None - if img_shape is not None: - bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1) - bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1) - - if rescale and bboxes.size(0) > 0: - if isinstance(scale_factor, float): - bboxes /= scale_factor - else: - bboxes /= torch.from_numpy(scale_factor).to(bboxes.device) - - if cfg is None: - return bboxes, scores - else: - det_bboxes, det_labels = multiclass_nms( - bboxes, - scores, - cfg.score_thr, - cfg.nms, - cfg.max_per_img, - score_factors=confidences) - - return 
det_bboxes, det_labels - - @force_fp32(apply_to=('bbox_preds', )) - def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): - """Refine bboxes during training. - - Args: - rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, - and bs is the sampled RoIs per image. - labels (Tensor): Shape (n*bs, ). - bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \ - (n*bs, num_buckets*2)]. - pos_is_gts (list[Tensor]): Flags indicating if each positive bbox - is a gt bbox. - img_metas (list[dict]): Meta info of each image. - - Returns: - list[Tensor]: Refined bboxes of each image in a mini-batch. - """ - img_ids = rois[:, 0].long().unique(sorted=True) - assert img_ids.numel() == len(img_metas) - - bboxes_list = [] - for i in range(len(img_metas)): - inds = torch.nonzero( - rois[:, 0] == i, as_tuple=False).squeeze(dim=1) - num_rois = inds.numel() - - bboxes_ = rois[inds, 1:] - label_ = labels[inds] - edge_cls_preds, edge_offset_preds = bbox_preds - edge_cls_preds_ = edge_cls_preds[inds] - edge_offset_preds_ = edge_offset_preds[inds] - bbox_pred_ = [edge_cls_preds_, edge_offset_preds_] - img_meta_ = img_metas[i] - pos_is_gts_ = pos_is_gts[i] - - bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, - img_meta_) - # filter gt bboxes - pos_keep = 1 - pos_is_gts_ - keep_inds = pos_is_gts_.new_ones(num_rois) - keep_inds[:len(pos_is_gts_)] = pos_keep - - bboxes_list.append(bboxes[keep_inds.type(torch.bool)]) - - return bboxes_list - - @force_fp32(apply_to=('bbox_pred', )) - def regress_by_class(self, rois, label, bbox_pred, img_meta): - """Regress the bbox for the predicted class. Used in Cascade R-CNN. - - Args: - rois (Tensor): shape (n, 4) or (n, 5) - label (Tensor): shape (n, ) - bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \ - (n, num_buckets *2)] - img_meta (dict): Image meta info. - - Returns: - Tensor: Regressed bboxes, the same shape as input rois. - """ - assert rois.size(1) == 4 or rois.size(1) == 5 - - if rois.size(1) == 4: - new_rois, _ = self.bbox_coder.decode(rois, bbox_pred, - img_meta['img_shape']) - else: - bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred, - img_meta['img_shape']) - new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) - - return new_rois diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py deleted file mode 100644 index cf39ebef2fa26f69bb56e6d08384991975ad1cc2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmdet.models.builder import HEADS -from .convfc_bbox_head import ConvFCBBoxHead - - -@HEADS.register_module() -class SCNetBBoxHead(ConvFCBBoxHead): - """BBox head for `SCNet `_. - - This inherits ``ConvFCBBoxHead`` with modified forward() function, allow us - to get intermediate shared feature. 
- """ - - def _forward_shared(self, x): - """Forward function for shared part.""" - if self.num_shared_convs > 0: - for conv in self.shared_convs: - x = conv(x) - - if self.num_shared_fcs > 0: - if self.with_avg_pool: - x = self.avg_pool(x) - - x = x.flatten(1) - - for fc in self.shared_fcs: - x = self.relu(fc(x)) - - return x - - def _forward_cls_reg(self, x): - """Forward function for classification and regression parts.""" - x_cls = x - x_reg = x - - for conv in self.cls_convs: - x_cls = conv(x_cls) - if x_cls.dim() > 2: - if self.with_avg_pool: - x_cls = self.avg_pool(x_cls) - x_cls = x_cls.flatten(1) - for fc in self.cls_fcs: - x_cls = self.relu(fc(x_cls)) - - for conv in self.reg_convs: - x_reg = conv(x_reg) - if x_reg.dim() > 2: - if self.with_avg_pool: - x_reg = self.avg_pool(x_reg) - x_reg = x_reg.flatten(1) - for fc in self.reg_fcs: - x_reg = self.relu(fc(x_reg)) - - cls_score = self.fc_cls(x_cls) if self.with_cls else None - bbox_pred = self.fc_reg(x_reg) if self.with_reg else None - - return cls_score, bbox_pred - - def forward(self, x, return_shared_feat=False): - """Forward function. - - Args: - x (Tensor): input features - return_shared_feat (bool): If True, return cls-reg-shared feature. - - Return: - out (tuple[Tensor]): contain ``cls_score`` and ``bbox_pred``, - if ``return_shared_feat`` is True, append ``x_shared`` to the - returned tuple. - """ - x_shared = self._forward_shared(x) - out = self._forward_cls_reg(x_shared) - - if return_shared_feat: - out += (x_shared, ) - - return out diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/cascade_roi_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/cascade_roi_head.py deleted file mode 100644 index e17313f20724263864cb8cf068e889ed71822b59..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/cascade_roi_head.py +++ /dev/null @@ -1,631 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch -import torch.nn as nn -from mmcv.runner import ModuleList - -from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner, - build_sampler, merge_aug_bboxes, merge_aug_masks, - multiclass_nms) -from ..builder import HEADS, build_head, build_roi_extractor -from .base_roi_head import BaseRoIHead -from .test_mixins import BBoxTestMixin, MaskTestMixin - - -@HEADS.register_module() -class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): - """Cascade roi head including one bbox head and one mask head. - - https://arxiv.org/abs/1712.00726 - """ - - def __init__(self, - num_stages, - stage_loss_weights, - bbox_roi_extractor=None, - bbox_head=None, - mask_roi_extractor=None, - mask_head=None, - shared_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - assert bbox_roi_extractor is not None - assert bbox_head is not None - assert shared_head is None, \ - 'Shared head is not supported in Cascade RCNN anymore' - - self.num_stages = num_stages - self.stage_loss_weights = stage_loss_weights - super(CascadeRoIHead, self).__init__( - bbox_roi_extractor=bbox_roi_extractor, - bbox_head=bbox_head, - mask_roi_extractor=mask_roi_extractor, - mask_head=mask_head, - shared_head=shared_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg) - - def init_bbox_head(self, bbox_roi_extractor, bbox_head): - """Initialize box head and box roi extractor. - - Args: - bbox_roi_extractor (dict): Config of box roi extractor. - bbox_head (dict): Config of box in box head. 
- """ - self.bbox_roi_extractor = ModuleList() - self.bbox_head = ModuleList() - if not isinstance(bbox_roi_extractor, list): - bbox_roi_extractor = [ - bbox_roi_extractor for _ in range(self.num_stages) - ] - if not isinstance(bbox_head, list): - bbox_head = [bbox_head for _ in range(self.num_stages)] - assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages - for roi_extractor, head in zip(bbox_roi_extractor, bbox_head): - self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor)) - self.bbox_head.append(build_head(head)) - - def init_mask_head(self, mask_roi_extractor, mask_head): - """Initialize mask head and mask roi extractor. - - Args: - mask_roi_extractor (dict): Config of mask roi extractor. - mask_head (dict): Config of mask in mask head. - """ - self.mask_head = nn.ModuleList() - if not isinstance(mask_head, list): - mask_head = [mask_head for _ in range(self.num_stages)] - assert len(mask_head) == self.num_stages - for head in mask_head: - self.mask_head.append(build_head(head)) - if mask_roi_extractor is not None: - self.share_roi_extractor = False - self.mask_roi_extractor = ModuleList() - if not isinstance(mask_roi_extractor, list): - mask_roi_extractor = [ - mask_roi_extractor for _ in range(self.num_stages) - ] - assert len(mask_roi_extractor) == self.num_stages - for roi_extractor in mask_roi_extractor: - self.mask_roi_extractor.append( - build_roi_extractor(roi_extractor)) - else: - self.share_roi_extractor = True - self.mask_roi_extractor = self.bbox_roi_extractor - - def init_assigner_sampler(self): - """Initialize assigner and sampler for each stage.""" - self.bbox_assigner = [] - self.bbox_sampler = [] - if self.train_cfg is not None: - for idx, rcnn_train_cfg in enumerate(self.train_cfg): - self.bbox_assigner.append( - build_assigner(rcnn_train_cfg.assigner)) - self.current_stage = idx - self.bbox_sampler.append( - build_sampler(rcnn_train_cfg.sampler, context=self)) - - def forward_dummy(self, x, proposals): - """Dummy forward function.""" - # bbox head - outs = () - rois = bbox2roi([proposals]) - if self.with_bbox: - for i in range(self.num_stages): - bbox_results = self._bbox_forward(i, x, rois) - outs = outs + (bbox_results['cls_score'], - bbox_results['bbox_pred']) - # mask heads - if self.with_mask: - mask_rois = rois[:100] - for i in range(self.num_stages): - mask_results = self._mask_forward(i, x, mask_rois) - outs = outs + (mask_results['mask_pred'], ) - return outs - - def _bbox_forward(self, stage, x, rois): - """Box head forward function used in both training and testing.""" - bbox_roi_extractor = self.bbox_roi_extractor[stage] - bbox_head = self.bbox_head[stage] - bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], - rois) - # do not support caffe_c4 model anymore - cls_score, bbox_pred = bbox_head(bbox_feats) - - bbox_results = dict( - cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) - return bbox_results - - def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes, - gt_labels, rcnn_train_cfg): - """Run forward function and calculate loss for box head in training.""" - rois = bbox2roi([res.bboxes for res in sampling_results]) - bbox_results = self._bbox_forward(stage, x, rois) - bbox_targets = self.bbox_head[stage].get_targets( - sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) - loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'], - bbox_results['bbox_pred'], rois, - *bbox_targets) - - bbox_results.update( - loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets) - 
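
`init_bbox_head` and `init_mask_head` above accept either a single config dict or one config per stage; a lone dict is simply replicated `num_stages` times. A minimal sketch of that broadcasting behaviour, using plain dicts instead of mmdet config objects (`broadcast_per_stage` and the example head config are illustrative only, not repository code):

```
# Sketch of the per-stage config broadcasting in CascadeRoIHead.init_*_head:
# a single dict is repeated once per stage; a list must already match.
def broadcast_per_stage(cfg, num_stages):
    cfgs = cfg if isinstance(cfg, list) else [cfg] * num_stages
    assert len(cfgs) == num_stages
    return cfgs

example_bbox_head = dict(type='Shared2FCBBoxHead', num_classes=80)
print(broadcast_per_stage(example_bbox_head, num_stages=3))
```
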
return bbox_results - - def _mask_forward(self, stage, x, rois): - """Mask head forward function used in both training and testing.""" - mask_roi_extractor = self.mask_roi_extractor[stage] - mask_head = self.mask_head[stage] - mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], - rois) - # do not support caffe_c4 model anymore - mask_pred = mask_head(mask_feats) - - mask_results = dict(mask_pred=mask_pred) - return mask_results - - def _mask_forward_train(self, - stage, - x, - sampling_results, - gt_masks, - rcnn_train_cfg, - bbox_feats=None): - """Run forward function and calculate loss for mask head in - training.""" - pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) - mask_results = self._mask_forward(stage, x, pos_rois) - - mask_targets = self.mask_head[stage].get_targets( - sampling_results, gt_masks, rcnn_train_cfg) - pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) - loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'], - mask_targets, pos_labels) - - mask_results.update(loss_mask=loss_mask) - return mask_results - - def forward_train(self, - x, - img_metas, - proposal_list, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None): - """ - Args: - x (list[Tensor]): list of multi-level img features. - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - proposals (list[Tensors]): list of region proposals. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - gt_masks (None | Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. 
- - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - losses = dict() - for i in range(self.num_stages): - self.current_stage = i - rcnn_train_cfg = self.train_cfg[i] - lw = self.stage_loss_weights[i] - - # assign gts and sample proposals - sampling_results = [] - if self.with_bbox or self.with_mask: - bbox_assigner = self.bbox_assigner[i] - bbox_sampler = self.bbox_sampler[i] - num_imgs = len(img_metas) - if gt_bboxes_ignore is None: - gt_bboxes_ignore = [None for _ in range(num_imgs)] - - for j in range(num_imgs): - assign_result = bbox_assigner.assign( - proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], - gt_labels[j]) - sampling_result = bbox_sampler.sample( - assign_result, - proposal_list[j], - gt_bboxes[j], - gt_labels[j], - feats=[lvl_feat[j][None] for lvl_feat in x]) - sampling_results.append(sampling_result) - - # bbox head forward and loss - bbox_results = self._bbox_forward_train(i, x, sampling_results, - gt_bboxes, gt_labels, - rcnn_train_cfg) - - for name, value in bbox_results['loss_bbox'].items(): - losses[f's{i}.{name}'] = ( - value * lw if 'loss' in name else value) - - # mask head forward and loss - if self.with_mask: - mask_results = self._mask_forward_train( - i, x, sampling_results, gt_masks, rcnn_train_cfg, - bbox_results['bbox_feats']) - for name, value in mask_results['loss_mask'].items(): - losses[f's{i}.{name}'] = ( - value * lw if 'loss' in name else value) - - # refine bboxes - if i < self.num_stages - 1: - pos_is_gts = [res.pos_is_gt for res in sampling_results] - # bbox_targets is a tuple - roi_labels = bbox_results['bbox_targets'][0] - with torch.no_grad(): - cls_score = bbox_results['cls_score'] - if self.bbox_head[i].custom_activation: - cls_score = self.bbox_head[i].loss_cls.get_activation( - cls_score) - - # Empty proposal. - if cls_score.numel() == 0: - break - - roi_labels = torch.where( - roi_labels == self.bbox_head[i].num_classes, - cls_score[:, :-1].argmax(1), roi_labels) - proposal_list = self.bbox_head[i].refine_bboxes( - bbox_results['rois'], roi_labels, - bbox_results['bbox_pred'], pos_is_gts, img_metas) - - return losses - - def simple_test(self, x, proposal_list, img_metas, rescale=False): - """Test without augmentation. - - Args: - x (tuple[Tensor]): Features from upstream network. Each - has shape (batch_size, c, h, w). - proposal_list (list(Tensor)): Proposals from rpn head. - Each has shape (num_proposals, 5), last dimension - 5 represent (x1, y1, x2, y2, score). - img_metas (list[dict]): Meta information of images. - rescale (bool): Whether to rescale the results to - the original image. Default: True. - - Returns: - list[list[np.ndarray]] or list[tuple]: When no mask branch, - it is bbox results of each image and classes with type - `list[list[np.ndarray]]`. The outer list - corresponds to each image. The inner list - corresponds to each class. When the model has mask branch, - it contains bbox results and mask results. - The outer list corresponds to each image, and first element - of tuple is bbox results, second element is mask results. - """ - assert self.with_bbox, 'Bbox head must be implemented.' 
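
The cascade training loop above stores each stage's losses under keys prefixed with the stage index and scales only the loss terms (not metrics such as `acc`/`pos_acc`) by the stage weight. A minimal sketch of that bookkeeping; `weight_stage_losses` is a hypothetical helper shown for illustration:

```
# Sketch of the stage-wise loss aggregation in CascadeRoIHead.forward_train:
# keys are prefixed with the stage index, and entries containing 'loss'
# are multiplied by the per-stage weight.
def weight_stage_losses(stage_losses, stage_idx, stage_weight):
    out = {}
    for name, value in stage_losses.items():
        out[f's{stage_idx}.{name}'] = (
            value * stage_weight if 'loss' in name else value)
    return out

print(weight_stage_losses({'loss_cls': 0.8, 'loss_bbox': 0.5, 'acc': 91.2},
                          stage_idx=0, stage_weight=1.0))
```
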
- num_imgs = len(proposal_list) - img_shapes = tuple(meta['img_shape'] for meta in img_metas) - ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - # "ms" in variable names means multi-stage - ms_bbox_result = {} - ms_segm_result = {} - ms_scores = [] - rcnn_test_cfg = self.test_cfg - - rois = bbox2roi(proposal_list) - - if rois.shape[0] == 0: - # There is no proposal in the whole batch - bbox_results = [[ - np.zeros((0, 5), dtype=np.float32) - for _ in range(self.bbox_head[-1].num_classes) - ]] * num_imgs - - if self.with_mask: - mask_classes = self.mask_head[-1].num_classes - segm_results = [[[] for _ in range(mask_classes)] - for _ in range(num_imgs)] - results = list(zip(bbox_results, segm_results)) - else: - results = bbox_results - - return results - - for i in range(self.num_stages): - bbox_results = self._bbox_forward(i, x, rois) - - # split batch bbox prediction back to each image - cls_score = bbox_results['cls_score'] - bbox_pred = bbox_results['bbox_pred'] - num_proposals_per_img = tuple( - len(proposals) for proposals in proposal_list) - rois = rois.split(num_proposals_per_img, 0) - cls_score = cls_score.split(num_proposals_per_img, 0) - if isinstance(bbox_pred, torch.Tensor): - bbox_pred = bbox_pred.split(num_proposals_per_img, 0) - else: - bbox_pred = self.bbox_head[i].bbox_pred_split( - bbox_pred, num_proposals_per_img) - ms_scores.append(cls_score) - - if i < self.num_stages - 1: - if self.bbox_head[i].custom_activation: - cls_score = [ - self.bbox_head[i].loss_cls.get_activation(s) - for s in cls_score - ] - refine_rois_list = [] - for j in range(num_imgs): - if rois[j].shape[0] > 0: - bbox_label = cls_score[j][:, :-1].argmax(dim=1) - refined_rois = self.bbox_head[i].regress_by_class( - rois[j], bbox_label, bbox_pred[j], img_metas[j]) - refine_rois_list.append(refined_rois) - rois = torch.cat(refine_rois_list) - - # average scores of each image by stages - cls_score = [ - sum([score[i] for score in ms_scores]) / float(len(ms_scores)) - for i in range(num_imgs) - ] - - # apply bbox post-processing to each image individually - det_bboxes = [] - det_labels = [] - for i in range(num_imgs): - det_bbox, det_label = self.bbox_head[-1].get_bboxes( - rois[i], - cls_score[i], - bbox_pred[i], - img_shapes[i], - scale_factors[i], - rescale=rescale, - cfg=rcnn_test_cfg) - det_bboxes.append(det_bbox) - det_labels.append(det_label) - - bbox_results = [ - bbox2result(det_bboxes[i], det_labels[i], - self.bbox_head[-1].num_classes) - for i in range(num_imgs) - ] - ms_bbox_result['ensemble'] = bbox_results - - if self.with_mask: - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - mask_classes = self.mask_head[-1].num_classes - segm_results = [[[] for _ in range(mask_classes)] - for _ in range(num_imgs)] - else: - if rescale and not isinstance(scale_factors[0], float): - scale_factors = [ - torch.from_numpy(scale_factor).to(det_bboxes[0].device) - for scale_factor in scale_factors - ] - _bboxes = [ - det_bboxes[i][:, :4] * - scale_factors[i] if rescale else det_bboxes[i][:, :4] - for i in range(len(det_bboxes)) - ] - mask_rois = bbox2roi(_bboxes) - num_mask_rois_per_img = tuple( - _bbox.size(0) for _bbox in _bboxes) - aug_masks = [] - for i in range(self.num_stages): - mask_results = self._mask_forward(i, x, mask_rois) - mask_pred = mask_results['mask_pred'] - # split batch mask prediction back to each image - mask_pred = mask_pred.split(num_mask_rois_per_img, 0) - aug_masks.append([ - 
m.sigmoid().cpu().detach().numpy() for m in mask_pred - ]) - - # apply mask post-processing to each image individually - segm_results = [] - for i in range(num_imgs): - if det_bboxes[i].shape[0] == 0: - segm_results.append( - [[] - for _ in range(self.mask_head[-1].num_classes)]) - else: - aug_mask = [mask[i] for mask in aug_masks] - merged_masks = merge_aug_masks( - aug_mask, [[img_metas[i]]] * self.num_stages, - rcnn_test_cfg) - segm_result = self.mask_head[-1].get_seg_masks( - merged_masks, _bboxes[i], det_labels[i], - rcnn_test_cfg, ori_shapes[i], scale_factors[i], - rescale) - segm_results.append(segm_result) - ms_segm_result['ensemble'] = segm_results - - if self.with_mask: - results = list( - zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble'])) - else: - results = ms_bbox_result['ensemble'] - - return results - - def aug_test(self, features, proposal_list, img_metas, rescale=False): - """Test with augmentations. - - If rescale is False, then returned bboxes and masks will fit the scale - of imgs[0]. - """ - rcnn_test_cfg = self.test_cfg - aug_bboxes = [] - aug_scores = [] - for x, img_meta in zip(features, img_metas): - # only one image in the batch - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - flip_direction = img_meta[0]['flip_direction'] - - proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, - scale_factor, flip, flip_direction) - # "ms" in variable names means multi-stage - ms_scores = [] - - rois = bbox2roi([proposals]) - - if rois.shape[0] == 0: - # There is no proposal in the single image - aug_bboxes.append(rois.new_zeros(0, 4)) - aug_scores.append(rois.new_zeros(0, 1)) - continue - - for i in range(self.num_stages): - bbox_results = self._bbox_forward(i, x, rois) - ms_scores.append(bbox_results['cls_score']) - - if i < self.num_stages - 1: - cls_score = bbox_results['cls_score'] - if self.bbox_head[i].custom_activation: - cls_score = self.bbox_head[i].loss_cls.get_activation( - cls_score) - bbox_label = cls_score[:, :-1].argmax(dim=1) - rois = self.bbox_head[i].regress_by_class( - rois, bbox_label, bbox_results['bbox_pred'], - img_meta[0]) - - cls_score = sum(ms_scores) / float(len(ms_scores)) - bboxes, scores = self.bbox_head[-1].get_bboxes( - rois, - cls_score, - bbox_results['bbox_pred'], - img_shape, - scale_factor, - rescale=False, - cfg=None) - aug_bboxes.append(bboxes) - aug_scores.append(scores) - - # after merging, bboxes will be rescaled to the original image size - merged_bboxes, merged_scores = merge_aug_bboxes( - aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) - det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, - rcnn_test_cfg.score_thr, - rcnn_test_cfg.nms, - rcnn_test_cfg.max_per_img) - - bbox_result = bbox2result(det_bboxes, det_labels, - self.bbox_head[-1].num_classes) - - if self.with_mask: - if det_bboxes.shape[0] == 0: - segm_result = [[] - for _ in range(self.mask_head[-1].num_classes)] - else: - aug_masks = [] - aug_img_metas = [] - for x, img_meta in zip(features, img_metas): - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - flip_direction = img_meta[0]['flip_direction'] - _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, - scale_factor, flip, flip_direction) - mask_rois = bbox2roi([_bboxes]) - for i in range(self.num_stages): - mask_results = self._mask_forward(i, x, mask_rois) - aug_masks.append( - mask_results['mask_pred'].sigmoid().cpu().numpy()) - 
aug_img_metas.append(img_meta) - merged_masks = merge_aug_masks(aug_masks, aug_img_metas, - self.test_cfg) - - ori_shape = img_metas[0][0]['ori_shape'] - dummy_scale_factor = np.ones(4) - segm_result = self.mask_head[-1].get_seg_masks( - merged_masks, - det_bboxes, - det_labels, - rcnn_test_cfg, - ori_shape, - scale_factor=dummy_scale_factor, - rescale=False) - return [(bbox_result, segm_result)] - else: - return [bbox_result] - - def onnx_export(self, x, proposals, img_metas): - - assert self.with_bbox, 'Bbox head must be implemented.' - assert proposals.shape[0] == 1, 'Only support one input image ' \ - 'while in exporting to ONNX' - # remove the scores - rois = proposals[..., :-1] - batch_size = rois.shape[0] - num_proposals_per_img = rois.shape[1] - # Eliminate the batch dimension - rois = rois.view(-1, 4) - - # add dummy batch index - rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], dim=-1) - - max_shape = img_metas[0]['img_shape_for_onnx'] - ms_scores = [] - rcnn_test_cfg = self.test_cfg - - for i in range(self.num_stages): - bbox_results = self._bbox_forward(i, x, rois) - - cls_score = bbox_results['cls_score'] - bbox_pred = bbox_results['bbox_pred'] - # Recover the batch dimension - rois = rois.reshape(batch_size, num_proposals_per_img, - rois.size(-1)) - cls_score = cls_score.reshape(batch_size, num_proposals_per_img, - cls_score.size(-1)) - bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4) - ms_scores.append(cls_score) - if i < self.num_stages - 1: - assert self.bbox_head[i].reg_class_agnostic - new_rois = self.bbox_head[i].bbox_coder.decode( - rois[..., 1:], bbox_pred, max_shape=max_shape) - rois = new_rois.reshape(-1, new_rois.shape[-1]) - # add dummy batch index - rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], - dim=-1) - - cls_score = sum(ms_scores) / float(len(ms_scores)) - bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4) - rois = rois.reshape(batch_size, num_proposals_per_img, -1) - det_bboxes, det_labels = self.bbox_head[-1].onnx_export( - rois, cls_score, bbox_pred, max_shape, cfg=rcnn_test_cfg) - - if not self.with_mask: - return det_bboxes, det_labels - else: - batch_index = torch.arange( - det_bboxes.size(0), - device=det_bboxes.device).float().view(-1, 1, 1).expand( - det_bboxes.size(0), det_bboxes.size(1), 1) - rois = det_bboxes[..., :4] - mask_rois = torch.cat([batch_index, rois], dim=-1) - mask_rois = mask_rois.view(-1, 5) - aug_masks = [] - for i in range(self.num_stages): - mask_results = self._mask_forward(i, x, mask_rois) - mask_pred = mask_results['mask_pred'] - aug_masks.append(mask_pred) - max_shape = img_metas[0]['img_shape_for_onnx'] - # calculate the mean of masks from several stage - mask_pred = sum(aug_masks) / len(aug_masks) - segm_results = self.mask_head[-1].onnx_export( - mask_pred, rois.reshape(-1, 4), det_labels.reshape(-1), - self.test_cfg, max_shape) - segm_results = segm_results.reshape(batch_size, - det_bboxes.shape[1], - max_shape[0], max_shape[1]) - return det_bboxes, det_labels, segm_results diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/double_roi_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/double_roi_head.py deleted file mode 100644 index 895b5d3067846e023f21482fb1628e9bdb0035fd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/double_roi_head.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
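
At test time, `CascadeRoIHead` above ensembles the stages by averaging their classification scores before post-processing (`simple_test`, `aug_test`, and `onnx_export` all do this; the ONNX path averages the stage mask predictions the same way). A minimal sketch of that averaging with illustrative shapes (not repository code):

```
# Sketch of the test-time stage ensembling used by CascadeRoIHead:
# per-stage classification scores are simply averaged.
import torch

num_stages, num_proposals, num_classes = 3, 5, 81
ms_scores = [torch.randn(num_proposals, num_classes) for _ in range(num_stages)]

cls_score = sum(ms_scores) / float(len(ms_scores))
print(cls_score.shape)  # torch.Size([5, 81])
```
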
-from ..builder import HEADS -from .standard_roi_head import StandardRoIHead - - -@HEADS.register_module() -class DoubleHeadRoIHead(StandardRoIHead): - """RoI head for Double Head RCNN. - - https://arxiv.org/abs/1904.06493 - """ - - def __init__(self, reg_roi_scale_factor, **kwargs): - super(DoubleHeadRoIHead, self).__init__(**kwargs) - self.reg_roi_scale_factor = reg_roi_scale_factor - - def _bbox_forward(self, x, rois): - """Box head forward function used in both training and testing time.""" - bbox_cls_feats = self.bbox_roi_extractor( - x[:self.bbox_roi_extractor.num_inputs], rois) - bbox_reg_feats = self.bbox_roi_extractor( - x[:self.bbox_roi_extractor.num_inputs], - rois, - roi_scale_factor=self.reg_roi_scale_factor) - if self.with_shared_head: - bbox_cls_feats = self.shared_head(bbox_cls_feats) - bbox_reg_feats = self.shared_head(bbox_reg_feats) - cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) - - bbox_results = dict( - cls_score=cls_score, - bbox_pred=bbox_pred, - bbox_feats=bbox_cls_feats) - return bbox_results diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/dynamic_roi_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/dynamic_roi_head.py deleted file mode 100644 index 4c2b6cdac1e38a00a810be03275f66e5257fd6fb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/dynamic_roi_head.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch - -from mmdet.core import bbox2roi -from mmdet.models.losses import SmoothL1Loss -from ..builder import HEADS -from .standard_roi_head import StandardRoIHead - -EPS = 1e-15 - - -@HEADS.register_module() -class DynamicRoIHead(StandardRoIHead): - """RoI head for `Dynamic R-CNN `_.""" - - def __init__(self, **kwargs): - super(DynamicRoIHead, self).__init__(**kwargs) - assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss) - # the IoU history of the past `update_iter_interval` iterations - self.iou_history = [] - # the beta history of the past `update_iter_interval` iterations - self.beta_history = [] - - def forward_train(self, - x, - img_metas, - proposal_list, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None): - """Forward function for training. - - Args: - x (list[Tensor]): list of multi-level img features. - - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - - proposals (list[Tensors]): list of region proposals. - - gt_bboxes (list[Tensor]): each item are the truth boxes for each - image in [tl_x, tl_y, br_x, br_y] format. - - gt_labels (list[Tensor]): class indices corresponding to each box - - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - gt_masks (None | Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. 
- - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - # assign gts and sample proposals - if self.with_bbox or self.with_mask: - num_imgs = len(img_metas) - if gt_bboxes_ignore is None: - gt_bboxes_ignore = [None for _ in range(num_imgs)] - sampling_results = [] - cur_iou = [] - for i in range(num_imgs): - assign_result = self.bbox_assigner.assign( - proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], - gt_labels[i]) - sampling_result = self.bbox_sampler.sample( - assign_result, - proposal_list[i], - gt_bboxes[i], - gt_labels[i], - feats=[lvl_feat[i][None] for lvl_feat in x]) - # record the `iou_topk`-th largest IoU in an image - iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk, - len(assign_result.max_overlaps)) - ious, _ = torch.topk(assign_result.max_overlaps, iou_topk) - cur_iou.append(ious[-1].item()) - sampling_results.append(sampling_result) - # average the current IoUs over images - cur_iou = np.mean(cur_iou) - self.iou_history.append(cur_iou) - - losses = dict() - # bbox head forward and loss - if self.with_bbox: - bbox_results = self._bbox_forward_train(x, sampling_results, - gt_bboxes, gt_labels, - img_metas) - losses.update(bbox_results['loss_bbox']) - - # mask head forward and loss - if self.with_mask: - mask_results = self._mask_forward_train(x, sampling_results, - bbox_results['bbox_feats'], - gt_masks, img_metas) - losses.update(mask_results['loss_mask']) - - # update IoU threshold and SmoothL1 beta - update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval - if len(self.iou_history) % update_iter_interval == 0: - new_iou_thr, new_beta = self.update_hyperparameters() - - return losses - - def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, - img_metas): - num_imgs = len(img_metas) - rois = bbox2roi([res.bboxes for res in sampling_results]) - bbox_results = self._bbox_forward(x, rois) - - bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, - gt_labels, self.train_cfg) - # record the `beta_topk`-th smallest target - # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets - # and bbox_weights, respectively - pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1) - num_pos = len(pos_inds) - cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1) - beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs, - num_pos) - cur_target = torch.kthvalue(cur_target, beta_topk)[0].item() - self.beta_history.append(cur_target) - loss_bbox = self.bbox_head.loss(bbox_results['cls_score'], - bbox_results['bbox_pred'], rois, - *bbox_targets) - - bbox_results.update(loss_bbox=loss_bbox) - return bbox_results - - def update_hyperparameters(self): - """Update hyperparameters like IoU thresholds for assigner and beta for - SmoothL1 loss based on the training statistics. - - Returns: - tuple[float]: the updated ``iou_thr`` and ``beta``. 
- """ - new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou, - np.mean(self.iou_history)) - self.iou_history = [] - self.bbox_assigner.pos_iou_thr = new_iou_thr - self.bbox_assigner.neg_iou_thr = new_iou_thr - self.bbox_assigner.min_pos_iou = new_iou_thr - if (np.median(self.beta_history) < EPS): - # avoid 0 or too small value for new_beta - new_beta = self.bbox_head.loss_bbox.beta - else: - new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta, - np.median(self.beta_history)) - self.beta_history = [] - self.bbox_head.loss_bbox.beta = new_beta - return new_iou_thr, new_beta diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/grid_roi_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/grid_roi_head.py deleted file mode 100644 index 333f62975c693fd00d2fa4605be7cef11aa404e1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/grid_roi_head.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch - -from mmdet.core import bbox2result, bbox2roi -from ..builder import HEADS, build_head, build_roi_extractor -from .standard_roi_head import StandardRoIHead - - -@HEADS.register_module() -class GridRoIHead(StandardRoIHead): - """Grid roi head for Grid R-CNN. - - https://arxiv.org/abs/1811.12030 - """ - - def __init__(self, grid_roi_extractor, grid_head, **kwargs): - assert grid_head is not None - super(GridRoIHead, self).__init__(**kwargs) - if grid_roi_extractor is not None: - self.grid_roi_extractor = build_roi_extractor(grid_roi_extractor) - self.share_roi_extractor = False - else: - self.share_roi_extractor = True - self.grid_roi_extractor = self.bbox_roi_extractor - self.grid_head = build_head(grid_head) - - def _random_jitter(self, sampling_results, img_metas, amplitude=0.15): - """Ramdom jitter positive proposals for training.""" - for sampling_result, img_meta in zip(sampling_results, img_metas): - bboxes = sampling_result.pos_bboxes - random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_( - -amplitude, amplitude) - # before jittering - cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2 - wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs() - # after jittering - new_cxcy = cxcy + wh * random_offsets[:, :2] - new_wh = wh * (1 + random_offsets[:, 2:]) - # xywh to xyxy - new_x1y1 = (new_cxcy - new_wh / 2) - new_x2y2 = (new_cxcy + new_wh / 2) - new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1) - # clip bboxes - max_shape = img_meta['img_shape'] - if max_shape is not None: - new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1) - new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1) - - sampling_result.pos_bboxes = new_bboxes - return sampling_results - - def forward_dummy(self, x, proposals): - """Dummy forward function.""" - # bbox head - outs = () - rois = bbox2roi([proposals]) - if self.with_bbox: - bbox_results = self._bbox_forward(x, rois) - outs = outs + (bbox_results['cls_score'], - bbox_results['bbox_pred']) - - # grid head - grid_rois = rois[:100] - grid_feats = self.grid_roi_extractor( - x[:self.grid_roi_extractor.num_inputs], grid_rois) - if self.with_shared_head: - grid_feats = self.shared_head(grid_feats) - grid_pred = self.grid_head(grid_feats) - outs = outs + (grid_pred, ) - - # mask head - if self.with_mask: - mask_rois = rois[:100] - mask_results = self._mask_forward(x, mask_rois) - outs = outs + (mask_results['mask_pred'], ) - return outs - - def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, - img_metas): - """Run 
forward function and calculate loss for box head in training.""" - bbox_results = super(GridRoIHead, - self)._bbox_forward_train(x, sampling_results, - gt_bboxes, gt_labels, - img_metas) - - # Grid head forward and loss - sampling_results = self._random_jitter(sampling_results, img_metas) - pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) - - # GN in head does not support zero shape input - if pos_rois.shape[0] == 0: - return bbox_results - - grid_feats = self.grid_roi_extractor( - x[:self.grid_roi_extractor.num_inputs], pos_rois) - if self.with_shared_head: - grid_feats = self.shared_head(grid_feats) - # Accelerate training - max_sample_num_grid = self.train_cfg.get('max_num_grid', 192) - sample_idx = torch.randperm( - grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid - )] - grid_feats = grid_feats[sample_idx] - - grid_pred = self.grid_head(grid_feats) - - grid_targets = self.grid_head.get_targets(sampling_results, - self.train_cfg) - grid_targets = grid_targets[sample_idx] - - loss_grid = self.grid_head.loss(grid_pred, grid_targets) - - bbox_results['loss_bbox'].update(loss_grid) - return bbox_results - - def simple_test(self, - x, - proposal_list, - img_metas, - proposals=None, - rescale=False): - """Test without augmentation.""" - assert self.with_bbox, 'Bbox head must be implemented.' - - det_bboxes, det_labels = self.simple_test_bboxes( - x, img_metas, proposal_list, self.test_cfg, rescale=False) - # pack rois into bboxes - grid_rois = bbox2roi([det_bbox[:, :4] for det_bbox in det_bboxes]) - if grid_rois.shape[0] != 0: - grid_feats = self.grid_roi_extractor( - x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois) - self.grid_head.test_mode = True - grid_pred = self.grid_head(grid_feats) - # split batch grid head prediction back to each image - num_roi_per_img = tuple(len(det_bbox) for det_bbox in det_bboxes) - grid_pred = { - k: v.split(num_roi_per_img, 0) - for k, v in grid_pred.items() - } - - # apply bbox post-processing to each image individually - bbox_results = [] - num_imgs = len(det_bboxes) - for i in range(num_imgs): - if det_bboxes[i].shape[0] == 0: - bbox_results.append([ - np.zeros((0, 5), dtype=np.float32) - for _ in range(self.bbox_head.num_classes) - ]) - else: - det_bbox = self.grid_head.get_bboxes( - det_bboxes[i], grid_pred['fused'][i], [img_metas[i]]) - if rescale: - det_bbox[:, :4] /= img_metas[i]['scale_factor'] - bbox_results.append( - bbox2result(det_bbox, det_labels[i], - self.bbox_head.num_classes)) - else: - bbox_results = [[ - np.zeros((0, 5), dtype=np.float32) - for _ in range(self.bbox_head.num_classes) - ] for _ in range(len(det_bboxes))] - - if not self.with_mask: - return bbox_results - else: - segm_results = self.simple_test_mask( - x, img_metas, det_bboxes, det_labels, rescale=rescale) - return list(zip(bbox_results, segm_results)) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/htc_roi_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/htc_roi_head.py deleted file mode 100644 index 86a6db10d4ac26901fbd44941de8107e67819d42..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/htc_roi_head.py +++ /dev/null @@ -1,628 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
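`GridRoIHead._random_jitter` above perturbs positive proposals in center/size space before the grid branch is trained. The same transform restated on a plain tensor, detached from the sampling-result objects (amplitude and clipping mirror the deleted code):

```
import torch

def random_jitter_boxes(bboxes, img_shape, amplitude=0.15):
    """Jitter xyxy boxes by random offsets in (cx, cy, w, h) space, then clip."""
    offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(-amplitude, amplitude)
    cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2
    wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()
    new_cxcy = cxcy + wh * offsets[:, :2]
    new_wh = wh * (1 + offsets[:, 2:])
    new_bboxes = torch.cat([new_cxcy - new_wh / 2, new_cxcy + new_wh / 2], dim=1)
    new_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1] - 1)  # x within width
    new_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0] - 1)  # y within height
    return new_bboxes

print(random_jitter_boxes(torch.tensor([[5., 5., 40., 60.]]), img_shape=(100, 120)))
```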
-import numpy as np -import torch -import torch.nn.functional as F - -from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, - merge_aug_masks, multiclass_nms) -from ..builder import HEADS, build_head, build_roi_extractor -from ..utils.brick_wrappers import adaptive_avg_pool2d -from .cascade_roi_head import CascadeRoIHead - - -@HEADS.register_module() -class HybridTaskCascadeRoIHead(CascadeRoIHead): - """Hybrid task cascade roi head including one bbox head and one mask head. - - https://arxiv.org/abs/1901.07518 - """ - - def __init__(self, - num_stages, - stage_loss_weights, - semantic_roi_extractor=None, - semantic_head=None, - semantic_fusion=('bbox', 'mask'), - interleaved=True, - mask_info_flow=True, - **kwargs): - super(HybridTaskCascadeRoIHead, - self).__init__(num_stages, stage_loss_weights, **kwargs) - assert self.with_bbox - assert not self.with_shared_head # shared head is not supported - - if semantic_head is not None: - self.semantic_roi_extractor = build_roi_extractor( - semantic_roi_extractor) - self.semantic_head = build_head(semantic_head) - - self.semantic_fusion = semantic_fusion - self.interleaved = interleaved - self.mask_info_flow = mask_info_flow - - @property - def with_semantic(self): - """bool: whether the head has semantic head""" - if hasattr(self, 'semantic_head') and self.semantic_head is not None: - return True - else: - return False - - def forward_dummy(self, x, proposals): - """Dummy forward function.""" - outs = () - # semantic head - if self.with_semantic: - _, semantic_feat = self.semantic_head(x) - else: - semantic_feat = None - # bbox heads - rois = bbox2roi([proposals]) - for i in range(self.num_stages): - bbox_results = self._bbox_forward( - i, x, rois, semantic_feat=semantic_feat) - outs = outs + (bbox_results['cls_score'], - bbox_results['bbox_pred']) - # mask heads - if self.with_mask: - mask_rois = rois[:100] - mask_roi_extractor = self.mask_roi_extractor[-1] - mask_feats = mask_roi_extractor( - x[:len(mask_roi_extractor.featmap_strides)], mask_rois) - if self.with_semantic and 'mask' in self.semantic_fusion: - mask_semantic_feat = self.semantic_roi_extractor( - [semantic_feat], mask_rois) - mask_feats = mask_feats + mask_semantic_feat - last_feat = None - for i in range(self.num_stages): - mask_head = self.mask_head[i] - if self.mask_info_flow: - mask_pred, last_feat = mask_head(mask_feats, last_feat) - else: - mask_pred = mask_head(mask_feats) - outs = outs + (mask_pred, ) - return outs - - def _bbox_forward_train(self, - stage, - x, - sampling_results, - gt_bboxes, - gt_labels, - rcnn_train_cfg, - semantic_feat=None): - """Run forward function and calculate loss for box head in training.""" - bbox_head = self.bbox_head[stage] - rois = bbox2roi([res.bboxes for res in sampling_results]) - bbox_results = self._bbox_forward( - stage, x, rois, semantic_feat=semantic_feat) - - bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes, - gt_labels, rcnn_train_cfg) - loss_bbox = bbox_head.loss(bbox_results['cls_score'], - bbox_results['bbox_pred'], rois, - *bbox_targets) - - bbox_results.update( - loss_bbox=loss_bbox, - rois=rois, - bbox_targets=bbox_targets, - ) - return bbox_results - - def _mask_forward_train(self, - stage, - x, - sampling_results, - gt_masks, - rcnn_train_cfg, - semantic_feat=None): - """Run forward function and calculate loss for mask head in - training.""" - mask_roi_extractor = self.mask_roi_extractor[stage] - mask_head = self.mask_head[stage] - pos_rois = bbox2roi([res.pos_bboxes for res in 
sampling_results]) - mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], - pos_rois) - - # semantic feature fusion - # element-wise sum for original features and pooled semantic features - if self.with_semantic and 'mask' in self.semantic_fusion: - mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], - pos_rois) - if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: - mask_semantic_feat = F.adaptive_avg_pool2d( - mask_semantic_feat, mask_feats.shape[-2:]) - mask_feats = mask_feats + mask_semantic_feat - - # mask information flow - # forward all previous mask heads to obtain last_feat, and fuse it - # with the normal mask feature - if self.mask_info_flow: - last_feat = None - for i in range(stage): - last_feat = self.mask_head[i]( - mask_feats, last_feat, return_logits=False) - mask_pred = mask_head(mask_feats, last_feat, return_feat=False) - else: - mask_pred = mask_head(mask_feats, return_feat=False) - - mask_targets = mask_head.get_targets(sampling_results, gt_masks, - rcnn_train_cfg) - pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) - loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels) - - mask_results = dict(loss_mask=loss_mask) - return mask_results - - def _bbox_forward(self, stage, x, rois, semantic_feat=None): - """Box head forward function used in both training and testing.""" - bbox_roi_extractor = self.bbox_roi_extractor[stage] - bbox_head = self.bbox_head[stage] - bbox_feats = bbox_roi_extractor( - x[:len(bbox_roi_extractor.featmap_strides)], rois) - if self.with_semantic and 'bbox' in self.semantic_fusion: - bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], - rois) - if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: - bbox_semantic_feat = adaptive_avg_pool2d( - bbox_semantic_feat, bbox_feats.shape[-2:]) - bbox_feats = bbox_feats + bbox_semantic_feat - cls_score, bbox_pred = bbox_head(bbox_feats) - - bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred) - return bbox_results - - def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None): - """Mask head forward function for testing.""" - mask_roi_extractor = self.mask_roi_extractor[stage] - mask_head = self.mask_head[stage] - mask_rois = bbox2roi([bboxes]) - mask_feats = mask_roi_extractor( - x[:len(mask_roi_extractor.featmap_strides)], mask_rois) - if self.with_semantic and 'mask' in self.semantic_fusion: - mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], - mask_rois) - if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: - mask_semantic_feat = F.adaptive_avg_pool2d( - mask_semantic_feat, mask_feats.shape[-2:]) - mask_feats = mask_feats + mask_semantic_feat - if self.mask_info_flow: - last_feat = None - last_pred = None - for i in range(stage): - mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat) - if last_pred is not None: - mask_pred = mask_pred + last_pred - last_pred = mask_pred - mask_pred = mask_head(mask_feats, last_feat, return_feat=False) - if last_pred is not None: - mask_pred = mask_pred + last_pred - else: - mask_pred = mask_head(mask_feats) - return mask_pred - - def forward_train(self, - x, - img_metas, - proposal_list, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None, - gt_semantic_seg=None): - """ - Args: - x (list[Tensor]): list of multi-level img features. - - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. 
- For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - - proposal_list (list[Tensors]): list of region proposals. - - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - - gt_labels (list[Tensor]): class indices corresponding to each box - - gt_bboxes_ignore (None, list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - gt_masks (None, Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. - - gt_semantic_seg (None, list[Tensor]): semantic segmentation masks - used if the architecture supports semantic segmentation task. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - # semantic segmentation part - # 2 outputs: segmentation prediction and embedded features - losses = dict() - if self.with_semantic: - semantic_pred, semantic_feat = self.semantic_head(x) - loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg) - losses['loss_semantic_seg'] = loss_seg - else: - semantic_feat = None - - for i in range(self.num_stages): - self.current_stage = i - rcnn_train_cfg = self.train_cfg[i] - lw = self.stage_loss_weights[i] - - # assign gts and sample proposals - sampling_results = [] - bbox_assigner = self.bbox_assigner[i] - bbox_sampler = self.bbox_sampler[i] - num_imgs = len(img_metas) - if gt_bboxes_ignore is None: - gt_bboxes_ignore = [None for _ in range(num_imgs)] - - for j in range(num_imgs): - assign_result = bbox_assigner.assign(proposal_list[j], - gt_bboxes[j], - gt_bboxes_ignore[j], - gt_labels[j]) - sampling_result = bbox_sampler.sample( - assign_result, - proposal_list[j], - gt_bboxes[j], - gt_labels[j], - feats=[lvl_feat[j][None] for lvl_feat in x]) - sampling_results.append(sampling_result) - - # bbox head forward and loss - bbox_results = \ - self._bbox_forward_train( - i, x, sampling_results, gt_bboxes, gt_labels, - rcnn_train_cfg, semantic_feat) - roi_labels = bbox_results['bbox_targets'][0] - - for name, value in bbox_results['loss_bbox'].items(): - losses[f's{i}.{name}'] = ( - value * lw if 'loss' in name else value) - - # mask head forward and loss - if self.with_mask: - # interleaved execution: use regressed bboxes by the box branch - # to train the mask branch - if self.interleaved: - pos_is_gts = [res.pos_is_gt for res in sampling_results] - with torch.no_grad(): - proposal_list = self.bbox_head[i].refine_bboxes( - bbox_results['rois'], roi_labels, - bbox_results['bbox_pred'], pos_is_gts, img_metas) - # re-assign and sample 512 RoIs from 512 RoIs - sampling_results = [] - for j in range(num_imgs): - assign_result = bbox_assigner.assign( - proposal_list[j], gt_bboxes[j], - gt_bboxes_ignore[j], gt_labels[j]) - sampling_result = bbox_sampler.sample( - assign_result, - proposal_list[j], - gt_bboxes[j], - gt_labels[j], - feats=[lvl_feat[j][None] for lvl_feat in x]) - sampling_results.append(sampling_result) - mask_results = self._mask_forward_train( - i, x, sampling_results, gt_masks, rcnn_train_cfg, - semantic_feat) - for name, value in mask_results['loss_mask'].items(): - losses[f's{i}.{name}'] = ( - value * lw if 'loss' in name else value) - - # refine bboxes (same as Cascade R-CNN) - if i < self.num_stages - 1 and not self.interleaved: - pos_is_gts = [res.pos_is_gt for res in sampling_results] - with torch.no_grad(): - proposal_list = self.bbox_head[i].refine_bboxes( - bbox_results['rois'], roi_labels, - bbox_results['bbox_pred'], pos_is_gts, 
img_metas) - - return losses - - def simple_test(self, x, proposal_list, img_metas, rescale=False): - """Test without augmentation. - - Args: - x (tuple[Tensor]): Features from upstream network. Each - has shape (batch_size, c, h, w). - proposal_list (list(Tensor)): Proposals from rpn head. - Each has shape (num_proposals, 5), last dimension - 5 represent (x1, y1, x2, y2, score). - img_metas (list[dict]): Meta information of images. - rescale (bool): Whether to rescale the results to - the original image. Default: True. - - Returns: - list[list[np.ndarray]] or list[tuple]: When no mask branch, - it is bbox results of each image and classes with type - `list[list[np.ndarray]]`. The outer list - corresponds to each image. The inner list - corresponds to each class. When the model has mask branch, - it contains bbox results and mask results. - The outer list corresponds to each image, and first element - of tuple is bbox results, second element is mask results. - """ - if self.with_semantic: - _, semantic_feat = self.semantic_head(x) - else: - semantic_feat = None - - num_imgs = len(proposal_list) - img_shapes = tuple(meta['img_shape'] for meta in img_metas) - ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - # "ms" in variable names means multi-stage - ms_bbox_result = {} - ms_segm_result = {} - ms_scores = [] - rcnn_test_cfg = self.test_cfg - - rois = bbox2roi(proposal_list) - - if rois.shape[0] == 0: - # There is no proposal in the whole batch - bbox_results = [[ - np.zeros((0, 5), dtype=np.float32) - for _ in range(self.bbox_head[-1].num_classes) - ]] * num_imgs - - if self.with_mask: - mask_classes = self.mask_head[-1].num_classes - segm_results = [[[] for _ in range(mask_classes)] - for _ in range(num_imgs)] - results = list(zip(bbox_results, segm_results)) - else: - results = bbox_results - - return results - - for i in range(self.num_stages): - bbox_head = self.bbox_head[i] - bbox_results = self._bbox_forward( - i, x, rois, semantic_feat=semantic_feat) - # split batch bbox prediction back to each image - cls_score = bbox_results['cls_score'] - bbox_pred = bbox_results['bbox_pred'] - num_proposals_per_img = tuple(len(p) for p in proposal_list) - rois = rois.split(num_proposals_per_img, 0) - cls_score = cls_score.split(num_proposals_per_img, 0) - bbox_pred = bbox_pred.split(num_proposals_per_img, 0) - ms_scores.append(cls_score) - - if i < self.num_stages - 1: - refine_rois_list = [] - for j in range(num_imgs): - if rois[j].shape[0] > 0: - bbox_label = cls_score[j][:, :-1].argmax(dim=1) - refine_rois = bbox_head.regress_by_class( - rois[j], bbox_label, bbox_pred[j], img_metas[j]) - refine_rois_list.append(refine_rois) - rois = torch.cat(refine_rois_list) - - # average scores of each image by stages - cls_score = [ - sum([score[i] for score in ms_scores]) / float(len(ms_scores)) - for i in range(num_imgs) - ] - - # apply bbox post-processing to each image individually - det_bboxes = [] - det_labels = [] - for i in range(num_imgs): - det_bbox, det_label = self.bbox_head[-1].get_bboxes( - rois[i], - cls_score[i], - bbox_pred[i], - img_shapes[i], - scale_factors[i], - rescale=rescale, - cfg=rcnn_test_cfg) - det_bboxes.append(det_bbox) - det_labels.append(det_label) - bbox_result = [ - bbox2result(det_bboxes[i], det_labels[i], - self.bbox_head[-1].num_classes) - for i in range(num_imgs) - ] - ms_bbox_result['ensemble'] = bbox_result - - if self.with_mask: - if all(det_bbox.shape[0] == 0 for det_bbox in 
det_bboxes): - mask_classes = self.mask_head[-1].num_classes - segm_results = [[[] for _ in range(mask_classes)] - for _ in range(num_imgs)] - else: - if rescale and not isinstance(scale_factors[0], float): - scale_factors = [ - torch.from_numpy(scale_factor).to(det_bboxes[0].device) - for scale_factor in scale_factors - ] - _bboxes = [ - det_bboxes[i][:, :4] * - scale_factors[i] if rescale else det_bboxes[i] - for i in range(num_imgs) - ] - mask_rois = bbox2roi(_bboxes) - aug_masks = [] - mask_roi_extractor = self.mask_roi_extractor[-1] - mask_feats = mask_roi_extractor( - x[:len(mask_roi_extractor.featmap_strides)], mask_rois) - if self.with_semantic and 'mask' in self.semantic_fusion: - mask_semantic_feat = self.semantic_roi_extractor( - [semantic_feat], mask_rois) - mask_feats = mask_feats + mask_semantic_feat - last_feat = None - - num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes) - for i in range(self.num_stages): - mask_head = self.mask_head[i] - if self.mask_info_flow: - mask_pred, last_feat = mask_head(mask_feats, last_feat) - else: - mask_pred = mask_head(mask_feats) - - # split batch mask prediction back to each image - mask_pred = mask_pred.split(num_bbox_per_img, 0) - aug_masks.append( - [mask.sigmoid().cpu().numpy() for mask in mask_pred]) - - # apply mask post-processing to each image individually - segm_results = [] - for i in range(num_imgs): - if det_bboxes[i].shape[0] == 0: - segm_results.append( - [[] - for _ in range(self.mask_head[-1].num_classes)]) - else: - aug_mask = [mask[i] for mask in aug_masks] - merged_mask = merge_aug_masks( - aug_mask, [[img_metas[i]]] * self.num_stages, - rcnn_test_cfg) - segm_result = self.mask_head[-1].get_seg_masks( - merged_mask, _bboxes[i], det_labels[i], - rcnn_test_cfg, ori_shapes[i], scale_factors[i], - rescale) - segm_results.append(segm_result) - ms_segm_result['ensemble'] = segm_results - - if self.with_mask: - results = list( - zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble'])) - else: - results = ms_bbox_result['ensemble'] - - return results - - def aug_test(self, img_feats, proposal_list, img_metas, rescale=False): - """Test with augmentations. - - If rescale is False, then returned bboxes and masks will fit the scale - of imgs[0]. 
- """ - if self.with_semantic: - semantic_feats = [ - self.semantic_head(feat)[1] for feat in img_feats - ] - else: - semantic_feats = [None] * len(img_metas) - - rcnn_test_cfg = self.test_cfg - aug_bboxes = [] - aug_scores = [] - for x, img_meta, semantic in zip(img_feats, img_metas, semantic_feats): - # only one image in the batch - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - flip_direction = img_meta[0]['flip_direction'] - - proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, - scale_factor, flip, flip_direction) - # "ms" in variable names means multi-stage - ms_scores = [] - - rois = bbox2roi([proposals]) - - if rois.shape[0] == 0: - # There is no proposal in the single image - aug_bboxes.append(rois.new_zeros(0, 4)) - aug_scores.append(rois.new_zeros(0, 1)) - continue - - for i in range(self.num_stages): - bbox_head = self.bbox_head[i] - bbox_results = self._bbox_forward( - i, x, rois, semantic_feat=semantic) - ms_scores.append(bbox_results['cls_score']) - - if i < self.num_stages - 1: - bbox_label = bbox_results['cls_score'].argmax(dim=1) - rois = bbox_head.regress_by_class( - rois, bbox_label, bbox_results['bbox_pred'], - img_meta[0]) - - cls_score = sum(ms_scores) / float(len(ms_scores)) - bboxes, scores = self.bbox_head[-1].get_bboxes( - rois, - cls_score, - bbox_results['bbox_pred'], - img_shape, - scale_factor, - rescale=False, - cfg=None) - aug_bboxes.append(bboxes) - aug_scores.append(scores) - - # after merging, bboxes will be rescaled to the original image size - merged_bboxes, merged_scores = merge_aug_bboxes( - aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) - det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, - rcnn_test_cfg.score_thr, - rcnn_test_cfg.nms, - rcnn_test_cfg.max_per_img) - - bbox_result = bbox2result(det_bboxes, det_labels, - self.bbox_head[-1].num_classes) - - if self.with_mask: - if det_bboxes.shape[0] == 0: - segm_result = [[] - for _ in range(self.mask_head[-1].num_classes)] - else: - aug_masks = [] - aug_img_metas = [] - for x, img_meta, semantic in zip(img_feats, img_metas, - semantic_feats): - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - flip_direction = img_meta[0]['flip_direction'] - _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, - scale_factor, flip, flip_direction) - mask_rois = bbox2roi([_bboxes]) - mask_feats = self.mask_roi_extractor[-1]( - x[:len(self.mask_roi_extractor[-1].featmap_strides)], - mask_rois) - if self.with_semantic: - semantic_feat = semantic - mask_semantic_feat = self.semantic_roi_extractor( - [semantic_feat], mask_rois) - if mask_semantic_feat.shape[-2:] != mask_feats.shape[ - -2:]: - mask_semantic_feat = F.adaptive_avg_pool2d( - mask_semantic_feat, mask_feats.shape[-2:]) - mask_feats = mask_feats + mask_semantic_feat - last_feat = None - for i in range(self.num_stages): - mask_head = self.mask_head[i] - if self.mask_info_flow: - mask_pred, last_feat = mask_head( - mask_feats, last_feat) - else: - mask_pred = mask_head(mask_feats) - aug_masks.append(mask_pred.sigmoid().cpu().numpy()) - aug_img_metas.append(img_meta) - merged_masks = merge_aug_masks(aug_masks, aug_img_metas, - self.test_cfg) - - ori_shape = img_metas[0][0]['ori_shape'] - segm_result = self.mask_head[-1].get_seg_masks( - merged_masks, - det_bboxes, - det_labels, - rcnn_test_cfg, - ori_shape, - scale_factor=1.0, - rescale=False) - return [(bbox_result, segm_result)] - else: - return 
[bbox_result] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/__init__.py deleted file mode 100644 index 48a5d4227be41b8985403251e1803f78cf500636..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .coarse_mask_head import CoarseMaskHead -from .dynamic_mask_head import DynamicMaskHead -from .fcn_mask_head import FCNMaskHead -from .feature_relay_head import FeatureRelayHead -from .fused_semantic_head import FusedSemanticHead -from .global_context_head import GlobalContextHead -from .grid_head import GridHead -from .htc_mask_head import HTCMaskHead -from .mask_point_head import MaskPointHead -from .maskiou_head import MaskIoUHead -from .scnet_mask_head import SCNetMaskHead -from .scnet_semantic_head import SCNetSemanticHead - -__all__ = [ - 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', - 'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead', - 'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead', - 'DynamicMaskHead' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py deleted file mode 100644 index 946254cb4fe2544a0c6d390afbf40e2c50720f9e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmcv.cnn import ConvModule, Linear -from mmcv.runner import ModuleList, auto_fp16 - -from mmdet.models.builder import HEADS -from .fcn_mask_head import FCNMaskHead - - -@HEADS.register_module() -class CoarseMaskHead(FCNMaskHead): - """Coarse mask head used in PointRend. - - Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample - the input feature map instead of upsample it. - - Args: - num_convs (int): Number of conv layers in the head. Default: 0. - num_fcs (int): Number of fc layers in the head. Default: 2. - fc_out_channels (int): Number of output channels of fc layer. - Default: 1024. - downsample_factor (int): The factor that feature map is downsampled by. - Default: 2. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - num_convs=0, - num_fcs=2, - fc_out_channels=1024, - downsample_factor=2, - init_cfg=dict( - type='Xavier', - override=[ - dict(name='fcs'), - dict(type='Constant', val=0.001, name='fc_logits') - ]), - *arg, - **kwarg): - super(CoarseMaskHead, self).__init__( - *arg, - num_convs=num_convs, - upsample_cfg=dict(type=None), - init_cfg=None, - **kwarg) - self.init_cfg = init_cfg - self.num_fcs = num_fcs - assert self.num_fcs > 0 - self.fc_out_channels = fc_out_channels - self.downsample_factor = downsample_factor - assert self.downsample_factor >= 1 - # remove conv_logit - delattr(self, 'conv_logits') - - if downsample_factor > 1: - downsample_in_channels = ( - self.conv_out_channels - if self.num_convs > 0 else self.in_channels) - self.downsample_conv = ConvModule( - downsample_in_channels, - self.conv_out_channels, - kernel_size=downsample_factor, - stride=downsample_factor, - padding=0, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) - else: - self.downsample_conv = None - - self.output_size = (self.roi_feat_size[0] // downsample_factor, - self.roi_feat_size[1] // downsample_factor) - self.output_area = self.output_size[0] * self.output_size[1] - - last_layer_dim = self.conv_out_channels * self.output_area - - self.fcs = ModuleList() - for i in range(num_fcs): - fc_in_channels = ( - last_layer_dim if i == 0 else self.fc_out_channels) - self.fcs.append(Linear(fc_in_channels, self.fc_out_channels)) - last_layer_dim = self.fc_out_channels - output_channels = self.num_classes * self.output_area - self.fc_logits = Linear(last_layer_dim, output_channels) - - def init_weights(self): - super(FCNMaskHead, self).init_weights() - - @auto_fp16() - def forward(self, x): - for conv in self.convs: - x = conv(x) - - if self.downsample_conv is not None: - x = self.downsample_conv(x) - - x = x.flatten(1) - for fc in self.fcs: - x = self.relu(fc(x)) - mask_pred = self.fc_logits(x).view( - x.size(0), self.num_classes, *self.output_size) - return mask_pred diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py deleted file mode 100644 index 5bbe7eea49cae55ef3c4bdbb17e41f5788e45c79..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -from mmcv.runner import auto_fp16, force_fp32 - -from mmdet.core import mask_target -from mmdet.models.builder import HEADS -from mmdet.models.dense_heads.atss_head import reduce_mean -from mmdet.models.utils import build_transformer -from .fcn_mask_head import FCNMaskHead - - -@HEADS.register_module() -class DynamicMaskHead(FCNMaskHead): - r"""Dynamic Mask Head for - `Instances as Queries `_ - - Args: - num_convs (int): Number of convolution layer. - Defaults to 4. - roi_feat_size (int): The output size of RoI extractor, - Defaults to 14. - in_channels (int): Input feature channels. - Defaults to 256. - conv_kernel_size (int): Kernel size of convolution layers. - Defaults to 3. - conv_out_channels (int): Output channels of convolution layers. - Defaults to 256. - num_classes (int): Number of classes. - Defaults to 80 - class_agnostic (int): Whether generate class agnostic prediction. - Defaults to False. - dropout (float): Probability of drop the channel. - Defaults to 0.0 - upsample_cfg (dict): The config for upsample layer. 
- conv_cfg (dict): The convolution layer config. - norm_cfg (dict): The norm layer config. - dynamic_conv_cfg (dict): The dynamic convolution layer config. - loss_mask (dict): The config for mask loss. - """ - - def __init__(self, - num_convs=4, - roi_feat_size=14, - in_channels=256, - conv_kernel_size=3, - conv_out_channels=256, - num_classes=80, - class_agnostic=False, - upsample_cfg=dict(type='deconv', scale_factor=2), - conv_cfg=None, - norm_cfg=None, - dynamic_conv_cfg=dict( - type='DynamicConv', - in_channels=256, - feat_channels=64, - out_channels=256, - input_feat_shape=14, - with_proj=False, - act_cfg=dict(type='ReLU', inplace=True), - norm_cfg=dict(type='LN')), - loss_mask=dict(type='DiceLoss', loss_weight=8.0), - **kwargs): - super(DynamicMaskHead, self).__init__( - num_convs=num_convs, - roi_feat_size=roi_feat_size, - in_channels=in_channels, - conv_kernel_size=conv_kernel_size, - conv_out_channels=conv_out_channels, - num_classes=num_classes, - class_agnostic=class_agnostic, - upsample_cfg=upsample_cfg, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - loss_mask=loss_mask, - **kwargs) - assert class_agnostic is False, \ - 'DynamicMaskHead only support class_agnostic=False' - self.fp16_enabled = False - - self.instance_interactive_conv = build_transformer(dynamic_conv_cfg) - - def init_weights(self): - """Use xavier initialization for all weight parameter and set - classification head bias as a specific value when use focal loss.""" - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - nn.init.constant_(self.conv_logits.bias, 0.) - - @auto_fp16() - def forward(self, roi_feat, proposal_feat): - """Forward function of DynamicMaskHead. - - Args: - roi_feat (Tensor): Roi-pooling features with shape - (batch_size*num_proposals, feature_dimensions, - pooling_h , pooling_w). - proposal_feat (Tensor): Intermediate feature get from - diihead in last stage, has shape - (batch_size*num_proposals, feature_dimensions) - - Returns: - mask_pred (Tensor): Predicted foreground masks with shape - (batch_size*num_proposals, num_classes, - pooling_h*2, pooling_w*2). 
- """ - - proposal_feat = proposal_feat.reshape(-1, self.in_channels) - proposal_feat_iic = self.instance_interactive_conv( - proposal_feat, roi_feat) - - x = proposal_feat_iic.permute(0, 2, 1).reshape(roi_feat.size()) - - for conv in self.convs: - x = conv(x) - if self.upsample is not None: - x = self.upsample(x) - if self.upsample_method == 'deconv': - x = self.relu(x) - mask_pred = self.conv_logits(x) - return mask_pred - - @force_fp32(apply_to=('mask_pred', )) - def loss(self, mask_pred, mask_targets, labels): - num_pos = labels.new_ones(labels.size()).float().sum() - avg_factor = torch.clamp(reduce_mean(num_pos), min=1.).item() - loss = dict() - if mask_pred.size(0) == 0: - loss_mask = mask_pred.sum() - else: - loss_mask = self.loss_mask( - mask_pred[torch.arange(num_pos).long(), labels, ...].sigmoid(), - mask_targets, - avg_factor=avg_factor) - loss['loss_mask'] = loss_mask - return loss - - def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg): - - pos_proposals = [res.pos_bboxes for res in sampling_results] - pos_assigned_gt_inds = [ - res.pos_assigned_gt_inds for res in sampling_results - ] - mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, - gt_masks, rcnn_train_cfg) - return mask_targets diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py deleted file mode 100644 index 355d88221403f01a36a9e99d1a12d877a877790a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py +++ /dev/null @@ -1,412 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from warnings import warn - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer -from mmcv.ops.carafe import CARAFEPack -from mmcv.runner import BaseModule, ModuleList, auto_fp16, force_fp32 -from torch.nn.modules.utils import _pair - -from mmdet.core import mask_target -from mmdet.models.builder import HEADS, build_loss - -BYTES_PER_FLOAT = 4 -# TODO: This memory limit may be too much or too little. It would be better to -# determine it based on available resources. 
-GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit - - -@HEADS.register_module() -class FCNMaskHead(BaseModule): - - def __init__(self, - num_convs=4, - roi_feat_size=14, - in_channels=256, - conv_kernel_size=3, - conv_out_channels=256, - num_classes=80, - class_agnostic=False, - upsample_cfg=dict(type='deconv', scale_factor=2), - conv_cfg=None, - norm_cfg=None, - predictor_cfg=dict(type='Conv'), - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0), - init_cfg=None): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - super(FCNMaskHead, self).__init__(init_cfg) - self.upsample_cfg = upsample_cfg.copy() - if self.upsample_cfg['type'] not in [ - None, 'deconv', 'nearest', 'bilinear', 'carafe' - ]: - raise ValueError( - f'Invalid upsample method {self.upsample_cfg["type"]}, ' - 'accepted methods are "deconv", "nearest", "bilinear", ' - '"carafe"') - self.num_convs = num_convs - # WARN: roi_feat_size is reserved and not used - self.roi_feat_size = _pair(roi_feat_size) - self.in_channels = in_channels - self.conv_kernel_size = conv_kernel_size - self.conv_out_channels = conv_out_channels - self.upsample_method = self.upsample_cfg.get('type') - self.scale_factor = self.upsample_cfg.pop('scale_factor', None) - self.num_classes = num_classes - self.class_agnostic = class_agnostic - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.predictor_cfg = predictor_cfg - self.fp16_enabled = False - self.loss_mask = build_loss(loss_mask) - - self.convs = ModuleList() - for i in range(self.num_convs): - in_channels = ( - self.in_channels if i == 0 else self.conv_out_channels) - padding = (self.conv_kernel_size - 1) // 2 - self.convs.append( - ConvModule( - in_channels, - self.conv_out_channels, - self.conv_kernel_size, - padding=padding, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg)) - upsample_in_channels = ( - self.conv_out_channels if self.num_convs > 0 else in_channels) - upsample_cfg_ = self.upsample_cfg.copy() - if self.upsample_method is None: - self.upsample = None - elif self.upsample_method == 'deconv': - upsample_cfg_.update( - in_channels=upsample_in_channels, - out_channels=self.conv_out_channels, - kernel_size=self.scale_factor, - stride=self.scale_factor) - self.upsample = build_upsample_layer(upsample_cfg_) - elif self.upsample_method == 'carafe': - upsample_cfg_.update( - channels=upsample_in_channels, scale_factor=self.scale_factor) - self.upsample = build_upsample_layer(upsample_cfg_) - else: - # suppress warnings - align_corners = (None - if self.upsample_method == 'nearest' else False) - upsample_cfg_.update( - scale_factor=self.scale_factor, - mode=self.upsample_method, - align_corners=align_corners) - self.upsample = build_upsample_layer(upsample_cfg_) - - out_channels = 1 if self.class_agnostic else self.num_classes - logits_in_channel = ( - self.conv_out_channels - if self.upsample_method == 'deconv' else upsample_in_channels) - self.conv_logits = build_conv_layer(self.predictor_cfg, - logits_in_channel, out_channels, 1) - self.relu = nn.ReLU(inplace=True) - self.debug_imgs = None - - def init_weights(self): - super(FCNMaskHead, self).init_weights() - for m in [self.upsample, self.conv_logits]: - if m is None: - continue - elif isinstance(m, CARAFEPack): - m.init_weights() - elif hasattr(m, 'weight') and hasattr(m, 'bias'): - nn.init.kaiming_normal_( - m.weight, mode='fan_out', nonlinearity='relu') - nn.init.constant_(m.bias, 0) - - @auto_fp16() - def forward(self, x): - for conv in self.convs: - 
x = conv(x) - if self.upsample is not None: - x = self.upsample(x) - if self.upsample_method == 'deconv': - x = self.relu(x) - mask_pred = self.conv_logits(x) - return mask_pred - - def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg): - pos_proposals = [res.pos_bboxes for res in sampling_results] - pos_assigned_gt_inds = [ - res.pos_assigned_gt_inds for res in sampling_results - ] - mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, - gt_masks, rcnn_train_cfg) - return mask_targets - - @force_fp32(apply_to=('mask_pred', )) - def loss(self, mask_pred, mask_targets, labels): - """ - Example: - >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA - >>> N = 7 # N = number of extracted ROIs - >>> C, H, W = 11, 32, 32 - >>> # Create example instance of FCN Mask Head. - >>> # There are lots of variations depending on the configuration - >>> self = FCNMaskHead(num_classes=C, num_convs=1) - >>> inputs = torch.rand(N, self.in_channels, H, W) - >>> mask_pred = self.forward(inputs) - >>> sf = self.scale_factor - >>> labels = torch.randint(0, C, size=(N,)) - >>> # With the default properties the mask targets should indicate - >>> # a (potentially soft) single-class label - >>> mask_targets = torch.rand(N, H * sf, W * sf) - >>> loss = self.loss(mask_pred, mask_targets, labels) - >>> print('loss = {!r}'.format(loss)) - """ - loss = dict() - if mask_pred.size(0) == 0: - loss_mask = mask_pred.sum() - else: - if self.class_agnostic: - loss_mask = self.loss_mask(mask_pred, mask_targets, - torch.zeros_like(labels)) - else: - loss_mask = self.loss_mask(mask_pred, mask_targets, labels) - loss['loss_mask'] = loss_mask - return loss - - def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, - ori_shape, scale_factor, rescale): - """Get segmentation masks from mask_pred and bboxes. - - Args: - mask_pred (Tensor or ndarray): shape (n, #class, h, w). - For single-scale testing, mask_pred is the direct output of - model, whose type is Tensor, while for multi-scale testing, - it will be converted to numpy array outside of this method. - det_bboxes (Tensor): shape (n, 4/5) - det_labels (Tensor): shape (n, ) - rcnn_test_cfg (dict): rcnn testing config - ori_shape (Tuple): original image height and width, shape (2,) - scale_factor(ndarray | Tensor): If ``rescale is True``, box - coordinates are divided by this scale factor to fit - ``ori_shape``. - rescale (bool): If True, the resulting masks will be rescaled to - ``ori_shape``. - - Returns: - list[list]: encoded masks. The c-th item in the outer list - corresponds to the c-th class. Given the c-th outer list, the - i-th item in that inner list is the mask for the i-th box with - class label c. - - Example: - >>> import mmcv - >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA - >>> N = 7 # N = number of extracted ROIs - >>> C, H, W = 11, 32, 32 - >>> # Create example instance of FCN Mask Head. - >>> self = FCNMaskHead(num_classes=C, num_convs=0) - >>> inputs = torch.rand(N, self.in_channels, H, W) - >>> mask_pred = self.forward(inputs) - >>> # Each input is associated with some bounding box - >>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N) - >>> det_labels = torch.randint(0, C, size=(N,)) - >>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, }) - >>> ori_shape = (H * 4, W * 4) - >>> scale_factor = torch.FloatTensor((1, 1)) - >>> rescale = False - >>> # Encoded masks are a list for each category. 
- >>> encoded_masks = self.get_seg_masks( - >>> mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, - >>> scale_factor, rescale - >>> ) - >>> assert len(encoded_masks) == C - >>> assert sum(list(map(len, encoded_masks))) == N - """ - if isinstance(mask_pred, torch.Tensor): - mask_pred = mask_pred.sigmoid() - else: - # In AugTest, has been activated before - mask_pred = det_bboxes.new_tensor(mask_pred) - - device = mask_pred.device - cls_segms = [[] for _ in range(self.num_classes) - ] # BG is not included in num_classes - bboxes = det_bboxes[:, :4] - labels = det_labels - - # In most cases, scale_factor should have been - # converted to Tensor when rescale the bbox - if not isinstance(scale_factor, torch.Tensor): - if isinstance(scale_factor, float): - scale_factor = np.array([scale_factor] * 4) - warn('Scale_factor should be a Tensor or ndarray ' - 'with shape (4,), float would be deprecated. ') - assert isinstance(scale_factor, np.ndarray) - scale_factor = torch.Tensor(scale_factor) - - if rescale: - img_h, img_w = ori_shape[:2] - bboxes = bboxes / scale_factor.to(bboxes) - else: - w_scale, h_scale = scale_factor[0], scale_factor[1] - img_h = np.round(ori_shape[0] * h_scale.item()).astype(np.int32) - img_w = np.round(ori_shape[1] * w_scale.item()).astype(np.int32) - - N = len(mask_pred) - # The actual implementation split the input into chunks, - # and paste them chunk by chunk. - if device.type == 'cpu': - # CPU is most efficient when they are pasted one by one with - # skip_empty=True, so that it performs minimal number of - # operations. - num_chunks = N - else: - # GPU benefits from parallelism for larger chunks, - # but may have memory issue - # the types of img_w and img_h are np.int32, - # when the image resolution is large, - # the calculation of num_chunks will overflow. - # so we need to change the types of img_w and img_h to int. - # See https://github.com/open-mmlab/mmdetection/pull/5191 - num_chunks = int( - np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT / - GPU_MEM_LIMIT)) - assert (num_chunks <= - N), 'Default GPU_MEM_LIMIT is too small; try increasing it' - chunks = torch.chunk(torch.arange(N, device=device), num_chunks) - - threshold = rcnn_test_cfg.mask_thr_binary - im_mask = torch.zeros( - N, - img_h, - img_w, - device=device, - dtype=torch.bool if threshold >= 0 else torch.uint8) - - if not self.class_agnostic: - mask_pred = mask_pred[range(N), labels][:, None] - - for inds in chunks: - masks_chunk, spatial_inds = _do_paste_mask( - mask_pred[inds], - bboxes[inds], - img_h, - img_w, - skip_empty=device.type == 'cpu') - - if threshold >= 0: - masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) - else: - # for visualization and debugging - masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) - - im_mask[(inds, ) + spatial_inds] = masks_chunk - - for i in range(N): - cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy()) - return cls_segms - - def onnx_export(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, - ori_shape, **kwargs): - """Get segmentation masks from mask_pred and bboxes. - - Args: - mask_pred (Tensor): shape (n, #class, h, w). - det_bboxes (Tensor): shape (n, 4/5) - det_labels (Tensor): shape (n, ) - rcnn_test_cfg (dict): rcnn testing config - ori_shape (Tuple): original image height and width, shape (2,) - - Returns: - Tensor: a mask of shape (N, img_h, img_w). 
- """ - - mask_pred = mask_pred.sigmoid() - bboxes = det_bboxes[:, :4] - labels = det_labels - # No need to consider rescale and scale_factor while exporting to ONNX - img_h, img_w = ori_shape[:2] - threshold = rcnn_test_cfg.mask_thr_binary - if not self.class_agnostic: - box_inds = torch.arange(mask_pred.shape[0]) - mask_pred = mask_pred[box_inds, labels][:, None] - masks, _ = _do_paste_mask( - mask_pred, bboxes, img_h, img_w, skip_empty=False) - if threshold >= 0: - # should convert to float to avoid problems in TRT - masks = (masks >= threshold).to(dtype=torch.float) - return masks - - -def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True): - """Paste instance masks according to boxes. - - This implementation is modified from - https://github.com/facebookresearch/detectron2/ - - Args: - masks (Tensor): N, 1, H, W - boxes (Tensor): N, 4 - img_h (int): Height of the image to be pasted. - img_w (int): Width of the image to be pasted. - skip_empty (bool): Only paste masks within the region that - tightly bound all boxes, and returns the results this region only. - An important optimization for CPU. - - Returns: - tuple: (Tensor, tuple). The first item is mask tensor, the second one - is the slice object. - If skip_empty == False, the whole image will be pasted. It will - return a mask of shape (N, img_h, img_w) and an empty tuple. - If skip_empty == True, only area around the mask will be pasted. - A mask of shape (N, h', w') and its start and end coordinates - in the original image will be returned. - """ - # On GPU, paste all masks together (up to chunk size) - # by using the entire image to sample the masks - # Compared to pasting them one by one, - # this has more operations but is faster on COCO-scale dataset. - device = masks.device - if skip_empty: - x0_int, y0_int = torch.clamp( - boxes.min(dim=0).values.floor()[:2] - 1, - min=0).to(dtype=torch.int32) - x1_int = torch.clamp( - boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) - y1_int = torch.clamp( - boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) - else: - x0_int, y0_int = 0, 0 - x1_int, y1_int = img_w, img_h - x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 - - N = masks.shape[0] - - img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5 - img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5 - img_y = (img_y - y0) / (y1 - y0) * 2 - 1 - img_x = (img_x - x0) / (x1 - x0) * 2 - 1 - # img_x, img_y have shapes (N, w), (N, h) - # IsInf op is not supported with ONNX<=1.7.0 - if not torch.onnx.is_in_onnx_export(): - if torch.isinf(img_x).any(): - inds = torch.where(torch.isinf(img_x)) - img_x[inds] = 0 - if torch.isinf(img_y).any(): - inds = torch.where(torch.isinf(img_y)) - img_y[inds] = 0 - - gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) - gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) - grid = torch.stack([gx, gy], dim=3) - - img_masks = F.grid_sample( - masks.to(dtype=torch.float32), grid, align_corners=False) - - if skip_empty: - return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) - else: - return img_masks[:, 0], () diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/feature_relay_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/feature_relay_head.py deleted file mode 100644 index 452f37afdb6c8232aac0a68dcb7ccbd256d788b6..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/feature_relay_head.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -from mmcv.runner import BaseModule, auto_fp16 - -from mmdet.models.builder import HEADS - - -@HEADS.register_module() -class FeatureRelayHead(BaseModule): - """Feature Relay Head used in `SCNet `_. - - Args: - in_channels (int, optional): number of input channels. Default: 256. - conv_out_channels (int, optional): number of output channels before - classification layer. Default: 256. - roi_feat_size (int, optional): roi feat size at box head. Default: 7. - scale_factor (int, optional): scale factor to match roi feat size - at mask head. Default: 2. - init_cfg (dict or list[dict], optional): Initialization config dict. - """ - - def __init__(self, - in_channels=1024, - out_conv_channels=256, - roi_feat_size=7, - scale_factor=2, - init_cfg=dict(type='Kaiming', layer='Linear')): - super(FeatureRelayHead, self).__init__(init_cfg) - assert isinstance(roi_feat_size, int) - - self.in_channels = in_channels - self.out_conv_channels = out_conv_channels - self.roi_feat_size = roi_feat_size - self.out_channels = (roi_feat_size**2) * out_conv_channels - self.scale_factor = scale_factor - self.fp16_enabled = False - - self.fc = nn.Linear(self.in_channels, self.out_channels) - self.upsample = nn.Upsample( - scale_factor=scale_factor, mode='bilinear', align_corners=True) - - @auto_fp16() - def forward(self, x): - """Forward function.""" - N, in_C = x.shape - if N > 0: - out_C = self.out_conv_channels - out_HW = self.roi_feat_size - x = self.fc(x) - x = x.reshape(N, out_C, out_HW, out_HW) - x = self.upsample(x) - return x - return None diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py deleted file mode 100644 index c6eaa54ae8c90e305e5ec498a8af7c05db4a831f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule, auto_fp16, force_fp32 - -from mmdet.models.builder import HEADS, build_loss - - -@HEADS.register_module() -class FusedSemanticHead(BaseModule): - r"""Multi-level fused semantic segmentation head. - - .. 
code-block:: none - - in_1 -> 1x1 conv --- - | - in_2 -> 1x1 conv -- | - || - in_3 -> 1x1 conv - || - ||| /-> 1x1 conv (mask prediction) - in_4 -> 1x1 conv -----> 3x3 convs (*4) - | \-> 1x1 conv (feature) - in_5 -> 1x1 conv --- - """ # noqa: W605 - - def __init__(self, - num_ins, - fusion_level, - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=183, - conv_cfg=None, - norm_cfg=None, - ignore_label=None, - loss_weight=None, - loss_seg=dict( - type='CrossEntropyLoss', - ignore_index=255, - loss_weight=0.2), - init_cfg=dict( - type='Kaiming', override=dict(name='conv_logits'))): - super(FusedSemanticHead, self).__init__(init_cfg) - self.num_ins = num_ins - self.fusion_level = fusion_level - self.num_convs = num_convs - self.in_channels = in_channels - self.conv_out_channels = conv_out_channels - self.num_classes = num_classes - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.fp16_enabled = False - - self.lateral_convs = nn.ModuleList() - for i in range(self.num_ins): - self.lateral_convs.append( - ConvModule( - self.in_channels, - self.in_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - inplace=False)) - - self.convs = nn.ModuleList() - for i in range(self.num_convs): - in_channels = self.in_channels if i == 0 else conv_out_channels - self.convs.append( - ConvModule( - in_channels, - conv_out_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.conv_embedding = ConvModule( - conv_out_channels, - conv_out_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) - self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1) - if ignore_label: - loss_seg['ignore_index'] = ignore_label - if loss_weight: - loss_seg['loss_weight'] = loss_weight - if ignore_label or loss_weight: - warnings.warn('``ignore_label`` and ``loss_weight`` would be ' - 'deprecated soon. Please set ``ingore_index`` and ' - '``loss_weight`` in ``loss_seg`` instead.') - self.criterion = build_loss(loss_seg) - - @auto_fp16() - def forward(self, feats): - x = self.lateral_convs[self.fusion_level](feats[self.fusion_level]) - fused_size = tuple(x.shape[-2:]) - for i, feat in enumerate(feats): - if i != self.fusion_level: - feat = F.interpolate( - feat, size=fused_size, mode='bilinear', align_corners=True) - # fix runtime error of "+=" inplace operation in PyTorch 1.10 - x = x + self.lateral_convs[i](feat) - - for i in range(self.num_convs): - x = self.convs[i](x) - - mask_pred = self.conv_logits(x) - x = self.conv_embedding(x) - return mask_pred, x - - @force_fp32(apply_to=('mask_pred', )) - def loss(self, mask_pred, labels): - labels = labels.squeeze(1).long() - loss_semantic_seg = self.criterion(mask_pred, labels) - return loss_semantic_seg diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/global_context_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/global_context_head.py deleted file mode 100644 index af76a174be4dadf603f82b44a64ce487c9c64ca7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/global_context_head.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
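`FusedSemanticHead` above fuses several feature levels by passing each through a 1x1 lateral conv, resizing all of them to the chosen fusion level, and summing. A minimal sketch of that fusion pattern with plain `nn.Conv2d` layers and toy channel counts (not the registered mmdet head):

```
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyFusion(nn.Module):
    """Sum 1x1-projected feature levels at the spatial size of `fusion_level`."""

    def __init__(self, num_levels=5, channels=256, fusion_level=1):
        super().__init__()
        self.fusion_level = fusion_level
        self.laterals = nn.ModuleList(
            nn.Conv2d(channels, channels, 1) for _ in range(num_levels))

    def forward(self, feats):
        x = self.laterals[self.fusion_level](feats[self.fusion_level])
        size = x.shape[-2:]
        for i, feat in enumerate(feats):
            if i != self.fusion_level:
                feat = F.interpolate(feat, size=size, mode='bilinear',
                                     align_corners=True)
                x = x + self.laterals[i](feat)
        return x

feats = [torch.rand(1, 256, s, s) for s in (64, 32, 16, 8, 4)]
print(TinyFusion()(feats).shape)  # torch.Size([1, 256, 32, 32])
```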
-import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule, auto_fp16, force_fp32 - -from mmdet.models.builder import HEADS -from mmdet.models.utils import ResLayer, SimplifiedBasicBlock - - -@HEADS.register_module() -class GlobalContextHead(BaseModule): - """Global context head used in `SCNet `_. - - Args: - num_convs (int, optional): number of convolutional layer in GlbCtxHead. - Default: 4. - in_channels (int, optional): number of input channels. Default: 256. - conv_out_channels (int, optional): number of output channels before - classification layer. Default: 256. - num_classes (int, optional): number of classes. Default: 80. - loss_weight (float, optional): global context loss weight. Default: 1. - conv_cfg (dict, optional): config to init conv layer. Default: None. - norm_cfg (dict, optional): config to init norm layer. Default: None. - conv_to_res (bool, optional): if True, 2 convs will be grouped into - 1 `SimplifiedBasicBlock` using a skip connection. Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - """ - - def __init__(self, - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_weight=1.0, - conv_cfg=None, - norm_cfg=None, - conv_to_res=False, - init_cfg=dict( - type='Normal', std=0.01, override=dict(name='fc'))): - super(GlobalContextHead, self).__init__(init_cfg) - self.num_convs = num_convs - self.in_channels = in_channels - self.conv_out_channels = conv_out_channels - self.num_classes = num_classes - self.loss_weight = loss_weight - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.conv_to_res = conv_to_res - self.fp16_enabled = False - - if self.conv_to_res: - num_res_blocks = num_convs // 2 - self.convs = ResLayer( - SimplifiedBasicBlock, - in_channels, - self.conv_out_channels, - num_res_blocks, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) - self.num_convs = num_res_blocks - else: - self.convs = nn.ModuleList() - for i in range(self.num_convs): - in_channels = self.in_channels if i == 0 else conv_out_channels - self.convs.append( - ConvModule( - in_channels, - conv_out_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - - self.pool = nn.AdaptiveAvgPool2d(1) - self.fc = nn.Linear(conv_out_channels, num_classes) - - self.criterion = nn.BCEWithLogitsLoss() - - @auto_fp16() - def forward(self, feats): - """Forward function.""" - x = feats[-1] - for i in range(self.num_convs): - x = self.convs[i](x) - x = self.pool(x) - - # multi-class prediction - mc_pred = x.reshape(x.size(0), -1) - mc_pred = self.fc(mc_pred) - - return mc_pred, x - - @force_fp32(apply_to=('pred', )) - def loss(self, pred, labels): - """Loss function.""" - labels = [lbl.unique() for lbl in labels] - targets = pred.new_zeros(pred.size()) - for i, label in enumerate(labels): - targets[i, label] = 1.0 - loss = self.loss_weight * self.criterion(pred, targets) - return loss diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/grid_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/grid_head.py deleted file mode 100644 index 0c0702d2a3f8bb7f2292307b907260bdecf1a164..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/grid_head.py +++ /dev/null @@ -1,363 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
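# GridHead (deleted below) is the grid-guided localization branch of Grid R-CNN:
# for every RoI it predicts one heatmap per grid point (grid_points=9, i.e. a
# 3x3 grid, by default), fuses the features of neighbouring points through
# first- and second-order depth-wise 5x5 + 1x1 "transition" convs, and at test
# time get_bboxes() recovers each box edge by a score-weighted vote over the
# grid points lying on that edge.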
-import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule - -from mmdet.models.builder import HEADS, build_loss - - -@HEADS.register_module() -class GridHead(BaseModule): - - def __init__(self, - grid_points=9, - num_convs=8, - roi_feat_size=14, - in_channels=256, - conv_kernel_size=3, - point_feat_channels=64, - deconv_kernel_size=4, - class_agnostic=False, - loss_grid=dict( - type='CrossEntropyLoss', use_sigmoid=True, - loss_weight=15), - conv_cfg=None, - norm_cfg=dict(type='GN', num_groups=36), - init_cfg=[ - dict(type='Kaiming', layer=['Conv2d', 'Linear']), - dict( - type='Normal', - layer='ConvTranspose2d', - std=0.001, - override=dict( - type='Normal', - name='deconv2', - std=0.001, - bias=-np.log(0.99 / 0.01))) - ]): - super(GridHead, self).__init__(init_cfg) - self.grid_points = grid_points - self.num_convs = num_convs - self.roi_feat_size = roi_feat_size - self.in_channels = in_channels - self.conv_kernel_size = conv_kernel_size - self.point_feat_channels = point_feat_channels - self.conv_out_channels = self.point_feat_channels * self.grid_points - self.class_agnostic = class_agnostic - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN': - assert self.conv_out_channels % norm_cfg['num_groups'] == 0 - - assert self.grid_points >= 4 - self.grid_size = int(np.sqrt(self.grid_points)) - if self.grid_size * self.grid_size != self.grid_points: - raise ValueError('grid_points must be a square number') - - # the predicted heatmap is half of whole_map_size - if not isinstance(self.roi_feat_size, int): - raise ValueError('Only square RoIs are supporeted in Grid R-CNN') - self.whole_map_size = self.roi_feat_size * 4 - - # compute point-wise sub-regions - self.sub_regions = self.calc_sub_regions() - - self.convs = [] - for i in range(self.num_convs): - in_channels = ( - self.in_channels if i == 0 else self.conv_out_channels) - stride = 2 if i == 0 else 1 - padding = (self.conv_kernel_size - 1) // 2 - self.convs.append( - ConvModule( - in_channels, - self.conv_out_channels, - self.conv_kernel_size, - stride=stride, - padding=padding, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - bias=True)) - self.convs = nn.Sequential(*self.convs) - - self.deconv1 = nn.ConvTranspose2d( - self.conv_out_channels, - self.conv_out_channels, - kernel_size=deconv_kernel_size, - stride=2, - padding=(deconv_kernel_size - 2) // 2, - groups=grid_points) - self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels) - self.deconv2 = nn.ConvTranspose2d( - self.conv_out_channels, - grid_points, - kernel_size=deconv_kernel_size, - stride=2, - padding=(deconv_kernel_size - 2) // 2, - groups=grid_points) - - # find the 4-neighbor of each grid point - self.neighbor_points = [] - grid_size = self.grid_size - for i in range(grid_size): # i-th column - for j in range(grid_size): # j-th row - neighbors = [] - if i > 0: # left: (i - 1, j) - neighbors.append((i - 1) * grid_size + j) - if j > 0: # up: (i, j - 1) - neighbors.append(i * grid_size + j - 1) - if j < grid_size - 1: # down: (i, j + 1) - neighbors.append(i * grid_size + j + 1) - if i < grid_size - 1: # right: (i + 1, j) - neighbors.append((i + 1) * grid_size + j) - self.neighbor_points.append(tuple(neighbors)) - # total edges in the grid - self.num_edges = sum([len(p) for p in self.neighbor_points]) - - self.forder_trans = nn.ModuleList() # first-order feature transition - self.sorder_trans = 
nn.ModuleList() # second-order feature transition - for neighbors in self.neighbor_points: - fo_trans = nn.ModuleList() - so_trans = nn.ModuleList() - for _ in range(len(neighbors)): - # each transition module consists of a 5x5 depth-wise conv and - # 1x1 conv. - fo_trans.append( - nn.Sequential( - nn.Conv2d( - self.point_feat_channels, - self.point_feat_channels, - 5, - stride=1, - padding=2, - groups=self.point_feat_channels), - nn.Conv2d(self.point_feat_channels, - self.point_feat_channels, 1))) - so_trans.append( - nn.Sequential( - nn.Conv2d( - self.point_feat_channels, - self.point_feat_channels, - 5, - 1, - 2, - groups=self.point_feat_channels), - nn.Conv2d(self.point_feat_channels, - self.point_feat_channels, 1))) - self.forder_trans.append(fo_trans) - self.sorder_trans.append(so_trans) - - self.loss_grid = build_loss(loss_grid) - - def forward(self, x): - assert x.shape[-1] == x.shape[-2] == self.roi_feat_size - # RoI feature transformation, downsample 2x - x = self.convs(x) - - c = self.point_feat_channels - # first-order fusion - x_fo = [None for _ in range(self.grid_points)] - for i, points in enumerate(self.neighbor_points): - x_fo[i] = x[:, i * c:(i + 1) * c] - for j, point_idx in enumerate(points): - x_fo[i] = x_fo[i] + self.forder_trans[i][j]( - x[:, point_idx * c:(point_idx + 1) * c]) - - # second-order fusion - x_so = [None for _ in range(self.grid_points)] - for i, points in enumerate(self.neighbor_points): - x_so[i] = x[:, i * c:(i + 1) * c] - for j, point_idx in enumerate(points): - x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx]) - - # predicted heatmap with fused features - x2 = torch.cat(x_so, dim=1) - x2 = self.deconv1(x2) - x2 = F.relu(self.norm1(x2), inplace=True) - heatmap = self.deconv2(x2) - - # predicted heatmap with original features (applicable during training) - if self.training: - x1 = x - x1 = self.deconv1(x1) - x1 = F.relu(self.norm1(x1), inplace=True) - heatmap_unfused = self.deconv2(x1) - else: - heatmap_unfused = heatmap - - return dict(fused=heatmap, unfused=heatmap_unfused) - - def calc_sub_regions(self): - """Compute point specific representation regions. - - See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details. - """ - # to make it consistent with the original implementation, half_size - # is computed as 2 * quarter_size, which is smaller - half_size = self.whole_map_size // 4 * 2 - sub_regions = [] - for i in range(self.grid_points): - x_idx = i // self.grid_size - y_idx = i % self.grid_size - if x_idx == 0: - sub_x1 = 0 - elif x_idx == self.grid_size - 1: - sub_x1 = half_size - else: - ratio = x_idx / (self.grid_size - 1) - 0.25 - sub_x1 = max(int(ratio * self.whole_map_size), 0) - - if y_idx == 0: - sub_y1 = 0 - elif y_idx == self.grid_size - 1: - sub_y1 = half_size - else: - ratio = y_idx / (self.grid_size - 1) - 0.25 - sub_y1 = max(int(ratio * self.whole_map_size), 0) - sub_regions.append( - (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size)) - return sub_regions - - def get_targets(self, sampling_results, rcnn_train_cfg): - # mix all samples (across images) together. 
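# get_targets() builds the heatmap supervision: positive proposals are expanded
# to twice their size, the gt location of each grid point is obtained by
# bilinearly interpolating between the two opposite gt-box corners (see
# `factors` below), and a circle of radius `rcnn_train_cfg.pos_radius` around
# that location is marked as positive on a whole_map_size x whole_map_size map
# before the per-point sub-region crop further down.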
- pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results], - dim=0).cpu() - pos_gt_bboxes = torch.cat( - [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu() - assert pos_bboxes.shape == pos_gt_bboxes.shape - - # expand pos_bboxes to 2x of original size - x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 - y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 - x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 - y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 - pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) - pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1) - pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1) - - num_rois = pos_bboxes.shape[0] - map_size = self.whole_map_size - # this is not the final target shape - targets = torch.zeros((num_rois, self.grid_points, map_size, map_size), - dtype=torch.float) - - # pre-compute interpolation factors for all grid points. - # the first item is the factor of x-dim, and the second is y-dim. - # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1) - factors = [] - for j in range(self.grid_points): - x_idx = j // self.grid_size - y_idx = j % self.grid_size - factors.append((1 - x_idx / (self.grid_size - 1), - 1 - y_idx / (self.grid_size - 1))) - - radius = rcnn_train_cfg.pos_radius - radius2 = radius**2 - for i in range(num_rois): - # ignore small bboxes - if (pos_bbox_ws[i] <= self.grid_size - or pos_bbox_hs[i] <= self.grid_size): - continue - # for each grid point, mark a small circle as positive - for j in range(self.grid_points): - factor_x, factor_y = factors[j] - gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + ( - 1 - factor_x) * pos_gt_bboxes[i, 2] - gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + ( - 1 - factor_y) * pos_gt_bboxes[i, 3] - - cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] * - map_size) - cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] * - map_size) - - for x in range(cx - radius, cx + radius + 1): - for y in range(cy - radius, cy + radius + 1): - if x >= 0 and x < map_size and y >= 0 and y < map_size: - if (x - cx)**2 + (y - cy)**2 <= radius2: - targets[i, j, y, x] = 1 - # reduce the target heatmap size by a half - # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688). 
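# With the default roi_feat_size=14, whole_map_size = 4 * 14 = 56 and
# half_size = 56 // 4 * 2 = 28, so each of the 9 grid points keeps a 28x28
# crop of the 56x56 target whose top-left corner is 0, 14 or 28 along each
# axis depending on the point's row/column (see calc_sub_regions()).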
- sub_targets = [] - for i in range(self.grid_points): - sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i] - sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2]) - sub_targets = torch.cat(sub_targets, dim=1) - sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device) - return sub_targets - - def loss(self, grid_pred, grid_targets): - loss_fused = self.loss_grid(grid_pred['fused'], grid_targets) - loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets) - loss_grid = loss_fused + loss_unfused - return dict(loss_grid=loss_grid) - - def get_bboxes(self, det_bboxes, grid_pred, img_metas): - # TODO: refactoring - assert det_bboxes.shape[0] == grid_pred.shape[0] - det_bboxes = det_bboxes.cpu() - cls_scores = det_bboxes[:, [4]] - det_bboxes = det_bboxes[:, :4] - grid_pred = grid_pred.sigmoid().cpu() - - R, c, h, w = grid_pred.shape - half_size = self.whole_map_size // 4 * 2 - assert h == w == half_size - assert c == self.grid_points - - # find the point with max scores in the half-sized heatmap - grid_pred = grid_pred.view(R * c, h * w) - pred_scores, pred_position = grid_pred.max(dim=1) - xs = pred_position % w - ys = pred_position // w - - # get the position in the whole heatmap instead of half-sized heatmap - for i in range(self.grid_points): - xs[i::self.grid_points] += self.sub_regions[i][0] - ys[i::self.grid_points] += self.sub_regions[i][1] - - # reshape to (num_rois, grid_points) - pred_scores, xs, ys = tuple( - map(lambda x: x.view(R, c), [pred_scores, xs, ys])) - - # get expanded pos_bboxes - widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1) - heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1) - x1 = (det_bboxes[:, 0, None] - widths / 2) - y1 = (det_bboxes[:, 1, None] - heights / 2) - # map the grid point to the absolute coordinates - abs_xs = (xs.float() + 0.5) / w * widths + x1 - abs_ys = (ys.float() + 0.5) / h * heights + y1 - - # get the grid points indices that fall on the bbox boundaries - x1_inds = [i for i in range(self.grid_size)] - y1_inds = [i * self.grid_size for i in range(self.grid_size)] - x2_inds = [ - self.grid_points - self.grid_size + i - for i in range(self.grid_size) - ] - y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)] - - # voting of all grid points on some boundary - bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum( - dim=1, keepdim=True) / ( - pred_scores[:, x1_inds].sum(dim=1, keepdim=True)) - bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum( - dim=1, keepdim=True) / ( - pred_scores[:, y1_inds].sum(dim=1, keepdim=True)) - bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum( - dim=1, keepdim=True) / ( - pred_scores[:, x2_inds].sum(dim=1, keepdim=True)) - bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum( - dim=1, keepdim=True) / ( - pred_scores[:, y2_inds].sum(dim=1, keepdim=True)) - - bbox_res = torch.cat( - [bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1) - bbox_res[:, [0, 2]].clamp_(min=0, max=img_metas[0]['img_shape'][1]) - bbox_res[:, [1, 3]].clamp_(min=0, max=img_metas[0]['img_shape'][0]) - - return bbox_res diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/htc_mask_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/htc_mask_head.py deleted file mode 100644 index 7ad8592b4c35e4d1c483fe6bc372ee1facb8fde2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/htc_mask_head.py +++ /dev/null @@ -1,39 +0,0 @@ -# 
Copyright (c) OpenMMLab. All rights reserved. -from mmcv.cnn import ConvModule - -from mmdet.models.builder import HEADS -from .fcn_mask_head import FCNMaskHead - - -@HEADS.register_module() -class HTCMaskHead(FCNMaskHead): - - def __init__(self, with_conv_res=True, *args, **kwargs): - super(HTCMaskHead, self).__init__(*args, **kwargs) - self.with_conv_res = with_conv_res - if self.with_conv_res: - self.conv_res = ConvModule( - self.conv_out_channels, - self.conv_out_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) - - def forward(self, x, res_feat=None, return_logits=True, return_feat=True): - if res_feat is not None: - assert self.with_conv_res - res_feat = self.conv_res(res_feat) - x = x + res_feat - for conv in self.convs: - x = conv(x) - res_feat = x - outs = [] - if return_logits: - x = self.upsample(x) - if self.upsample_method == 'deconv': - x = self.relu(x) - mask_pred = self.conv_logits(x) - outs.append(mask_pred) - if return_feat: - outs.append(res_feat) - return outs if len(outs) > 1 else outs[0] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/mask_point_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/mask_point_head.py deleted file mode 100644 index c77c46d2c6fc73872535597068441cdb608e481c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/mask_point_head.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa - -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point -from mmcv.runner import BaseModule - -from mmdet.models.builder import HEADS, build_loss -from mmdet.models.utils import (get_uncertain_point_coords_with_randomness, - get_uncertainty) - - -@HEADS.register_module() -class MaskPointHead(BaseModule): - """A mask point head use in PointRend. - - ``MaskPointHead`` use shared multi-layer perceptron (equivalent to - nn.Conv1d) to predict the logit of input points. The fine-grained feature - and coarse feature will be concatenate together for predication. - - Args: - num_fcs (int): Number of fc layers in the head. Default: 3. - in_channels (int): Number of input channels. Default: 256. - fc_channels (int): Number of fc channels. Default: 256. - num_classes (int): Number of classes for logits. Default: 80. - class_agnostic (bool): Whether use class agnostic classification. - If so, the output channels of logits will be 1. Default: False. - coarse_pred_each_layer (bool): Whether concatenate coarse feature with - the output of each fc layer. Default: True. - conv_cfg (dict | None): Dictionary to construct and config conv layer. - Default: dict(type='Conv1d')) - norm_cfg (dict | None): Dictionary to construct and config norm layer. - Default: None. - loss_point (dict): Dictionary to construct and config loss layer of - point head. Default: dict(type='CrossEntropyLoss', use_mask=True, - loss_weight=1.0). - init_cfg (dict or list[dict], optional): Initialization config dict. 
- """ - - def __init__(self, - num_classes, - num_fcs=3, - in_channels=256, - fc_channels=256, - class_agnostic=False, - coarse_pred_each_layer=True, - conv_cfg=dict(type='Conv1d'), - norm_cfg=None, - act_cfg=dict(type='ReLU'), - loss_point=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0), - init_cfg=dict( - type='Normal', std=0.001, - override=dict(name='fc_logits'))): - super().__init__(init_cfg) - self.num_fcs = num_fcs - self.in_channels = in_channels - self.fc_channels = fc_channels - self.num_classes = num_classes - self.class_agnostic = class_agnostic - self.coarse_pred_each_layer = coarse_pred_each_layer - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.loss_point = build_loss(loss_point) - - fc_in_channels = in_channels + num_classes - self.fcs = nn.ModuleList() - for _ in range(num_fcs): - fc = ConvModule( - fc_in_channels, - fc_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.fcs.append(fc) - fc_in_channels = fc_channels - fc_in_channels += num_classes if self.coarse_pred_each_layer else 0 - - out_channels = 1 if self.class_agnostic else self.num_classes - self.fc_logits = nn.Conv1d( - fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0) - - def forward(self, fine_grained_feats, coarse_feats): - """Classify each point base on fine grained and coarse feats. - - Args: - fine_grained_feats (Tensor): Fine grained feature sampled from FPN, - shape (num_rois, in_channels, num_points). - coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead, - shape (num_rois, num_classes, num_points). - - Returns: - Tensor: Point classification results, - shape (num_rois, num_class, num_points). - """ - - x = torch.cat([fine_grained_feats, coarse_feats], dim=1) - for fc in self.fcs: - x = fc(x) - if self.coarse_pred_each_layer: - x = torch.cat((x, coarse_feats), dim=1) - return self.fc_logits(x) - - def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks, - cfg): - """Get training targets of MaskPointHead for all images. - - Args: - rois (Tensor): Region of Interest, shape (num_rois, 5). - rel_roi_points: Points coordinates relative to RoI, shape - (num_rois, num_points, 2). - sampling_results (:obj:`SamplingResult`): Sampling result after - sampling and assignment. - gt_masks (Tensor) : Ground truth segmentation masks of - corresponding boxes, shape (num_rois, height, width). - cfg (dict): Training cfg. - - Returns: - Tensor: Point target, shape (num_rois, num_points). 
- """ - - num_imgs = len(sampling_results) - rois_list = [] - rel_roi_points_list = [] - for batch_ind in range(num_imgs): - inds = (rois[:, 0] == batch_ind) - rois_list.append(rois[inds]) - rel_roi_points_list.append(rel_roi_points[inds]) - pos_assigned_gt_inds_list = [ - res.pos_assigned_gt_inds for res in sampling_results - ] - cfg_list = [cfg for _ in range(num_imgs)] - - point_targets = map(self._get_target_single, rois_list, - rel_roi_points_list, pos_assigned_gt_inds_list, - gt_masks, cfg_list) - point_targets = list(point_targets) - - if len(point_targets) > 0: - point_targets = torch.cat(point_targets) - - return point_targets - - def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds, - gt_masks, cfg): - """Get training target of MaskPointHead for each image.""" - num_pos = rois.size(0) - num_points = cfg.num_points - if num_pos > 0: - gt_masks_th = ( - gt_masks.to_tensor(rois.dtype, rois.device).index_select( - 0, pos_assigned_gt_inds)) - gt_masks_th = gt_masks_th.unsqueeze(1) - rel_img_points = rel_roi_point_to_rel_img_point( - rois, rel_roi_points, gt_masks_th) - point_targets = point_sample(gt_masks_th, - rel_img_points).squeeze(1) - else: - point_targets = rois.new_zeros((0, num_points)) - return point_targets - - def loss(self, point_pred, point_targets, labels): - """Calculate loss for MaskPointHead. - - Args: - point_pred (Tensor): Point predication result, shape - (num_rois, num_classes, num_points). - point_targets (Tensor): Point targets, shape (num_roi, num_points). - labels (Tensor): Class label of corresponding boxes, - shape (num_rois, ) - - Returns: - dict[str, Tensor]: a dictionary of point loss components - """ - - loss = dict() - if self.class_agnostic: - loss_point = self.loss_point(point_pred, point_targets, - torch.zeros_like(labels)) - else: - loss_point = self.loss_point(point_pred, point_targets, labels) - loss['loss_point'] = loss_point - return loss - - def get_roi_rel_points_train(self, mask_pred, labels, cfg): - """Get ``num_points`` most uncertain points with random points during - train. - - Sample points in [0, 1] x [0, 1] coordinate space based on their - uncertainty. The uncertainties are calculated for each point using - '_get_uncertainty()' function that takes point's logit prediction as - input. - - Args: - mask_pred (Tensor): A tensor of shape (num_rois, num_classes, - mask_height, mask_width) for class-specific or class-agnostic - prediction. - labels (list): The ground truth class for each instance. - cfg (dict): Training config of point head. - - Returns: - point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) - that contains the coordinates sampled points. - """ - point_coords = get_uncertain_point_coords_with_randomness( - mask_pred, labels, cfg.num_points, cfg.oversample_ratio, - cfg.importance_sample_ratio) - return point_coords - - def get_roi_rel_points_test(self, mask_pred, pred_label, cfg): - """Get ``num_points`` most uncertain points during test. - - Args: - mask_pred (Tensor): A tensor of shape (num_rois, num_classes, - mask_height, mask_width) for class-specific or class-agnostic - prediction. - pred_label (list): The predication class for each instance. - cfg (dict): Testing config of point head. - - Returns: - point_indices (Tensor): A tensor of shape (num_rois, num_points) - that contains indices from [0, mask_height x mask_width) of the - most uncertain points. 
- point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) - that contains [0, 1] x [0, 1] normalized coordinates of the - most uncertain points from the [mask_height, mask_width] grid . - """ - num_points = cfg.subdivision_num_points - uncertainty_map = get_uncertainty(mask_pred, pred_label) - num_rois, _, mask_height, mask_width = uncertainty_map.shape - - # During ONNX exporting, the type of each elements of 'shape' is - # `Tensor(float)`, while it is `float` during PyTorch inference. - if isinstance(mask_height, torch.Tensor): - h_step = 1.0 / mask_height.float() - w_step = 1.0 / mask_width.float() - else: - h_step = 1.0 / mask_height - w_step = 1.0 / mask_width - # cast to int to avoid dynamic K for TopK op in ONNX - mask_size = int(mask_height * mask_width) - uncertainty_map = uncertainty_map.view(num_rois, mask_size) - num_points = min(mask_size, num_points) - point_indices = uncertainty_map.topk(num_points, dim=1)[1] - xs = w_step / 2.0 + (point_indices % mask_width).float() * w_step - ys = h_step / 2.0 + (point_indices // mask_width).float() * h_step - point_coords = torch.stack([xs, ys], dim=2) - return point_indices, point_coords diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/maskiou_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/maskiou_head.py deleted file mode 100644 index a7ff7c7c4e70bd3c033731f9bc0bf40ca74a4bba..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/maskiou_head.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch -import torch.nn as nn -from mmcv.cnn import Conv2d, Linear, MaxPool2d -from mmcv.runner import BaseModule, force_fp32 -from torch.nn.modules.utils import _pair - -from mmdet.models.builder import HEADS, build_loss - - -@HEADS.register_module() -class MaskIoUHead(BaseModule): - """Mask IoU Head. - - This head predicts the IoU of predicted masks and corresponding gt masks. 
- """ - - def __init__(self, - num_convs=4, - num_fcs=2, - roi_feat_size=14, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - num_classes=80, - loss_iou=dict(type='MSELoss', loss_weight=0.5), - init_cfg=[ - dict(type='Kaiming', override=dict(name='convs')), - dict(type='Caffe2Xavier', override=dict(name='fcs')), - dict( - type='Normal', - std=0.01, - override=dict(name='fc_mask_iou')) - ]): - super(MaskIoUHead, self).__init__(init_cfg) - self.in_channels = in_channels - self.conv_out_channels = conv_out_channels - self.fc_out_channels = fc_out_channels - self.num_classes = num_classes - self.fp16_enabled = False - - self.convs = nn.ModuleList() - for i in range(num_convs): - if i == 0: - # concatenation of mask feature and mask prediction - in_channels = self.in_channels + 1 - else: - in_channels = self.conv_out_channels - stride = 2 if i == num_convs - 1 else 1 - self.convs.append( - Conv2d( - in_channels, - self.conv_out_channels, - 3, - stride=stride, - padding=1)) - - roi_feat_size = _pair(roi_feat_size) - pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2) - self.fcs = nn.ModuleList() - for i in range(num_fcs): - in_channels = ( - self.conv_out_channels * - pooled_area if i == 0 else self.fc_out_channels) - self.fcs.append(Linear(in_channels, self.fc_out_channels)) - - self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes) - self.relu = nn.ReLU() - self.max_pool = MaxPool2d(2, 2) - self.loss_iou = build_loss(loss_iou) - - def forward(self, mask_feat, mask_pred): - mask_pred = mask_pred.sigmoid() - mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1)) - - x = torch.cat((mask_feat, mask_pred_pooled), 1) - - for conv in self.convs: - x = self.relu(conv(x)) - x = x.flatten(1) - for fc in self.fcs: - x = self.relu(fc(x)) - mask_iou = self.fc_mask_iou(x) - return mask_iou - - @force_fp32(apply_to=('mask_iou_pred', )) - def loss(self, mask_iou_pred, mask_iou_targets): - pos_inds = mask_iou_targets > 0 - if pos_inds.sum() > 0: - loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds], - mask_iou_targets[pos_inds]) - else: - loss_mask_iou = mask_iou_pred.sum() * 0 - return dict(loss_mask_iou=loss_mask_iou) - - @force_fp32(apply_to=('mask_pred', )) - def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targets, - rcnn_train_cfg): - """Compute target of mask IoU. - - Mask IoU target is the IoU of the predicted mask (inside a bbox) and - the gt mask of corresponding gt mask (the whole instance). - The intersection area is computed inside the bbox, and the gt mask area - is computed with two steps, firstly we compute the gt area inside the - bbox, then divide it by the area ratio of gt area inside the bbox and - the gt area of the whole instance. - - Args: - sampling_results (list[:obj:`SamplingResult`]): sampling results. - gt_masks (BitmapMask | PolygonMask): Gt masks (the whole instance) - of each image, with the same shape of the input image. - mask_pred (Tensor): Predicted masks of each positive proposal, - shape (num_pos, h, w). - mask_targets (Tensor): Gt mask of each positive proposal, - binary map of the shape (num_pos, h, w). - rcnn_train_cfg (dict): Training config for R-CNN part. - - Returns: - Tensor: mask iou target (length == num positive). 
- """ - pos_proposals = [res.pos_bboxes for res in sampling_results] - pos_assigned_gt_inds = [ - res.pos_assigned_gt_inds for res in sampling_results - ] - - # compute the area ratio of gt areas inside the proposals and - # the whole instance - area_ratios = map(self._get_area_ratio, pos_proposals, - pos_assigned_gt_inds, gt_masks) - area_ratios = torch.cat(list(area_ratios)) - assert mask_targets.size(0) == area_ratios.size(0) - - mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float() - mask_pred_areas = mask_pred.sum((-1, -2)) - - # mask_pred and mask_targets are binary maps - overlap_areas = (mask_pred * mask_targets).sum((-1, -2)) - - # compute the mask area of the whole instance - gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7) - - mask_iou_targets = overlap_areas / ( - mask_pred_areas + gt_full_areas - overlap_areas) - return mask_iou_targets - - def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks): - """Compute area ratio of the gt mask inside the proposal and the gt - mask of the corresponding instance.""" - num_pos = pos_proposals.size(0) - if num_pos > 0: - area_ratios = [] - proposals_np = pos_proposals.cpu().numpy() - pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() - # compute mask areas of gt instances (batch processing for speedup) - gt_instance_mask_area = gt_masks.areas - for i in range(num_pos): - gt_mask = gt_masks[pos_assigned_gt_inds[i]] - - # crop the gt mask inside the proposal - bbox = proposals_np[i, :].astype(np.int32) - gt_mask_in_proposal = gt_mask.crop(bbox) - - ratio = gt_mask_in_proposal.areas[0] / ( - gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7) - area_ratios.append(ratio) - area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to( - pos_proposals.device) - else: - area_ratios = pos_proposals.new_zeros((0, )) - return area_ratios - - @force_fp32(apply_to=('mask_iou_pred', )) - def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels): - """Get the mask scores. - - mask_score = bbox_score * mask_iou - """ - inds = range(det_labels.size(0)) - mask_scores = mask_iou_pred[inds, det_labels] * det_bboxes[inds, -1] - mask_scores = mask_scores.cpu().numpy() - det_labels = det_labels.cpu().numpy() - return [mask_scores[det_labels == i] for i in range(self.num_classes)] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py deleted file mode 100644 index ca62486615a3c99fe09f4c71758b4dd01dc2fc3a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmdet.models.builder import HEADS -from mmdet.models.utils import ResLayer, SimplifiedBasicBlock -from .fcn_mask_head import FCNMaskHead - - -@HEADS.register_module() -class SCNetMaskHead(FCNMaskHead): - """Mask head for `SCNet `_. - - Args: - conv_to_res (bool, optional): if True, change the conv layers to - ``SimplifiedBasicBlock``. 
- """ - - def __init__(self, conv_to_res=True, **kwargs): - super(SCNetMaskHead, self).__init__(**kwargs) - self.conv_to_res = conv_to_res - if conv_to_res: - assert self.conv_kernel_size == 3 - self.num_res_blocks = self.num_convs // 2 - self.convs = ResLayer( - SimplifiedBasicBlock, - self.in_channels, - self.conv_out_channels, - self.num_res_blocks, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py deleted file mode 100644 index 2b8c5c32bbb7604426d774674eb9fecb51e1d789..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmdet.models.builder import HEADS -from mmdet.models.utils import ResLayer, SimplifiedBasicBlock -from .fused_semantic_head import FusedSemanticHead - - -@HEADS.register_module() -class SCNetSemanticHead(FusedSemanticHead): - """Mask head for `SCNet `_. - - Args: - conv_to_res (bool, optional): if True, change the conv layers to - ``SimplifiedBasicBlock``. - """ - - def __init__(self, conv_to_res=True, **kwargs): - super(SCNetSemanticHead, self).__init__(**kwargs) - self.conv_to_res = conv_to_res - if self.conv_to_res: - num_res_blocks = self.num_convs // 2 - self.convs = ResLayer( - SimplifiedBasicBlock, - self.in_channels, - self.conv_out_channels, - num_res_blocks, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg) - self.num_convs = num_res_blocks diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_scoring_roi_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_scoring_roi_head.py deleted file mode 100644 index 4617988e30abebe9ede13e04dda72632724ce159..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/mask_scoring_roi_head.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from mmdet.core import bbox2roi -from ..builder import HEADS, build_head -from .standard_roi_head import StandardRoIHead - - -@HEADS.register_module() -class MaskScoringRoIHead(StandardRoIHead): - """Mask Scoring RoIHead for Mask Scoring RCNN. 
- - https://arxiv.org/abs/1903.00241 - """ - - def __init__(self, mask_iou_head, **kwargs): - assert mask_iou_head is not None - super(MaskScoringRoIHead, self).__init__(**kwargs) - self.mask_iou_head = build_head(mask_iou_head) - - def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, - img_metas): - """Run forward function and calculate loss for Mask head in - training.""" - pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) - mask_results = super(MaskScoringRoIHead, - self)._mask_forward_train(x, sampling_results, - bbox_feats, gt_masks, - img_metas) - if mask_results['loss_mask'] is None: - return mask_results - - # mask iou head forward and loss - pos_mask_pred = mask_results['mask_pred'][ - range(mask_results['mask_pred'].size(0)), pos_labels] - mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'], - pos_mask_pred) - pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), - pos_labels] - - mask_iou_targets = self.mask_iou_head.get_targets( - sampling_results, gt_masks, pos_mask_pred, - mask_results['mask_targets'], self.train_cfg) - loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred, - mask_iou_targets) - mask_results['loss_mask'].update(loss_mask_iou) - return mask_results - - def simple_test_mask(self, - x, - img_metas, - det_bboxes, - det_labels, - rescale=False): - """Obtain mask prediction without augmentation.""" - # image shapes of images in the batch - ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - num_imgs = len(det_bboxes) - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - num_classes = self.mask_head.num_classes - segm_results = [[[] for _ in range(num_classes)] - for _ in range(num_imgs)] - mask_scores = [[[] for _ in range(num_classes)] - for _ in range(num_imgs)] - else: - # if det_bboxes is rescaled to the original image size, we need to - # rescale it back to the testing scale to obtain RoIs. 
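# When rescale=True the detected boxes are in original-image coordinates, so
# the per-image scale_factor (converted from ndarray to tensor if needed) is
# multiplied back in to obtain RoIs at the network input resolution.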
- if rescale and not isinstance(scale_factors[0], float): - scale_factors = [ - torch.from_numpy(scale_factor).to(det_bboxes[0].device) - for scale_factor in scale_factors - ] - _bboxes = [ - det_bboxes[i][:, :4] * - scale_factors[i] if rescale else det_bboxes[i] - for i in range(num_imgs) - ] - mask_rois = bbox2roi(_bboxes) - mask_results = self._mask_forward(x, mask_rois) - concat_det_labels = torch.cat(det_labels) - # get mask scores with mask iou head - mask_feats = mask_results['mask_feats'] - mask_pred = mask_results['mask_pred'] - mask_iou_pred = self.mask_iou_head( - mask_feats, mask_pred[range(concat_det_labels.size(0)), - concat_det_labels]) - # split batch mask prediction back to each image - num_bboxes_per_img = tuple(len(_bbox) for _bbox in _bboxes) - mask_preds = mask_pred.split(num_bboxes_per_img, 0) - mask_iou_preds = mask_iou_pred.split(num_bboxes_per_img, 0) - - # apply mask post-processing to each image individually - segm_results = [] - mask_scores = [] - for i in range(num_imgs): - if det_bboxes[i].shape[0] == 0: - segm_results.append( - [[] for _ in range(self.mask_head.num_classes)]) - mask_scores.append( - [[] for _ in range(self.mask_head.num_classes)]) - else: - segm_result = self.mask_head.get_seg_masks( - mask_preds[i], _bboxes[i], det_labels[i], - self.test_cfg, ori_shapes[i], scale_factors[i], - rescale) - # get mask scores with mask iou head - mask_score = self.mask_iou_head.get_mask_scores( - mask_iou_preds[i], det_bboxes[i], det_labels[i]) - segm_results.append(segm_result) - mask_scores.append(mask_score) - return list(zip(segm_results, mask_scores)) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/pisa_roi_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/pisa_roi_head.py deleted file mode 100644 index 92a51186e28bf25ba71474536fc211037999d0f8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/pisa_roi_head.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmdet.core import bbox2roi -from ..builder import HEADS -from ..losses.pisa_loss import carl_loss, isr_p -from .standard_roi_head import StandardRoIHead - - -@HEADS.register_module() -class PISARoIHead(StandardRoIHead): - r"""The RoI head for `Prime Sample Attention in Object Detection - `_.""" - - def forward_train(self, - x, - img_metas, - proposal_list, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None): - """Forward function for training. - - Args: - x (list[Tensor]): List of multi-level img features. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - proposals (list[Tensors]): List of region proposals. - gt_bboxes (list[Tensor]): Each item are the truth boxes for each - image in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): Class indices corresponding to each box - gt_bboxes_ignore (list[Tensor], optional): Specify which bounding - boxes can be ignored when computing the loss. - gt_masks (None | Tensor) : True segmentation masks for each box - used if the architecture supports a segmentation task. 
- - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - # assign gts and sample proposals - if self.with_bbox or self.with_mask: - num_imgs = len(img_metas) - if gt_bboxes_ignore is None: - gt_bboxes_ignore = [None for _ in range(num_imgs)] - sampling_results = [] - neg_label_weights = [] - for i in range(num_imgs): - assign_result = self.bbox_assigner.assign( - proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], - gt_labels[i]) - sampling_result = self.bbox_sampler.sample( - assign_result, - proposal_list[i], - gt_bboxes[i], - gt_labels[i], - feats=[lvl_feat[i][None] for lvl_feat in x]) - # neg label weight is obtained by sampling when using ISR-N - neg_label_weight = None - if isinstance(sampling_result, tuple): - sampling_result, neg_label_weight = sampling_result - sampling_results.append(sampling_result) - neg_label_weights.append(neg_label_weight) - - losses = dict() - # bbox head forward and loss - if self.with_bbox: - bbox_results = self._bbox_forward_train( - x, - sampling_results, - gt_bboxes, - gt_labels, - img_metas, - neg_label_weights=neg_label_weights) - losses.update(bbox_results['loss_bbox']) - - # mask head forward and loss - if self.with_mask: - mask_results = self._mask_forward_train(x, sampling_results, - bbox_results['bbox_feats'], - gt_masks, img_metas) - losses.update(mask_results['loss_mask']) - - return losses - - def _bbox_forward(self, x, rois): - """Box forward function used in both training and testing.""" - # TODO: a more flexible way to decide which feature maps to use - bbox_feats = self.bbox_roi_extractor( - x[:self.bbox_roi_extractor.num_inputs], rois) - if self.with_shared_head: - bbox_feats = self.shared_head(bbox_feats) - cls_score, bbox_pred = self.bbox_head(bbox_feats) - - bbox_results = dict( - cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) - return bbox_results - - def _bbox_forward_train(self, - x, - sampling_results, - gt_bboxes, - gt_labels, - img_metas, - neg_label_weights=None): - """Run forward function and calculate loss for box head in training.""" - rois = bbox2roi([res.bboxes for res in sampling_results]) - - bbox_results = self._bbox_forward(x, rois) - - bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, - gt_labels, self.train_cfg) - - # neg_label_weights obtained by sampler is image-wise, mapping back to - # the corresponding location in label weights - if neg_label_weights[0] is not None: - label_weights = bbox_targets[1] - cur_num_rois = 0 - for i in range(len(sampling_results)): - num_pos = sampling_results[i].pos_inds.size(0) - num_neg = sampling_results[i].neg_inds.size(0) - label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos + - num_neg] = neg_label_weights[i] - cur_num_rois += num_pos + num_neg - - cls_score = bbox_results['cls_score'] - bbox_pred = bbox_results['bbox_pred'] - - # Apply ISR-P - isr_cfg = self.train_cfg.get('isr', None) - if isr_cfg is not None: - bbox_targets = isr_p( - cls_score, - bbox_pred, - bbox_targets, - rois, - sampling_results, - self.bbox_head.loss_cls, - self.bbox_head.bbox_coder, - **isr_cfg, - num_class=self.bbox_head.num_classes) - loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois, - *bbox_targets) - - # Add CARL Loss - carl_cfg = self.train_cfg.get('carl', None) - if carl_cfg is not None: - loss_carl = carl_loss( - cls_score, - bbox_targets[0], - bbox_pred, - bbox_targets[2], - self.bbox_head.loss_bbox, - **carl_cfg, - num_class=self.bbox_head.num_classes) - loss_bbox.update(loss_carl) - - 
bbox_results.update(loss_bbox=loss_bbox) - return bbox_results diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/point_rend_roi_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/point_rend_roi_head.py deleted file mode 100644 index 9f667793f48abd948592d1c0f50f8975ae2c4b89..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/point_rend_roi_head.py +++ /dev/null @@ -1,393 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa -import os -import warnings - -import numpy as np -import torch -import torch.nn.functional as F -from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point - -from mmdet.core import bbox2roi, bbox_mapping, merge_aug_masks -from .. import builder -from ..builder import HEADS -from .standard_roi_head import StandardRoIHead - - -@HEADS.register_module() -class PointRendRoIHead(StandardRoIHead): - """`PointRend `_.""" - - def __init__(self, point_head, *args, **kwargs): - super().__init__(*args, **kwargs) - assert self.with_bbox and self.with_mask - self.init_point_head(point_head) - - def init_point_head(self, point_head): - """Initialize ``point_head``""" - self.point_head = builder.build_head(point_head) - - def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, - img_metas): - """Run forward function and calculate loss for mask head and point head - in training.""" - mask_results = super()._mask_forward_train(x, sampling_results, - bbox_feats, gt_masks, - img_metas) - if mask_results['loss_mask'] is not None: - loss_point = self._mask_point_forward_train( - x, sampling_results, mask_results['mask_pred'], gt_masks, - img_metas) - mask_results['loss_mask'].update(loss_point) - - return mask_results - - def _mask_point_forward_train(self, x, sampling_results, mask_pred, - gt_masks, img_metas): - """Run forward function and calculate loss for point head in - training.""" - pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) - rel_roi_points = self.point_head.get_roi_rel_points_train( - mask_pred, pos_labels, cfg=self.train_cfg) - rois = bbox2roi([res.pos_bboxes for res in sampling_results]) - - fine_grained_point_feats = self._get_fine_grained_point_feats( - x, rois, rel_roi_points, img_metas) - coarse_point_feats = point_sample(mask_pred, rel_roi_points) - mask_point_pred = self.point_head(fine_grained_point_feats, - coarse_point_feats) - mask_point_target = self.point_head.get_targets( - rois, rel_roi_points, sampling_results, gt_masks, self.train_cfg) - loss_mask_point = self.point_head.loss(mask_point_pred, - mask_point_target, pos_labels) - - return loss_mask_point - - def _get_fine_grained_point_feats(self, x, rois, rel_roi_points, - img_metas): - """Sample fine grained feats from each level feature map and - concatenate them together. - - Args: - x (tuple[Tensor]): Feature maps of all scale level. - rois (Tensor): shape (num_rois, 5). - rel_roi_points (Tensor): A tensor of shape (num_rois, num_points, - 2) that contains [0, 1] x [0, 1] normalized coordinates of the - most uncertain points from the [mask_height, mask_width] grid. - img_metas (list[dict]): Image meta info. - - Returns: - Tensor: The fine grained features for each points, - has shape (num_rois, feats_channels, num_points). - """ - num_imgs = len(img_metas) - fine_grained_feats = [] - for idx in range(self.mask_roi_extractor.num_inputs): - feats = x[idx] - spatial_scale = 1. 
/ float( - self.mask_roi_extractor.featmap_strides[idx]) - point_feats = [] - for batch_ind in range(num_imgs): - # unravel batch dim - feat = feats[batch_ind].unsqueeze(0) - inds = (rois[:, 0].long() == batch_ind) - if inds.any(): - rel_img_points = rel_roi_point_to_rel_img_point( - rois[inds], rel_roi_points[inds], feat.shape[2:], - spatial_scale).unsqueeze(0) - point_feat = point_sample(feat, rel_img_points) - point_feat = point_feat.squeeze(0).transpose(0, 1) - point_feats.append(point_feat) - fine_grained_feats.append(torch.cat(point_feats, dim=0)) - return torch.cat(fine_grained_feats, dim=1) - - def _mask_point_forward_test(self, x, rois, label_pred, mask_pred, - img_metas): - """Mask refining process with point head in testing. - - Args: - x (tuple[Tensor]): Feature maps of all scale level. - rois (Tensor): shape (num_rois, 5). - label_pred (Tensor): The predication class for each rois. - mask_pred (Tensor): The predication coarse masks of - shape (num_rois, num_classes, small_size, small_size). - img_metas (list[dict]): Image meta info. - - Returns: - Tensor: The refined masks of shape (num_rois, num_classes, - large_size, large_size). - """ - refined_mask_pred = mask_pred.clone() - for subdivision_step in range(self.test_cfg.subdivision_steps): - refined_mask_pred = F.interpolate( - refined_mask_pred, - scale_factor=self.test_cfg.scale_factor, - mode='bilinear', - align_corners=False) - # If `subdivision_num_points` is larger or equal to the - # resolution of the next step, then we can skip this step - num_rois, channels, mask_height, mask_width = \ - refined_mask_pred.shape - if (self.test_cfg.subdivision_num_points >= - self.test_cfg.scale_factor**2 * mask_height * mask_width - and - subdivision_step < self.test_cfg.subdivision_steps - 1): - continue - point_indices, rel_roi_points = \ - self.point_head.get_roi_rel_points_test( - refined_mask_pred, label_pred, cfg=self.test_cfg) - fine_grained_point_feats = self._get_fine_grained_point_feats( - x, rois, rel_roi_points, img_metas) - coarse_point_feats = point_sample(mask_pred, rel_roi_points) - mask_point_pred = self.point_head(fine_grained_point_feats, - coarse_point_feats) - - point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1) - refined_mask_pred = refined_mask_pred.reshape( - num_rois, channels, mask_height * mask_width) - refined_mask_pred = refined_mask_pred.scatter_( - 2, point_indices, mask_point_pred) - refined_mask_pred = refined_mask_pred.view(num_rois, channels, - mask_height, mask_width) - - return refined_mask_pred - - def simple_test_mask(self, - x, - img_metas, - det_bboxes, - det_labels, - rescale=False): - """Obtain mask prediction without augmentation.""" - ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - if isinstance(scale_factors[0], float): - warnings.warn( - 'Scale factor in img_metas should be a ' - 'ndarray with shape (4,) ' - 'arrange as (factor_w, factor_h, factor_w, factor_h), ' - 'The scale_factor with float type has been deprecated. ') - scale_factors = np.array([scale_factors] * 4, dtype=np.float32) - - num_imgs = len(det_bboxes) - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - segm_results = [[[] for _ in range(self.mask_head.num_classes)] - for _ in range(num_imgs)] - else: - # if det_bboxes is rescaled to the original image size, we need to - # rescale it back to the testing scale to obtain RoIs. 
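# The coarse mask_pred obtained below is then refined by
# _mask_point_forward_test(): at each subdivision step the mask is upsampled by
# test_cfg.scale_factor, the test_cfg.subdivision_num_points most uncertain
# locations are re-classified by the point head, and their logits are scattered
# back into the upsampled mask.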
- _bboxes = [det_bboxes[i][:, :4] for i in range(len(det_bboxes))] - if rescale: - scale_factors = [ - torch.from_numpy(scale_factor).to(det_bboxes[0].device) - for scale_factor in scale_factors - ] - _bboxes = [ - _bboxes[i] * scale_factors[i] for i in range(len(_bboxes)) - ] - - mask_rois = bbox2roi(_bboxes) - mask_results = self._mask_forward(x, mask_rois) - # split batch mask prediction back to each image - mask_pred = mask_results['mask_pred'] - num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes] - mask_preds = mask_pred.split(num_mask_roi_per_img, 0) - mask_rois = mask_rois.split(num_mask_roi_per_img, 0) - - # apply mask post-processing to each image individually - segm_results = [] - for i in range(num_imgs): - if det_bboxes[i].shape[0] == 0: - segm_results.append( - [[] for _ in range(self.mask_head.num_classes)]) - else: - x_i = [xx[[i]] for xx in x] - mask_rois_i = mask_rois[i] - mask_rois_i[:, 0] = 0 # TODO: remove this hack - mask_pred_i = self._mask_point_forward_test( - x_i, mask_rois_i, det_labels[i], mask_preds[i], - [img_metas]) - segm_result = self.mask_head.get_seg_masks( - mask_pred_i, _bboxes[i], det_labels[i], self.test_cfg, - ori_shapes[i], scale_factors[i], rescale) - segm_results.append(segm_result) - return segm_results - - def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): - """Test for mask head with test time augmentation.""" - if det_bboxes.shape[0] == 0: - segm_result = [[] for _ in range(self.mask_head.num_classes)] - else: - aug_masks = [] - for x, img_meta in zip(feats, img_metas): - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, - scale_factor, flip) - mask_rois = bbox2roi([_bboxes]) - mask_results = self._mask_forward(x, mask_rois) - mask_results['mask_pred'] = self._mask_point_forward_test( - x, mask_rois, det_labels, mask_results['mask_pred'], - img_meta) - # convert to numpy array to save memory - aug_masks.append( - mask_results['mask_pred'].sigmoid().cpu().numpy()) - merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) - - ori_shape = img_metas[0][0]['ori_shape'] - segm_result = self.mask_head.get_seg_masks( - merged_masks, - det_bboxes, - det_labels, - self.test_cfg, - ori_shape, - scale_factor=1.0, - rescale=False) - return segm_result - - def _onnx_get_fine_grained_point_feats(self, x, rois, rel_roi_points): - """Export the process of sampling fine grained feats to onnx. - - Args: - x (tuple[Tensor]): Feature maps of all scale level. - rois (Tensor): shape (num_rois, 5). - rel_roi_points (Tensor): A tensor of shape (num_rois, num_points, - 2) that contains [0, 1] x [0, 1] normalized coordinates of the - most uncertain points from the [mask_height, mask_width] grid. - - Returns: - Tensor: The fine grained features for each points, - has shape (num_rois, feats_channels, num_points). - """ - batch_size = x[0].shape[0] - num_rois = rois.shape[0] - fine_grained_feats = [] - for idx in range(self.mask_roi_extractor.num_inputs): - feats = x[idx] - spatial_scale = 1. 
/ float( - self.mask_roi_extractor.featmap_strides[idx]) - - rel_img_points = rel_roi_point_to_rel_img_point( - rois, rel_roi_points, feats, spatial_scale) - channels = feats.shape[1] - num_points = rel_img_points.shape[1] - rel_img_points = rel_img_points.reshape(batch_size, -1, num_points, - 2) - point_feats = point_sample(feats, rel_img_points) - point_feats = point_feats.transpose(1, 2).reshape( - num_rois, channels, num_points) - fine_grained_feats.append(point_feats) - return torch.cat(fine_grained_feats, dim=1) - - def _mask_point_onnx_export(self, x, rois, label_pred, mask_pred): - """Export mask refining process with point head to onnx. - - Args: - x (tuple[Tensor]): Feature maps of all scale level. - rois (Tensor): shape (num_rois, 5). - label_pred (Tensor): The predication class for each rois. - mask_pred (Tensor): The predication coarse masks of - shape (num_rois, num_classes, small_size, small_size). - - Returns: - Tensor: The refined masks of shape (num_rois, num_classes, - large_size, large_size). - """ - refined_mask_pred = mask_pred.clone() - for subdivision_step in range(self.test_cfg.subdivision_steps): - refined_mask_pred = F.interpolate( - refined_mask_pred, - scale_factor=self.test_cfg.scale_factor, - mode='bilinear', - align_corners=False) - # If `subdivision_num_points` is larger or equal to the - # resolution of the next step, then we can skip this step - num_rois, channels, mask_height, mask_width = \ - refined_mask_pred.shape - if (self.test_cfg.subdivision_num_points >= - self.test_cfg.scale_factor**2 * mask_height * mask_width - and - subdivision_step < self.test_cfg.subdivision_steps - 1): - continue - point_indices, rel_roi_points = \ - self.point_head.get_roi_rel_points_test( - refined_mask_pred, label_pred, cfg=self.test_cfg) - fine_grained_point_feats = self._onnx_get_fine_grained_point_feats( - x, rois, rel_roi_points) - coarse_point_feats = point_sample(mask_pred, rel_roi_points) - mask_point_pred = self.point_head(fine_grained_point_feats, - coarse_point_feats) - - point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1) - refined_mask_pred = refined_mask_pred.reshape( - num_rois, channels, mask_height * mask_width) - - is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT' - # avoid ScatterElements op in ONNX for TensorRT - if is_trt_backend: - mask_shape = refined_mask_pred.shape - point_shape = point_indices.shape - inds_dim0 = torch.arange(point_shape[0]).reshape( - point_shape[0], 1, 1).expand_as(point_indices) - inds_dim1 = torch.arange(point_shape[1]).reshape( - 1, point_shape[1], 1).expand_as(point_indices) - inds_1d = inds_dim0.reshape( - -1) * mask_shape[1] * mask_shape[2] + inds_dim1.reshape( - -1) * mask_shape[2] + point_indices.reshape(-1) - refined_mask_pred = refined_mask_pred.reshape(-1) - refined_mask_pred[inds_1d] = mask_point_pred.reshape(-1) - refined_mask_pred = refined_mask_pred.reshape(*mask_shape) - else: - refined_mask_pred = refined_mask_pred.scatter_( - 2, point_indices, mask_point_pred) - - refined_mask_pred = refined_mask_pred.view(num_rois, channels, - mask_height, mask_width) - - return refined_mask_pred - - def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs): - """Export mask branch to onnx which supports batch inference. - - Args: - x (tuple[Tensor]): Feature maps of all scale level. - img_metas (list[dict]): Image meta info. - det_bboxes (Tensor): Bboxes and corresponding scores. - has shape [N, num_bboxes, 5]. - det_labels (Tensor): class labels of - shape [N, num_bboxes]. 
- - Returns: - Tensor: The segmentation results of shape [N, num_bboxes, - image_height, image_width]. - """ - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - raise RuntimeError('[ONNX Error] Can not record MaskHead ' - 'as it has not been executed this time') - batch_size = det_bboxes.size(0) - # if det_bboxes is rescaled to the original image size, we need to - # rescale it back to the testing scale to obtain RoIs. - det_bboxes = det_bboxes[..., :4] - batch_index = torch.arange( - det_bboxes.size(0), device=det_bboxes.device).float().view( - -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1) - mask_rois = torch.cat([batch_index, det_bboxes], dim=-1) - mask_rois = mask_rois.view(-1, 5) - mask_results = self._mask_forward(x, mask_rois) - mask_pred = mask_results['mask_pred'] - max_shape = img_metas[0]['img_shape_for_onnx'] - num_det = det_bboxes.shape[1] - det_bboxes = det_bboxes.reshape(-1, 4) - det_labels = det_labels.reshape(-1) - - mask_pred = self._mask_point_onnx_export(x, mask_rois, det_labels, - mask_pred) - - segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes, - det_labels, self.test_cfg, - max_shape) - segm_results = segm_results.reshape(batch_size, num_det, max_shape[0], - max_shape[1]) - return segm_results diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/roi_extractors/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/roi_extractors/__init__.py deleted file mode 100644 index 0f60214991b0ed14cdbc3964aee15356c6aaf2aa..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/roi_extractors/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .base_roi_extractor import BaseRoIExtractor -from .generic_roi_extractor import GenericRoIExtractor -from .single_level_roi_extractor import SingleRoIExtractor - -__all__ = ['BaseRoIExtractor', 'SingleRoIExtractor', 'GenericRoIExtractor'] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py deleted file mode 100644 index 82629757decc4bc4c374369641f4b742abd47c12..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta, abstractmethod - -import torch -import torch.nn as nn -from mmcv import ops -from mmcv.runner import BaseModule - - -class BaseRoIExtractor(BaseModule, metaclass=ABCMeta): - """Base class for RoI extractor. - - Args: - roi_layer (dict): Specify RoI layer type and arguments. - out_channels (int): Output channels of RoI layers. - featmap_strides (int): Strides of input feature maps. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - roi_layer, - out_channels, - featmap_strides, - init_cfg=None): - super(BaseRoIExtractor, self).__init__(init_cfg) - self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides) - self.out_channels = out_channels - self.featmap_strides = featmap_strides - self.fp16_enabled = False - - @property - def num_inputs(self): - """int: Number of input feature maps.""" - return len(self.featmap_strides) - - def build_roi_layers(self, layer_cfg, featmap_strides): - """Build RoI operator to extract feature from each level feature map. 
- - Args: - layer_cfg (dict): Dictionary to construct and config RoI layer - operation. Options are modules under ``mmcv/ops`` such as - ``RoIAlign``. - featmap_strides (List[int]): The stride of input feature map w.r.t - to the original image size, which would be used to scale RoI - coordinate (original image coordinate system) to feature - coordinate system. - - Returns: - nn.ModuleList: The RoI extractor modules for each level feature - map. - """ - - cfg = layer_cfg.copy() - layer_type = cfg.pop('type') - assert hasattr(ops, layer_type) - layer_cls = getattr(ops, layer_type) - roi_layers = nn.ModuleList( - [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides]) - return roi_layers - - def roi_rescale(self, rois, scale_factor): - """Scale RoI coordinates by scale factor. - - Args: - rois (torch.Tensor): RoI (Region of Interest), shape (n, 5) - scale_factor (float): Scale factor that RoI will be multiplied by. - - Returns: - torch.Tensor: Scaled RoI. - """ - - cx = (rois[:, 1] + rois[:, 3]) * 0.5 - cy = (rois[:, 2] + rois[:, 4]) * 0.5 - w = rois[:, 3] - rois[:, 1] - h = rois[:, 4] - rois[:, 2] - new_w = w * scale_factor - new_h = h * scale_factor - x1 = cx - new_w * 0.5 - x2 = cx + new_w * 0.5 - y1 = cy - new_h * 0.5 - y2 = cy + new_h * 0.5 - new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1) - return new_rois - - @abstractmethod - def forward(self, feats, rois, roi_scale_factor=None): - pass diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py deleted file mode 100644 index 89a9f891e1e5aa52d85531dc62e7f518124df2f4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmcv.cnn.bricks import build_plugin_layer -from mmcv.runner import force_fp32 - -from mmdet.models.builder import ROI_EXTRACTORS -from .base_roi_extractor import BaseRoIExtractor - - -@ROI_EXTRACTORS.register_module() -class GenericRoIExtractor(BaseRoIExtractor): - """Extract RoI features from all level feature maps levels. - - This is the implementation of `A novel Region of Interest Extraction Layer - for Instance Segmentation `_. - - Args: - aggregation (str): The method to aggregate multiple feature maps. - Options are 'sum', 'concat'. Default: 'sum'. - pre_cfg (dict | None): Specify pre-processing modules. Default: None. - post_cfg (dict | None): Specify post-processing modules. Default: None. - kwargs (keyword arguments): Arguments that are the same - as :class:`BaseRoIExtractor`. 
- """ - - def __init__(self, - aggregation='sum', - pre_cfg=None, - post_cfg=None, - **kwargs): - super(GenericRoIExtractor, self).__init__(**kwargs) - - assert aggregation in ['sum', 'concat'] - - self.aggregation = aggregation - self.with_post = post_cfg is not None - self.with_pre = pre_cfg is not None - # build pre/post processing modules - if self.with_post: - self.post_module = build_plugin_layer(post_cfg, '_post_module')[1] - if self.with_pre: - self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1] - - @force_fp32(apply_to=('feats', ), out_fp16=True) - def forward(self, feats, rois, roi_scale_factor=None): - """Forward function.""" - if len(feats) == 1: - return self.roi_layers[0](feats[0], rois) - - out_size = self.roi_layers[0].output_size - num_levels = len(feats) - roi_feats = feats[0].new_zeros( - rois.size(0), self.out_channels, *out_size) - - # some times rois is an empty tensor - if roi_feats.shape[0] == 0: - return roi_feats - - if roi_scale_factor is not None: - rois = self.roi_rescale(rois, roi_scale_factor) - - # mark the starting channels for concat mode - start_channels = 0 - for i in range(num_levels): - roi_feats_t = self.roi_layers[i](feats[i], rois) - end_channels = start_channels + roi_feats_t.size(1) - if self.with_pre: - # apply pre-processing to a RoI extracted from each layer - roi_feats_t = self.pre_module(roi_feats_t) - if self.aggregation == 'sum': - # and sum them all - roi_feats = roi_feats + roi_feats_t - else: - # and concat them along channel dimension - roi_feats[:, start_channels:end_channels] = roi_feats_t - # update channels starting position - start_channels = end_channels - # check if concat channels match at the end - if self.aggregation == 'concat': - assert start_channels == self.out_channels - - if self.with_post: - # apply post-processing before return the result - roi_feats = self.post_module(roi_feats) - return roi_feats diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py deleted file mode 100644 index 23008245a9cb4c1a547996a39a67fcc60f76b9aa..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from mmcv.runner import force_fp32 - -from mmdet.models.builder import ROI_EXTRACTORS -from .base_roi_extractor import BaseRoIExtractor - - -@ROI_EXTRACTORS.register_module() -class SingleRoIExtractor(BaseRoIExtractor): - """Extract RoI features from a single level feature map. - - If there are multiple input feature levels, each RoI is mapped to a level - according to its scale. The mapping rule is proposed in - `FPN `_. - - Args: - roi_layer (dict): Specify RoI layer type and arguments. - out_channels (int): Output channels of RoI layers. - featmap_strides (List[int]): Strides of input feature maps. - finest_scale (int): Scale threshold of mapping to level 0. Default: 56. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - roi_layer, - out_channels, - featmap_strides, - finest_scale=56, - init_cfg=None): - super(SingleRoIExtractor, self).__init__(roi_layer, out_channels, - featmap_strides, init_cfg) - self.finest_scale = finest_scale - - def map_roi_levels(self, rois, num_levels): - """Map rois to corresponding feature levels by scales. - - - scale < finest_scale * 2: level 0 - - finest_scale * 2 <= scale < finest_scale * 4: level 1 - - finest_scale * 4 <= scale < finest_scale * 8: level 2 - - scale >= finest_scale * 8: level 3 - - Args: - rois (Tensor): Input RoIs, shape (k, 5). - num_levels (int): Total level number. - - Returns: - Tensor: Level index (0-based) of each RoI, shape (k, ) - """ - scale = torch.sqrt( - (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2])) - target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6)) - target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long() - return target_lvls - - @force_fp32(apply_to=('feats', ), out_fp16=True) - def forward(self, feats, rois, roi_scale_factor=None): - """Forward function.""" - out_size = self.roi_layers[0].output_size - num_levels = len(feats) - expand_dims = (-1, self.out_channels * out_size[0] * out_size[1]) - if torch.onnx.is_in_onnx_export(): - # Work around to export mask-rcnn to onnx - roi_feats = rois[:, :1].clone().detach() - roi_feats = roi_feats.expand(*expand_dims) - roi_feats = roi_feats.reshape(-1, self.out_channels, *out_size) - roi_feats = roi_feats * 0 - else: - roi_feats = feats[0].new_zeros( - rois.size(0), self.out_channels, *out_size) - # TODO: remove this when parrots supports - if torch.__version__ == 'parrots': - roi_feats.requires_grad = True - - if num_levels == 1: - if len(rois) == 0: - return roi_feats - return self.roi_layers[0](feats[0], rois) - - target_lvls = self.map_roi_levels(rois, num_levels) - - if roi_scale_factor is not None: - rois = self.roi_rescale(rois, roi_scale_factor) - - for i in range(num_levels): - mask = target_lvls == i - if torch.onnx.is_in_onnx_export(): - # To keep all roi_align nodes exported to onnx - # and skip nonzero op - mask = mask.float().unsqueeze(-1) - # select target level rois and reset the rest rois to zero. - rois_i = rois.clone().detach() - rois_i = rois_i * mask - mask_exp = mask.expand(*expand_dims).reshape(roi_feats.shape) - roi_feats_t = self.roi_layers[i](feats[i], rois_i) - roi_feats_t = roi_feats_t * mask_exp - roi_feats = roi_feats + roi_feats_t - continue - inds = mask.nonzero(as_tuple=False).squeeze(1) - if inds.numel() > 0: - rois_ = rois[inds] - roi_feats_t = self.roi_layers[i](feats[i], rois_) - roi_feats[inds] = roi_feats_t - else: - # Sometimes some pyramid levels will not be used for RoI - # feature extraction and this will cause an incomplete - # computation graph in one GPU, which is different from those - # in other GPUs and will cause a hanging error. - # Therefore, we add it to ensure each feature pyramid is - # included in the computation graph to avoid runtime bugs. - roi_feats = roi_feats + sum( - x.view(-1)[0] - for x in self.parameters()) * 0. + feats[i].sum() * 0. - return roi_feats diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/scnet_roi_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/scnet_roi_head.py deleted file mode 100644 index 32f56aa8a24d5f825351b714a99fce836eacbf18..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/scnet_roi_head.py +++ /dev/null @@ -1,605 +0,0 @@ -# Copyright (c) OpenMMLab. 
All rights reserved. -import numpy as np -import torch -import torch.nn.functional as F - -from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, - merge_aug_masks, multiclass_nms) -from ..builder import HEADS, build_head, build_roi_extractor -from ..utils.brick_wrappers import adaptive_avg_pool2d -from .cascade_roi_head import CascadeRoIHead - - -@HEADS.register_module() -class SCNetRoIHead(CascadeRoIHead): - """RoIHead for `SCNet `_. - - Args: - num_stages (int): number of cascade stages. - stage_loss_weights (list): loss weight of cascade stages. - semantic_roi_extractor (dict): config to init semantic roi extractor. - semantic_head (dict): config to init semantic head. - feat_relay_head (dict): config to init feature_relay_head. - glbctx_head (dict): config to init global context head. - """ - - def __init__(self, - num_stages, - stage_loss_weights, - semantic_roi_extractor=None, - semantic_head=None, - feat_relay_head=None, - glbctx_head=None, - **kwargs): - super(SCNetRoIHead, self).__init__(num_stages, stage_loss_weights, - **kwargs) - assert self.with_bbox and self.with_mask - assert not self.with_shared_head # shared head is not supported - - if semantic_head is not None: - self.semantic_roi_extractor = build_roi_extractor( - semantic_roi_extractor) - self.semantic_head = build_head(semantic_head) - - if feat_relay_head is not None: - self.feat_relay_head = build_head(feat_relay_head) - - if glbctx_head is not None: - self.glbctx_head = build_head(glbctx_head) - - def init_mask_head(self, mask_roi_extractor, mask_head): - """Initialize ``mask_head``""" - if mask_roi_extractor is not None: - self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) - self.mask_head = build_head(mask_head) - - @property - def with_semantic(self): - """bool: whether the head has semantic head""" - return hasattr(self, - 'semantic_head') and self.semantic_head is not None - - @property - def with_feat_relay(self): - """bool: whether the head has feature relay head""" - return (hasattr(self, 'feat_relay_head') - and self.feat_relay_head is not None) - - @property - def with_glbctx(self): - """bool: whether the head has global context head""" - return hasattr(self, 'glbctx_head') and self.glbctx_head is not None - - def _fuse_glbctx(self, roi_feats, glbctx_feat, rois): - """Fuse global context feats with roi feats.""" - assert roi_feats.size(0) == rois.size(0) - img_inds = torch.unique(rois[:, 0].cpu(), sorted=True).long() - fused_feats = torch.zeros_like(roi_feats) - for img_id in img_inds: - inds = (rois[:, 0] == img_id.item()) - fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id] - return fused_feats - - def _slice_pos_feats(self, feats, sampling_results): - """Get features from pos rois.""" - num_rois = [res.bboxes.size(0) for res in sampling_results] - num_pos_rois = [res.pos_bboxes.size(0) for res in sampling_results] - inds = torch.zeros(sum(num_rois), dtype=torch.bool) - start = 0 - for i in range(len(num_rois)): - start = 0 if i == 0 else start + num_rois[i - 1] - stop = start + num_pos_rois[i] - inds[start:stop] = 1 - sliced_feats = feats[inds] - return sliced_feats - - def _bbox_forward(self, - stage, - x, - rois, - semantic_feat=None, - glbctx_feat=None): - """Box head forward function used in both training and testing.""" - bbox_roi_extractor = self.bbox_roi_extractor[stage] - bbox_head = self.bbox_head[stage] - bbox_feats = bbox_roi_extractor( - x[:len(bbox_roi_extractor.featmap_strides)], rois) - if self.with_semantic and semantic_feat is not None: - 
bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], - rois) - if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: - bbox_semantic_feat = adaptive_avg_pool2d( - bbox_semantic_feat, bbox_feats.shape[-2:]) - bbox_feats = bbox_feats + bbox_semantic_feat - if self.with_glbctx and glbctx_feat is not None: - bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois) - cls_score, bbox_pred, relayed_feat = bbox_head( - bbox_feats, return_shared_feat=True) - - bbox_results = dict( - cls_score=cls_score, - bbox_pred=bbox_pred, - relayed_feat=relayed_feat) - return bbox_results - - def _mask_forward(self, - x, - rois, - semantic_feat=None, - glbctx_feat=None, - relayed_feat=None): - """Mask head forward function used in both training and testing.""" - mask_feats = self.mask_roi_extractor( - x[:self.mask_roi_extractor.num_inputs], rois) - if self.with_semantic and semantic_feat is not None: - mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], - rois) - if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: - mask_semantic_feat = F.adaptive_avg_pool2d( - mask_semantic_feat, mask_feats.shape[-2:]) - mask_feats = mask_feats + mask_semantic_feat - if self.with_glbctx and glbctx_feat is not None: - mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois) - if self.with_feat_relay and relayed_feat is not None: - mask_feats = mask_feats + relayed_feat - mask_pred = self.mask_head(mask_feats) - mask_results = dict(mask_pred=mask_pred) - - return mask_results - - def _bbox_forward_train(self, - stage, - x, - sampling_results, - gt_bboxes, - gt_labels, - rcnn_train_cfg, - semantic_feat=None, - glbctx_feat=None): - """Run forward function and calculate loss for box head in training.""" - bbox_head = self.bbox_head[stage] - rois = bbox2roi([res.bboxes for res in sampling_results]) - bbox_results = self._bbox_forward( - stage, - x, - rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - - bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes, - gt_labels, rcnn_train_cfg) - loss_bbox = bbox_head.loss(bbox_results['cls_score'], - bbox_results['bbox_pred'], rois, - *bbox_targets) - - bbox_results.update( - loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets) - return bbox_results - - def _mask_forward_train(self, - x, - sampling_results, - gt_masks, - rcnn_train_cfg, - semantic_feat=None, - glbctx_feat=None, - relayed_feat=None): - """Run forward function and calculate loss for mask head in - training.""" - pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) - mask_results = self._mask_forward( - x, - pos_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat, - relayed_feat=relayed_feat) - - mask_targets = self.mask_head.get_targets(sampling_results, gt_masks, - rcnn_train_cfg) - pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) - loss_mask = self.mask_head.loss(mask_results['mask_pred'], - mask_targets, pos_labels) - - mask_results = loss_mask - return mask_results - - def forward_train(self, - x, - img_metas, - proposal_list, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None, - gt_semantic_seg=None): - """ - Args: - x (list[Tensor]): list of multi-level img features. - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. 
- proposal_list (list[Tensors]): list of region proposals. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - gt_bboxes_ignore (None, list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - gt_masks (None, Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. - gt_semantic_seg (None, list[Tensor]): semantic segmentation masks - used if the architecture supports semantic segmentation task. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - losses = dict() - - # semantic segmentation branch - if self.with_semantic: - semantic_pred, semantic_feat = self.semantic_head(x) - loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg) - losses['loss_semantic_seg'] = loss_seg - else: - semantic_feat = None - - # global context branch - if self.with_glbctx: - mc_pred, glbctx_feat = self.glbctx_head(x) - loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels) - losses['loss_glbctx'] = loss_glbctx - else: - glbctx_feat = None - - for i in range(self.num_stages): - self.current_stage = i - rcnn_train_cfg = self.train_cfg[i] - lw = self.stage_loss_weights[i] - - # assign gts and sample proposals - sampling_results = [] - bbox_assigner = self.bbox_assigner[i] - bbox_sampler = self.bbox_sampler[i] - num_imgs = len(img_metas) - if gt_bboxes_ignore is None: - gt_bboxes_ignore = [None for _ in range(num_imgs)] - - for j in range(num_imgs): - assign_result = bbox_assigner.assign(proposal_list[j], - gt_bboxes[j], - gt_bboxes_ignore[j], - gt_labels[j]) - sampling_result = bbox_sampler.sample( - assign_result, - proposal_list[j], - gt_bboxes[j], - gt_labels[j], - feats=[lvl_feat[j][None] for lvl_feat in x]) - sampling_results.append(sampling_result) - - bbox_results = \ - self._bbox_forward_train( - i, x, sampling_results, gt_bboxes, gt_labels, - rcnn_train_cfg, semantic_feat, glbctx_feat) - roi_labels = bbox_results['bbox_targets'][0] - - for name, value in bbox_results['loss_bbox'].items(): - losses[f's{i}.{name}'] = ( - value * lw if 'loss' in name else value) - - # refine boxes - if i < self.num_stages - 1: - pos_is_gts = [res.pos_is_gt for res in sampling_results] - with torch.no_grad(): - proposal_list = self.bbox_head[i].refine_bboxes( - bbox_results['rois'], roi_labels, - bbox_results['bbox_pred'], pos_is_gts, img_metas) - - if self.with_feat_relay: - relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'], - sampling_results) - relayed_feat = self.feat_relay_head(relayed_feat) - else: - relayed_feat = None - - mask_results = self._mask_forward_train(x, sampling_results, gt_masks, - rcnn_train_cfg, semantic_feat, - glbctx_feat, relayed_feat) - mask_lw = sum(self.stage_loss_weights) - losses['loss_mask'] = mask_lw * mask_results['loss_mask'] - - return losses - - def simple_test(self, x, proposal_list, img_metas, rescale=False): - """Test without augmentation. - - Args: - x (tuple[Tensor]): Features from upstream network. Each - has shape (batch_size, c, h, w). - proposal_list (list(Tensor)): Proposals from rpn head. - Each has shape (num_proposals, 5), last dimension - 5 represent (x1, y1, x2, y2, score). - img_metas (list[dict]): Meta information of images. - rescale (bool): Whether to rescale the results to - the original image. Default: True. 
- - Returns: - list[list[np.ndarray]] or list[tuple]: When no mask branch, - it is bbox results of each image and classes with type - `list[list[np.ndarray]]`. The outer list - corresponds to each image. The inner list - corresponds to each class. When the model has mask branch, - it contains bbox results and mask results. - The outer list corresponds to each image, and first element - of tuple is bbox results, second element is mask results. - """ - if self.with_semantic: - _, semantic_feat = self.semantic_head(x) - else: - semantic_feat = None - - if self.with_glbctx: - mc_pred, glbctx_feat = self.glbctx_head(x) - else: - glbctx_feat = None - - num_imgs = len(proposal_list) - img_shapes = tuple(meta['img_shape'] for meta in img_metas) - ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - # "ms" in variable names means multi-stage - ms_scores = [] - rcnn_test_cfg = self.test_cfg - - rois = bbox2roi(proposal_list) - - if rois.shape[0] == 0: - # There is no proposal in the whole batch - bbox_results = [[ - np.zeros((0, 5), dtype=np.float32) - for _ in range(self.bbox_head[-1].num_classes) - ]] * num_imgs - - if self.with_mask: - mask_classes = self.mask_head.num_classes - segm_results = [[[] for _ in range(mask_classes)] - for _ in range(num_imgs)] - results = list(zip(bbox_results, segm_results)) - else: - results = bbox_results - - return results - - for i in range(self.num_stages): - bbox_head = self.bbox_head[i] - bbox_results = self._bbox_forward( - i, - x, - rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - # split batch bbox prediction back to each image - cls_score = bbox_results['cls_score'] - bbox_pred = bbox_results['bbox_pred'] - num_proposals_per_img = tuple(len(p) for p in proposal_list) - rois = rois.split(num_proposals_per_img, 0) - cls_score = cls_score.split(num_proposals_per_img, 0) - bbox_pred = bbox_pred.split(num_proposals_per_img, 0) - ms_scores.append(cls_score) - - if i < self.num_stages - 1: - refine_rois_list = [] - for j in range(num_imgs): - if rois[j].shape[0] > 0: - bbox_label = cls_score[j][:, :-1].argmax(dim=1) - refine_rois = bbox_head.regress_by_class( - rois[j], bbox_label, bbox_pred[j], img_metas[j]) - refine_rois_list.append(refine_rois) - rois = torch.cat(refine_rois_list) - - # average scores of each image by stages - cls_score = [ - sum([score[i] for score in ms_scores]) / float(len(ms_scores)) - for i in range(num_imgs) - ] - - # apply bbox post-processing to each image individually - det_bboxes = [] - det_labels = [] - for i in range(num_imgs): - det_bbox, det_label = self.bbox_head[-1].get_bboxes( - rois[i], - cls_score[i], - bbox_pred[i], - img_shapes[i], - scale_factors[i], - rescale=rescale, - cfg=rcnn_test_cfg) - det_bboxes.append(det_bbox) - det_labels.append(det_label) - det_bbox_results = [ - bbox2result(det_bboxes[i], det_labels[i], - self.bbox_head[-1].num_classes) - for i in range(num_imgs) - ] - - if self.with_mask: - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - mask_classes = self.mask_head.num_classes - det_segm_results = [[[] for _ in range(mask_classes)] - for _ in range(num_imgs)] - else: - if rescale and not isinstance(scale_factors[0], float): - scale_factors = [ - torch.from_numpy(scale_factor).to(det_bboxes[0].device) - for scale_factor in scale_factors - ] - _bboxes = [ - det_bboxes[i][:, :4] * - scale_factors[i] if rescale else det_bboxes[i] - for i in range(num_imgs) - ] - mask_rois = bbox2roi(_bboxes) - - # get 
relay feature on mask_rois - bbox_results = self._bbox_forward( - -1, - x, - mask_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - relayed_feat = bbox_results['relayed_feat'] - relayed_feat = self.feat_relay_head(relayed_feat) - - mask_results = self._mask_forward( - x, - mask_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat, - relayed_feat=relayed_feat) - mask_pred = mask_results['mask_pred'] - - # split batch mask prediction back to each image - num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes) - mask_preds = mask_pred.split(num_bbox_per_img, 0) - - # apply mask post-processing to each image individually - det_segm_results = [] - for i in range(num_imgs): - if det_bboxes[i].shape[0] == 0: - det_segm_results.append( - [[] for _ in range(self.mask_head.num_classes)]) - else: - segm_result = self.mask_head.get_seg_masks( - mask_preds[i], _bboxes[i], det_labels[i], - self.test_cfg, ori_shapes[i], scale_factors[i], - rescale) - det_segm_results.append(segm_result) - - # return results - if self.with_mask: - return list(zip(det_bbox_results, det_segm_results)) - else: - return det_bbox_results - - def aug_test(self, img_feats, proposal_list, img_metas, rescale=False): - if self.with_semantic: - semantic_feats = [ - self.semantic_head(feat)[1] for feat in img_feats - ] - else: - semantic_feats = [None] * len(img_metas) - - if self.with_glbctx: - glbctx_feats = [self.glbctx_head(feat)[1] for feat in img_feats] - else: - glbctx_feats = [None] * len(img_metas) - - rcnn_test_cfg = self.test_cfg - aug_bboxes = [] - aug_scores = [] - for x, img_meta, semantic_feat, glbctx_feat in zip( - img_feats, img_metas, semantic_feats, glbctx_feats): - # only one image in the batch - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - - proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, - scale_factor, flip) - # "ms" in variable names means multi-stage - ms_scores = [] - - rois = bbox2roi([proposals]) - - if rois.shape[0] == 0: - # There is no proposal in the single image - aug_bboxes.append(rois.new_zeros(0, 4)) - aug_scores.append(rois.new_zeros(0, 1)) - continue - - for i in range(self.num_stages): - bbox_head = self.bbox_head[i] - bbox_results = self._bbox_forward( - i, - x, - rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - ms_scores.append(bbox_results['cls_score']) - if i < self.num_stages - 1: - bbox_label = bbox_results['cls_score'].argmax(dim=1) - rois = bbox_head.regress_by_class( - rois, bbox_label, bbox_results['bbox_pred'], - img_meta[0]) - - cls_score = sum(ms_scores) / float(len(ms_scores)) - bboxes, scores = self.bbox_head[-1].get_bboxes( - rois, - cls_score, - bbox_results['bbox_pred'], - img_shape, - scale_factor, - rescale=False, - cfg=None) - aug_bboxes.append(bboxes) - aug_scores.append(scores) - - # after merging, bboxes will be rescaled to the original image size - merged_bboxes, merged_scores = merge_aug_bboxes( - aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) - det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, - rcnn_test_cfg.score_thr, - rcnn_test_cfg.nms, - rcnn_test_cfg.max_per_img) - - det_bbox_results = bbox2result(det_bboxes, det_labels, - self.bbox_head[-1].num_classes) - - if self.with_mask: - if det_bboxes.shape[0] == 0: - det_segm_results = [[] - for _ in range(self.mask_head.num_classes)] - else: - aug_masks = [] - for x, img_meta, semantic_feat, glbctx_feat in zip( - img_feats, img_metas, semantic_feats, glbctx_feats): - 
img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, - scale_factor, flip) - mask_rois = bbox2roi([_bboxes]) - # get relay feature on mask_rois - bbox_results = self._bbox_forward( - -1, - x, - mask_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat) - relayed_feat = bbox_results['relayed_feat'] - relayed_feat = self.feat_relay_head(relayed_feat) - mask_results = self._mask_forward( - x, - mask_rois, - semantic_feat=semantic_feat, - glbctx_feat=glbctx_feat, - relayed_feat=relayed_feat) - mask_pred = mask_results['mask_pred'] - aug_masks.append(mask_pred.sigmoid().cpu().numpy()) - merged_masks = merge_aug_masks(aug_masks, img_metas, - self.test_cfg) - ori_shape = img_metas[0][0]['ori_shape'] - det_segm_results = self.mask_head.get_seg_masks( - merged_masks, - det_bboxes, - det_labels, - rcnn_test_cfg, - ori_shape, - scale_factor=1.0, - rescale=False) - return [(det_bbox_results, det_segm_results)] - else: - return [det_bbox_results] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/shared_heads/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/shared_heads/__init__.py deleted file mode 100644 index d56636ab34d1dd2592828238099bcdccf179d6d3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/shared_heads/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .res_layer import ResLayer - -__all__ = ['ResLayer'] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/shared_heads/res_layer.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/shared_heads/res_layer.py deleted file mode 100644 index bef00a0581b225df618616e5c5b8f417337d9fe1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/shared_heads/res_layer.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
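For reference, the SCNet test-time-augmentation path removed above collects a sigmoid mask prediction per augmentation and merges them with `merge_aug_masks` before thresholding. A minimal standalone sketch of that merging step (an unweighted average under the assumption that flipping has already been undone; the real helper also supports per-augmentation weights, and `average_aug_masks` is a hypothetical name) might look like:

```python
import numpy as np

def average_aug_masks(aug_masks):
    """Average per-augmentation mask probabilities.

    aug_masks: list of arrays of shape (num_rois, num_classes, H, W),
    already mapped back to a common, un-flipped orientation.
    """
    assert len(aug_masks) > 0
    stacked = np.stack(aug_masks, axis=0)  # (num_augs, num_rois, C, H, W)
    return stacked.mean(axis=0)            # unweighted average of probabilities
```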
-import warnings - -import torch.nn as nn -from mmcv.runner import BaseModule, auto_fp16 - -from mmdet.models.backbones import ResNet -from mmdet.models.builder import SHARED_HEADS -from mmdet.models.utils import ResLayer as _ResLayer - - -@SHARED_HEADS.register_module() -class ResLayer(BaseModule): - - def __init__(self, - depth, - stage=3, - stride=2, - dilation=1, - style='pytorch', - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - with_cp=False, - dcn=None, - pretrained=None, - init_cfg=None): - super(ResLayer, self).__init__(init_cfg) - - self.norm_eval = norm_eval - self.norm_cfg = norm_cfg - self.stage = stage - self.fp16_enabled = False - block, stage_blocks = ResNet.arch_settings[depth] - stage_block = stage_blocks[stage] - planes = 64 * 2**stage - inplanes = 64 * 2**(stage - 1) * block.expansion - - res_layer = _ResLayer( - block, - inplanes, - planes, - stage_block, - stride=stride, - dilation=dilation, - style=style, - with_cp=with_cp, - norm_cfg=self.norm_cfg, - dcn=dcn) - self.add_module(f'layer{stage + 1}', res_layer) - - assert not (init_cfg and pretrained), \ - 'init_cfg and pretrained cannot be specified at the same time' - if isinstance(pretrained, str): - warnings.warn('DeprecationWarning: pretrained is a deprecated, ' - 'please use "init_cfg" instead') - self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - elif pretrained is None: - if init_cfg is None: - self.init_cfg = [ - dict(type='Kaiming', layer='Conv2d'), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ] - else: - raise TypeError('pretrained must be a str or None') - - @auto_fp16() - def forward(self, x): - res_layer = getattr(self, f'layer{self.stage + 1}') - out = res_layer(x) - return out - - def train(self, mode=True): - super(ResLayer, self).train(mode) - if self.norm_eval: - for m in self.modules(): - if isinstance(m, nn.BatchNorm2d): - m.eval() diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/sparse_roi_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/sparse_roi_head.py deleted file mode 100644 index 2613469e3a7cf397f19c04b24c43ab50b0c75551..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/sparse_roi_head.py +++ /dev/null @@ -1,424 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numpy as np -import torch - -from mmdet.core import bbox2result, bbox2roi, bbox_xyxy_to_cxcywh -from mmdet.core.bbox.samplers import PseudoSampler -from ..builder import HEADS -from .cascade_roi_head import CascadeRoIHead - - -@HEADS.register_module() -class SparseRoIHead(CascadeRoIHead): - r"""The RoIHead for `Sparse R-CNN: End-to-End Object Detection with - Learnable Proposals `_ - and `Instances as Queries `_ - - Args: - num_stages (int): Number of stage whole iterative process. - Defaults to 6. - stage_loss_weights (Tuple[float]): The loss - weight of each stage. By default all stages have - the same weight 1. - bbox_roi_extractor (dict): Config of box roi extractor. - mask_roi_extractor (dict): Config of mask roi extractor. - bbox_head (dict): Config of box head. - mask_head (dict): Config of mask head. - train_cfg (dict, optional): Configuration information in train stage. - Defaults to None. - test_cfg (dict, optional): Configuration information in test stage. - Defaults to None. - pretrained (str, optional): model pretrained path. Default: None - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - - """ - - def __init__(self, - num_stages=6, - stage_loss_weights=(1, 1, 1, 1, 1, 1), - proposal_feature_channel=256, - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict( - type='RoIAlign', output_size=7, sampling_ratio=2), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_roi_extractor=None, - bbox_head=dict( - type='DIIHead', - num_classes=80, - num_fcs=2, - num_heads=8, - num_cls_fcs=1, - num_reg_fcs=3, - feedforward_channels=2048, - hidden_channels=256, - dropout=0.0, - roi_feat_size=7, - ffn_act_cfg=dict(type='ReLU', inplace=True)), - mask_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - assert bbox_roi_extractor is not None - assert bbox_head is not None - assert len(stage_loss_weights) == num_stages - self.num_stages = num_stages - self.stage_loss_weights = stage_loss_weights - self.proposal_feature_channel = proposal_feature_channel - super(SparseRoIHead, self).__init__( - num_stages, - stage_loss_weights, - bbox_roi_extractor=bbox_roi_extractor, - mask_roi_extractor=mask_roi_extractor, - bbox_head=bbox_head, - mask_head=mask_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained, - init_cfg=init_cfg) - # train_cfg would be None when run the test.py - if train_cfg is not None: - for stage in range(num_stages): - assert isinstance(self.bbox_sampler[stage], PseudoSampler), \ - 'Sparse R-CNN and QueryInst only support `PseudoSampler`' - - def _bbox_forward(self, stage, x, rois, object_feats, img_metas): - """Box head forward function used in both training and testing. Returns - all regression, classification results and a intermediate feature. - - Args: - stage (int): The index of current stage in - iterative process. - x (List[Tensor]): List of FPN features - rois (Tensor): Rois in total batch. With shape (num_proposal, 5). - the last dimension 5 represents (img_index, x1, y1, x2, y2). - object_feats (Tensor): The object feature extracted from - the previous stage. - img_metas (dict): meta information of images. - - Returns: - dict[str, Tensor]: a dictionary of bbox head outputs, - Containing the following results: - - - cls_score (Tensor): The score of each class, has - shape (batch_size, num_proposals, num_classes) - when use focal loss or - (batch_size, num_proposals, num_classes+1) - otherwise. - - decode_bbox_pred (Tensor): The regression results - with shape (batch_size, num_proposal, 4). - The last dimension 4 represents - [tl_x, tl_y, br_x, br_y]. - - object_feats (Tensor): The object feature extracted - from current stage - - detach_cls_score_list (list[Tensor]): The detached - classification results, length is batch_size, and - each tensor has shape (num_proposal, num_classes). - - detach_proposal_list (list[tensor]): The detached - regression results, length is batch_size, and each - tensor has shape (num_proposal, 4). The last - dimension 4 represents [tl_x, tl_y, br_x, br_y]. 
- """ - num_imgs = len(img_metas) - bbox_roi_extractor = self.bbox_roi_extractor[stage] - bbox_head = self.bbox_head[stage] - bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], - rois) - cls_score, bbox_pred, object_feats, attn_feats = bbox_head( - bbox_feats, object_feats) - proposal_list = self.bbox_head[stage].refine_bboxes( - rois, - rois.new_zeros(len(rois)), # dummy arg - bbox_pred.view(-1, bbox_pred.size(-1)), - [rois.new_zeros(object_feats.size(1)) for _ in range(num_imgs)], - img_metas) - bbox_results = dict( - cls_score=cls_score, - decode_bbox_pred=torch.cat(proposal_list), - object_feats=object_feats, - attn_feats=attn_feats, - # detach then use it in label assign - detach_cls_score_list=[ - cls_score[i].detach() for i in range(num_imgs) - ], - detach_proposal_list=[item.detach() for item in proposal_list]) - - return bbox_results - - def _mask_forward(self, stage, x, rois, attn_feats): - """Mask head forward function used in both training and testing.""" - mask_roi_extractor = self.mask_roi_extractor[stage] - mask_head = self.mask_head[stage] - mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], - rois) - # do not support caffe_c4 model anymore - mask_pred = mask_head(mask_feats, attn_feats) - - mask_results = dict(mask_pred=mask_pred) - return mask_results - - def _mask_forward_train(self, stage, x, attn_feats, sampling_results, - gt_masks, rcnn_train_cfg): - """Run forward function and calculate loss for mask head in - training.""" - pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) - attn_feats = torch.cat([ - feats[res.pos_inds] - for (feats, res) in zip(attn_feats, sampling_results) - ]) - mask_results = self._mask_forward(stage, x, pos_rois, attn_feats) - - mask_targets = self.mask_head[stage].get_targets( - sampling_results, gt_masks, rcnn_train_cfg) - - pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) - - loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'], - mask_targets, pos_labels) - mask_results.update(loss_mask) - return mask_results - - def forward_train(self, - x, - proposal_boxes, - proposal_features, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - imgs_whwh=None, - gt_masks=None): - """Forward function in training stage. - - Args: - x (list[Tensor]): list of multi-level img features. - proposals (Tensor): Decoded proposal bboxes, has shape - (batch_size, num_proposals, 4) - proposal_features (Tensor): Expanded proposal - features, has shape - (batch_size, num_proposals, proposal_feature_channel) - img_metas (list[dict]): list of image info dict where - each dict has: 'img_shape', 'scale_factor', 'flip', - and may also contain 'filename', 'ori_shape', - 'pad_shape', and 'img_norm_cfg'. For details on the - values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - imgs_whwh (Tensor): Tensor with shape (batch_size, 4), - the dimension means - [img_width,img_height, img_width, img_height]. - gt_masks (None | Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. - - Returns: - dict[str, Tensor]: a dictionary of loss components of all stage. 
- """ - - num_imgs = len(img_metas) - num_proposals = proposal_boxes.size(1) - imgs_whwh = imgs_whwh.repeat(1, num_proposals, 1) - all_stage_bbox_results = [] - proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))] - object_feats = proposal_features - all_stage_loss = {} - for stage in range(self.num_stages): - rois = bbox2roi(proposal_list) - bbox_results = self._bbox_forward(stage, x, rois, object_feats, - img_metas) - all_stage_bbox_results.append(bbox_results) - if gt_bboxes_ignore is None: - # TODO support ignore - gt_bboxes_ignore = [None for _ in range(num_imgs)] - sampling_results = [] - cls_pred_list = bbox_results['detach_cls_score_list'] - proposal_list = bbox_results['detach_proposal_list'] - for i in range(num_imgs): - normalize_bbox_ccwh = bbox_xyxy_to_cxcywh(proposal_list[i] / - imgs_whwh[i]) - assign_result = self.bbox_assigner[stage].assign( - normalize_bbox_ccwh, cls_pred_list[i], gt_bboxes[i], - gt_labels[i], img_metas[i]) - sampling_result = self.bbox_sampler[stage].sample( - assign_result, proposal_list[i], gt_bboxes[i]) - sampling_results.append(sampling_result) - bbox_targets = self.bbox_head[stage].get_targets( - sampling_results, gt_bboxes, gt_labels, self.train_cfg[stage], - True) - cls_score = bbox_results['cls_score'] - decode_bbox_pred = bbox_results['decode_bbox_pred'] - - single_stage_loss = self.bbox_head[stage].loss( - cls_score.view(-1, cls_score.size(-1)), - decode_bbox_pred.view(-1, 4), - *bbox_targets, - imgs_whwh=imgs_whwh) - - if self.with_mask: - mask_results = self._mask_forward_train( - stage, x, bbox_results['attn_feats'], sampling_results, - gt_masks, self.train_cfg[stage]) - single_stage_loss['loss_mask'] = mask_results['loss_mask'] - - for key, value in single_stage_loss.items(): - all_stage_loss[f'stage{stage}_{key}'] = value * \ - self.stage_loss_weights[stage] - object_feats = bbox_results['object_feats'] - - return all_stage_loss - - def simple_test(self, - x, - proposal_boxes, - proposal_features, - img_metas, - imgs_whwh, - rescale=False): - """Test without augmentation. - - Args: - x (list[Tensor]): list of multi-level img features. - proposal_boxes (Tensor): Decoded proposal bboxes, has shape - (batch_size, num_proposals, 4) - proposal_features (Tensor): Expanded proposal - features, has shape - (batch_size, num_proposals, proposal_feature_channel) - img_metas (dict): meta information of images. - imgs_whwh (Tensor): Tensor with shape (batch_size, 4), - the dimension means - [img_width,img_height, img_width, img_height]. - rescale (bool): If True, return boxes in original image - space. Defaults to False. - - Returns: - list[list[np.ndarray]] or list[tuple]: When no mask branch, - it is bbox results of each image and classes with type - `list[list[np.ndarray]]`. The outer list - corresponds to each image. The inner list - corresponds to each class. When the model has a mask branch, - it is a list[tuple] that contains bbox results and mask results. - The outer list corresponds to each image, and first element - of tuple is bbox results, second element is mask results. - """ - assert self.with_bbox, 'Bbox head must be implemented.' 
- # Decode initial proposals - num_imgs = len(img_metas) - proposal_list = [proposal_boxes[i] for i in range(num_imgs)] - ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - object_feats = proposal_features - if all([proposal.shape[0] == 0 for proposal in proposal_list]): - # There is no proposal in the whole batch - bbox_results = [[ - np.zeros((0, 5), dtype=np.float32) - for i in range(self.bbox_head[-1].num_classes) - ]] * num_imgs - return bbox_results - - for stage in range(self.num_stages): - rois = bbox2roi(proposal_list) - bbox_results = self._bbox_forward(stage, x, rois, object_feats, - img_metas) - object_feats = bbox_results['object_feats'] - cls_score = bbox_results['cls_score'] - proposal_list = bbox_results['detach_proposal_list'] - - if self.with_mask: - rois = bbox2roi(proposal_list) - mask_results = self._mask_forward(stage, x, rois, - bbox_results['attn_feats']) - mask_results['mask_pred'] = mask_results['mask_pred'].reshape( - num_imgs, -1, *mask_results['mask_pred'].size()[1:]) - - num_classes = self.bbox_head[-1].num_classes - det_bboxes = [] - det_labels = [] - - if self.bbox_head[-1].loss_cls.use_sigmoid: - cls_score = cls_score.sigmoid() - else: - cls_score = cls_score.softmax(-1)[..., :-1] - - for img_id in range(num_imgs): - cls_score_per_img = cls_score[img_id] - scores_per_img, topk_indices = cls_score_per_img.flatten( - 0, 1).topk( - self.test_cfg.max_per_img, sorted=False) - labels_per_img = topk_indices % num_classes - bbox_pred_per_img = proposal_list[img_id][topk_indices // - num_classes] - if rescale: - scale_factor = img_metas[img_id]['scale_factor'] - bbox_pred_per_img /= bbox_pred_per_img.new_tensor(scale_factor) - det_bboxes.append( - torch.cat([bbox_pred_per_img, scores_per_img[:, None]], dim=1)) - det_labels.append(labels_per_img) - - bbox_results = [ - bbox2result(det_bboxes[i], det_labels[i], num_classes) - for i in range(num_imgs) - ] - - if self.with_mask: - if rescale and not isinstance(scale_factors[0], float): - scale_factors = [ - torch.from_numpy(scale_factor).to(det_bboxes[0].device) - for scale_factor in scale_factors - ] - _bboxes = [ - det_bboxes[i][:, :4] * - scale_factors[i] if rescale else det_bboxes[i][:, :4] - for i in range(len(det_bboxes)) - ] - segm_results = [] - mask_pred = mask_results['mask_pred'] - for img_id in range(num_imgs): - mask_pred_per_img = mask_pred[img_id].flatten(0, - 1)[topk_indices] - mask_pred_per_img = mask_pred_per_img[:, None, ...].repeat( - 1, num_classes, 1, 1) - segm_result = self.mask_head[-1].get_seg_masks( - mask_pred_per_img, _bboxes[img_id], det_labels[img_id], - self.test_cfg, ori_shapes[img_id], scale_factors[img_id], - rescale) - segm_results.append(segm_result) - - if self.with_mask: - results = list(zip(bbox_results, segm_results)) - else: - results = bbox_results - - return results - - def aug_test(self, features, proposal_list, img_metas, rescale=False): - raise NotImplementedError( - 'Sparse R-CNN and QueryInst does not support `aug_test`') - - def forward_dummy(self, x, proposal_boxes, proposal_features, img_metas): - """Dummy forward function when do the flops computing.""" - all_stage_bbox_results = [] - proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))] - object_feats = proposal_features - if self.with_bbox: - for stage in range(self.num_stages): - rois = bbox2roi(proposal_list) - bbox_results = self._bbox_forward(stage, x, rois, object_feats, - img_metas) - - 
all_stage_bbox_results.append((bbox_results, )) - proposal_list = bbox_results['detach_proposal_list'] - object_feats = bbox_results['object_feats'] - - if self.with_mask: - rois = bbox2roi(proposal_list) - mask_results = self._mask_forward( - stage, x, rois, bbox_results['attn_feats']) - all_stage_bbox_results[-1] += (mask_results, ) - return all_stage_bbox_results diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/standard_roi_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/standard_roi_head.py deleted file mode 100644 index 3fdd82ad1f04ba927ef35d16b140b7b23d5ff3e1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/standard_roi_head.py +++ /dev/null @@ -1,397 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler -from ..builder import HEADS, build_head, build_roi_extractor -from .base_roi_head import BaseRoIHead -from .test_mixins import BBoxTestMixin, MaskTestMixin - - -@HEADS.register_module() -class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): - """Simplest base roi head including one bbox head and one mask head.""" - - def init_assigner_sampler(self): - """Initialize assigner and sampler.""" - self.bbox_assigner = None - self.bbox_sampler = None - if self.train_cfg: - self.bbox_assigner = build_assigner(self.train_cfg.assigner) - self.bbox_sampler = build_sampler( - self.train_cfg.sampler, context=self) - - def init_bbox_head(self, bbox_roi_extractor, bbox_head): - """Initialize ``bbox_head``""" - self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor) - self.bbox_head = build_head(bbox_head) - - def init_mask_head(self, mask_roi_extractor, mask_head): - """Initialize ``mask_head``""" - if mask_roi_extractor is not None: - self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) - self.share_roi_extractor = False - else: - self.share_roi_extractor = True - self.mask_roi_extractor = self.bbox_roi_extractor - self.mask_head = build_head(mask_head) - - def forward_dummy(self, x, proposals): - """Dummy forward function.""" - # bbox head - outs = () - rois = bbox2roi([proposals]) - if self.with_bbox: - bbox_results = self._bbox_forward(x, rois) - outs = outs + (bbox_results['cls_score'], - bbox_results['bbox_pred']) - # mask head - if self.with_mask: - mask_rois = rois[:100] - mask_results = self._mask_forward(x, mask_rois) - outs = outs + (mask_results['mask_pred'], ) - return outs - - def forward_train(self, - x, - img_metas, - proposal_list, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None, - **kwargs): - """ - Args: - x (list[Tensor]): list of multi-level img features. - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - proposals (list[Tensors]): list of region proposals. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - gt_masks (None | Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. 
- - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - # assign gts and sample proposals - if self.with_bbox or self.with_mask: - num_imgs = len(img_metas) - if gt_bboxes_ignore is None: - gt_bboxes_ignore = [None for _ in range(num_imgs)] - sampling_results = [] - for i in range(num_imgs): - assign_result = self.bbox_assigner.assign( - proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], - gt_labels[i]) - sampling_result = self.bbox_sampler.sample( - assign_result, - proposal_list[i], - gt_bboxes[i], - gt_labels[i], - feats=[lvl_feat[i][None] for lvl_feat in x]) - sampling_results.append(sampling_result) - - losses = dict() - # bbox head forward and loss - if self.with_bbox: - bbox_results = self._bbox_forward_train(x, sampling_results, - gt_bboxes, gt_labels, - img_metas) - losses.update(bbox_results['loss_bbox']) - - # mask head forward and loss - if self.with_mask: - mask_results = self._mask_forward_train(x, sampling_results, - bbox_results['bbox_feats'], - gt_masks, img_metas) - losses.update(mask_results['loss_mask']) - - return losses - - def _bbox_forward(self, x, rois): - """Box head forward function used in both training and testing.""" - # TODO: a more flexible way to decide which feature maps to use - bbox_feats = self.bbox_roi_extractor( - x[:self.bbox_roi_extractor.num_inputs], rois) - if self.with_shared_head: - bbox_feats = self.shared_head(bbox_feats) - cls_score, bbox_pred = self.bbox_head(bbox_feats) - - bbox_results = dict( - cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) - return bbox_results - - def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, - img_metas): - """Run forward function and calculate loss for box head in training.""" - rois = bbox2roi([res.bboxes for res in sampling_results]) - bbox_results = self._bbox_forward(x, rois) - - bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, - gt_labels, self.train_cfg) - loss_bbox = self.bbox_head.loss(bbox_results['cls_score'], - bbox_results['bbox_pred'], rois, - *bbox_targets) - - bbox_results.update(loss_bbox=loss_bbox) - return bbox_results - - def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, - img_metas): - """Run forward function and calculate loss for mask head in - training.""" - if not self.share_roi_extractor: - pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) - mask_results = self._mask_forward(x, pos_rois) - else: - pos_inds = [] - device = bbox_feats.device - for res in sampling_results: - pos_inds.append( - torch.ones( - res.pos_bboxes.shape[0], - device=device, - dtype=torch.uint8)) - pos_inds.append( - torch.zeros( - res.neg_bboxes.shape[0], - device=device, - dtype=torch.uint8)) - pos_inds = torch.cat(pos_inds) - - mask_results = self._mask_forward( - x, pos_inds=pos_inds, bbox_feats=bbox_feats) - - mask_targets = self.mask_head.get_targets(sampling_results, gt_masks, - self.train_cfg) - pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) - loss_mask = self.mask_head.loss(mask_results['mask_pred'], - mask_targets, pos_labels) - - mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets) - return mask_results - - def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None): - """Mask head forward function used in both training and testing.""" - assert ((rois is not None) ^ - (pos_inds is not None and bbox_feats is not None)) - if rois is not None: - mask_feats = self.mask_roi_extractor( - x[:self.mask_roi_extractor.num_inputs], rois) - if 
self.with_shared_head: - mask_feats = self.shared_head(mask_feats) - else: - assert bbox_feats is not None - mask_feats = bbox_feats[pos_inds] - - mask_pred = self.mask_head(mask_feats) - mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats) - return mask_results - - async def async_simple_test(self, - x, - proposal_list, - img_metas, - proposals=None, - rescale=False): - """Async test without augmentation.""" - assert self.with_bbox, 'Bbox head must be implemented.' - - det_bboxes, det_labels = await self.async_test_bboxes( - x, img_metas, proposal_list, self.test_cfg, rescale=rescale) - bbox_results = bbox2result(det_bboxes, det_labels, - self.bbox_head.num_classes) - if not self.with_mask: - return bbox_results - else: - segm_results = await self.async_test_mask( - x, - img_metas, - det_bboxes, - det_labels, - rescale=rescale, - mask_test_cfg=self.test_cfg.get('mask')) - return bbox_results, segm_results - - def simple_test(self, - x, - proposal_list, - img_metas, - proposals=None, - rescale=False): - """Test without augmentation. - - Args: - x (tuple[Tensor]): Features from upstream network. Each - has shape (batch_size, c, h, w). - proposal_list (list(Tensor)): Proposals from rpn head. - Each has shape (num_proposals, 5), last dimension - 5 represent (x1, y1, x2, y2, score). - img_metas (list[dict]): Meta information of images. - rescale (bool): Whether to rescale the results to - the original image. Default: True. - - Returns: - list[list[np.ndarray]] or list[tuple]: When no mask branch, - it is bbox results of each image and classes with type - `list[list[np.ndarray]]`. The outer list - corresponds to each image. The inner list - corresponds to each class. When the model has mask branch, - it contains bbox results and mask results. - The outer list corresponds to each image, and first element - of tuple is bbox results, second element is mask results. - """ - assert self.with_bbox, 'Bbox head must be implemented.' - - det_bboxes, det_labels = self.simple_test_bboxes( - x, img_metas, proposal_list, self.test_cfg, rescale=rescale) - - bbox_results = [ - bbox2result(det_bboxes[i], det_labels[i], - self.bbox_head.num_classes) - for i in range(len(det_bboxes)) - ] - - if not self.with_mask: - return bbox_results - else: - segm_results = self.simple_test_mask( - x, img_metas, det_bboxes, det_labels, rescale=rescale) - return list(zip(bbox_results, segm_results)) - - def aug_test(self, x, proposal_list, img_metas, rescale=False): - """Test with augmentations. - - If rescale is False, then returned bboxes and masks will fit the scale - of imgs[0]. - """ - det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas, - proposal_list, - self.test_cfg) - if rescale: - _det_bboxes = det_bboxes - else: - _det_bboxes = det_bboxes.clone() - _det_bboxes[:, :4] *= det_bboxes.new_tensor( - img_metas[0][0]['scale_factor']) - bbox_results = bbox2result(_det_bboxes, det_labels, - self.bbox_head.num_classes) - - # det_bboxes always keep the original scale - if self.with_mask: - segm_results = self.aug_test_mask(x, img_metas, det_bboxes, - det_labels) - return [(bbox_results, segm_results)] - else: - return [bbox_results] - - def onnx_export(self, x, proposals, img_metas, rescale=False): - """Test without augmentation.""" - assert self.with_bbox, 'Bbox head must be implemented.' 
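Nearly every head removed above converts per-image box lists into a single batched RoI tensor via `bbox2roi`, which prepends the image index as an extra first column so one RoIAlign call can serve the whole batch. A self-contained sketch of that conversion (a hypothetical `boxes_to_rois` helper, not the mmdet implementation itself) is:

```python
import torch

def boxes_to_rois(bbox_list):
    """Concatenate per-image (n_i, 4+) boxes into a (sum n_i, 5) RoI tensor.

    Column 0 stores the image index; columns 1-4 store x1, y1, x2, y2.
    """
    rois = []
    for img_id, bboxes in enumerate(bbox_list):
        img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
        rois.append(torch.cat([img_inds, bboxes[:, :4]], dim=-1))
    return torch.cat(rois, dim=0) if rois else torch.zeros((0, 5))
```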
- det_bboxes, det_labels = self.bbox_onnx_export( - x, img_metas, proposals, self.test_cfg, rescale=rescale) - - if not self.with_mask: - return det_bboxes, det_labels - else: - segm_results = self.mask_onnx_export( - x, img_metas, det_bboxes, det_labels, rescale=rescale) - return det_bboxes, det_labels, segm_results - - def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs): - """Export mask branch to onnx which supports batch inference. - - Args: - x (tuple[Tensor]): Feature maps of all scale level. - img_metas (list[dict]): Image meta info. - det_bboxes (Tensor): Bboxes and corresponding scores. - has shape [N, num_bboxes, 5]. - det_labels (Tensor): class labels of - shape [N, num_bboxes]. - - Returns: - Tensor: The segmentation results of shape [N, num_bboxes, - image_height, image_width]. - """ - # image shapes of images in the batch - - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - raise RuntimeError('[ONNX Error] Can not record MaskHead ' - 'as it has not been executed this time') - batch_size = det_bboxes.size(0) - # if det_bboxes is rescaled to the original image size, we need to - # rescale it back to the testing scale to obtain RoIs. - det_bboxes = det_bboxes[..., :4] - batch_index = torch.arange( - det_bboxes.size(0), device=det_bboxes.device).float().view( - -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1) - mask_rois = torch.cat([batch_index, det_bboxes], dim=-1) - mask_rois = mask_rois.view(-1, 5) - mask_results = self._mask_forward(x, mask_rois) - mask_pred = mask_results['mask_pred'] - max_shape = img_metas[0]['img_shape_for_onnx'] - num_det = det_bboxes.shape[1] - det_bboxes = det_bboxes.reshape(-1, 4) - det_labels = det_labels.reshape(-1) - segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes, - det_labels, self.test_cfg, - max_shape) - segm_results = segm_results.reshape(batch_size, num_det, max_shape[0], - max_shape[1]) - return segm_results - - def bbox_onnx_export(self, x, img_metas, proposals, rcnn_test_cfg, - **kwargs): - """Export bbox branch to onnx which supports batch inference. - - Args: - x (tuple[Tensor]): Feature maps of all scale level. - img_metas (list[dict]): Image meta info. - proposals (Tensor): Region proposals with - batch dimension, has shape [N, num_bboxes, 5]. - rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. - - Returns: - tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5] - and class labels of shape [N, num_bboxes]. 
- """ - # get origin input shape to support onnx dynamic input shape - assert len( - img_metas - ) == 1, 'Only support one input image while in exporting to ONNX' - img_shapes = img_metas[0]['img_shape_for_onnx'] - - rois = proposals - - batch_index = torch.arange( - rois.size(0), device=rois.device).float().view(-1, 1, 1).expand( - rois.size(0), rois.size(1), 1) - - rois = torch.cat([batch_index, rois[..., :4]], dim=-1) - batch_size = rois.shape[0] - num_proposals_per_img = rois.shape[1] - - # Eliminate the batch dimension - rois = rois.view(-1, 5) - bbox_results = self._bbox_forward(x, rois) - cls_score = bbox_results['cls_score'] - bbox_pred = bbox_results['bbox_pred'] - - # Recover the batch dimension - rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1)) - cls_score = cls_score.reshape(batch_size, num_proposals_per_img, - cls_score.size(-1)) - - bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, - bbox_pred.size(-1)) - det_bboxes, det_labels = self.bbox_head.onnx_export( - rois, cls_score, bbox_pred, img_shapes, cfg=rcnn_test_cfg) - - return det_bboxes, det_labels diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/test_mixins.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/test_mixins.py deleted file mode 100644 index ae6e79aecf4e10a9ec25a55b480decc179ec91f6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/test_mixins.py +++ /dev/null @@ -1,311 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import sys -import warnings - -import numpy as np -import torch - -from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes, - merge_aug_masks, multiclass_nms) - -if sys.version_info >= (3, 7): - from mmdet.utils.contextmanagers import completed - - -class BBoxTestMixin: - - if sys.version_info >= (3, 7): - - async def async_test_bboxes(self, - x, - img_metas, - proposals, - rcnn_test_cfg, - rescale=False, - **kwargs): - """Asynchronized test for box head without augmentation.""" - rois = bbox2roi(proposals) - roi_feats = self.bbox_roi_extractor( - x[:len(self.bbox_roi_extractor.featmap_strides)], rois) - if self.with_shared_head: - roi_feats = self.shared_head(roi_feats) - sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017) - - async with completed( - __name__, 'bbox_head_forward', - sleep_interval=sleep_interval): - cls_score, bbox_pred = self.bbox_head(roi_feats) - - img_shape = img_metas[0]['img_shape'] - scale_factor = img_metas[0]['scale_factor'] - det_bboxes, det_labels = self.bbox_head.get_bboxes( - rois, - cls_score, - bbox_pred, - img_shape, - scale_factor, - rescale=rescale, - cfg=rcnn_test_cfg) - return det_bboxes, det_labels - - def simple_test_bboxes(self, - x, - img_metas, - proposals, - rcnn_test_cfg, - rescale=False): - """Test only det bboxes without augmentation. - - Args: - x (tuple[Tensor]): Feature maps of all scale level. - img_metas (list[dict]): Image meta info. - proposals (List[Tensor]): Region proposals. - rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. - rescale (bool): If True, return boxes in original image space. - Default: False. - - Returns: - tuple[list[Tensor], list[Tensor]]: The first list contains - the boxes of the corresponding image in a batch, each - tensor has the shape (num_boxes, 5) and last dimension - 5 represent (tl_x, tl_y, br_x, br_y, score). Each Tensor - in the second list is the labels with shape (num_boxes, ). - The length of both lists should be equal to batch_size. 
- """ - - rois = bbox2roi(proposals) - - if rois.shape[0] == 0: - batch_size = len(proposals) - det_bbox = rois.new_zeros(0, 5) - det_label = rois.new_zeros((0, ), dtype=torch.long) - if rcnn_test_cfg is None: - det_bbox = det_bbox[:, :4] - det_label = rois.new_zeros( - (0, self.bbox_head.fc_cls.out_features)) - # There is no proposal in the whole batch - return [det_bbox] * batch_size, [det_label] * batch_size - - bbox_results = self._bbox_forward(x, rois) - img_shapes = tuple(meta['img_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - # split batch bbox prediction back to each image - cls_score = bbox_results['cls_score'] - bbox_pred = bbox_results['bbox_pred'] - num_proposals_per_img = tuple(len(p) for p in proposals) - rois = rois.split(num_proposals_per_img, 0) - cls_score = cls_score.split(num_proposals_per_img, 0) - - # some detector with_reg is False, bbox_pred will be None - if bbox_pred is not None: - # TODO move this to a sabl_roi_head - # the bbox prediction of some detectors like SABL is not Tensor - if isinstance(bbox_pred, torch.Tensor): - bbox_pred = bbox_pred.split(num_proposals_per_img, 0) - else: - bbox_pred = self.bbox_head.bbox_pred_split( - bbox_pred, num_proposals_per_img) - else: - bbox_pred = (None, ) * len(proposals) - - # apply bbox post-processing to each image individually - det_bboxes = [] - det_labels = [] - for i in range(len(proposals)): - if rois[i].shape[0] == 0: - # There is no proposal in the single image - det_bbox = rois[i].new_zeros(0, 5) - det_label = rois[i].new_zeros((0, ), dtype=torch.long) - if rcnn_test_cfg is None: - det_bbox = det_bbox[:, :4] - det_label = rois[i].new_zeros( - (0, self.bbox_head.fc_cls.out_features)) - - else: - det_bbox, det_label = self.bbox_head.get_bboxes( - rois[i], - cls_score[i], - bbox_pred[i], - img_shapes[i], - scale_factors[i], - rescale=rescale, - cfg=rcnn_test_cfg) - det_bboxes.append(det_bbox) - det_labels.append(det_label) - return det_bboxes, det_labels - - def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg): - """Test det bboxes with test time augmentation.""" - aug_bboxes = [] - aug_scores = [] - for x, img_meta in zip(feats, img_metas): - # only one image in the batch - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - flip_direction = img_meta[0]['flip_direction'] - # TODO more flexible - proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, - scale_factor, flip, flip_direction) - rois = bbox2roi([proposals]) - bbox_results = self._bbox_forward(x, rois) - bboxes, scores = self.bbox_head.get_bboxes( - rois, - bbox_results['cls_score'], - bbox_results['bbox_pred'], - img_shape, - scale_factor, - rescale=False, - cfg=None) - aug_bboxes.append(bboxes) - aug_scores.append(scores) - # after merging, bboxes will be rescaled to the original image size - merged_bboxes, merged_scores = merge_aug_bboxes( - aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) - if merged_bboxes.shape[0] == 0: - # There is no proposal in the single image - det_bboxes = merged_bboxes.new_zeros(0, 5) - det_labels = merged_bboxes.new_zeros((0, ), dtype=torch.long) - else: - det_bboxes, det_labels = multiclass_nms(merged_bboxes, - merged_scores, - rcnn_test_cfg.score_thr, - rcnn_test_cfg.nms, - rcnn_test_cfg.max_per_img) - return det_bboxes, det_labels - - -class MaskTestMixin: - - if sys.version_info >= (3, 7): - - async def async_test_mask(self, - x, - img_metas, - det_bboxes, - det_labels, - 
rescale=False, - mask_test_cfg=None): - """Asynchronized test for mask head without augmentation.""" - # image shape of the first image in the batch (only one) - ori_shape = img_metas[0]['ori_shape'] - scale_factor = img_metas[0]['scale_factor'] - if det_bboxes.shape[0] == 0: - segm_result = [[] for _ in range(self.mask_head.num_classes)] - else: - if rescale and not isinstance(scale_factor, - (float, torch.Tensor)): - scale_factor = det_bboxes.new_tensor(scale_factor) - _bboxes = ( - det_bboxes[:, :4] * - scale_factor if rescale else det_bboxes) - mask_rois = bbox2roi([_bboxes]) - mask_feats = self.mask_roi_extractor( - x[:len(self.mask_roi_extractor.featmap_strides)], - mask_rois) - - if self.with_shared_head: - mask_feats = self.shared_head(mask_feats) - if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'): - sleep_interval = mask_test_cfg['async_sleep_interval'] - else: - sleep_interval = 0.035 - async with completed( - __name__, - 'mask_head_forward', - sleep_interval=sleep_interval): - mask_pred = self.mask_head(mask_feats) - segm_result = self.mask_head.get_seg_masks( - mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape, - scale_factor, rescale) - return segm_result - - def simple_test_mask(self, - x, - img_metas, - det_bboxes, - det_labels, - rescale=False): - """Simple test for mask head without augmentation.""" - # image shapes of images in the batch - ori_shapes = tuple(meta['ori_shape'] for meta in img_metas) - scale_factors = tuple(meta['scale_factor'] for meta in img_metas) - - if isinstance(scale_factors[0], float): - warnings.warn( - 'Scale factor in img_metas should be a ' - 'ndarray with shape (4,) ' - 'arrange as (factor_w, factor_h, factor_w, factor_h), ' - 'The scale_factor with float type has been deprecated. ') - scale_factors = np.array([scale_factors] * 4, dtype=np.float32) - - num_imgs = len(det_bboxes) - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - segm_results = [[[] for _ in range(self.mask_head.num_classes)] - for _ in range(num_imgs)] - else: - # if det_bboxes is rescaled to the original image size, we need to - # rescale it back to the testing scale to obtain RoIs. 
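
In the RoI step that follows, boxes that were already rescaled to the original image are mapped back to the network input scale by multiplying with `scale_factor` (assuming the mmdet convention that scale_factor = resized size / original size, ordered as (w, h, w, h)). A small made-up example of that mapping, for illustration only:

```
import torch

# Illustrative only: mapping a box from original-image coordinates back to
# the resized (network input) scale, as the code below does before building
# mask RoIs. scale_factor is assumed to be resized / original, (w, h, w, h).
scale_factor = torch.tensor([0.5, 0.5, 0.5, 0.5])
box_original = torch.tensor([100., 40., 300., 200.])   # x1, y1, x2, y2
box_resized = box_original * scale_factor
print(box_resized)  # tensor([ 50.,  20., 150., 100.])
```
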
- if rescale: - scale_factors = [ - torch.from_numpy(scale_factor).to(det_bboxes[0].device) - for scale_factor in scale_factors - ] - _bboxes = [ - det_bboxes[i][:, :4] * - scale_factors[i] if rescale else det_bboxes[i][:, :4] - for i in range(len(det_bboxes)) - ] - mask_rois = bbox2roi(_bboxes) - mask_results = self._mask_forward(x, mask_rois) - mask_pred = mask_results['mask_pred'] - # split batch mask prediction back to each image - num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes] - mask_preds = mask_pred.split(num_mask_roi_per_img, 0) - - # apply mask post-processing to each image individually - segm_results = [] - for i in range(num_imgs): - if det_bboxes[i].shape[0] == 0: - segm_results.append( - [[] for _ in range(self.mask_head.num_classes)]) - else: - segm_result = self.mask_head.get_seg_masks( - mask_preds[i], _bboxes[i], det_labels[i], - self.test_cfg, ori_shapes[i], scale_factors[i], - rescale) - segm_results.append(segm_result) - return segm_results - - def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): - """Test for mask head with test time augmentation.""" - if det_bboxes.shape[0] == 0: - segm_result = [[] for _ in range(self.mask_head.num_classes)] - else: - aug_masks = [] - for x, img_meta in zip(feats, img_metas): - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - flip_direction = img_meta[0]['flip_direction'] - _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, - scale_factor, flip, flip_direction) - mask_rois = bbox2roi([_bboxes]) - mask_results = self._mask_forward(x, mask_rois) - # convert to numpy array to save memory - aug_masks.append( - mask_results['mask_pred'].sigmoid().cpu().numpy()) - merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) - - ori_shape = img_metas[0][0]['ori_shape'] - scale_factor = det_bboxes.new_ones(4) - segm_result = self.mask_head.get_seg_masks( - merged_masks, - det_bboxes, - det_labels, - self.test_cfg, - ori_shape, - scale_factor=scale_factor, - rescale=False) - return segm_result diff --git a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/trident_roi_head.py b/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/trident_roi_head.py deleted file mode 100644 index 09758792de83ad1a1c9026ad2950843a13daf1b5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/roi_heads/trident_roi_head.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from mmcv.ops import batched_nms - -from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes, - multiclass_nms) -from mmdet.models.roi_heads.standard_roi_head import StandardRoIHead -from ..builder import HEADS - - -@HEADS.register_module() -class TridentRoIHead(StandardRoIHead): - """Trident roi head. - - Args: - num_branch (int): Number of branches in TridentNet. - test_branch_idx (int): In inference, all 3 branches will be used - if `test_branch_idx==-1`, otherwise only branch with index - `test_branch_idx` will be used. 
- """ - - def __init__(self, num_branch, test_branch_idx, **kwargs): - self.num_branch = num_branch - self.test_branch_idx = test_branch_idx - super(TridentRoIHead, self).__init__(**kwargs) - - def merge_trident_bboxes(self, trident_det_bboxes, trident_det_labels): - """Merge bbox predictions of each branch.""" - if trident_det_bboxes.numel() == 0: - det_bboxes = trident_det_bboxes.new_zeros((0, 5)) - det_labels = trident_det_bboxes.new_zeros((0, ), dtype=torch.long) - else: - nms_bboxes = trident_det_bboxes[:, :4] - nms_scores = trident_det_bboxes[:, 4].contiguous() - nms_inds = trident_det_labels - nms_cfg = self.test_cfg['nms'] - det_bboxes, keep = batched_nms(nms_bboxes, nms_scores, nms_inds, - nms_cfg) - det_labels = trident_det_labels[keep] - if self.test_cfg['max_per_img'] > 0: - det_labels = det_labels[:self.test_cfg['max_per_img']] - det_bboxes = det_bboxes[:self.test_cfg['max_per_img']] - - return det_bboxes, det_labels - - def simple_test(self, - x, - proposal_list, - img_metas, - proposals=None, - rescale=False): - """Test without augmentation as follows: - - 1. Compute prediction bbox and label per branch. - 2. Merge predictions of each branch according to scores of - bboxes, i.e., bboxes with higher score are kept to give - top-k prediction. - """ - assert self.with_bbox, 'Bbox head must be implemented.' - det_bboxes_list, det_labels_list = self.simple_test_bboxes( - x, img_metas, proposal_list, self.test_cfg, rescale=rescale) - num_branch = self.num_branch if self.test_branch_idx == -1 else 1 - for _ in range(len(det_bboxes_list)): - if det_bboxes_list[_].shape[0] == 0: - det_bboxes_list[_] = det_bboxes_list[_].new_empty((0, 5)) - det_bboxes, det_labels = [], [] - for i in range(len(img_metas) // num_branch): - det_result = self.merge_trident_bboxes( - torch.cat(det_bboxes_list[i * num_branch:(i + 1) * - num_branch]), - torch.cat(det_labels_list[i * num_branch:(i + 1) * - num_branch])) - det_bboxes.append(det_result[0]) - det_labels.append(det_result[1]) - - bbox_results = [ - bbox2result(det_bboxes[i], det_labels[i], - self.bbox_head.num_classes) - for i in range(len(det_bboxes)) - ] - return bbox_results - - def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg): - """Test det bboxes with test time augmentation.""" - aug_bboxes = [] - aug_scores = [] - for x, img_meta in zip(feats, img_metas): - # only one image in the batch - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - flip_direction = img_meta[0]['flip_direction'] - - trident_bboxes, trident_scores = [], [] - for branch_idx in range(len(proposal_list)): - proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, - scale_factor, flip, flip_direction) - rois = bbox2roi([proposals]) - bbox_results = self._bbox_forward(x, rois) - bboxes, scores = self.bbox_head.get_bboxes( - rois, - bbox_results['cls_score'], - bbox_results['bbox_pred'], - img_shape, - scale_factor, - rescale=False, - cfg=None) - trident_bboxes.append(bboxes) - trident_scores.append(scores) - - aug_bboxes.append(torch.cat(trident_bboxes, 0)) - aug_scores.append(torch.cat(trident_scores, 0)) - # after merging, bboxes will be rescaled to the original image size - merged_bboxes, merged_scores = merge_aug_bboxes( - aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) - det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, - rcnn_test_cfg.score_thr, - rcnn_test_cfg.nms, - rcnn_test_cfg.max_per_img) - return det_bboxes, det_labels diff --git 
a/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/__init__.py deleted file mode 100644 index b489a905b1e9b6cef2e8b9575600990563128e4e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .panoptic_fpn_head import PanopticFPNHead # noqa: F401,F403 -from .panoptic_fusion_heads import * # noqa: F401,F403 diff --git a/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/base_semantic_head.py b/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/base_semantic_head.py deleted file mode 100644 index 2b6ca145f050fbe10f348594203b6f0aa30f5695..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/base_semantic_head.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta, abstractmethod - -import torch.nn.functional as F -from mmcv.runner import BaseModule, force_fp32 - -from ..builder import build_loss -from ..utils import interpolate_as - - -class BaseSemanticHead(BaseModule, metaclass=ABCMeta): - """Base module of Semantic Head. - - Args: - num_classes (int): the number of classes. - init_cfg (dict): the initialization config. - loss_seg (dict): the loss of the semantic head. - """ - - def __init__(self, - num_classes, - init_cfg=None, - loss_seg=dict( - type='CrossEntropyLoss', - ignore_index=255, - loss_weight=1.0)): - super(BaseSemanticHead, self).__init__(init_cfg) - self.loss_seg = build_loss(loss_seg) - self.num_classes = num_classes - - @force_fp32(apply_to=('seg_preds', )) - def loss(self, seg_preds, gt_semantic_seg): - """Get the loss of semantic head. - - Args: - seg_preds (Tensor): The input logits with the shape (N, C, H, W). - gt_semantic_seg: The ground truth of semantic segmentation with - the shape (N, H, W). - label_bias: The starting number of the semantic label. - Default: 1. - - Returns: - dict: the loss of semantic head. - """ - if seg_preds.shape[-2:] != gt_semantic_seg.shape[-2:]: - seg_preds = interpolate_as(seg_preds, gt_semantic_seg) - seg_preds = seg_preds.permute((0, 2, 3, 1)) - - loss_seg = self.loss_seg( - seg_preds.reshape(-1, self.num_classes), # => [NxHxW, C] - gt_semantic_seg.reshape(-1).long()) - return dict(loss_seg=loss_seg) - - @abstractmethod - def forward(self, x): - """Placeholder of forward function. - - Returns: - dict[str, Tensor]: A dictionary, including features - and predicted scores. Required keys: 'seg_preds' - and 'feats'. 
- """ - pass - - def forward_train(self, x, gt_semantic_seg): - output = self.forward(x) - seg_preds = output['seg_preds'] - return self.loss(seg_preds, gt_semantic_seg) - - def simple_test(self, x, img_metas, rescale=False): - output = self.forward(x) - seg_preds = output['seg_preds'] - seg_preds = F.interpolate( - seg_preds, - size=img_metas[0]['pad_shape'][:2], - mode='bilinear', - align_corners=False) - - if rescale: - h, w, _ = img_metas[0]['img_shape'] - seg_preds = seg_preds[:, :, :h, :w] - - h, w, _ = img_metas[0]['ori_shape'] - seg_preds = F.interpolate( - seg_preds, size=(h, w), mode='bilinear', align_corners=False) - return seg_preds diff --git a/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fpn_head.py b/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fpn_head.py deleted file mode 100644 index f1df2976121a7668ab468b8997728683360fae14..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fpn_head.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch -import torch.nn as nn -from mmcv.runner import ModuleList - -from ..builder import HEADS -from ..utils import ConvUpsample -from .base_semantic_head import BaseSemanticHead - - -@HEADS.register_module() -class PanopticFPNHead(BaseSemanticHead): - """PanopticFPNHead used in Panoptic FPN. - - In this head, the number of output channels is ``num_stuff_classes - + 1``, including all stuff classes and one thing class. The stuff - classes will be reset from ``0`` to ``num_stuff_classes - 1``, the - thing classes will be merged to ``num_stuff_classes``-th channel. - - Arg: - num_things_classes (int): Number of thing classes. Default: 80. - num_stuff_classes (int): Number of stuff classes. Default: 53. - num_classes (int): Number of classes, including all stuff - classes and one thing class. This argument is deprecated, - please use ``num_things_classes`` and ``num_stuff_classes``. - The module will automatically infer the num_classes by - ``num_stuff_classes + 1``. - in_channels (int): Number of channels in the input feature - map. - inner_channels (int): Number of channels in inner features. - start_level (int): The start level of the input features - used in PanopticFPN. - end_level (int): The end level of the used features, the - ``end_level``-th layer will not be used. - fg_range (tuple): Range of the foreground classes. It starts - from ``0`` to ``num_things_classes-1``. Deprecated, please use - ``num_things_classes`` directly. - bg_range (tuple): Range of the background classes. It starts - from ``num_things_classes`` to ``num_things_classes + - num_stuff_classes - 1``. Deprecated, please use - ``num_stuff_classes`` and ``num_things_classes`` directly. - conv_cfg (dict): Dictionary to construct and config - conv layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Use ``GN`` by default. - init_cfg (dict or list[dict], optional): Initialization config dict. - loss_seg (dict): the loss of the semantic head. 
- """ - - def __init__(self, - num_things_classes=80, - num_stuff_classes=53, - num_classes=None, - in_channels=256, - inner_channels=128, - start_level=0, - end_level=4, - fg_range=None, - bg_range=None, - conv_cfg=None, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), - init_cfg=None, - loss_seg=dict( - type='CrossEntropyLoss', ignore_index=-1, - loss_weight=1.0)): - if num_classes is not None: - warnings.warn( - '`num_classes` is deprecated now, please set ' - '`num_stuff_classes` directly, the `num_classes` will be ' - 'set to `num_stuff_classes + 1`') - # num_classes = num_stuff_classes + 1 for PanopticFPN. - assert num_classes == num_stuff_classes + 1 - super(PanopticFPNHead, self).__init__(num_stuff_classes + 1, init_cfg, - loss_seg) - self.num_things_classes = num_things_classes - self.num_stuff_classes = num_stuff_classes - if fg_range is not None and bg_range is not None: - self.fg_range = fg_range - self.bg_range = bg_range - self.num_things_classes = fg_range[1] - fg_range[0] + 1 - self.num_stuff_classes = bg_range[1] - bg_range[0] + 1 - warnings.warn( - '`fg_range` and `bg_range` are deprecated now, ' - f'please use `num_things_classes`={self.num_things_classes} ' - f'and `num_stuff_classes`={self.num_stuff_classes} instead.') - - # Used feature layers are [start_level, end_level) - self.start_level = start_level - self.end_level = end_level - self.num_stages = end_level - start_level - self.inner_channels = inner_channels - - self.conv_upsample_layers = ModuleList() - for i in range(start_level, end_level): - self.conv_upsample_layers.append( - ConvUpsample( - in_channels, - inner_channels, - num_layers=i if i > 0 else 1, - num_upsample=i if i > 0 else 0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - )) - self.conv_logits = nn.Conv2d(inner_channels, self.num_classes, 1) - - def _set_things_to_void(self, gt_semantic_seg): - """Merge thing classes to one class. - - In PanopticFPN, the background labels will be reset from `0` to - `self.num_stuff_classes-1`, the foreground labels will be merged to - `self.num_stuff_classes`-th channel. - """ - gt_semantic_seg = gt_semantic_seg.int() - fg_mask = gt_semantic_seg < self.num_things_classes - bg_mask = (gt_semantic_seg >= self.num_things_classes) * ( - gt_semantic_seg < self.num_things_classes + self.num_stuff_classes) - - new_gt_seg = torch.clone(gt_semantic_seg) - new_gt_seg = torch.where(bg_mask, - gt_semantic_seg - self.num_things_classes, - new_gt_seg) - new_gt_seg = torch.where(fg_mask, - fg_mask.int() * self.num_stuff_classes, - new_gt_seg) - return new_gt_seg - - def loss(self, seg_preds, gt_semantic_seg): - """The loss of PanopticFPN head. - - Things classes will be merged to one class in PanopticFPN. - """ - gt_semantic_seg = self._set_things_to_void(gt_semantic_seg) - return super().loss(seg_preds, gt_semantic_seg) - - def init_weights(self): - super().init_weights() - nn.init.normal_(self.conv_logits.weight.data, 0, 0.01) - self.conv_logits.bias.data.zero_() - - def forward(self, x): - # the number of subnets must be not more than - # the length of features. 
- assert self.num_stages <= len(x) - - feats = [] - for i, layer in enumerate(self.conv_upsample_layers): - f = layer(x[self.start_level + i]) - feats.append(f) - - feats = torch.sum(torch.stack(feats, dim=0), dim=0) - seg_preds = self.conv_logits(feats) - out = dict(seg_preds=seg_preds, feats=feats) - return out diff --git a/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fusion_heads/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fusion_heads/__init__.py deleted file mode 100644 index 41625a61d6d1c38c633062c24b1e3455bd3ae2df..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fusion_heads/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .base_panoptic_fusion_head import \ - BasePanopticFusionHead # noqa: F401,F403 -from .heuristic_fusion_head import HeuristicFusionHead # noqa: F401,F403 -from .maskformer_fusion_head import MaskFormerFusionHead # noqa: F401,F403 diff --git a/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py b/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py deleted file mode 100644 index a38ac1c6cd092f0c68fa51853bcd1969de7287a7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta, abstractmethod - -from mmcv.runner import BaseModule - -from ...builder import build_loss - - -class BasePanopticFusionHead(BaseModule, metaclass=ABCMeta): - """Base class for panoptic heads.""" - - def __init__(self, - num_things_classes=80, - num_stuff_classes=53, - test_cfg=None, - loss_panoptic=None, - init_cfg=None, - **kwargs): - super(BasePanopticFusionHead, self).__init__(init_cfg) - self.num_things_classes = num_things_classes - self.num_stuff_classes = num_stuff_classes - self.num_classes = num_things_classes + num_stuff_classes - self.test_cfg = test_cfg - - if loss_panoptic: - self.loss_panoptic = build_loss(loss_panoptic) - else: - self.loss_panoptic = None - - @property - def with_loss(self): - """bool: whether the panoptic head contains loss function.""" - return self.loss_panoptic is not None - - @abstractmethod - def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs): - """Forward function during training.""" - - @abstractmethod - def simple_test(self, - img_metas, - det_labels, - mask_preds, - seg_preds, - det_bboxes, - cfg=None, - **kwargs): - """Test without augmentation.""" diff --git a/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py b/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py deleted file mode 100644 index 06c1de2b9010fef13bd2322bbd3352d82a1f3e2f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
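
The fusion heads in this file and in maskformer_fusion_head.py further below encode each panoptic segment as ``segment_id = class_id + instance_id * INSTANCE_OFFSET``. A minimal sketch of that encoding and its inverse, assuming the usual mmdet value of 1000 for `INSTANCE_OFFSET` (the ids used here are invented):

```
# Sketch of the panoptic segment-id encoding used by the fusion heads below.
# INSTANCE_OFFSET is assumed to be 1000 here; see mmdet's panoptic_utils.
INSTANCE_OFFSET = 1000

def encode(class_id, instance_id):
    return class_id + instance_id * INSTANCE_OFFSET

def decode(segment_id):
    return segment_id % INSTANCE_OFFSET, segment_id // INSTANCE_OFFSET

segment_id = encode(class_id=17, instance_id=3)  # a made-up "thing" instance
print(segment_id)          # 3017
print(decode(segment_id))  # (17, 3)
```
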
-import torch - -from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET -from mmdet.models.builder import HEADS -from .base_panoptic_fusion_head import BasePanopticFusionHead - - -@HEADS.register_module() -class HeuristicFusionHead(BasePanopticFusionHead): - """Fusion Head with Heuristic method.""" - - def __init__(self, - num_things_classes=80, - num_stuff_classes=53, - test_cfg=None, - init_cfg=None, - **kwargs): - super(HeuristicFusionHead, - self).__init__(num_things_classes, num_stuff_classes, test_cfg, - None, init_cfg, **kwargs) - - def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs): - """HeuristicFusionHead has no training loss.""" - return dict() - - def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5): - """Lay instance masks to a result map. - - Args: - bboxes: The bboxes results, (K, 4). - labels: The labels of bboxes, (K, ). - masks: The instance masks, (K, H, W). - overlap_thr: Threshold to determine whether two masks overlap. - default: 0.5. - - Returns: - Tensor: The result map, (H, W). - """ - num_insts = bboxes.shape[0] - id_map = torch.zeros( - masks.shape[-2:], device=bboxes.device, dtype=torch.long) - if num_insts == 0: - return id_map, labels - - scores, bboxes = bboxes[:, -1], bboxes[:, :4] - - # Sort by score to use heuristic fusion - order = torch.argsort(-scores) - bboxes = bboxes[order] - labels = labels[order] - segm_masks = masks[order] - - instance_id = 1 - left_labels = [] - for idx in range(bboxes.shape[0]): - _cls = labels[idx] - _mask = segm_masks[idx] - instance_id_map = torch.ones_like( - _mask, dtype=torch.long) * instance_id - area = _mask.sum() - if area == 0: - continue - - pasted = id_map > 0 - intersect = (_mask * pasted).sum() - if (intersect / (area + 1e-5)) > overlap_thr: - continue - - _part = _mask * (~pasted) - id_map = torch.where(_part, instance_id_map, id_map) - left_labels.append(_cls) - instance_id += 1 - - if len(left_labels) > 0: - instance_labels = torch.stack(left_labels) - else: - instance_labels = bboxes.new_zeros((0, ), dtype=torch.long) - assert instance_id == (len(instance_labels) + 1) - return id_map, instance_labels - - def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds, - **kwargs): - """Fuse the results of instance and semantic segmentations. - - Args: - det_bboxes: The bboxes results, (K, 4). - det_labels: The labels of bboxes, (K,). - mask_preds: The masks results, (K, H, W). - seg_preds: The semantic segmentation results, - (K, num_stuff + 1, H, W). - - Returns: - Tensor : The panoptic segmentation result, (H, W). 
- """ - mask_preds = mask_preds >= self.test_cfg.mask_thr_binary - id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds, - self.test_cfg.mask_overlap) - - seg_results = seg_preds.argmax(dim=0) - seg_results = seg_results + self.num_things_classes - - pan_results = seg_results - instance_id = 1 - for idx in range(det_labels.shape[0]): - _mask = id_map == (idx + 1) - if _mask.sum() == 0: - continue - _cls = labels[idx] - # simply trust detection - segment_id = _cls + instance_id * INSTANCE_OFFSET - pan_results[_mask] = segment_id - instance_id += 1 - - ids, counts = torch.unique( - pan_results % INSTANCE_OFFSET, return_counts=True) - stuff_ids = ids[ids >= self.num_things_classes] - stuff_counts = counts[ids >= self.num_things_classes] - ignore_stuff_ids = stuff_ids[ - stuff_counts < self.test_cfg.stuff_area_limit] - - assert pan_results.ndim == 2 - pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape( - 1, 1, -1)).any(dim=2)] = self.num_classes - - return pan_results diff --git a/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py b/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py deleted file mode 100644 index 5b59ce4deaed11b98f5d9cf7a22f177eebfeb6b7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py +++ /dev/null @@ -1,241 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn.functional as F - -from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET -from mmdet.core.mask import mask2bbox -from mmdet.models.builder import HEADS -from .base_panoptic_fusion_head import BasePanopticFusionHead - - -@HEADS.register_module() -class MaskFormerFusionHead(BasePanopticFusionHead): - - def __init__(self, - num_things_classes=80, - num_stuff_classes=53, - test_cfg=None, - loss_panoptic=None, - init_cfg=None, - **kwargs): - super().__init__(num_things_classes, num_stuff_classes, test_cfg, - loss_panoptic, init_cfg, **kwargs) - - def forward_train(self, **kwargs): - """MaskFormerFusionHead has no training loss.""" - return dict() - - def panoptic_postprocess(self, mask_cls, mask_pred): - """Panoptic segmengation inference. - - Args: - mask_cls (Tensor): Classfication outputs of shape - (num_queries, cls_out_channels) for a image. - Note `cls_out_channels` should includes - background. - mask_pred (Tensor): Mask outputs of shape - (num_queries, h, w) for a image. - - Returns: - Tensor: Panoptic segment result of shape \ - (h, w), each element in Tensor means: \ - ``segment_id = _cls + instance_id * INSTANCE_OFFSET``. 
- """ - object_mask_thr = self.test_cfg.get('object_mask_thr', 0.8) - iou_thr = self.test_cfg.get('iou_thr', 0.8) - filter_low_score = self.test_cfg.get('filter_low_score', False) - - scores, labels = F.softmax(mask_cls, dim=-1).max(-1) - mask_pred = mask_pred.sigmoid() - - keep = labels.ne(self.num_classes) & (scores > object_mask_thr) - cur_scores = scores[keep] - cur_classes = labels[keep] - cur_masks = mask_pred[keep] - - cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks - - h, w = cur_masks.shape[-2:] - panoptic_seg = torch.full((h, w), - self.num_classes, - dtype=torch.int32, - device=cur_masks.device) - if cur_masks.shape[0] == 0: - # We didn't detect any mask :( - pass - else: - cur_mask_ids = cur_prob_masks.argmax(0) - instance_id = 1 - for k in range(cur_classes.shape[0]): - pred_class = int(cur_classes[k].item()) - isthing = pred_class < self.num_things_classes - mask = cur_mask_ids == k - mask_area = mask.sum().item() - original_area = (cur_masks[k] >= 0.5).sum().item() - - if filter_low_score: - mask = mask & (cur_masks[k] >= 0.5) - - if mask_area > 0 and original_area > 0: - if mask_area / original_area < iou_thr: - continue - - if not isthing: - # different stuff regions of same class will be - # merged here, and stuff share the instance_id 0. - panoptic_seg[mask] = pred_class - else: - panoptic_seg[mask] = ( - pred_class + instance_id * INSTANCE_OFFSET) - instance_id += 1 - - return panoptic_seg - - def semantic_postprocess(self, mask_cls, mask_pred): - """Semantic segmengation postprocess. - - Args: - mask_cls (Tensor): Classfication outputs of shape - (num_queries, cls_out_channels) for a image. - Note `cls_out_channels` should includes - background. - mask_pred (Tensor): Mask outputs of shape - (num_queries, h, w) for a image. - - Returns: - Tensor: Semantic segment result of shape \ - (cls_out_channels, h, w). - """ - # TODO add semantic segmentation result - raise NotImplementedError - - def instance_postprocess(self, mask_cls, mask_pred): - """Instance segmengation postprocess. - - Args: - mask_cls (Tensor): Classfication outputs of shape - (num_queries, cls_out_channels) for a image. - Note `cls_out_channels` should includes - background. - mask_pred (Tensor): Mask outputs of shape - (num_queries, h, w) for a image. - - Returns: - tuple[Tensor]: Instance segmentation results. - - - labels_per_image (Tensor): Predicted labels,\ - shape (n, ). - - bboxes (Tensor): Bboxes and scores with shape (n, 5) of \ - positive region in binary mask, the last column is scores. - - mask_pred_binary (Tensor): Instance masks of \ - shape (n, h, w). 
- """ - max_per_image = self.test_cfg.get('max_per_image', 100) - num_queries = mask_cls.shape[0] - # shape (num_queries, num_class) - scores = F.softmax(mask_cls, dim=-1)[:, :-1] - # shape (num_queries * num_class, ) - labels = torch.arange(self.num_classes, device=mask_cls.device).\ - unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) - scores_per_image, top_indices = scores.flatten(0, 1).topk( - max_per_image, sorted=False) - labels_per_image = labels[top_indices] - - query_indices = top_indices // self.num_classes - mask_pred = mask_pred[query_indices] - - # extract things - is_thing = labels_per_image < self.num_things_classes - scores_per_image = scores_per_image[is_thing] - labels_per_image = labels_per_image[is_thing] - mask_pred = mask_pred[is_thing] - - mask_pred_binary = (mask_pred > 0).float() - mask_scores_per_image = (mask_pred.sigmoid() * - mask_pred_binary).flatten(1).sum(1) / ( - mask_pred_binary.flatten(1).sum(1) + 1e-6) - det_scores = scores_per_image * mask_scores_per_image - mask_pred_binary = mask_pred_binary.bool() - bboxes = mask2bbox(mask_pred_binary) - bboxes = torch.cat([bboxes, det_scores[:, None]], dim=-1) - - return labels_per_image, bboxes, mask_pred_binary - - def simple_test(self, - mask_cls_results, - mask_pred_results, - img_metas, - rescale=False, - **kwargs): - """Test segment without test-time aumengtation. - - Only the output of last decoder layers was used. - - Args: - mask_cls_results (Tensor): Mask classification logits, - shape (batch_size, num_queries, cls_out_channels). - Note `cls_out_channels` should includes background. - mask_pred_results (Tensor): Mask logits, shape - (batch_size, num_queries, h, w). - img_metas (list[dict]): List of image information. - rescale (bool, optional): If True, return boxes in - original image space. Default False. - - Returns: - list[dict[str, Tensor | tuple[Tensor]]]: Semantic segmentation \ - results and panoptic segmentation results for each \ - image. - - .. code-block:: none - - [ - { - 'pan_results': Tensor, # shape = [h, w] - 'ins_results': tuple[Tensor], - # semantic segmentation results are not supported yet - 'sem_results': Tensor - }, - ... - ] - """ - panoptic_on = self.test_cfg.get('panoptic_on', True) - semantic_on = self.test_cfg.get('semantic_on', False) - instance_on = self.test_cfg.get('instance_on', False) - assert not semantic_on, 'segmantic segmentation '\ - 'results are not supported yet.' 
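
The top-k selection in `instance_postprocess` above flattens the (num_queries, num_classes) score matrix and then recovers the query and class of each kept score with integer division and modulo. A small self-contained sketch of that index arithmetic (the sizes are made up):

```
import torch

# Illustrative sketch of the flattened top-k trick used in
# instance_postprocess above. Sizes are invented for the example.
num_queries, num_classes, k = 4, 3, 2
scores = torch.rand(num_queries, num_classes)
topk_scores, topk_idx = scores.flatten(0, 1).topk(k)
query_idx = topk_idx // num_classes   # which query each kept score came from
class_idx = topk_idx % num_classes    # which class it belongs to
assert torch.allclose(topk_scores, scores[query_idx, class_idx])
```
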
- - results = [] - for mask_cls_result, mask_pred_result, meta in zip( - mask_cls_results, mask_pred_results, img_metas): - # remove padding - img_height, img_width = meta['img_shape'][:2] - mask_pred_result = mask_pred_result[:, :img_height, :img_width] - - if rescale: - # return result in original resolution - ori_height, ori_width = meta['ori_shape'][:2] - mask_pred_result = F.interpolate( - mask_pred_result[:, None], - size=(ori_height, ori_width), - mode='bilinear', - align_corners=False)[:, 0] - - result = dict() - if panoptic_on: - pan_results = self.panoptic_postprocess( - mask_cls_result, mask_pred_result) - result['pan_results'] = pan_results - - if instance_on: - ins_results = self.instance_postprocess( - mask_cls_result, mask_pred_result) - result['ins_results'] = ins_results - - if semantic_on: - sem_results = self.semantic_postprocess( - mask_cls_result, mask_pred_result) - result['sem_results'] = sem_results - - results.append(result) - - return results diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/__init__.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/__init__.py deleted file mode 100644 index e74ba89e8c2101360d921a5f8437da48d0250e9a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d -from .builder import build_linear_layer, build_transformer -from .ckpt_convert import pvt_convert -from .conv_upsample import ConvUpsample -from .csp_layer import CSPLayer -from .gaussian_target import gaussian_radius, gen_gaussian_target -from .inverted_residual import InvertedResidual -from .make_divisible import make_divisible -from .misc import interpolate_as, sigmoid_geometric_mean -from .normed_predictor import NormedConv2d, NormedLinear -from .panoptic_gt_processing import preprocess_panoptic_gt -from .point_sample import (get_uncertain_point_coords_with_randomness, - get_uncertainty) -from .positional_encoding import (LearnedPositionalEncoding, - SinePositionalEncoding) -from .res_layer import ResLayer, SimplifiedBasicBlock -from .se_layer import DyReLU, SELayer -from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer, - DynamicConv, PatchEmbed, Transformer, nchw_to_nlc, - nlc_to_nchw) - -__all__ = [ - 'ResLayer', 'gaussian_radius', 'gen_gaussian_target', - 'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer', - 'build_transformer', 'build_linear_layer', 'SinePositionalEncoding', - 'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock', - 'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual', - 'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer', - 'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', - 'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean', - 'preprocess_panoptic_gt', 'DyReLU', - 'get_uncertain_point_coords_with_randomness', 'get_uncertainty' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/brick_wrappers.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/brick_wrappers.py deleted file mode 100644 index fa0279ab60d0943bf68ea2616df9dad87e220db4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/brick_wrappers.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
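
brick_wrappers.py below guards adaptive average pooling against empty (zero-sample) batches on older PyTorch by returning an appropriately shaped empty tensor instead of calling the pooling kernel. The shape contract it preserves, sketched with a made-up empty input:

```
import torch

# Illustrative only: for a (0, C, H, W) input and target size (1, 1), the
# wrapper below returns an empty tensor of shape (0, C, 1, 1) rather than
# running the pooling op. This sketch just demonstrates that shape contract.
x = torch.empty(0, 256, 7, 7)
output_size = (1, 1)
empty_out = x.new_empty((*x.shape[:2], *output_size))
print(empty_out.shape)  # torch.Size([0, 256, 1, 1])
```
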
-import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn.bricks.wrappers import NewEmptyTensorOp, obsolete_torch_version - -if torch.__version__ == 'parrots': - TORCH_VERSION = torch.__version__ -else: - # torch.__version__ could be 1.3.1+cu92, we only need the first two - # for comparison - TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2]) - - -def adaptive_avg_pool2d(input, output_size): - """Handle empty batch dimension to adaptive_avg_pool2d. - - Args: - input (tensor): 4D tensor. - output_size (int, tuple[int,int]): the target output size. - """ - if input.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)): - if isinstance(output_size, int): - output_size = [output_size, output_size] - output_size = [*input.shape[:2], *output_size] - empty = NewEmptyTensorOp.apply(input, output_size) - return empty - else: - return F.adaptive_avg_pool2d(input, output_size) - - -class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d): - """Handle empty batch dimension to AdaptiveAvgPool2d.""" - - def forward(self, x): - # PyTorch 1.9 does not support empty tensor inference yet - if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)): - output_size = self.output_size - if isinstance(output_size, int): - output_size = [output_size, output_size] - else: - output_size = [ - v if v is not None else d - for v, d in zip(output_size, - x.size()[-2:]) - ] - output_size = [*x.shape[:2], *output_size] - empty = NewEmptyTensorOp.apply(x, output_size) - return empty - - return super().forward(x) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/builder.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/builder.py deleted file mode 100644 index 20fe7a6dcfcf242728dcd7b7639032006cc6c4e2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/builder.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -from mmcv.utils import Registry, build_from_cfg - -TRANSFORMER = Registry('Transformer') -LINEAR_LAYERS = Registry('linear layers') - - -def build_transformer(cfg, default_args=None): - """Builder for Transformer.""" - return build_from_cfg(cfg, TRANSFORMER, default_args) - - -LINEAR_LAYERS.register_module('Linear', module=nn.Linear) - - -def build_linear_layer(cfg, *args, **kwargs): - """Build linear layer. - Args: - cfg (None or dict): The linear layer config, which should contain: - - type (str): Layer type. - - layer args: Args needed to instantiate an linear layer. - args (argument list): Arguments passed to the `__init__` - method of the corresponding linear layer. - kwargs (keyword arguments): Keyword arguments passed to the `__init__` - method of the corresponding linear layer. - Returns: - nn.Module: Created linear layer. 
- """ - if cfg is None: - cfg_ = dict(type='Linear') - else: - if not isinstance(cfg, dict): - raise TypeError('cfg must be a dict') - if 'type' not in cfg: - raise KeyError('the cfg dict must contain the key "type"') - cfg_ = cfg.copy() - - layer_type = cfg_.pop('type') - if layer_type not in LINEAR_LAYERS: - raise KeyError(f'Unrecognized linear type {layer_type}') - else: - linear_layer = LINEAR_LAYERS.get(layer_type) - - layer = linear_layer(*args, **kwargs, **cfg_) - - return layer diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/ckpt_convert.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/ckpt_convert.py deleted file mode 100644 index 4d660c4e4ddbc289f6882333e5eec4360a17aaf2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/ckpt_convert.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -# This script consists of several convert functions which -# can modify the weights of model in original repo to be -# pre-trained weights. - -from collections import OrderedDict - -import torch - - -def pvt_convert(ckpt): - new_ckpt = OrderedDict() - # Process the concat between q linear weights and kv linear weights - use_abs_pos_embed = False - use_conv_ffn = False - for k in ckpt.keys(): - if k.startswith('pos_embed'): - use_abs_pos_embed = True - if k.find('dwconv') >= 0: - use_conv_ffn = True - for k, v in ckpt.items(): - if k.startswith('head'): - continue - if k.startswith('norm.'): - continue - if k.startswith('cls_token'): - continue - if k.startswith('pos_embed'): - stage_i = int(k.replace('pos_embed', '')) - new_k = k.replace(f'pos_embed{stage_i}', - f'layers.{stage_i - 1}.1.0.pos_embed') - if stage_i == 4 and v.size(1) == 50: # 1 (cls token) + 7 * 7 - new_v = v[:, 1:, :] # remove cls token - else: - new_v = v - elif k.startswith('patch_embed'): - stage_i = int(k.split('.')[0].replace('patch_embed', '')) - new_k = k.replace(f'patch_embed{stage_i}', - f'layers.{stage_i - 1}.0') - new_v = v - if 'proj.' in new_k: - new_k = new_k.replace('proj.', 'projection.') - elif k.startswith('block'): - stage_i = int(k.split('.')[0].replace('block', '')) - layer_i = int(k.split('.')[1]) - new_layer_i = layer_i + use_abs_pos_embed - new_k = k.replace(f'block{stage_i}.{layer_i}', - f'layers.{stage_i - 1}.1.{new_layer_i}') - new_v = v - if 'attn.q.' in new_k: - sub_item_k = k.replace('q.', 'kv.') - new_k = new_k.replace('q.', 'attn.in_proj_') - new_v = torch.cat([v, ckpt[sub_item_k]], dim=0) - elif 'attn.kv.' in new_k: - continue - elif 'attn.proj.' in new_k: - new_k = new_k.replace('proj.', 'attn.out_proj.') - elif 'attn.sr.' in new_k: - new_k = new_k.replace('sr.', 'sr.') - elif 'mlp.' 
in new_k: - string = f'{new_k}-' - new_k = new_k.replace('mlp.', 'ffn.layers.') - if 'fc1.weight' in new_k or 'fc2.weight' in new_k: - new_v = v.reshape((*v.shape, 1, 1)) - new_k = new_k.replace('fc1.', '0.') - new_k = new_k.replace('dwconv.dwconv.', '1.') - if use_conv_ffn: - new_k = new_k.replace('fc2.', '4.') - else: - new_k = new_k.replace('fc2.', '3.') - string += f'{new_k} {v.shape}-{new_v.shape}' - elif k.startswith('norm'): - stage_i = int(k[4]) - new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2') - new_v = v - else: - new_k = k - new_v = v - new_ckpt[new_k] = new_v - - return new_ckpt - - -def swin_converter(ckpt): - - new_ckpt = OrderedDict() - - def correct_unfold_reduction_order(x): - out_channel, in_channel = x.shape - x = x.reshape(out_channel, 4, in_channel // 4) - x = x[:, [0, 2, 1, 3], :].transpose(1, - 2).reshape(out_channel, in_channel) - return x - - def correct_unfold_norm_order(x): - in_channel = x.shape[0] - x = x.reshape(4, in_channel // 4) - x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) - return x - - for k, v in ckpt.items(): - if k.startswith('head'): - continue - elif k.startswith('layers'): - new_v = v - if 'attn.' in k: - new_k = k.replace('attn.', 'attn.w_msa.') - elif 'mlp.' in k: - if 'mlp.fc1.' in k: - new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.') - elif 'mlp.fc2.' in k: - new_k = k.replace('mlp.fc2.', 'ffn.layers.1.') - else: - new_k = k.replace('mlp.', 'ffn.') - elif 'downsample' in k: - new_k = k - if 'reduction.' in k: - new_v = correct_unfold_reduction_order(v) - elif 'norm.' in k: - new_v = correct_unfold_norm_order(v) - else: - new_k = k - new_k = new_k.replace('layers', 'stages', 1) - elif k.startswith('patch_embed'): - new_v = v - if 'proj' in k: - new_k = k.replace('proj', 'projection') - else: - new_k = k - else: - new_v = v - new_k = k - - new_ckpt['backbone.' + new_k] = new_v - - return new_ckpt diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/conv_upsample.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/conv_upsample.py deleted file mode 100644 index bb5ba7670a996af7debf5a33da955faa9fb1827a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/conv_upsample.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn.functional as F -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule, ModuleList - - -class ConvUpsample(BaseModule): - """ConvUpsample performs 2x upsampling after Conv. - - There are several `ConvModule` layers. In the first few layers, upsampling - will be applied after each layer of convolution. The number of upsampling - must be no more than the number of ConvModule layers. - - Args: - in_channels (int): Number of channels in the input feature map. - inner_channels (int): Number of channels produced by the convolution. - num_layers (int): Number of convolution layers. - num_upsample (int | optional): Number of upsampling layer. Must be no - more than num_layers. Upsampling will be applied after the first - ``num_upsample`` layers of convolution. Default: ``num_layers``. - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. Default: None. - init_cfg (dict): Config dict for initialization. Default: None. - kwargs (key word augments): Other augments used in ConvModule. 
- """ - - def __init__(self, - in_channels, - inner_channels, - num_layers=1, - num_upsample=None, - conv_cfg=None, - norm_cfg=None, - init_cfg=None, - **kwargs): - super(ConvUpsample, self).__init__(init_cfg) - if num_upsample is None: - num_upsample = num_layers - assert num_upsample <= num_layers, \ - f'num_upsample({num_upsample})must be no more than ' \ - f'num_layers({num_layers})' - self.num_layers = num_layers - self.num_upsample = num_upsample - self.conv = ModuleList() - for i in range(num_layers): - self.conv.append( - ConvModule( - in_channels, - inner_channels, - 3, - padding=1, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - in_channels = inner_channels - - def forward(self, x): - num_upsample = self.num_upsample - for i in range(self.num_layers): - x = self.conv[i](x) - if num_upsample > 0: - num_upsample -= 1 - x = F.interpolate( - x, scale_factor=2, mode='bilinear', align_corners=False) - return x diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/csp_layer.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/csp_layer.py deleted file mode 100644 index 5760b014f25219a4f1d547edc9dcebe618ada2c5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/csp_layer.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule -from mmcv.runner import BaseModule - - -class DarknetBottleneck(BaseModule): - """The basic bottleneck block used in Darknet. - - Each ResBlock consists of two ConvModules and the input is added to the - final output. Each ConvModule is composed of Conv, BN, and LeakyReLU. - The first convLayer has filter size of 1x1 and the second one has the - filter size of 3x3. - - Args: - in_channels (int): The input channels of this Module. - out_channels (int): The output channels of this Module. - expansion (int): The kernel size of the convolution. Default: 0.5 - add_identity (bool): Whether to add identity to the out. - Default: True - use_depthwise (bool): Whether to use depthwise separable convolution. - Default: False - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='Swish'). - """ - - def __init__(self, - in_channels, - out_channels, - expansion=0.5, - add_identity=True, - use_depthwise=False, - conv_cfg=None, - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='Swish'), - init_cfg=None): - super().__init__(init_cfg) - hidden_channels = int(out_channels * expansion) - conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule - self.conv1 = ConvModule( - in_channels, - hidden_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.conv2 = conv( - hidden_channels, - out_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.add_identity = \ - add_identity and in_channels == out_channels - - def forward(self, x): - identity = x - out = self.conv1(x) - out = self.conv2(out) - - if self.add_identity: - return out + identity - else: - return out - - -class CSPLayer(BaseModule): - """Cross Stage Partial Layer. - - Args: - in_channels (int): The input channels of the CSP layer. - out_channels (int): The output channels of the CSP layer. 
- expand_ratio (float): Ratio to adjust the number of channels of the - hidden layer. Default: 0.5 - num_blocks (int): Number of blocks. Default: 1 - add_identity (bool): Whether to add identity in blocks. - Default: True - use_depthwise (bool): Whether to depthwise separable convolution in - blocks. Default: False - conv_cfg (dict, optional): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN') - act_cfg (dict): Config dict for activation layer. - Default: dict(type='Swish') - """ - - def __init__(self, - in_channels, - out_channels, - expand_ratio=0.5, - num_blocks=1, - add_identity=True, - use_depthwise=False, - conv_cfg=None, - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='Swish'), - init_cfg=None): - super().__init__(init_cfg) - mid_channels = int(out_channels * expand_ratio) - self.main_conv = ConvModule( - in_channels, - mid_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.short_conv = ConvModule( - in_channels, - mid_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.final_conv = ConvModule( - 2 * mid_channels, - out_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - self.blocks = nn.Sequential(*[ - DarknetBottleneck( - mid_channels, - mid_channels, - 1.0, - add_identity, - use_depthwise, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) for _ in range(num_blocks) - ]) - - def forward(self, x): - x_short = self.short_conv(x) - - x_main = self.main_conv(x) - x_main = self.blocks(x_main) - - x_final = torch.cat((x_main, x_short), dim=1) - return self.final_conv(x_final) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/gaussian_target.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/gaussian_target.py deleted file mode 100644 index 9997d3b13a90eca2b302b170b09a445776eda1ee..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/gaussian_target.py +++ /dev/null @@ -1,268 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from math import sqrt - -import torch -import torch.nn.functional as F - - -def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'): - """Generate 2D gaussian kernel. - - Args: - radius (int): Radius of gaussian kernel. - sigma (int): Sigma of gaussian function. Default: 1. - dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32. - device (str): Device of gaussian tensor. Default: 'cpu'. - - Returns: - h (Tensor): Gaussian kernel with a - ``(2 * radius + 1) * (2 * radius + 1)`` shape. - """ - x = torch.arange( - -radius, radius + 1, dtype=dtype, device=device).view(1, -1) - y = torch.arange( - -radius, radius + 1, dtype=dtype, device=device).view(-1, 1) - - h = (-(x * x + y * y) / (2 * sigma * sigma)).exp() - - h[h < torch.finfo(h.dtype).eps * h.max()] = 0 - return h - - -def gen_gaussian_target(heatmap, center, radius, k=1): - """Generate 2D gaussian heatmap. - - Args: - heatmap (Tensor): Input heatmap, the gaussian kernel will cover on - it and maintain the max value. - center (list[int]): Coord of gaussian kernel's center. - radius (int): Radius of gaussian kernel. - k (int): Coefficient of gaussian kernel. Default: 1. - - Returns: - out_heatmap (Tensor): Updated heatmap covered by gaussian kernel. 
- """ - diameter = 2 * radius + 1 - gaussian_kernel = gaussian2D( - radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device) - - x, y = center - - height, width = heatmap.shape[:2] - - left, right = min(x, radius), min(width - x, radius + 1) - top, bottom = min(y, radius), min(height - y, radius + 1) - - masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] - masked_gaussian = gaussian_kernel[radius - top:radius + bottom, - radius - left:radius + right] - out_heatmap = heatmap - torch.max( - masked_heatmap, - masked_gaussian * k, - out=out_heatmap[y - top:y + bottom, x - left:x + right]) - - return out_heatmap - - -def gaussian_radius(det_size, min_overlap): - r"""Generate 2D gaussian radius. - - This function is modified from the `official github repo - `_. - - Given ``min_overlap``, radius could computed by a quadratic equation - according to Vieta's formulas. - - There are 3 cases for computing gaussian radius, details are following: - - - Explanation of figure: ``lt`` and ``br`` indicates the left-top and - bottom-right corner of ground truth box. ``x`` indicates the - generated corner at the limited position when ``radius=r``. - - - Case1: one corner is inside the gt box and the other is outside. - - .. code:: text - - |< width >| - - lt-+----------+ - - | | | ^ - +--x----------+--+ - | | | | - | | | | height - | | overlap | | - | | | | - | | | | v - +--+---------br--+ - - | | | - +----------+--x - - To ensure IoU of generated box and gt box is larger than ``min_overlap``: - - .. math:: - \cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad - {r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\ - {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} \\ - {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - - - Case2: both two corners are inside the gt box. - - .. code:: text - - |< width >| - - lt-+----------+ - - | | | ^ - +--x-------+ | - | | | | - | |overlap| | height - | | | | - | +-------x--+ - | | | v - +----------+-br - - - To ensure IoU of generated box and gt box is larger than ``min_overlap``: - - .. math:: - \cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad - {4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\ - {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} \\ - {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a} - - - Case3: both two corners are outside the gt box. - - .. code:: text - - |< width >| - - x--+----------------+ - | | | - +-lt-------------+ | - - | | | | ^ - | | | | - | | overlap | | height - | | | | - | | | | v - | +------------br--+ - - | | | - +----------------+--x - - To ensure IoU of generated box and gt box is larger than ``min_overlap``: - - .. math:: - \cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad - {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\ - {a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\ - {r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a} - - Args: - det_size (list[int]): Shape of object. - min_overlap (float): Min IoU with ground truth for boxes generated by - keypoints inside the gaussian kernel. - - Returns: - radius (int): Radius of gaussian kernel. 
- """ - height, width = det_size - - a1 = 1 - b1 = (height + width) - c1 = width * height * (1 - min_overlap) / (1 + min_overlap) - sq1 = sqrt(b1**2 - 4 * a1 * c1) - r1 = (b1 - sq1) / (2 * a1) - - a2 = 4 - b2 = 2 * (height + width) - c2 = (1 - min_overlap) * width * height - sq2 = sqrt(b2**2 - 4 * a2 * c2) - r2 = (b2 - sq2) / (2 * a2) - - a3 = 4 * min_overlap - b3 = -2 * min_overlap * (height + width) - c3 = (min_overlap - 1) * width * height - sq3 = sqrt(b3**2 - 4 * a3 * c3) - r3 = (b3 + sq3) / (2 * a3) - return min(r1, r2, r3) - - -def get_local_maximum(heat, kernel=3): - """Extract local maximum pixel with given kernel. - - Args: - heat (Tensor): Target heatmap. - kernel (int): Kernel size of max pooling. Default: 3. - - Returns: - heat (Tensor): A heatmap where local maximum pixels maintain its - own value and other positions are 0. - """ - pad = (kernel - 1) // 2 - hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad) - keep = (hmax == heat).float() - return heat * keep - - -def get_topk_from_heatmap(scores, k=20): - """Get top k positions from heatmap. - - Args: - scores (Tensor): Target heatmap with shape - [batch, num_classes, height, width]. - k (int): Target number. Default: 20. - - Returns: - tuple[torch.Tensor]: Scores, indexes, categories and coords of - topk keypoint. Containing following Tensors: - - - topk_scores (Tensor): Max scores of each topk keypoint. - - topk_inds (Tensor): Indexes of each topk keypoint. - - topk_clses (Tensor): Categories of each topk keypoint. - - topk_ys (Tensor): Y-coord of each topk keypoint. - - topk_xs (Tensor): X-coord of each topk keypoint. - """ - batch, _, height, width = scores.size() - topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k) - topk_clses = topk_inds // (height * width) - topk_inds = topk_inds % (height * width) - topk_ys = topk_inds // width - topk_xs = (topk_inds % width).int().float() - return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs - - -def gather_feat(feat, ind, mask=None): - """Gather feature according to index. - - Args: - feat (Tensor): Target feature map. - ind (Tensor): Target coord index. - mask (Tensor | None): Mask of feature map. Default: None. - - Returns: - feat (Tensor): Gathered feature. - """ - dim = feat.size(2) - ind = ind.unsqueeze(2).repeat(1, 1, dim) - feat = feat.gather(1, ind) - if mask is not None: - mask = mask.unsqueeze(2).expand_as(feat) - feat = feat[mask] - feat = feat.view(-1, dim) - return feat - - -def transpose_and_gather_feat(feat, ind): - """Transpose and gather feature according to index. - - Args: - feat (Tensor): Target feature map. - ind (Tensor): Target coord index. - - Returns: - feat (Tensor): Transposed and gathered feature. - """ - feat = feat.permute(0, 2, 3, 1).contiguous() - feat = feat.view(feat.size(0), -1, feat.size(3)) - feat = gather_feat(feat, ind) - return feat diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/inverted_residual.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/inverted_residual.py deleted file mode 100644 index 1f241ae3e433c4aba1496cf2038ae88e9ef395ef..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/inverted_residual.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import ConvModule -from mmcv.cnn.bricks import DropPath -from mmcv.runner import BaseModule - -from .se_layer import SELayer - - -class InvertedResidual(BaseModule): - """Inverted Residual Block. 
- - Args: - in_channels (int): The input channels of this Module. - out_channels (int): The output channels of this Module. - mid_channels (int): The input channels of the depthwise convolution. - kernel_size (int): The kernel size of the depthwise convolution. - Default: 3. - stride (int): The stride of the depthwise convolution. Default: 1. - se_cfg (dict): Config dict for se layer. Default: None, which means no - se layer. - with_expand_conv (bool): Use expand conv or not. If set False, - mid_channels must be the same with in_channels. - Default: True. - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU'). - drop_path_rate (float): stochastic depth rate. Defaults to 0. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - - Returns: - Tensor: The output tensor. - """ - - def __init__(self, - in_channels, - out_channels, - mid_channels, - kernel_size=3, - stride=1, - se_cfg=None, - with_expand_conv=True, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - drop_path_rate=0., - with_cp=False, - init_cfg=None): - super(InvertedResidual, self).__init__(init_cfg) - self.with_res_shortcut = (stride == 1 and in_channels == out_channels) - assert stride in [1, 2], f'stride must in [1, 2]. ' \ - f'But received {stride}.' - self.with_cp = with_cp - self.drop_path = DropPath( - drop_path_rate) if drop_path_rate > 0 else nn.Identity() - self.with_se = se_cfg is not None - self.with_expand_conv = with_expand_conv - - if self.with_se: - assert isinstance(se_cfg, dict) - if not self.with_expand_conv: - assert mid_channels == in_channels - - if self.with_expand_conv: - self.expand_conv = ConvModule( - in_channels=in_channels, - out_channels=mid_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.depthwise_conv = ConvModule( - in_channels=mid_channels, - out_channels=mid_channels, - kernel_size=kernel_size, - stride=stride, - padding=kernel_size // 2, - groups=mid_channels, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - if self.with_se: - self.se = SELayer(**se_cfg) - - self.linear_conv = ConvModule( - in_channels=mid_channels, - out_channels=out_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - def forward(self, x): - - def _inner_forward(x): - out = x - - if self.with_expand_conv: - out = self.expand_conv(out) - - out = self.depthwise_conv(out) - - if self.with_se: - out = self.se(out) - - out = self.linear_conv(out) - - if self.with_res_shortcut: - return x + self.drop_path(out) - else: - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/make_divisible.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/make_divisible.py deleted file mode 100644 index ed42c2eeea2a6aed03a0be5516b8d1ef1139e486..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/make_divisible.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
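A minimal shape check for the InvertedResidual block above, assuming it is importable from its original path; channel sizes and the SE config are illustrative:
```
import torch
from mmdet.models.utils.inverted_residual import InvertedResidual

block = InvertedResidual(
    in_channels=32, out_channels=32, mid_channels=128,
    kernel_size=3, stride=1, se_cfg=dict(channels=128, ratio=4))
x = torch.randn(1, 32, 56, 56)
print(block(x).shape)  # torch.Size([1, 32, 56, 56]); the residual shortcut applies since stride == 1
```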
-def make_divisible(value, divisor, min_value=None, min_ratio=0.9): - """Make divisible function. - - This function rounds the channel number to the nearest value that can be - divisible by the divisor. It is taken from the original tf repo. It ensures - that all layers have a channel number that is divisible by divisor. It can - be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa - - Args: - value (int): The original channel number. - divisor (int): The divisor to fully divide the channel number. - min_value (int): The minimum value of the output channel. - Default: None, means that the minimum value equal to the divisor. - min_ratio (float): The minimum ratio of the rounded channel number to - the original channel number. Default: 0.9. - - Returns: - int: The modified output channel number. - """ - - if min_value is None: - min_value = divisor - new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) - # Make sure that round down does not go down by more than (1-min_ratio). - if new_value < min_ratio * value: - new_value += divisor - return new_value diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/misc.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/misc.py deleted file mode 100644 index 8f9be9abb75f99a3db9b8f6e30dcdc09748c3952..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/misc.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from torch.autograd import Function -from torch.nn import functional as F - - -class SigmoidGeometricMean(Function): - """Forward and backward function of geometric mean of two sigmoid - functions. - - This implementation with analytical gradient function substitutes - the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The - original implementation incurs none during gradient backprapagation - if both x and y are very small values. - """ - - @staticmethod - def forward(ctx, x, y): - x_sigmoid = x.sigmoid() - y_sigmoid = y.sigmoid() - z = (x_sigmoid * y_sigmoid).sqrt() - ctx.save_for_backward(x_sigmoid, y_sigmoid, z) - return z - - @staticmethod - def backward(ctx, grad_output): - x_sigmoid, y_sigmoid, z = ctx.saved_tensors - grad_x = grad_output * z * (1 - x_sigmoid) / 2 - grad_y = grad_output * z * (1 - y_sigmoid) / 2 - return grad_x, grad_y - - -sigmoid_geometric_mean = SigmoidGeometricMean.apply - - -def interpolate_as(source, target, mode='bilinear', align_corners=False): - """Interpolate the `source` to the shape of the `target`. - - The `source` must be a Tensor, but the `target` can be a Tensor or a - np.ndarray with the shape (..., target_h, target_w). - - Args: - source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or - (N, C, H, W). - target (Tensor | np.ndarray): The interpolation target with the shape - (..., target_h, target_w). - mode (str): Algorithm used for interpolation. The options are the - same as those in F.interpolate(). Default: ``'bilinear'``. - align_corners (bool): The same as the argument in F.interpolate(). - - Returns: - Tensor: The interpolated source Tensor. 
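Worked values for make_divisible with divisor 8 (the last case shows the min_ratio guard adding the divisor back):
```
from mmdet.models.utils.make_divisible import make_divisible

make_divisible(32, 8)  # -> 32, already divisible
make_divisible(37, 8)  # -> 40, int(37 + 4) // 8 * 8 rounds to the nearest multiple
make_divisible(10, 8)  # -> 16, plain rounding gives 8, below 0.9 * 10, so one divisor is added back
```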
- """ - assert len(target.shape) >= 2 - - def _interpolate_as(source, target, mode='bilinear', align_corners=False): - """Interpolate the `source` (4D) to the shape of the `target`.""" - target_h, target_w = target.shape[-2:] - source_h, source_w = source.shape[-2:] - if target_h != source_h or target_w != source_w: - source = F.interpolate( - source, - size=(target_h, target_w), - mode=mode, - align_corners=align_corners) - return source - - if len(source.shape) == 3: - source = source[:, None, :, :] - source = _interpolate_as(source, target, mode, align_corners) - return source[:, 0, :, :] - else: - return _interpolate_as(source, target, mode, align_corners) diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/normed_predictor.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/normed_predictor.py deleted file mode 100644 index f0eeef7db0ca8af73c87a14f925bfa52edda0232..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/normed_predictor.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import CONV_LAYERS - -from .builder import LINEAR_LAYERS - - -@LINEAR_LAYERS.register_module(name='NormedLinear') -class NormedLinear(nn.Linear): - """Normalized Linear Layer. - - Args: - tempeature (float, optional): Tempeature term. Default to 20. - power (int, optional): Power term. Default to 1.0. - eps (float, optional): The minimal value of divisor to - keep numerical stability. Default to 1e-6. - """ - - def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, **kwargs): - super(NormedLinear, self).__init__(*args, **kwargs) - self.tempearture = tempearture - self.power = power - self.eps = eps - self.init_weights() - - def init_weights(self): - nn.init.normal_(self.weight, mean=0, std=0.01) - if self.bias is not None: - nn.init.constant_(self.bias, 0) - - def forward(self, x): - weight_ = self.weight / ( - self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps) - x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps) - x_ = x_ * self.tempearture - - return F.linear(x_, weight_, self.bias) - - -@CONV_LAYERS.register_module(name='NormedConv2d') -class NormedConv2d(nn.Conv2d): - """Normalized Conv2d Layer. - - Args: - tempeature (float, optional): Tempeature term. Default to 20. - power (int, optional): Power term. Default to 1.0. - eps (float, optional): The minimal value of divisor to - keep numerical stability. Default to 1e-6. - norm_over_kernel (bool, optional): Normalize over kernel. - Default to False. 
- """ - - def __init__(self, - *args, - tempearture=20, - power=1.0, - eps=1e-6, - norm_over_kernel=False, - **kwargs): - super(NormedConv2d, self).__init__(*args, **kwargs) - self.tempearture = tempearture - self.power = power - self.norm_over_kernel = norm_over_kernel - self.eps = eps - - def forward(self, x): - if not self.norm_over_kernel: - weight_ = self.weight / ( - self.weight.norm(dim=1, keepdim=True).pow(self.power) + - self.eps) - else: - weight_ = self.weight / ( - self.weight.view(self.weight.size(0), -1).norm( - dim=1, keepdim=True).pow(self.power)[..., None, None] + - self.eps) - x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps) - x_ = x_ * self.tempearture - - if hasattr(self, 'conv2d_forward'): - x_ = self.conv2d_forward(x_, weight_) - else: - if torch.__version__ >= '1.8': - x_ = self._conv_forward(x_, weight_, self.bias) - else: - x_ = self._conv_forward(x_, weight_) - return x_ diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/panoptic_gt_processing.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/panoptic_gt_processing.py deleted file mode 100644 index 7685ac96fb9750e5c3dd11aa13aa22d9fc7eeb2f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/panoptic_gt_processing.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - - -def preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg, num_things, - num_stuff, img_metas): - """Preprocess the ground truth for a image. - - Args: - gt_labels (Tensor): Ground truth labels of each bbox, - with shape (num_gts, ). - gt_masks (BitmapMasks): Ground truth masks of each instances - of a image, shape (num_gts, h, w). - gt_semantic_seg (Tensor | None): Ground truth of semantic - segmentation with the shape (1, h, w). - [0, num_thing_class - 1] means things, - [num_thing_class, num_class-1] means stuff, - 255 means VOID. It's None when training instance segmentation. - img_metas (dict): List of image meta information. - - Returns: - tuple: a tuple containing the following targets. - - - labels (Tensor): Ground truth class indices for a - image, with shape (n, ), n is the sum of number - of stuff type and number of instance in a image. - - masks (Tensor): Ground truth mask for a image, with - shape (n, h, w). Contains stuff and things when training - panoptic segmentation, and things only when training - instance segmentation. 
- """ - num_classes = num_things + num_stuff - - things_masks = gt_masks.pad(img_metas['pad_shape'][:2], pad_val=0)\ - .to_tensor(dtype=torch.bool, device=gt_labels.device) - - if gt_semantic_seg is None: - masks = things_masks.long() - return gt_labels, masks - - things_labels = gt_labels - gt_semantic_seg = gt_semantic_seg.squeeze(0) - - semantic_labels = torch.unique( - gt_semantic_seg, - sorted=False, - return_inverse=False, - return_counts=False) - stuff_masks_list = [] - stuff_labels_list = [] - for label in semantic_labels: - if label < num_things or label >= num_classes: - continue - stuff_mask = gt_semantic_seg == label - stuff_masks_list.append(stuff_mask) - stuff_labels_list.append(label) - - if len(stuff_masks_list) > 0: - stuff_masks = torch.stack(stuff_masks_list, dim=0) - stuff_labels = torch.stack(stuff_labels_list, dim=0) - labels = torch.cat([things_labels, stuff_labels], dim=0) - masks = torch.cat([things_masks, stuff_masks], dim=0) - else: - labels = things_labels - masks = things_masks - - masks = masks.long() - return labels, masks diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/point_sample.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/point_sample.py deleted file mode 100644 index c2c3cf91cc934987f57cf528d4a1763c0873e4b2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/point_sample.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from mmcv.ops import point_sample - - -def get_uncertainty(mask_pred, labels): - """Estimate uncertainty based on pred logits. - - We estimate uncertainty as L1 distance between 0.0 and the logits - prediction in 'mask_pred' for the foreground class in `classes`. - - Args: - mask_pred (Tensor): mask predication logits, shape (num_rois, - num_classes, mask_height, mask_width). - - labels (list[Tensor]): Either predicted or ground truth label for - each predicted mask, of length num_rois. - - Returns: - scores (Tensor): Uncertainty scores with the most uncertain - locations having the highest uncertainty score, - shape (num_rois, 1, mask_height, mask_width) - """ - if mask_pred.shape[1] == 1: - gt_class_logits = mask_pred.clone() - else: - inds = torch.arange(mask_pred.shape[0], device=mask_pred.device) - gt_class_logits = mask_pred[inds, labels].unsqueeze(1) - return -torch.abs(gt_class_logits) - - -def get_uncertain_point_coords_with_randomness(mask_pred, labels, num_points, - oversample_ratio, - importance_sample_ratio): - """Get ``num_points`` most uncertain points with random points during - train. - - Sample points in [0, 1] x [0, 1] coordinate space based on their - uncertainty. The uncertainties are calculated for each point using - 'get_uncertainty()' function that takes point's logit prediction as - input. - - Args: - mask_pred (Tensor): A tensor of shape (num_rois, num_classes, - mask_height, mask_width) for class-specific or class-agnostic - prediction. - labels (list): The ground truth class for each instance. - num_points (int): The number of points to sample. - oversample_ratio (int): Oversampling parameter. - importance_sample_ratio (float): Ratio of points that are sampled - via importnace sampling. - - Returns: - point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) - that contains the coordinates sampled points. 
- """ - assert oversample_ratio >= 1 - assert 0 <= importance_sample_ratio <= 1 - batch_size = mask_pred.shape[0] - num_sampled = int(num_points * oversample_ratio) - point_coords = torch.rand( - batch_size, num_sampled, 2, device=mask_pred.device) - point_logits = point_sample(mask_pred, point_coords) - # It is crucial to calculate uncertainty based on the sampled - # prediction value for the points. Calculating uncertainties of the - # coarse predictions first and sampling them for points leads to - # incorrect results. To illustrate this: assume uncertainty func( - # logits)=-abs(logits), a sampled point between two coarse - # predictions with -1 and 1 logits has 0 logits, and therefore 0 - # uncertainty value. However, if we calculate uncertainties for the - # coarse predictions first, both will have -1 uncertainty, - # and sampled point will get -1 uncertainty. - point_uncertainties = get_uncertainty(point_logits, labels) - num_uncertain_points = int(importance_sample_ratio * num_points) - num_random_points = num_points - num_uncertain_points - idx = torch.topk( - point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] - shift = num_sampled * torch.arange( - batch_size, dtype=torch.long, device=mask_pred.device) - idx += shift[:, None] - point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( - batch_size, num_uncertain_points, 2) - if num_random_points > 0: - rand_roi_coords = torch.rand( - batch_size, num_random_points, 2, device=mask_pred.device) - point_coords = torch.cat((point_coords, rand_roi_coords), dim=1) - return point_coords diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/positional_encoding.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/positional_encoding.py deleted file mode 100644 index dd29cd65606e9af1b91d422fb199d71532deeffe..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/positional_encoding.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import torch -import torch.nn as nn -from mmcv.cnn.bricks.transformer import POSITIONAL_ENCODING -from mmcv.runner import BaseModule - - -@POSITIONAL_ENCODING.register_module() -class SinePositionalEncoding(BaseModule): - """Position encoding with sine and cosine functions. - - See `End-to-End Object Detection with Transformers - `_ for details. - - Args: - num_feats (int): The feature dimension for each position - along x-axis or y-axis. Note the final returned dimension - for each position is 2 times of this value. - temperature (int, optional): The temperature used for scaling - the position embedding. Defaults to 10000. - normalize (bool, optional): Whether to normalize the position - embedding. Defaults to False. - scale (float, optional): A scale factor that scales the position - embedding. The scale will be used only when `normalize` is True. - Defaults to 2*pi. - eps (float, optional): A value added to the denominator for - numerical stability. Defaults to 1e-6. - offset (float): offset add to embed when do the normalization. - Defaults to 0. - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - num_feats, - temperature=10000, - normalize=False, - scale=2 * math.pi, - eps=1e-6, - offset=0., - init_cfg=None): - super(SinePositionalEncoding, self).__init__(init_cfg) - if normalize: - assert isinstance(scale, (float, int)), 'when normalize is set,' \ - 'scale should be provided and in float or int type, ' \ - f'found {type(scale)}' - self.num_feats = num_feats - self.temperature = temperature - self.normalize = normalize - self.scale = scale - self.eps = eps - self.offset = offset - - def forward(self, mask): - """Forward function for `SinePositionalEncoding`. - - Args: - mask (Tensor): ByteTensor mask. Non-zero values representing - ignored positions, while zero values means valid positions - for this image. Shape [bs, h, w]. - - Returns: - pos (Tensor): Returned position embedding with shape - [bs, num_feats*2, h, w]. - """ - # For convenience of exporting to ONNX, it's required to convert - # `masks` from bool to int. - mask = mask.to(torch.int) - not_mask = 1 - mask # logical_not - y_embed = not_mask.cumsum(1, dtype=torch.float32) - x_embed = not_mask.cumsum(2, dtype=torch.float32) - if self.normalize: - y_embed = (y_embed + self.offset) / \ - (y_embed[:, -1:, :] + self.eps) * self.scale - x_embed = (x_embed + self.offset) / \ - (x_embed[:, :, -1:] + self.eps) * self.scale - dim_t = torch.arange( - self.num_feats, dtype=torch.float32, device=mask.device) - dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats) - pos_x = x_embed[:, :, :, None] / dim_t - pos_y = y_embed[:, :, :, None] / dim_t - # use `view` instead of `flatten` for dynamically exporting to ONNX - B, H, W = mask.size() - pos_x = torch.stack( - (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), - dim=4).view(B, H, W, -1) - pos_y = torch.stack( - (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), - dim=4).view(B, H, W, -1) - pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) - return pos - - def __repr__(self): - """str: a string that describes the module""" - repr_str = self.__class__.__name__ - repr_str += f'(num_feats={self.num_feats}, ' - repr_str += f'temperature={self.temperature}, ' - repr_str += f'normalize={self.normalize}, ' - repr_str += f'scale={self.scale}, ' - repr_str += f'eps={self.eps})' - return repr_str - - -@POSITIONAL_ENCODING.register_module() -class LearnedPositionalEncoding(BaseModule): - """Position embedding with learnable embedding weights. - - Args: - num_feats (int): The feature dimension for each position - along x-axis or y-axis. The final returned dimension for - each position is 2 times of this value. - row_num_embed (int, optional): The dictionary size of row embeddings. - Default 50. - col_num_embed (int, optional): The dictionary size of col embeddings. - Default 50. - init_cfg (dict or list[dict], optional): Initialization config dict. - """ - - def __init__(self, - num_feats, - row_num_embed=50, - col_num_embed=50, - init_cfg=dict(type='Uniform', layer='Embedding')): - super(LearnedPositionalEncoding, self).__init__(init_cfg) - self.row_embed = nn.Embedding(row_num_embed, num_feats) - self.col_embed = nn.Embedding(col_num_embed, num_feats) - self.num_feats = num_feats - self.row_num_embed = row_num_embed - self.col_num_embed = col_num_embed - - def forward(self, mask): - """Forward function for `LearnedPositionalEncoding`. - - Args: - mask (Tensor): ByteTensor mask. Non-zero values representing - ignored positions, while zero values means valid positions - for this image. Shape [bs, h, w]. 
- - Returns: - pos (Tensor): Returned position embedding with shape - [bs, num_feats*2, h, w]. - """ - h, w = mask.shape[-2:] - x = torch.arange(w, device=mask.device) - y = torch.arange(h, device=mask.device) - x_embed = self.col_embed(x) - y_embed = self.row_embed(y) - pos = torch.cat( - (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat( - 1, w, 1)), - dim=-1).permute(2, 0, - 1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1) - return pos - - def __repr__(self): - """str: a string that describes the module""" - repr_str = self.__class__.__name__ - repr_str += f'(num_feats={self.num_feats}, ' - repr_str += f'row_num_embed={self.row_num_embed}, ' - repr_str += f'col_num_embed={self.col_num_embed})' - return repr_str diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/res_layer.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/res_layer.py deleted file mode 100644 index 5c3e89fb035d197cb82173e90659dac89ff07fab..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/res_layer.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmcv.cnn import build_conv_layer, build_norm_layer -from mmcv.runner import BaseModule, Sequential -from torch import nn as nn - - -class ResLayer(Sequential): - """ResLayer to build ResNet style backbone. - - Args: - block (nn.Module): block used to build ResLayer. - inplanes (int): inplanes of block. - planes (int): planes of block. - num_blocks (int): number of blocks. - stride (int): stride of the first block. Default: 1 - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. Default: False - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - downsample_first (bool): Downsample at the first block or last block. - False for Hourglass, True for ResNet. 
Default: True - """ - - def __init__(self, - block, - inplanes, - planes, - num_blocks, - stride=1, - avg_down=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - downsample_first=True, - **kwargs): - self.block = block - - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = [] - conv_stride = stride - if avg_down: - conv_stride = 1 - downsample.append( - nn.AvgPool2d( - kernel_size=stride, - stride=stride, - ceil_mode=True, - count_include_pad=False)) - downsample.extend([ - build_conv_layer( - conv_cfg, - inplanes, - planes * block.expansion, - kernel_size=1, - stride=conv_stride, - bias=False), - build_norm_layer(norm_cfg, planes * block.expansion)[1] - ]) - downsample = nn.Sequential(*downsample) - - layers = [] - if downsample_first: - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - inplanes = planes * block.expansion - for _ in range(1, num_blocks): - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - - else: # downsample_first=False is for HourglassModule - for _ in range(num_blocks - 1): - layers.append( - block( - inplanes=inplanes, - planes=inplanes, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - **kwargs)) - super(ResLayer, self).__init__(*layers) - - -class SimplifiedBasicBlock(BaseModule): - """Simplified version of original basic residual block. This is used in - `SCNet `_. - - - Norm layer is now optional - - Last ReLU in forward function is removed - """ - expansion = 1 - - def __init__(self, - inplanes, - planes, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - dcn=None, - plugins=None, - init_fg=None): - super(SimplifiedBasicBlock, self).__init__(init_fg) - assert dcn is None, 'Not implemented yet.' - assert plugins is None, 'Not implemented yet.' - assert not with_cp, 'Not implemented yet.' 
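A minimal sketch of ResLayer assembling a ResNet stage, assuming mmdet's standard Bottleneck block is available at its usual path; the channel choices are illustrative:
```
import torch
from mmdet.models.backbones.resnet import Bottleneck
from mmdet.models.utils.res_layer import ResLayer

layer = ResLayer(Bottleneck, inplanes=256, planes=128, num_blocks=4, stride=2)
x = torch.randn(1, 256, 56, 56)
print(layer(x).shape)  # torch.Size([1, 512, 28, 28]); Bottleneck.expansion == 4
```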
- self.with_norm = norm_cfg is not None - with_bias = True if norm_cfg is None else False - self.conv1 = build_conv_layer( - conv_cfg, - inplanes, - planes, - 3, - stride=stride, - padding=dilation, - dilation=dilation, - bias=with_bias) - if self.with_norm: - self.norm1_name, norm1 = build_norm_layer( - norm_cfg, planes, postfix=1) - self.add_module(self.norm1_name, norm1) - self.conv2 = build_conv_layer( - conv_cfg, planes, planes, 3, padding=1, bias=with_bias) - if self.with_norm: - self.norm2_name, norm2 = build_norm_layer( - norm_cfg, planes, postfix=2) - self.add_module(self.norm2_name, norm2) - - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - self.dilation = dilation - self.with_cp = with_cp - - @property - def norm1(self): - """nn.Module: normalization layer after the first convolution layer""" - return getattr(self, self.norm1_name) if self.with_norm else None - - @property - def norm2(self): - """nn.Module: normalization layer after the second convolution layer""" - return getattr(self, self.norm2_name) if self.with_norm else None - - def forward(self, x): - """Forward function.""" - - identity = x - - out = self.conv1(x) - if self.with_norm: - out = self.norm1(out) - out = self.relu(out) - - out = self.conv2(out) - if self.with_norm: - out = self.norm2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/se_layer.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/se_layer.py deleted file mode 100644 index a2492103b1559df3b6d3a06811ba829621ad0cae..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/se_layer.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmcv.runner import BaseModule - - -class SELayer(BaseModule): - """Squeeze-and-Excitation Module. - - Args: - channels (int): The input (and output) channels of the SE layer. - ratio (int): Squeeze ratio in SELayer, the intermediate channel will be - ``int(channels/ratio)``. Default: 16. - conv_cfg (None or dict): Config dict for convolution layer. - Default: None, which means using conv2d. - act_cfg (dict or Sequence[dict]): Config dict for activation layer. - If act_cfg is a dict, two activation layers will be configurated - by this dict. If act_cfg is a sequence of dicts, the first - activation layer will be configurated by the first dict and the - second activation layer will be configurated by the second dict. - Default: (dict(type='ReLU'), dict(type='Sigmoid')) - init_cfg (dict or list[dict], optional): Initialization config dict. 
- Default: None - """ - - def __init__(self, - channels, - ratio=16, - conv_cfg=None, - act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')), - init_cfg=None): - super(SELayer, self).__init__(init_cfg) - if isinstance(act_cfg, dict): - act_cfg = (act_cfg, act_cfg) - assert len(act_cfg) == 2 - assert mmcv.is_tuple_of(act_cfg, dict) - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - self.conv1 = ConvModule( - in_channels=channels, - out_channels=int(channels / ratio), - kernel_size=1, - stride=1, - conv_cfg=conv_cfg, - act_cfg=act_cfg[0]) - self.conv2 = ConvModule( - in_channels=int(channels / ratio), - out_channels=channels, - kernel_size=1, - stride=1, - conv_cfg=conv_cfg, - act_cfg=act_cfg[1]) - - def forward(self, x): - out = self.global_avgpool(x) - out = self.conv1(out) - out = self.conv2(out) - return x * out - - -class DyReLU(BaseModule): - """Dynamic ReLU (DyReLU) module. - - See `Dynamic ReLU `_ for details. - Current implementation is specialized for task-aware attention in DyHead. - HSigmoid arguments in default act_cfg follow DyHead official code. - https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py - - Args: - channels (int): The input (and output) channels of DyReLU module. - ratio (int): Squeeze ratio in Squeeze-and-Excitation-like module, - the intermediate channel will be ``int(channels/ratio)``. - Default: 4. - conv_cfg (None or dict): Config dict for convolution layer. - Default: None, which means using conv2d. - act_cfg (dict or Sequence[dict]): Config dict for activation layer. - If act_cfg is a dict, two activation layers will be configurated - by this dict. If act_cfg is a sequence of dicts, the first - activation layer will be configurated by the first dict and the - second activation layer will be configurated by the second dict. - Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, - divisor=6.0)) - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - """ - - def __init__(self, - channels, - ratio=4, - conv_cfg=None, - act_cfg=(dict(type='ReLU'), - dict(type='HSigmoid', bias=3.0, divisor=6.0)), - init_cfg=None): - super().__init__(init_cfg=init_cfg) - if isinstance(act_cfg, dict): - act_cfg = (act_cfg, act_cfg) - assert len(act_cfg) == 2 - assert mmcv.is_tuple_of(act_cfg, dict) - self.channels = channels - self.expansion = 4 # for a1, b1, a2, b2 - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - self.conv1 = ConvModule( - in_channels=channels, - out_channels=int(channels / ratio), - kernel_size=1, - stride=1, - conv_cfg=conv_cfg, - act_cfg=act_cfg[0]) - self.conv2 = ConvModule( - in_channels=int(channels / ratio), - out_channels=channels * self.expansion, - kernel_size=1, - stride=1, - conv_cfg=conv_cfg, - act_cfg=act_cfg[1]) - - def forward(self, x): - """Forward function.""" - coeffs = self.global_avgpool(x) - coeffs = self.conv1(coeffs) - coeffs = self.conv2(coeffs) - 0.5 # value range: [-0.5, 0.5] - a1, b1, a2, b2 = torch.split(coeffs, self.channels, dim=1) - a1 = a1 * 2.0 + 1.0 # [-1.0, 1.0] + 1.0 - a2 = a2 * 2.0 # [-1.0, 1.0] - out = torch.max(x * a1 + b1, x * a2 + b2) - return out diff --git a/cv/detection/co-detr/pytorch/mmdet/models/utils/transformer.py b/cv/detection/co-detr/pytorch/mmdet/models/utils/transformer.py deleted file mode 100644 index 16c955f937d1b1067b327e0159d66d47a4961979..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/models/utils/transformer.py +++ /dev/null @@ -1,1163 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import math -import warnings -from typing import Sequence - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import (build_activation_layer, build_conv_layer, - build_norm_layer, xavier_init) -from mmcv.cnn.bricks.registry import (TRANSFORMER_LAYER, - TRANSFORMER_LAYER_SEQUENCE) -from mmcv.cnn.bricks.transformer import (BaseTransformerLayer, - TransformerLayerSequence, - build_transformer_layer_sequence) -from mmcv.runner.base_module import BaseModule -from mmcv.utils import to_2tuple -from torch.nn.init import normal_ - -from mmdet.models.utils.builder import TRANSFORMER - -try: - from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention - -except ImportError: - warnings.warn( - '`MultiScaleDeformableAttention` in MMCV has been moved to ' - '`mmcv.ops.multi_scale_deform_attn`, please update your MMCV') - from mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention - -import fairscale -from fairscale.nn.checkpoint import checkpoint_wrapper - -def nlc_to_nchw(x, hw_shape): - """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. - - Args: - x (Tensor): The input tensor of shape [N, L, C] before conversion. - hw_shape (Sequence[int]): The height and width of output feature map. - - Returns: - Tensor: The output tensor of shape [N, C, H, W] after conversion. - """ - H, W = hw_shape - assert len(x.shape) == 3 - B, L, C = x.shape - assert L == H * W, 'The seq_len does not match H, W' - return x.transpose(1, 2).reshape(B, C, H, W).contiguous() - - -def nchw_to_nlc(x): - """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. - - Args: - x (Tensor): The input tensor of shape [N, C, H, W] before conversion. - - Returns: - Tensor: The output tensor of shape [N, L, C] after conversion. - """ - assert len(x.shape) == 4 - return x.flatten(2).transpose(1, 2).contiguous() - - -class AdaptivePadding(nn.Module): - """Applies padding to input (if needed) so that input can get fully covered - by filter you specified. It support two modes "same" and "corner". The - "same" mode is same with "SAME" padding mode in TensorFlow, pad zero around - input. The "corner" mode would pad zero to bottom right. - - Args: - kernel_size (int | tuple): Size of the kernel: - stride (int | tuple): Stride of the filter. Default: 1: - dilation (int | tuple): Spacing between kernel elements. - Default: 1 - padding (str): Support "same" and "corner", "corner" mode - would pad zero to bottom right, and "same" mode would - pad zero around input. Default: "corner". 
- Example: - >>> kernel_size = 16 - >>> stride = 16 - >>> dilation = 1 - >>> input = torch.rand(1, 1, 15, 17) - >>> adap_pad = AdaptivePadding( - >>> kernel_size=kernel_size, - >>> stride=stride, - >>> dilation=dilation, - >>> padding="corner") - >>> out = adap_pad(input) - >>> assert (out.shape[2], out.shape[3]) == (16, 32) - >>> input = torch.rand(1, 1, 16, 17) - >>> out = adap_pad(input) - >>> assert (out.shape[2], out.shape[3]) == (16, 32) - """ - - def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'): - - super(AdaptivePadding, self).__init__() - - assert padding in ('same', 'corner') - - kernel_size = to_2tuple(kernel_size) - stride = to_2tuple(stride) - padding = to_2tuple(padding) - dilation = to_2tuple(dilation) - - self.padding = padding - self.kernel_size = kernel_size - self.stride = stride - self.dilation = dilation - - def get_pad_shape(self, input_shape): - input_h, input_w = input_shape - kernel_h, kernel_w = self.kernel_size - stride_h, stride_w = self.stride - output_h = math.ceil(input_h / stride_h) - output_w = math.ceil(input_w / stride_w) - pad_h = max((output_h - 1) * stride_h + - (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0) - pad_w = max((output_w - 1) * stride_w + - (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0) - return pad_h, pad_w - - def forward(self, x): - pad_h, pad_w = self.get_pad_shape(x.size()[-2:]) - if pad_h > 0 or pad_w > 0: - if self.padding == 'corner': - x = F.pad(x, [0, pad_w, 0, pad_h]) - elif self.padding == 'same': - x = F.pad(x, [ - pad_w // 2, pad_w - pad_w // 2, pad_h // 2, - pad_h - pad_h // 2 - ]) - return x - - -class PatchEmbed(BaseModule): - """Image to Patch Embedding. - - We use a conv layer to implement PatchEmbed. - - Args: - in_channels (int): The num of input channels. Default: 3 - embed_dims (int): The dimensions of embedding. Default: 768 - conv_type (str): The config dict for embedding - conv layer type selection. Default: "Conv2d. - kernel_size (int): The kernel_size of embedding conv. Default: 16. - stride (int): The slide stride of embedding conv. - Default: None (Would be set as `kernel_size`). - padding (int | tuple | string ): The padding length of - embedding conv. When it is a string, it means the mode - of adaptive padding, support "same" and "corner" now. - Default: "corner". - dilation (int): The dilation rate of embedding conv. Default: 1. - bias (bool): Bias of embed conv. Default: True. - norm_cfg (dict, optional): Config dict for normalization layer. - Default: None. - input_size (int | tuple | None): The size of input, which will be - used to calculate the out size. Only work when `dynamic_size` - is False. Default: None. - init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. - Default: None. 
- """ - - def __init__( - self, - in_channels=3, - embed_dims=768, - conv_type='Conv2d', - kernel_size=16, - stride=16, - padding='corner', - dilation=1, - bias=True, - norm_cfg=None, - input_size=None, - init_cfg=None, - ): - super(PatchEmbed, self).__init__(init_cfg=init_cfg) - - self.embed_dims = embed_dims - if stride is None: - stride = kernel_size - - kernel_size = to_2tuple(kernel_size) - stride = to_2tuple(stride) - dilation = to_2tuple(dilation) - - if isinstance(padding, str): - self.adap_padding = AdaptivePadding( - kernel_size=kernel_size, - stride=stride, - dilation=dilation, - padding=padding) - # disable the padding of conv - padding = 0 - else: - self.adap_padding = None - padding = to_2tuple(padding) - - self.projection = build_conv_layer( - dict(type=conv_type), - in_channels=in_channels, - out_channels=embed_dims, - kernel_size=kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - bias=bias) - - if norm_cfg is not None: - self.norm = build_norm_layer(norm_cfg, embed_dims)[1] - else: - self.norm = None - - if input_size: - input_size = to_2tuple(input_size) - # `init_out_size` would be used outside to - # calculate the num_patches - # when `use_abs_pos_embed` outside - self.init_input_size = input_size - if self.adap_padding: - pad_h, pad_w = self.adap_padding.get_pad_shape(input_size) - input_h, input_w = input_size - input_h = input_h + pad_h - input_w = input_w + pad_w - input_size = (input_h, input_w) - - # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html - h_out = (input_size[0] + 2 * padding[0] - dilation[0] * - (kernel_size[0] - 1) - 1) // stride[0] + 1 - w_out = (input_size[1] + 2 * padding[1] - dilation[1] * - (kernel_size[1] - 1) - 1) // stride[1] + 1 - self.init_out_size = (h_out, w_out) - else: - self.init_input_size = None - self.init_out_size = None - - def forward(self, x): - """ - Args: - x (Tensor): Has shape (B, C, H, W). In most case, C is 3. - - Returns: - tuple: Contains merged results and its spatial shape. - - - x (Tensor): Has shape (B, out_h * out_w, embed_dims) - - out_size (tuple[int]): Spatial shape of x, arrange as - (out_h, out_w). - """ - - if self.adap_padding: - x = self.adap_padding(x) - - x = self.projection(x) - out_size = (x.shape[2], x.shape[3]) - x = x.flatten(2).transpose(1, 2) - if self.norm is not None: - x = self.norm(x) - return x, out_size - - -class PatchMerging(BaseModule): - """Merge patch feature map. - - This layer groups feature map by kernel_size, and applies norm and linear - layers to the grouped feature map. Our implementation uses `nn.Unfold` to - merge patch, which is about 25% faster than original implementation. - Instead, we need to modify pretrained models for compatibility. - - Args: - in_channels (int): The num of input channels. - to gets fully covered by filter and stride you specified.. - Default: True. - out_channels (int): The num of output channels. - kernel_size (int | tuple, optional): the kernel size in the unfold - layer. Defaults to 2. - stride (int | tuple, optional): the stride of the sliding blocks in the - unfold layer. Default: None. (Would be set as `kernel_size`) - padding (int | tuple | string ): The padding length of - embedding conv. When it is a string, it means the mode - of adaptive padding, support "same" and "corner" now. - Default: "corner". - dilation (int | tuple, optional): dilation parameter in the unfold - layer. Default: 1. - bias (bool, optional): Whether to add bias in linear layer or not. - Defaults: False. 
- norm_cfg (dict, optional): Config dict for normalization layer. - Default: dict(type='LN'). - init_cfg (dict, optional): The extra config for initialization. - Default: None. - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size=2, - stride=None, - padding='corner', - dilation=1, - bias=False, - norm_cfg=dict(type='LN'), - init_cfg=None): - super().__init__(init_cfg=init_cfg) - self.in_channels = in_channels - self.out_channels = out_channels - if stride: - stride = stride - else: - stride = kernel_size - - kernel_size = to_2tuple(kernel_size) - stride = to_2tuple(stride) - dilation = to_2tuple(dilation) - - if isinstance(padding, str): - self.adap_padding = AdaptivePadding( - kernel_size=kernel_size, - stride=stride, - dilation=dilation, - padding=padding) - # disable the padding of unfold - padding = 0 - else: - self.adap_padding = None - - padding = to_2tuple(padding) - self.sampler = nn.Unfold( - kernel_size=kernel_size, - dilation=dilation, - padding=padding, - stride=stride) - - sample_dim = kernel_size[0] * kernel_size[1] * in_channels - - if norm_cfg is not None: - self.norm = build_norm_layer(norm_cfg, sample_dim)[1] - else: - self.norm = None - - self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) - - def forward(self, x, input_size): - """ - Args: - x (Tensor): Has shape (B, H*W, C_in). - input_size (tuple[int]): The spatial shape of x, arrange as (H, W). - Default: None. - - Returns: - tuple: Contains merged results and its spatial shape. - - - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) - - out_size (tuple[int]): Spatial shape of x, arrange as - (Merged_H, Merged_W). - """ - B, L, C = x.shape - assert isinstance(input_size, Sequence), f'Expect ' \ - f'input_size is ' \ - f'`Sequence` ' \ - f'but get {input_size}' - - H, W = input_size - assert L == H * W, 'input feature has wrong size' - - x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W - # Use nn.Unfold to merge patch. About 25% faster than original method, - # but need to modify pretrained model for compatibility - - if self.adap_padding: - x = self.adap_padding(x) - H, W = x.shape[-2:] - - x = self.sampler(x) - # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) - - out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * - (self.sampler.kernel_size[0] - 1) - - 1) // self.sampler.stride[0] + 1 - out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * - (self.sampler.kernel_size[1] - 1) - - 1) // self.sampler.stride[1] + 1 - - output_size = (out_h, out_w) - x = x.transpose(1, 2) # B, H/2*W/2, 4*C - x = self.norm(x) if self.norm else x - x = self.reduction(x) - return x, output_size - - -def inverse_sigmoid(x, eps=1e-5): - """Inverse function of sigmoid. - - Args: - x (Tensor): The tensor to do the - inverse. - eps (float): EPS avoid numerical - overflow. Defaults 1e-5. - Returns: - Tensor: The x has passed the inverse - function of sigmoid, has same - shape with input. - """ - x = x.clamp(min=0, max=1) - x1 = x.clamp(min=eps) - x2 = (1 - x).clamp(min=eps) - return torch.log(x1 / x2) - - -@TRANSFORMER_LAYER.register_module() -class DetrTransformerDecoderLayer(BaseTransformerLayer): - """Implements decoder layer in DETR transformer. - - Args: - attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): - Configs for self_attention or cross_attention, the order - should be consistent with it in `operation_order`. If it is - a dict, it would be expand to the number of attention in - `operation_order`. 
- feedforward_channels (int): The hidden dimension for FFNs. - ffn_dropout (float): Probability of an element to be zeroed - in ffn. Default 0.0. - operation_order (tuple[str]): The execution order of operation - in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). - Default:None - act_cfg (dict): The activation config for FFNs. Default: `LN` - norm_cfg (dict): Config dict for normalization layer. - Default: `LN`. - ffn_num_fcs (int): The number of fully-connected layers in FFNs. - Default:2. - """ - - def __init__(self, - attn_cfgs, - feedforward_channels, - ffn_dropout=0.0, - operation_order=None, - act_cfg=dict(type='ReLU', inplace=True), - norm_cfg=dict(type='LN'), - ffn_num_fcs=2, - **kwargs): - super(DetrTransformerDecoderLayer, self).__init__( - attn_cfgs=attn_cfgs, - feedforward_channels=feedforward_channels, - ffn_dropout=ffn_dropout, - operation_order=operation_order, - act_cfg=act_cfg, - norm_cfg=norm_cfg, - ffn_num_fcs=ffn_num_fcs, - **kwargs) - assert len(operation_order) == 6 - assert set(operation_order) == set( - ['self_attn', 'norm', 'cross_attn', 'ffn']) - - -@TRANSFORMER_LAYER_SEQUENCE.register_module() -class DetrTransformerEncoder(TransformerLayerSequence): - """TransformerEncoder of DETR. - - Args: - post_norm_cfg (dict): Config of last normalization layer. Default: - `LN`. Only used when `self.pre_norm` is `True` - """ - - def __init__(self, *args, post_norm_cfg=dict(type='LN'), with_cp=-1, **kwargs): - super(DetrTransformerEncoder, self).__init__(*args, **kwargs) - if post_norm_cfg is not None: - self.post_norm = build_norm_layer( - post_norm_cfg, self.embed_dims)[1] if self.pre_norm else None - else: - assert not self.pre_norm, f'Use prenorm in ' \ - f'{self.__class__.__name__},' \ - f'Please specify post_norm_cfg' - self.post_norm = None - self.with_cp = with_cp - if self.with_cp > 0: - for i in range(self.with_cp): - self.layers[i] = checkpoint_wrapper(self.layers[i]) - - - -@TRANSFORMER_LAYER_SEQUENCE.register_module() -class DetrTransformerDecoder(TransformerLayerSequence): - """Implements the decoder in DETR transformer. - - Args: - return_intermediate (bool): Whether to return intermediate outputs. - post_norm_cfg (dict): Config of last normalization layer. Default: - `LN`. - """ - - def __init__(self, - *args, - post_norm_cfg=dict(type='LN'), - return_intermediate=False, - **kwargs): - - super(DetrTransformerDecoder, self).__init__(*args, **kwargs) - self.return_intermediate = return_intermediate - if post_norm_cfg is not None: - self.post_norm = build_norm_layer(post_norm_cfg, - self.embed_dims)[1] - else: - self.post_norm = None - - def forward(self, query, *args, **kwargs): - """Forward function for `TransformerDecoder`. - - Args: - query (Tensor): Input query with shape - `(num_query, bs, embed_dims)`. - - Returns: - Tensor: Results with shape [1, num_query, bs, embed_dims] when - return_intermediate is `False`, otherwise it has shape - [num_layers, num_query, bs, embed_dims]. - """ - if not self.return_intermediate: - x = super().forward(query, *args, **kwargs) - if self.post_norm: - x = self.post_norm(x)[None] - return x - - intermediate = [] - for layer in self.layers: - query = layer(query, *args, **kwargs) - if self.return_intermediate: - if self.post_norm is not None: - intermediate.append(self.post_norm(query)) - else: - intermediate.append(query) - return torch.stack(intermediate) - - -@TRANSFORMER.register_module() -class Transformer(BaseModule): - """Implements the DETR transformer. 
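For orientation, a sketch of how the encoder/decoder classes above are typically wired together in a config dict; the layer counts and dimensions below are the common DETR defaults, shown only for illustration:
```
transformer = dict(
    type='Transformer',
    encoder=dict(
        type='DetrTransformerEncoder',
        num_layers=6,
        transformerlayers=dict(
            type='BaseTransformerLayer',
            attn_cfgs=[dict(type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1)],
            feedforward_channels=2048,
            ffn_dropout=0.1,
            operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
    decoder=dict(
        type='DetrTransformerDecoder',
        return_intermediate=True,
        num_layers=6,
        transformerlayers=dict(
            type='DetrTransformerDecoderLayer',
            attn_cfgs=dict(type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1),
            feedforward_channels=2048,
            ffn_dropout=0.1,
            operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm'))))
```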
- - Following the official DETR implementation, this module copy-paste - from torch.nn.Transformer with modifications: - - * positional encodings are passed in MultiheadAttention - * extra LN at the end of encoder is removed - * decoder returns a stack of activations from all decoding layers - - See `paper: End-to-End Object Detection with Transformers - `_ for details. - - Args: - encoder (`mmcv.ConfigDict` | Dict): Config of - TransformerEncoder. Defaults to None. - decoder ((`mmcv.ConfigDict` | Dict)): Config of - TransformerDecoder. Defaults to None - init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. - Defaults to None. - """ - - def __init__(self, encoder=None, decoder=None, init_cfg=None): - super(Transformer, self).__init__(init_cfg=init_cfg) - self.encoder = build_transformer_layer_sequence(encoder) - self.decoder = build_transformer_layer_sequence(decoder) - self.embed_dims = self.encoder.embed_dims - - def init_weights(self): - # follow the official DETR to init parameters - for m in self.modules(): - if hasattr(m, 'weight') and m.weight.dim() > 1: - xavier_init(m, distribution='uniform') - self._is_init = True - - def forward(self, x, mask, query_embed, pos_embed): - """Forward function for `Transformer`. - - Args: - x (Tensor): Input query with shape [bs, c, h, w] where - c = embed_dims. - mask (Tensor): The key_padding_mask used for encoder and decoder, - with shape [bs, h, w]. - query_embed (Tensor): The query embedding for decoder, with shape - [num_query, c]. - pos_embed (Tensor): The positional encoding for encoder and - decoder, with the same shape as `x`. - - Returns: - tuple[Tensor]: results of decoder containing the following tensor. - - - out_dec: Output from decoder. If return_intermediate_dec \ - is True output has shape [num_dec_layers, bs, - num_query, embed_dims], else has shape [1, bs, \ - num_query, embed_dims]. - - memory: Output results from encoder, with shape \ - [bs, embed_dims, h, w]. - """ - bs, c, h, w = x.shape - # use `view` instead of `flatten` for dynamically exporting to ONNX - x = x.view(bs, c, -1).permute(2, 0, 1) # [bs, c, h, w] -> [h*w, bs, c] - pos_embed = pos_embed.view(bs, c, -1).permute(2, 0, 1) - query_embed = query_embed.unsqueeze(1).repeat( - 1, bs, 1) # [num_query, dim] -> [num_query, bs, dim] - mask = mask.view(bs, -1) # [bs, h, w] -> [bs, h*w] - memory = self.encoder( - query=x, - key=None, - value=None, - query_pos=pos_embed, - query_key_padding_mask=mask) - target = torch.zeros_like(query_embed) - # out_dec: [num_layers, num_query, bs, dim] - out_dec = self.decoder( - query=target, - key=memory, - value=memory, - key_pos=pos_embed, - query_pos=query_embed, - key_padding_mask=mask) - out_dec = out_dec.transpose(1, 2) - memory = memory.permute(1, 2, 0).reshape(bs, c, h, w) - return out_dec, memory - - -@TRANSFORMER_LAYER_SEQUENCE.register_module() -class DeformableDetrTransformerDecoder(TransformerLayerSequence): - """Implements the decoder in DETR transformer. - - Args: - return_intermediate (bool): Whether to return intermediate outputs. - coder_norm_cfg (dict): Config of last normalization layer. Default: - `LN`. - """ - - def __init__(self, *args, return_intermediate=False, **kwargs): - - super(DeformableDetrTransformerDecoder, self).__init__(*args, **kwargs) - self.return_intermediate = return_intermediate - - def forward(self, - query, - *args, - reference_points=None, - valid_ratios=None, - reg_branches=None, - **kwargs): - """Forward function for `TransformerDecoder`. 
- - Args: - query (Tensor): Input query with shape - `(num_query, bs, embed_dims)`. - reference_points (Tensor): The reference - points of offset. has shape - (bs, num_query, 4) when as_two_stage, - otherwise has shape ((bs, num_query, 2). - valid_ratios (Tensor): The radios of valid - points on the feature map, has shape - (bs, num_levels, 2) - reg_branch: (obj:`nn.ModuleList`): Used for - refining the regression results. Only would - be passed when with_box_refine is True, - otherwise would be passed a `None`. - - Returns: - Tensor: Results with shape [1, num_query, bs, embed_dims] when - return_intermediate is `False`, otherwise it has shape - [num_layers, num_query, bs, embed_dims]. - """ - output = query - intermediate = [] - intermediate_reference_points = [] - for lid, layer in enumerate(self.layers): - if reference_points.shape[-1] == 4: - reference_points_input = reference_points[:, :, None] * \ - torch.cat([valid_ratios, valid_ratios], -1)[:, None] - else: - assert reference_points.shape[-1] == 2 - reference_points_input = reference_points[:, :, None] * \ - valid_ratios[:, None] - output = layer( - output, - *args, - reference_points=reference_points_input, - **kwargs) - output = output.permute(1, 0, 2) - - if reg_branches is not None: - tmp = reg_branches[lid](output) - if reference_points.shape[-1] == 4: - new_reference_points = tmp + inverse_sigmoid( - reference_points) - new_reference_points = new_reference_points.sigmoid() - else: - assert reference_points.shape[-1] == 2 - new_reference_points = tmp - new_reference_points[..., :2] = tmp[ - ..., :2] + inverse_sigmoid(reference_points) - new_reference_points = new_reference_points.sigmoid() - reference_points = new_reference_points.detach() - - output = output.permute(1, 0, 2) - if self.return_intermediate: - intermediate.append(output) - intermediate_reference_points.append(reference_points) - - if self.return_intermediate: - return torch.stack(intermediate), torch.stack( - intermediate_reference_points) - - return output, reference_points - - -@TRANSFORMER.register_module() -class DeformableDetrTransformer(Transformer): - """Implements the DeformableDETR transformer. - - Args: - as_two_stage (bool): Generate query from encoder features. - Default: False. - num_feature_levels (int): Number of feature maps from FPN: - Default: 4. - two_stage_num_proposals (int): Number of proposals when set - `as_two_stage` as True. Default: 300. 
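The iterative refinement step in the deformable decoder above works in unconstrained (logit) space: the regression branch's raw offsets are added to `inverse_sigmoid(reference_points)`, squashed back with `sigmoid()`, and detached so gradients do not flow through the reference chain. A minimal, self-contained sketch of that update for the 4-d case (the clamped `inverse_sigmoid` helper and the tensor names here are illustrative, not the mmdet API):

```python
import torch

def inverse_sigmoid(x, eps=1e-5):
    # numerically stable logit: log(x / (1 - x)) with clamping
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)

bs, num_query = 2, 5
reference_points = torch.rand(bs, num_query, 4)  # normalized (cx, cy, w, h)
reg_delta = torch.randn(bs, num_query, 4) * 0.1  # raw output of a reg branch

# refine in logit space, squash back to (0, 1), cut the gradient path
new_reference_points = (reg_delta + inverse_sigmoid(reference_points)).sigmoid()
reference_points = new_reference_points.detach()
print(reference_points.shape)  # torch.Size([2, 5, 4])
```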
- """ - - def __init__(self, - as_two_stage=False, - num_feature_levels=4, - two_stage_num_proposals=300, - **kwargs): - super(DeformableDetrTransformer, self).__init__(**kwargs) - self.as_two_stage = as_two_stage - self.num_feature_levels = num_feature_levels - self.two_stage_num_proposals = two_stage_num_proposals - self.embed_dims = self.encoder.embed_dims - self.init_layers() - - def init_layers(self): - """Initialize layers of the DeformableDetrTransformer.""" - self.level_embeds = nn.Parameter( - torch.Tensor(self.num_feature_levels, self.embed_dims)) - - if self.as_two_stage: - self.enc_output = nn.Linear(self.embed_dims, self.embed_dims) - self.enc_output_norm = nn.LayerNorm(self.embed_dims) - self.pos_trans = nn.Linear(self.embed_dims * 2, - self.embed_dims * 2) - self.pos_trans_norm = nn.LayerNorm(self.embed_dims * 2) - else: - self.reference_points = nn.Linear(self.embed_dims, 2) - - def init_weights(self): - """Initialize the transformer weights.""" - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - for m in self.modules(): - if isinstance(m, MultiScaleDeformableAttention): - m.init_weights() - if not self.as_two_stage: - xavier_init(self.reference_points, distribution='uniform', bias=0.) - normal_(self.level_embeds) - - def gen_encoder_output_proposals(self, memory, memory_padding_mask, - spatial_shapes): - """Generate proposals from encoded memory. - - Args: - memory (Tensor) : The output of encoder, - has shape (bs, num_key, embed_dim). num_key is - equal the number of points on feature map from - all level. - memory_padding_mask (Tensor): Padding mask for memory. - has shape (bs, num_key). - spatial_shapes (Tensor): The shape of all feature maps. - has shape (num_level, 2). - - Returns: - tuple: A tuple of feature map and bbox prediction. - - - output_memory (Tensor): The input of decoder, \ - has shape (bs, num_key, embed_dim). num_key is \ - equal the number of points on feature map from \ - all levels. - - output_proposals (Tensor): The normalized proposal \ - after a inverse sigmoid, has shape \ - (bs, num_keys, 4). 
- """ - - N, S, C = memory.shape - proposals = [] - _cur = 0 - for lvl, (H, W) in enumerate(spatial_shapes): - mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H * W)].view( - N, H, W, 1) - valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1) - valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1) - - grid_y, grid_x = torch.meshgrid( - torch.linspace( - 0, H - 1, H, dtype=torch.float32, device=memory.device), - torch.linspace( - 0, W - 1, W, dtype=torch.float32, device=memory.device)) - grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) - - scale = torch.cat([valid_W.unsqueeze(-1), - valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2) - grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale - wh = torch.ones_like(grid) * 0.05 * (2.0**lvl) - proposal = torch.cat((grid, wh), -1).view(N, -1, 4) - proposals.append(proposal) - _cur += (H * W) - output_proposals = torch.cat(proposals, 1) - output_proposals_valid = ((output_proposals > 0.01) & - (output_proposals < 0.99)).all( - -1, keepdim=True) - output_proposals = torch.log(output_proposals / (1 - output_proposals)) - output_proposals = output_proposals.masked_fill( - memory_padding_mask.unsqueeze(-1), float('inf')) - output_proposals = output_proposals.masked_fill( - ~output_proposals_valid, float('inf')) - - output_memory = memory - output_memory = output_memory.masked_fill( - memory_padding_mask.unsqueeze(-1), float(0)) - output_memory = output_memory.masked_fill(~output_proposals_valid, - float(0)) - output_memory = self.enc_output_norm(self.enc_output(output_memory)) - return output_memory, output_proposals - - @staticmethod - def get_reference_points(spatial_shapes, valid_ratios, device): - """Get the reference points used in decoder. - - Args: - spatial_shapes (Tensor): The shape of all - feature maps, has shape (num_level, 2). - valid_ratios (Tensor): The radios of valid - points on the feature map, has shape - (bs, num_levels, 2) - device (obj:`device`): The device where - reference_points should be. - - Returns: - Tensor: reference points used in decoder, has \ - shape (bs, num_keys, num_levels, 2). 
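`gen_encoder_output_proposals` above builds anchor-like boxes per level: cell centers from a grid, a width/height of `0.05 * 2**lvl`, a validity filter to the (0.01, 0.99) range, and an inverse-sigmoid transform so the boxes live in the same unconstrained space as the regression deltas. A standalone sketch for a single level, following the constants in the code above (this is not the mmdet function itself, and it normalizes by the full H/W rather than the valid extent for brevity):

```python
import torch

N, H, W, lvl = 2, 4, 6, 1  # batch, feature-map height/width, pyramid level

# normalized cell centers, x varying fastest (row-major flattening)
ys = (torch.arange(H, dtype=torch.float32) + 0.5) / H
xs = (torch.arange(W, dtype=torch.float32) + 0.5) / W
grid = torch.stack([xs[None, :].expand(H, W), ys[:, None].expand(H, W)], -1)
centers = grid.unsqueeze(0).expand(N, -1, -1, -1)          # (N, H, W, 2)

wh = torch.ones_like(centers) * 0.05 * (2.0 ** lvl)        # level-dependent size
proposals = torch.cat([centers, wh], -1).view(N, -1, 4)    # (N, H*W, 4)

valid = ((proposals > 0.01) & (proposals < 0.99)).all(-1, keepdim=True)
# move to unconstrained (inverse-sigmoid) space; invalid boxes masked to +inf
proposals = torch.log(proposals / (1 - proposals))
proposals = proposals.masked_fill(~valid, float('inf'))
print(proposals.shape)  # torch.Size([2, 24, 4])
```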
- """ - reference_points_list = [] - for lvl, (H, W) in enumerate(spatial_shapes): - # TODO check this 0.5 - ref_y, ref_x = torch.meshgrid( - torch.linspace( - 0.5, H - 0.5, H, dtype=torch.float32, device=device), - torch.linspace( - 0.5, W - 0.5, W, dtype=torch.float32, device=device)) - ref_y = ref_y.reshape(-1)[None] / ( - valid_ratios[:, None, lvl, 1] * H) - ref_x = ref_x.reshape(-1)[None] / ( - valid_ratios[:, None, lvl, 0] * W) - ref = torch.stack((ref_x, ref_y), -1) - reference_points_list.append(ref) - reference_points = torch.cat(reference_points_list, 1) - reference_points = reference_points[:, :, None] * valid_ratios[:, None] - return reference_points - - def get_valid_ratio(self, mask): - """Get the valid radios of feature maps of all level.""" - _, H, W = mask.shape - valid_H = torch.sum(~mask[:, :, 0], 1) - valid_W = torch.sum(~mask[:, 0, :], 1) - valid_ratio_h = valid_H.float() / H - valid_ratio_w = valid_W.float() / W - valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) - return valid_ratio - - def get_proposal_pos_embed(self, - proposals, - num_pos_feats=128, - temperature=10000): - """Get the position embedding of proposal.""" - scale = 2 * math.pi - dim_t = torch.arange( - num_pos_feats, dtype=torch.float32, device=proposals.device) - dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats) - # N, L, 4 - proposals = proposals.sigmoid() * scale - # N, L, 4, 128 - pos = proposals[:, :, :, None] / dim_t - # N, L, 4, 64, 2 - pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), - dim=4).flatten(2) - return pos - - def forward(self, - mlvl_feats, - mlvl_masks, - query_embed, - mlvl_pos_embeds, - reg_branches=None, - cls_branches=None, - **kwargs): - """Forward function for `Transformer`. - - Args: - mlvl_feats (list(Tensor)): Input queries from - different level. Each element has shape - [bs, embed_dims, h, w]. - mlvl_masks (list(Tensor)): The key_padding_mask from - different level used for encoder and decoder, - each element has shape [bs, h, w]. - query_embed (Tensor): The query embedding for decoder, - with shape [num_query, c]. - mlvl_pos_embeds (list(Tensor)): The positional encoding - of feats from different level, has the shape - [bs, embed_dims, h, w]. - reg_branches (obj:`nn.ModuleList`): Regression heads for - feature maps from each decoder layer. Only would - be passed when - `with_box_refine` is True. Default to None. - cls_branches (obj:`nn.ModuleList`): Classification heads - for feature maps from each decoder layer. Only would - be passed when `as_two_stage` - is True. Default to None. - - - Returns: - tuple[Tensor]: results of decoder containing the following tensor. - - - inter_states: Outputs from decoder. If - return_intermediate_dec is True output has shape \ - (num_dec_layers, bs, num_query, embed_dims), else has \ - shape (1, bs, num_query, embed_dims). - - init_reference_out: The initial value of reference \ - points, has shape (bs, num_queries, 4). - - inter_references_out: The internal value of reference \ - points in decoder, has shape \ - (num_dec_layers, bs,num_query, embed_dims) - - enc_outputs_class: The classification score of \ - proposals generated from \ - encoder's feature maps, has shape \ - (batch, h*w, num_classes). \ - Only would be returned when `as_two_stage` is True, \ - otherwise None. - - enc_outputs_coord_unact: The regression results \ - generated from encoder's feature maps., has shape \ - (batch, h*w, 4). Only would \ - be returned when `as_two_stage` is True, \ - otherwise None. 
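`get_valid_ratio` and `get_reference_points` above cooperate: the padding mask tells how much of each feature map is real image, and the per-level reference grid is divided by that valid fraction so points stay inside the un-padded region. A small sketch of the two computations together on a toy mask for a single level (illustrative shapes only):

```python
import torch

bs, H, W = 2, 4, 6
mask = torch.zeros(bs, H, W, dtype=torch.bool)
mask[:, 3:, :] = True   # pretend the last row is padding
mask[:, :, 5:] = True   # ... and the last column

# valid ratio: fraction of non-padded columns / rows, stored as (w, h)
valid_H = (~mask[:, :, 0]).sum(1).float()
valid_W = (~mask[:, 0, :]).sum(1).float()
valid_ratio = torch.stack([valid_W / W, valid_H / H], -1)   # (bs, 2)

# reference points: pixel centers normalized by the *valid* extent
ref_y = (torch.arange(H, dtype=torch.float32) + 0.5).repeat_interleave(W)
ref_x = (torch.arange(W, dtype=torch.float32) + 0.5).repeat(H)
ref = torch.stack([ref_x[None] / (valid_ratio[:, None, 0] * W),
                   ref_y[None] / (valid_ratio[:, None, 1] * H)], -1)
print(valid_ratio)   # tensor([[0.8333, 0.7500], [0.8333, 0.7500]])
print(ref.shape)     # torch.Size([2, 24, 2])
```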
- """ - assert self.as_two_stage or query_embed is not None - - feat_flatten = [] - mask_flatten = [] - lvl_pos_embed_flatten = [] - spatial_shapes = [] - for lvl, (feat, mask, pos_embed) in enumerate( - zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): - bs, c, h, w = feat.shape - spatial_shape = (h, w) - spatial_shapes.append(spatial_shape) - feat = feat.flatten(2).transpose(1, 2) - mask = mask.flatten(1) - pos_embed = pos_embed.flatten(2).transpose(1, 2) - lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1) - lvl_pos_embed_flatten.append(lvl_pos_embed) - feat_flatten.append(feat) - mask_flatten.append(mask) - feat_flatten = torch.cat(feat_flatten, 1) - mask_flatten = torch.cat(mask_flatten, 1) - lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) - spatial_shapes = torch.as_tensor( - spatial_shapes, dtype=torch.long, device=feat_flatten.device) - level_start_index = torch.cat((spatial_shapes.new_zeros( - (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) - valid_ratios = torch.stack( - [self.get_valid_ratio(m) for m in mlvl_masks], 1) - - reference_points = \ - self.get_reference_points(spatial_shapes, - valid_ratios, - device=feat.device) - - feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) - lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute( - 1, 0, 2) # (H*W, bs, embed_dims) - memory = self.encoder( - query=feat_flatten, - key=None, - value=None, - query_pos=lvl_pos_embed_flatten, - query_key_padding_mask=mask_flatten, - spatial_shapes=spatial_shapes, - reference_points=reference_points, - level_start_index=level_start_index, - valid_ratios=valid_ratios, - **kwargs) - - memory = memory.permute(1, 0, 2) - bs, _, c = memory.shape - if self.as_two_stage: - output_memory, output_proposals = \ - self.gen_encoder_output_proposals( - memory, mask_flatten, spatial_shapes) - enc_outputs_class = cls_branches[self.decoder.num_layers]( - output_memory) - enc_outputs_coord_unact = \ - reg_branches[ - self.decoder.num_layers](output_memory) + output_proposals - - topk = self.two_stage_num_proposals - # We only use the first channel in enc_outputs_class as foreground, - # the other (num_classes - 1) channels are actually not used. - # Its targets are set to be 0s, which indicates the first - # class (foreground) because we use [0, num_classes - 1] to - # indicate class labels, background class is indicated by - # num_classes (similar convention in RPN). - # See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/deformable_detr_head.py#L241 # noqa - # This follows the official implementation of Deformable DETR. 
- topk_proposals = torch.topk( - enc_outputs_class[..., 0], topk, dim=1)[1] - topk_coords_unact = torch.gather( - enc_outputs_coord_unact, 1, - topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) - topk_coords_unact = topk_coords_unact.detach() - reference_points = topk_coords_unact.sigmoid() - init_reference_out = reference_points - pos_trans_out = self.pos_trans_norm( - self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))) - query_pos, query = torch.split(pos_trans_out, c, dim=2) - else: - query_pos, query = torch.split(query_embed, c, dim=1) - query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) - query = query.unsqueeze(0).expand(bs, -1, -1) - reference_points = self.reference_points(query_pos).sigmoid() - init_reference_out = reference_points - - # decoder - query = query.permute(1, 0, 2) - memory = memory.permute(1, 0, 2) - query_pos = query_pos.permute(1, 0, 2) - inter_states, inter_references = self.decoder( - query=query, - key=None, - value=memory, - query_pos=query_pos, - key_padding_mask=mask_flatten, - reference_points=reference_points, - spatial_shapes=spatial_shapes, - level_start_index=level_start_index, - valid_ratios=valid_ratios, - reg_branches=reg_branches, - **kwargs) - - inter_references_out = inter_references - if self.as_two_stage: - return inter_states, init_reference_out,\ - inter_references_out, enc_outputs_class,\ - enc_outputs_coord_unact - return inter_states, init_reference_out, \ - inter_references_out, None, None - - -@TRANSFORMER.register_module() -class DynamicConv(BaseModule): - """Implements Dynamic Convolution. - - This module generate parameters for each sample and - use bmm to implement 1*1 convolution. Code is modified - from the `official github repo `_ . - - Args: - in_channels (int): The input feature channel. - Defaults to 256. - feat_channels (int): The inner feature channel. - Defaults to 64. - out_channels (int, optional): The output feature channel. - When not specified, it will be set to `in_channels` - by default - input_feat_shape (int): The shape of input feature. - Defaults to 7. - with_proj (bool): Project two-dimentional feature to - one-dimentional feature. Default to True. - act_cfg (dict): The activation config for DynamicConv. - norm_cfg (dict): Config dict for normalization layer. Default - layer normalization. - init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. - Default: None. 
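In the two-stage branch shown a little earlier, decoder queries are not learned embeddings but the top-scoring encoder locations: the first class channel acts as an objectness score, `torch.topk` picks `two_stage_num_proposals` positions, and `torch.gather` pulls out the matching unactivated boxes, which become the initial reference points after a sigmoid. A toy-shaped sketch of just that selection (tensor names are illustrative):

```python
import torch

bs, num_tokens, num_classes, topk = 2, 100, 80, 10
enc_outputs_class = torch.randn(bs, num_tokens, num_classes)
enc_outputs_coord_unact = torch.randn(bs, num_tokens, 4)  # boxes before sigmoid

# channel 0 is treated as a binary foreground score
topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]  # (bs, topk)
topk_coords_unact = torch.gather(
    enc_outputs_coord_unact, 1,
    topk_proposals.unsqueeze(-1).repeat(1, 1, 4))                        # (bs, topk, 4)

# detach: the decoder refines these, but no gradient flows back this way
reference_points = topk_coords_unact.detach().sigmoid()
print(reference_points.shape)  # torch.Size([2, 10, 4])
```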
- """ - - def __init__(self, - in_channels=256, - feat_channels=64, - out_channels=None, - input_feat_shape=7, - with_proj=True, - act_cfg=dict(type='ReLU', inplace=True), - norm_cfg=dict(type='LN'), - init_cfg=None): - super(DynamicConv, self).__init__(init_cfg) - self.in_channels = in_channels - self.feat_channels = feat_channels - self.out_channels_raw = out_channels - self.input_feat_shape = input_feat_shape - self.with_proj = with_proj - self.act_cfg = act_cfg - self.norm_cfg = norm_cfg - self.out_channels = out_channels if out_channels else in_channels - - self.num_params_in = self.in_channels * self.feat_channels - self.num_params_out = self.out_channels * self.feat_channels - self.dynamic_layer = nn.Linear( - self.in_channels, self.num_params_in + self.num_params_out) - - self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1] - self.norm_out = build_norm_layer(norm_cfg, self.out_channels)[1] - - self.activation = build_activation_layer(act_cfg) - - num_output = self.out_channels * input_feat_shape**2 - if self.with_proj: - self.fc_layer = nn.Linear(num_output, self.out_channels) - self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1] - - def forward(self, param_feature, input_feature): - """Forward function for `DynamicConv`. - - Args: - param_feature (Tensor): The feature can be used - to generate the parameter, has shape - (num_all_proposals, in_channels). - input_feature (Tensor): Feature that - interact with parameters, has shape - (num_all_proposals, in_channels, H, W). - - Returns: - Tensor: The output feature has shape - (num_all_proposals, out_channels). - """ - input_feature = input_feature.flatten(2).permute(2, 0, 1) - - input_feature = input_feature.permute(1, 0, 2) - parameters = self.dynamic_layer(param_feature) - - param_in = parameters[:, :self.num_params_in].view( - -1, self.in_channels, self.feat_channels) - param_out = parameters[:, -self.num_params_out:].view( - -1, self.feat_channels, self.out_channels) - - # input_feature has shape (num_all_proposals, H*W, in_channels) - # param_in has shape (num_all_proposals, in_channels, feat_channels) - # feature has shape (num_all_proposals, H*W, feat_channels) - features = torch.bmm(input_feature, param_in) - features = self.norm_in(features) - features = self.activation(features) - - # param_out has shape (batch_size, feat_channels, out_channels) - features = torch.bmm(features, param_out) - features = self.norm_out(features) - features = self.activation(features) - - if self.with_proj: - features = features.flatten(1) - features = self.fc_layer(features) - features = self.fc_norm(features) - features = self.activation(features) - - return features diff --git a/cv/detection/co-detr/pytorch/mmdet/utils/__init__.py b/cv/detection/co-detr/pytorch/mmdet/utils/__init__.py deleted file mode 100644 index f57acb5f030dc6a984204ff57a7375ceb28e42f2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/utils/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
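`DynamicConv.forward` in the file above implements a per-proposal 1x1 convolution as two batched matrix multiplications: one linear layer on the proposal feature emits both weight matrices, and `torch.bmm` applies them to the flattened RoI feature with LayerNorm and ReLU in between. A stripped-down sketch of the same computation with fixed toy sizes and plain `nn` modules in place of the mmcv builders:

```python
import torch
import torch.nn as nn

num_proposals, in_c, feat_c, out_c, H = 3, 256, 64, 256, 7

dynamic_layer = nn.Linear(in_c, in_c * feat_c + feat_c * out_c)
norm_in, norm_out = nn.LayerNorm(feat_c), nn.LayerNorm(out_c)
act = nn.ReLU(inplace=True)

param_feature = torch.randn(num_proposals, in_c)        # one vector per proposal
input_feature = torch.randn(num_proposals, in_c, H, H)  # RoI features

x = input_feature.flatten(2).permute(0, 2, 1)           # (N, H*W, in_c)
params = dynamic_layer(param_feature)
param_in = params[:, :in_c * feat_c].view(-1, in_c, feat_c)
param_out = params[:, -feat_c * out_c:].view(-1, feat_c, out_c)

x = act(norm_in(torch.bmm(x, param_in)))                # (N, H*W, feat_c)
x = act(norm_out(torch.bmm(x, param_out)))              # (N, H*W, out_c)
print(x.shape)  # torch.Size([3, 49, 256])
```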
-from .collect_env import collect_env -from .compat_config import compat_cfg -from .logger import get_caller_name, get_root_logger, log_img_scale -from .memory import AvoidCUDAOOM, AvoidOOM -from .misc import find_latest_checkpoint, update_data_root -from .replace_cfg_vals import replace_cfg_vals -from .setup_env import setup_multi_processes -from .split_batch import split_batch -from .util_distribution import build_ddp, build_dp, get_device - -__all__ = [ - 'get_root_logger', 'collect_env', 'find_latest_checkpoint', - 'update_data_root', 'setup_multi_processes', 'get_caller_name', - 'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp', - 'get_device', 'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM' -] diff --git a/cv/detection/co-detr/pytorch/mmdet/utils/collect_env.py b/cv/detection/co-detr/pytorch/mmdet/utils/collect_env.py deleted file mode 100644 index 97e25c0e95394dcced4b9ddd25df7a16758886d5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/utils/collect_env.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmcv.utils import collect_env as collect_base_env -from mmcv.utils import get_git_hash - -import mmdet - - -def collect_env(): - """Collect the information of the running environments.""" - env_info = collect_base_env() - env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7] - return env_info - - -if __name__ == '__main__': - for name, val in collect_env().items(): - print(f'{name}: {val}') diff --git a/cv/detection/co-detr/pytorch/mmdet/utils/compat_config.py b/cv/detection/co-detr/pytorch/mmdet/utils/compat_config.py deleted file mode 100644 index 05aa37dcd6f74dd1884069e90edf39684c897798..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/utils/compat_config.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import warnings - -from mmcv import ConfigDict - - -def compat_cfg(cfg): - """This function would modify some filed to keep the compatibility of - config. - - For example, it will move some args which will be deprecated to the correct - fields. - """ - cfg = copy.deepcopy(cfg) - cfg = compat_imgs_per_gpu(cfg) - cfg = compat_loader_args(cfg) - cfg = compat_runner_args(cfg) - return cfg - - -def compat_runner_args(cfg): - if 'runner' not in cfg: - cfg.runner = ConfigDict({ - 'type': 'EpochBasedRunner', - 'max_epochs': cfg.total_epochs - }) - warnings.warn( - 'config is now expected to have a `runner` section, ' - 'please set `runner` in your config.', UserWarning) - else: - if 'total_epochs' in cfg: - assert cfg.total_epochs == cfg.runner.max_epochs - return cfg - - -def compat_imgs_per_gpu(cfg): - cfg = copy.deepcopy(cfg) - if 'imgs_per_gpu' in cfg.data: - warnings.warn('"imgs_per_gpu" is deprecated in MMDet V2.0. 
' - 'Please use "samples_per_gpu" instead') - if 'samples_per_gpu' in cfg.data: - warnings.warn( - f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and ' - f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"' - f'={cfg.data.imgs_per_gpu} is used in this experiments') - else: - warnings.warn('Automatically set "samples_per_gpu"="imgs_per_gpu"=' - f'{cfg.data.imgs_per_gpu} in this experiments') - cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu - return cfg - - -def compat_loader_args(cfg): - """Deprecated sample_per_gpu in cfg.data.""" - - cfg = copy.deepcopy(cfg) - if 'train_dataloader' not in cfg.data: - cfg.data['train_dataloader'] = ConfigDict() - if 'val_dataloader' not in cfg.data: - cfg.data['val_dataloader'] = ConfigDict() - if 'test_dataloader' not in cfg.data: - cfg.data['test_dataloader'] = ConfigDict() - - # special process for train_dataloader - if 'samples_per_gpu' in cfg.data: - - samples_per_gpu = cfg.data.pop('samples_per_gpu') - assert 'samples_per_gpu' not in \ - cfg.data.train_dataloader, ('`samples_per_gpu` are set ' - 'in `data` field and ` ' - 'data.train_dataloader` ' - 'at the same time. ' - 'Please only set it in ' - '`data.train_dataloader`. ') - cfg.data.train_dataloader['samples_per_gpu'] = samples_per_gpu - - if 'persistent_workers' in cfg.data: - - persistent_workers = cfg.data.pop('persistent_workers') - assert 'persistent_workers' not in \ - cfg.data.train_dataloader, ('`persistent_workers` are set ' - 'in `data` field and ` ' - 'data.train_dataloader` ' - 'at the same time. ' - 'Please only set it in ' - '`data.train_dataloader`. ') - cfg.data.train_dataloader['persistent_workers'] = persistent_workers - - if 'workers_per_gpu' in cfg.data: - - workers_per_gpu = cfg.data.pop('workers_per_gpu') - cfg.data.train_dataloader['workers_per_gpu'] = workers_per_gpu - cfg.data.val_dataloader['workers_per_gpu'] = workers_per_gpu - cfg.data.test_dataloader['workers_per_gpu'] = workers_per_gpu - - # special process for val_dataloader - if 'samples_per_gpu' in cfg.data.val: - # keep default value of `sample_per_gpu` is 1 - assert 'samples_per_gpu' not in \ - cfg.data.val_dataloader, ('`samples_per_gpu` are set ' - 'in `data.val` field and ` ' - 'data.val_dataloader` at ' - 'the same time. ' - 'Please only set it in ' - '`data.val_dataloader`. ') - cfg.data.val_dataloader['samples_per_gpu'] = \ - cfg.data.val.pop('samples_per_gpu') - # special process for val_dataloader - - # in case the test dataset is concatenated - if isinstance(cfg.data.test, dict): - if 'samples_per_gpu' in cfg.data.test: - assert 'samples_per_gpu' not in \ - cfg.data.test_dataloader, ('`samples_per_gpu` are set ' - 'in `data.test` field and ` ' - 'data.test_dataloader` ' - 'at the same time. ' - 'Please only set it in ' - '`data.test_dataloader`. ') - - cfg.data.test_dataloader['samples_per_gpu'] = \ - cfg.data.test.pop('samples_per_gpu') - - elif isinstance(cfg.data.test, list): - for ds_cfg in cfg.data.test: - if 'samples_per_gpu' in ds_cfg: - assert 'samples_per_gpu' not in \ - cfg.data.test_dataloader, ('`samples_per_gpu` are set ' - 'in `data.test` field and ` ' - 'data.test_dataloader` at' - ' the same time. ' - 'Please only set it in ' - '`data.test_dataloader`. 
') - samples_per_gpu = max( - [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test]) - cfg.data.test_dataloader['samples_per_gpu'] = samples_per_gpu - - return cfg diff --git a/cv/detection/co-detr/pytorch/mmdet/utils/contextmanagers.py b/cv/detection/co-detr/pytorch/mmdet/utils/contextmanagers.py deleted file mode 100644 index fa12bfcaff1e781b0a8cc7d7c8b839c2f2955a05..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/utils/contextmanagers.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import asyncio -import contextlib -import logging -import os -import time -from typing import List - -import torch - -logger = logging.getLogger(__name__) - -DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False)) - - -@contextlib.asynccontextmanager -async def completed(trace_name='', - name='', - sleep_interval=0.05, - streams: List[torch.cuda.Stream] = None): - """Async context manager that waits for work to complete on given CUDA - streams.""" - if not torch.cuda.is_available(): - yield - return - - stream_before_context_switch = torch.cuda.current_stream() - if not streams: - streams = [stream_before_context_switch] - else: - streams = [s if s else stream_before_context_switch for s in streams] - - end_events = [ - torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams - ] - - if DEBUG_COMPLETED_TIME: - start = torch.cuda.Event(enable_timing=True) - stream_before_context_switch.record_event(start) - - cpu_start = time.monotonic() - logger.debug('%s %s starting, streams: %s', trace_name, name, streams) - grad_enabled_before = torch.is_grad_enabled() - try: - yield - finally: - current_stream = torch.cuda.current_stream() - assert current_stream == stream_before_context_switch - - if DEBUG_COMPLETED_TIME: - cpu_end = time.monotonic() - for i, stream in enumerate(streams): - event = end_events[i] - stream.record_event(event) - - grad_enabled_after = torch.is_grad_enabled() - - # observed change of torch.is_grad_enabled() during concurrent run of - # async_test_bboxes code - assert (grad_enabled_before == grad_enabled_after - ), 'Unexpected is_grad_enabled() value change' - - are_done = [e.query() for e in end_events] - logger.debug('%s %s completed: %s streams: %s', trace_name, name, - are_done, streams) - with torch.cuda.stream(stream_before_context_switch): - while not all(are_done): - await asyncio.sleep(sleep_interval) - are_done = [e.query() for e in end_events] - logger.debug( - '%s %s completed: %s streams: %s', - trace_name, - name, - are_done, - streams, - ) - - current_stream = torch.cuda.current_stream() - assert current_stream == stream_before_context_switch - - if DEBUG_COMPLETED_TIME: - cpu_time = (cpu_end - cpu_start) * 1000 - stream_times_ms = '' - for i, stream in enumerate(streams): - elapsed_time = start.elapsed_time(end_events[i]) - stream_times_ms += f' {stream} {elapsed_time:.2f} ms' - logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time, - stream_times_ms) - - -@contextlib.asynccontextmanager -async def concurrent(streamqueue: asyncio.Queue, - trace_name='concurrent', - name='stream'): - """Run code concurrently in different streams. - - :param streamqueue: asyncio.Queue instance. - - Queue tasks define the pool of streams used for concurrent execution. 
- """ - if not torch.cuda.is_available(): - yield - return - - initial_stream = torch.cuda.current_stream() - - with torch.cuda.stream(initial_stream): - stream = await streamqueue.get() - assert isinstance(stream, torch.cuda.Stream) - - try: - with torch.cuda.stream(stream): - logger.debug('%s %s is starting, stream: %s', trace_name, name, - stream) - yield - current = torch.cuda.current_stream() - assert current == stream - logger.debug('%s %s has finished, stream: %s', trace_name, - name, stream) - finally: - streamqueue.task_done() - streamqueue.put_nowait(stream) diff --git a/cv/detection/co-detr/pytorch/mmdet/utils/logger.py b/cv/detection/co-detr/pytorch/mmdet/utils/logger.py deleted file mode 100644 index 485f641b709d88f21789c7c6048ff058bcb2bf29..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/utils/logger.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import inspect -import logging - -from mmcv.utils import get_logger - - -def get_root_logger(log_file=None, log_level=logging.INFO): - """Get root logger. - - Args: - log_file (str, optional): File path of log. Defaults to None. - log_level (int, optional): The level of logger. - Defaults to logging.INFO. - - Returns: - :obj:`logging.Logger`: The obtained logger - """ - logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level) - - return logger - - -def get_caller_name(): - """Get name of caller method.""" - # this_func_frame = inspect.stack()[0][0] # i.e., get_caller_name - # callee_frame = inspect.stack()[1][0] # e.g., log_img_scale - caller_frame = inspect.stack()[2][0] # e.g., caller of log_img_scale - caller_method = caller_frame.f_code.co_name - try: - caller_class = caller_frame.f_locals['self'].__class__.__name__ - return f'{caller_class}.{caller_method}' - except KeyError: # caller is a function - return caller_method - - -def log_img_scale(img_scale, shape_order='hw', skip_square=False): - """Log image size. - - Args: - img_scale (tuple): Image size to be logged. - shape_order (str, optional): The order of image shape. - 'hw' for (height, width) and 'wh' for (width, height). - Defaults to 'hw'. - skip_square (bool, optional): Whether to skip logging for square - img_scale. Defaults to False. - - Returns: - bool: Whether to have done logging. - """ - if shape_order == 'hw': - height, width = img_scale - elif shape_order == 'wh': - width, height = img_scale - else: - raise ValueError(f'Invalid shape_order {shape_order}.') - - if skip_square and (height == width): - return False - - logger = get_root_logger() - caller = get_caller_name() - logger.info(f'image shape: height={height}, width={width} in {caller}') - - return True diff --git a/cv/detection/co-detr/pytorch/mmdet/utils/memory.py b/cv/detection/co-detr/pytorch/mmdet/utils/memory.py deleted file mode 100644 index eb212bcaed139e5c9db595186ee8e16677921512..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/utils/memory.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings -from collections import abc -from contextlib import contextmanager -from functools import wraps - -import torch - -from mmdet.utils import get_root_logger - - -def cast_tensor_type(inputs, src_type=None, dst_type=None): - """Recursively convert Tensor in inputs from ``src_type`` to ``dst_type``. - - Args: - inputs: Inputs that to be casted. - src_type (torch.dtype | torch.device): Source type. 
- src_type (torch.dtype | torch.device): Destination type. - - Returns: - The same type with inputs, but all contained Tensors have been cast. - """ - assert dst_type is not None - if isinstance(inputs, torch.Tensor): - if isinstance(dst_type, torch.device): - # convert Tensor to dst_device - if hasattr(inputs, 'to') and \ - hasattr(inputs, 'device') and \ - (inputs.device == src_type or src_type is None): - return inputs.to(dst_type) - else: - return inputs - else: - # convert Tensor to dst_dtype - if hasattr(inputs, 'to') and \ - hasattr(inputs, 'dtype') and \ - (inputs.dtype == src_type or src_type is None): - return inputs.to(dst_type) - else: - return inputs - # we need to ensure that the type of inputs to be casted are the same - # as the argument `src_type`. - elif isinstance(inputs, abc.Mapping): - return type(inputs)({ - k: cast_tensor_type(v, src_type=src_type, dst_type=dst_type) - for k, v in inputs.items() - }) - elif isinstance(inputs, abc.Iterable): - return type(inputs)( - cast_tensor_type(item, src_type=src_type, dst_type=dst_type) - for item in inputs) - # TODO: Currently not supported - # elif isinstance(inputs, InstanceData): - # for key, value in inputs.items(): - # inputs[key] = cast_tensor_type( - # value, src_type=src_type, dst_type=dst_type) - # return inputs - else: - return inputs - - -@contextmanager -def _ignore_torch_cuda_oom(): - """A context which ignores CUDA OOM exception from pytorch. - - Code is modified from - # noqa: E501 - """ - try: - yield - except RuntimeError as e: - # NOTE: the string may change? - if 'CUDA out of memory. ' in str(e): - pass - else: - raise - - -class AvoidOOM: - """Try to convert inputs to FP16 and CPU if got a PyTorch's CUDA Out of - Memory error. It will do the following steps: - - 1. First retry after calling `torch.cuda.empty_cache()`. - 2. If that still fails, it will then retry by converting inputs - to FP16. - 3. If that still fails trying to convert inputs to CPUs. - In this case, it expects the function to dispatch to - CPU implementation. - - Args: - to_cpu (bool): Whether to convert outputs to CPU if get an OOM - error. This will slow down the code significantly. - Defaults to True. - test (bool): Skip `_ignore_torch_cuda_oom` operate that can use - lightweight data in unit test, only used in - test unit. Defaults to False. - - Examples: - >>> from mmdet.utils.memory import AvoidOOM - >>> AvoidCUDAOOM = AvoidOOM() - >>> output = AvoidOOM.retry_if_cuda_oom( - >>> some_torch_function)(input1, input2) - >>> # To use as a decorator - >>> # from mmdet.utils import AvoidCUDAOOM - >>> @AvoidCUDAOOM.retry_if_cuda_oom - >>> def function(*args, **kwargs): - >>> return None - ``` - - Note: - 1. The output may be on CPU even if inputs are on GPU. Processing - on CPU will slow down the code significantly. - 2. When converting inputs to CPU, it will only look at each argument - and check if it has `.device` and `.to` for conversion. Nested - structures of tensors are not supported. - 3. Since the function might be called more than once, it has to be - stateless. - """ - - def __init__(self, to_cpu=True, test=False): - self.to_cpu = to_cpu - self.test = test - - def retry_if_cuda_oom(self, func): - """Makes a function retry itself after encountering pytorch's CUDA OOM - error. - - The implementation logic is referred to - https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py - - Args: - func: a stateless callable that takes tensor-like objects - as arguments. 
- Returns: - func: a callable which retries `func` if OOM is encountered. - """ # noqa: W605 - - @wraps(func) - def wrapped(*args, **kwargs): - - # raw function - if not self.test: - with _ignore_torch_cuda_oom(): - return func(*args, **kwargs) - - # Clear cache and retry - torch.cuda.empty_cache() - with _ignore_torch_cuda_oom(): - return func(*args, **kwargs) - - # get the type and device of first tensor - dtype, device = None, None - values = args + tuple(kwargs.values()) - for value in values: - if isinstance(value, torch.Tensor): - dtype = value.dtype - device = value.device - break - if dtype is None or device is None: - raise ValueError('There is no tensor in the inputs, ' - 'cannot get dtype and device.') - - # Convert to FP16 - fp16_args = cast_tensor_type(args, dst_type=torch.half) - fp16_kwargs = cast_tensor_type(kwargs, dst_type=torch.half) - logger = get_root_logger() - logger.warning(f'Attempting to copy inputs of {str(func)} ' - 'to FP16 due to CUDA OOM') - - # get input tensor type, the output type will same as - # the first parameter type. - with _ignore_torch_cuda_oom(): - output = func(*fp16_args, **fp16_kwargs) - output = cast_tensor_type( - output, src_type=torch.half, dst_type=dtype) - if not self.test: - return output - logger.warning('Using FP16 still meet CUDA OOM') - - # Try on CPU. This will slow down the code significantly, - # therefore print a notice. - if self.to_cpu: - logger.warning(f'Attempting to copy inputs of {str(func)} ' - 'to CPU due to CUDA OOM') - cpu_device = torch.empty(0).device - cpu_args = cast_tensor_type(args, dst_type=cpu_device) - cpu_kwargs = cast_tensor_type(kwargs, dst_type=cpu_device) - - # convert outputs to GPU - with _ignore_torch_cuda_oom(): - logger.warning(f'Convert outputs to GPU (device={device})') - output = func(*cpu_args, **cpu_kwargs) - output = cast_tensor_type( - output, src_type=cpu_device, dst_type=device) - return output - - warnings.warn('Cannot convert output to GPU due to CUDA OOM, ' - 'the output is now on CPU, which might cause ' - 'errors if the output need to interact with GPU ' - 'data in subsequent operations') - logger.warning('Cannot convert output to GPU due to ' - 'CUDA OOM, the output is on CPU now.') - - return func(*cpu_args, **cpu_kwargs) - else: - # may still get CUDA OOM error - return func(*args, **kwargs) - - return wrapped - - -# To use AvoidOOM as a decorator -AvoidCUDAOOM = AvoidOOM() diff --git a/cv/detection/co-detr/pytorch/mmdet/utils/misc.py b/cv/detection/co-detr/pytorch/mmdet/utils/misc.py deleted file mode 100644 index 2017cbb94660c919a99e522393e83b42b27e46fe..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/utils/misc.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import glob -import os -import os.path as osp -import warnings - -import mmcv -import torch -from mmcv.utils import TORCH_VERSION, digit_version, print_log - - -def find_latest_checkpoint(path, suffix='pth'): - """Find the latest checkpoint from the working directory. - - Args: - path(str): The path to find checkpoints. - suffix(str): File extension. - Defaults to pth. - - Returns: - latest_path(str | None): File path of the latest checkpoint. - References: - .. 
[1] https://github.com/microsoft/SoftTeacher - /blob/main/ssod/utils/patch.py - """ - if not osp.exists(path): - warnings.warn('The path of checkpoints does not exist.') - return None - if osp.exists(osp.join(path, f'latest.{suffix}')): - return osp.join(path, f'latest.{suffix}') - - checkpoints = glob.glob(osp.join(path, f'*.{suffix}')) - if len(checkpoints) == 0: - warnings.warn('There are no checkpoints in the path.') - return None - latest = -1 - latest_path = None - for checkpoint in checkpoints: - count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0]) - if count > latest: - latest = count - latest_path = checkpoint - return latest_path - - -def update_data_root(cfg, logger=None): - """Update data root according to env MMDET_DATASETS. - - If set env MMDET_DATASETS, update cfg.data_root according to - MMDET_DATASETS. Otherwise, using cfg.data_root as default. - - Args: - cfg (mmcv.Config): The model config need to modify - logger (logging.Logger | str | None): the way to print msg - """ - assert isinstance(cfg, mmcv.Config), \ - f'cfg got wrong type: {type(cfg)}, expected mmcv.Config' - - if 'MMDET_DATASETS' in os.environ: - dst_root = os.environ['MMDET_DATASETS'] - print_log(f'MMDET_DATASETS has been set to be {dst_root}.' - f'Using {dst_root} as data root.') - else: - return - - assert isinstance(cfg, mmcv.Config), \ - f'cfg got wrong type: {type(cfg)}, expected mmcv.Config' - - def update(cfg, src_str, dst_str): - for k, v in cfg.items(): - if isinstance(v, mmcv.ConfigDict): - update(cfg[k], src_str, dst_str) - if isinstance(v, str) and src_str in v: - cfg[k] = v.replace(src_str, dst_str) - - update(cfg.data, cfg.data_root, dst_root) - cfg.data_root = dst_root - - -_torch_version_div_indexing = ( - 'parrots' not in TORCH_VERSION - and digit_version(TORCH_VERSION) >= digit_version('1.8')) - - -def floordiv(dividend, divisor, rounding_mode='trunc'): - if _torch_version_div_indexing: - return torch.div(dividend, divisor, rounding_mode=rounding_mode) - else: - return dividend // divisor diff --git a/cv/detection/co-detr/pytorch/mmdet/utils/profiling.py b/cv/detection/co-detr/pytorch/mmdet/utils/profiling.py deleted file mode 100644 index 2f53f456c72db57bfa69a8d022c92d153580209e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/utils/profiling.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import contextlib -import sys -import time - -import torch - -if sys.version_info >= (3, 7): - - @contextlib.contextmanager - def profile_time(trace_name, - name, - enabled=True, - stream=None, - end_stream=None): - """Print time spent by CPU and GPU. - - Useful as a temporary context manager to find sweet spots of code - suitable for async implementation. 
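`find_latest_checkpoint` above prefers an explicit `latest.pth`, otherwise it parses the trailing number in names like `epoch_12.pth` or `iter_8000.pth` and keeps the largest. The selection rule in isolation, applied to plain filenames (a hedged reimplementation for illustration, not the mmdet helper itself):

```python
import os.path as osp

def pick_latest(filenames, suffix='pth'):
    """Mimic the rule above: 'latest.pth' wins, else the largest trailing number."""
    if f'latest.{suffix}' in filenames:
        return f'latest.{suffix}'
    latest, latest_name = -1, None
    for name in filenames:
        if not name.endswith(f'.{suffix}'):
            continue
        # e.g. 'epoch_12.pth' -> '12'
        count = int(osp.basename(name).split('_')[-1].split('.')[0])
        if count > latest:
            latest, latest_name = count, name
    return latest_name

print(pick_latest(['epoch_2.pth', 'epoch_12.pth', 'epoch_9.pth']))  # epoch_12.pth
print(pick_latest(['epoch_3.pth', 'latest.pth']))                   # latest.pth
```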
- """ - if (not enabled) or not torch.cuda.is_available(): - yield - return - stream = stream if stream else torch.cuda.current_stream() - end_stream = end_stream if end_stream else stream - start = torch.cuda.Event(enable_timing=True) - end = torch.cuda.Event(enable_timing=True) - stream.record_event(start) - try: - cpu_start = time.monotonic() - yield - finally: - cpu_end = time.monotonic() - end_stream.record_event(end) - end.synchronize() - cpu_time = (cpu_end - cpu_start) * 1000 - gpu_time = start.elapsed_time(end) - msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms ' - msg += f'gpu_time {gpu_time:.2f} ms stream {stream}' - print(msg, end_stream) diff --git a/cv/detection/co-detr/pytorch/mmdet/utils/replace_cfg_vals.py b/cv/detection/co-detr/pytorch/mmdet/utils/replace_cfg_vals.py deleted file mode 100644 index 6ca301dc937bb9c3fe376d7a047b8c0430e8ec73..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/utils/replace_cfg_vals.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import re - -from mmcv.utils import Config - - -def replace_cfg_vals(ori_cfg): - """Replace the string "${key}" with the corresponding value. - - Replace the "${key}" with the value of ori_cfg.key in the config. And - support replacing the chained ${key}. Such as, replace "${key0.key1}" - with the value of cfg.key0.key1. Code is modified from `vars.py - < https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/vars.py>`_ # noqa: E501 - - Args: - ori_cfg (mmcv.utils.config.Config): - The origin config with "${key}" generated from a file. - - Returns: - updated_cfg [mmcv.utils.config.Config]: - The config with "${key}" replaced by the corresponding value. - """ - - def get_value(cfg, key): - for k in key.split('.'): - cfg = cfg[k] - return cfg - - def replace_value(cfg): - if isinstance(cfg, dict): - return {key: replace_value(value) for key, value in cfg.items()} - elif isinstance(cfg, list): - return [replace_value(item) for item in cfg] - elif isinstance(cfg, tuple): - return tuple([replace_value(item) for item in cfg]) - elif isinstance(cfg, str): - # the format of string cfg may be: - # 1) "${key}", which will be replaced with cfg.key directly - # 2) "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx", - # which will be replaced with the string of the cfg.key - keys = pattern_key.findall(cfg) - values = [get_value(ori_cfg, key[2:-1]) for key in keys] - if len(keys) == 1 and keys[0] == cfg: - # the format of string cfg is "${key}" - cfg = values[0] - else: - for key, value in zip(keys, values): - # the format of string cfg is - # "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx" - assert not isinstance(value, (dict, list, tuple)), \ - f'for the format of string cfg is ' \ - f"'xxxxx${key}xxxxx' or 'xxx${key}xxx${key}xxx', " \ - f"the type of the value of '${key}' " \ - f'can not be dict, list, or tuple' \ - f'but you input {type(value)} in {cfg}' - cfg = cfg.replace(key, str(value)) - return cfg - else: - return cfg - - # the pattern of string "${key}" - pattern_key = re.compile(r'\$\{[a-zA-Z\d_.]*\}') - # the type of ori_cfg._cfg_dict is mmcv.utils.config.ConfigDict - updated_cfg = Config( - replace_value(ori_cfg._cfg_dict), filename=ori_cfg.filename) - # replace the model with model_wrapper - if updated_cfg.get('model_wrapper', None) is not None: - updated_cfg.model = updated_cfg.model_wrapper - updated_cfg.pop('model_wrapper') - return updated_cfg diff --git a/cv/detection/co-detr/pytorch/mmdet/utils/setup_env.py 
b/cv/detection/co-detr/pytorch/mmdet/utils/setup_env.py deleted file mode 100644 index 6637cf878f8205f1a3fc3938472e07f272bc19b8..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/utils/setup_env.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os -import platform -import warnings - -import cv2 -import torch.multiprocessing as mp - - -def setup_multi_processes(cfg): - """Setup multi-processing environment variables.""" - # set multi-process start method as `fork` to speed up the training - if platform.system() != 'Windows': - mp_start_method = cfg.get('mp_start_method', 'fork') - current_method = mp.get_start_method(allow_none=True) - if current_method is not None and current_method != mp_start_method: - warnings.warn( - f'Multi-processing start method `{mp_start_method}` is ' - f'different from the previous setting `{current_method}`.' - f'It will be force set to `{mp_start_method}`. You can change ' - f'this behavior by changing `mp_start_method` in your config.') - mp.set_start_method(mp_start_method, force=True) - - # disable opencv multithreading to avoid system being overloaded - opencv_num_threads = cfg.get('opencv_num_threads', 0) - cv2.setNumThreads(opencv_num_threads) - - # setup OMP threads - # This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa - workers_per_gpu = cfg.data.get('workers_per_gpu', 1) - if 'train_dataloader' in cfg.data: - workers_per_gpu = \ - max(cfg.data.train_dataloader.get('workers_per_gpu', 1), - workers_per_gpu) - - if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1: - omp_num_threads = 1 - warnings.warn( - f'Setting OMP_NUM_THREADS environment variable for each process ' - f'to be {omp_num_threads} in default, to avoid your system being ' - f'overloaded, please further tune the variable for optimal ' - f'performance in your application as needed.') - os.environ['OMP_NUM_THREADS'] = str(omp_num_threads) - - # setup MKL threads - if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1: - mkl_num_threads = 1 - warnings.warn( - f'Setting MKL_NUM_THREADS environment variable for each process ' - f'to be {mkl_num_threads} in default, to avoid your system being ' - f'overloaded, please further tune the variable for optimal ' - f'performance in your application as needed.') - os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads) diff --git a/cv/detection/co-detr/pytorch/mmdet/utils/split_batch.py b/cv/detection/co-detr/pytorch/mmdet/utils/split_batch.py deleted file mode 100644 index 0276fb331f23c1a7f7451faf2a8f768e616d45fd..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/utils/split_batch.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - - -def split_batch(img, img_metas, kwargs): - """Split data_batch by tags. - - Code is modified from - # noqa: E501 - - Args: - img (Tensor): of shape (N, C, H, W) encoding input images. - Typically these should be mean centered and std scaled. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys, see - :class:`mmdet.datasets.pipelines.Collect`. - kwargs (dict): Specific to concrete implementation. - - Returns: - data_groups (dict): a dict that data_batch splited by tags, - such as 'sup', 'unsup_teacher', and 'unsup_student'. 
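The OMP/MKL block in `setup_multi_processes` above caps per-process BLAS threads at 1 when dataloader workers are in use, so worker processes and math libraries do not oversubscribe the CPU. A minimal sketch of just that guard, with a plain dict standing in for the mmcv config (names here are illustrative):

```python
import os
import warnings

def cap_thread_env(workers_per_gpu, env='OMP_NUM_THREADS', num_threads=1):
    """Set `env` to `num_threads` only if it is unset and workers are used."""
    if env not in os.environ and workers_per_gpu > 1:
        warnings.warn(
            f'Setting {env}={num_threads} by default to avoid CPU '
            f'oversubscription; tune it for your workload if needed.')
        os.environ[env] = str(num_threads)

cfg = {'data': {'workers_per_gpu': 2, 'train_dataloader': {'workers_per_gpu': 4}}}
workers = max(cfg['data']['workers_per_gpu'],
              cfg['data']['train_dataloader'].get('workers_per_gpu', 1))
cap_thread_env(workers, 'OMP_NUM_THREADS')
cap_thread_env(workers, 'MKL_NUM_THREADS')
print(os.environ.get('OMP_NUM_THREADS'), os.environ.get('MKL_NUM_THREADS'))
```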
- """ - - # only stack img in the batch - def fuse_list(obj_list, obj): - return torch.stack(obj_list) if isinstance(obj, - torch.Tensor) else obj_list - - # select data with tag from data_batch - def select_group(data_batch, current_tag): - group_flag = [tag == current_tag for tag in data_batch['tag']] - return { - k: fuse_list([vv for vv, gf in zip(v, group_flag) if gf], v) - for k, v in data_batch.items() - } - - kwargs.update({'img': img, 'img_metas': img_metas}) - kwargs.update({'tag': [meta['tag'] for meta in img_metas]}) - tags = list(set(kwargs['tag'])) - data_groups = {tag: select_group(kwargs, tag) for tag in tags} - for tag, group in data_groups.items(): - group.pop('tag') - return data_groups diff --git a/cv/detection/co-detr/pytorch/mmdet/utils/util_distribution.py b/cv/detection/co-detr/pytorch/mmdet/utils/util_distribution.py deleted file mode 100644 index f64b8e7a43c59db4b23402618e2731cc9db3fb93..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/utils/util_distribution.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from mmcv.parallel import MMDataParallel, MMDistributedDataParallel - -dp_factory = {'cuda': MMDataParallel, 'cpu': MMDataParallel} - -ddp_factory = {'cuda': MMDistributedDataParallel} - - -def build_dp(model, device='cuda', dim=0, *args, **kwargs): - """build DataParallel module by device type. - - if device is cuda, return a MMDataParallel model; if device is mlu, - return a MLUDataParallel model. - - Args: - model (:class:`nn.Module`): model to be parallelized. - device (str): device type, cuda, cpu or mlu. Defaults to cuda. - dim (int): Dimension used to scatter the data. Defaults to 0. - - Returns: - nn.Module: the model to be parallelized. - """ - if device == 'cuda': - model = model.cuda(kwargs['device_ids'][0]) - elif device == 'mlu': - from mmcv.device.mlu import MLUDataParallel - dp_factory['mlu'] = MLUDataParallel - model = model.mlu() - - return dp_factory[device](model, dim=dim, *args, **kwargs) - - -def build_ddp(model, device='cuda', *args, **kwargs): - """Build DistributedDataParallel module by device type. - - If device is cuda, return a MMDistributedDataParallel model; - if device is mlu, return a MLUDistributedDataParallel model. - - Args: - model (:class:`nn.Module`): module to be parallelized. - device (str): device type, mlu or cuda. - - Returns: - :class:`nn.Module`: the module to be parallelized - - References: - .. [1] https://pytorch.org/docs/stable/generated/torch.nn.parallel. - DistributedDataParallel.html - """ - assert device in ['cuda', 'mlu'], 'Only available for cuda or mlu devices.' 
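`split_batch` just above partitions one mixed batch into per-tag sub-batches ('sup', 'unsup_teacher', ...) by filtering every field with the same boolean flags and re-stacking tensor fields. The grouping logic alone on a toy batch (illustrative shapes, not the full mmdet signature):

```python
import torch

data_batch = {
    'img': torch.randn(4, 3, 8, 8),
    'img_metas': [{'id': i} for i in range(4)],
    'tag': ['sup', 'unsup_teacher', 'sup', 'unsup_teacher'],
}

def fuse_list(items, reference):
    # stack tensor fields back into a batch, keep list fields as lists
    return torch.stack(items) if isinstance(reference, torch.Tensor) else items

def select_group(batch, tag):
    flags = [t == tag for t in batch['tag']]
    return {k: fuse_list([v_i for v_i, keep in zip(v, flags) if keep], v)
            for k, v in batch.items()}

data_groups = {tag: select_group(data_batch, tag) for tag in set(data_batch['tag'])}
for tag, group in data_groups.items():
    group.pop('tag')
    print(tag, group['img'].shape, [m['id'] for m in group['img_metas']])
    # e.g. sup torch.Size([2, 3, 8, 8]) [0, 2]
```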
- if device == 'cuda': - model = model.cuda() - elif device == 'mlu': - from mmcv.device.mlu import MLUDistributedDataParallel - ddp_factory['mlu'] = MLUDistributedDataParallel - model = model.mlu() - - return ddp_factory[device](model, *args, **kwargs) - - -def is_mlu_available(): - """Returns a bool indicating if MLU is currently available.""" - return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available() - - -def get_device(): - """Returns an available device, cpu, cuda or mlu.""" - is_device_available = { - 'cuda': torch.cuda.is_available(), - 'mlu': is_mlu_available() - } - device_list = [k for k, v in is_device_available.items() if v] - return device_list[0] if len(device_list) == 1 else 'cpu' diff --git a/cv/detection/co-detr/pytorch/mmdet/utils/util_mixins.py b/cv/detection/co-detr/pytorch/mmdet/utils/util_mixins.py deleted file mode 100644 index b83b6617f5e4a202067e1659bf448962a2a2bc72..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/utils/util_mixins.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -"""This module defines the :class:`NiceRepr` mixin class, which defines a -``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__`` -method, which you must define. This means you only have to overload one -function instead of two. Furthermore, if the object defines a ``__len__`` -method, then the ``__nice__`` method defaults to something sensible, otherwise -it is treated as abstract and raises ``NotImplementedError``. - -To use simply have your object inherit from :class:`NiceRepr` -(multi-inheritance should be ok). - -This code was copied from the ubelt library: https://github.com/Erotemic/ubelt - -Example: - >>> # Objects that define __nice__ have a default __str__ and __repr__ - >>> class Student(NiceRepr): - ... def __init__(self, name): - ... self.name = name - ... def __nice__(self): - ... return self.name - >>> s1 = Student('Alice') - >>> s2 = Student('Bob') - >>> print(f's1 = {s1}') - >>> print(f's2 = {s2}') - s1 = - s2 = - -Example: - >>> # Objects that define __len__ have a default __nice__ - >>> class Group(NiceRepr): - ... def __init__(self, data): - ... self.data = data - ... def __len__(self): - ... return len(self.data) - >>> g = Group([1, 2, 3]) - >>> print(f'g = {g}') - g = -""" -import warnings - - -class NiceRepr: - """Inherit from this class and define ``__nice__`` to "nicely" print your - objects. - - Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function - Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``. - If the inheriting class has a ``__len__``, method then the default - ``__nice__`` method will return its length. - - Example: - >>> class Foo(NiceRepr): - ... def __nice__(self): - ... return 'info' - >>> foo = Foo() - >>> assert str(foo) == '' - >>> assert repr(foo).startswith('>> class Bar(NiceRepr): - ... pass - >>> bar = Bar() - >>> import pytest - >>> with pytest.warns(None) as record: - >>> assert 'object at' in str(bar) - >>> assert 'object at' in repr(bar) - - Example: - >>> class Baz(NiceRepr): - ... def __len__(self): - ... 
return 5 - >>> baz = Baz() - >>> assert str(baz) == '' - """ - - def __nice__(self): - """str: a "nice" summary string describing this module""" - if hasattr(self, '__len__'): - # It is a common pattern for objects to use __len__ in __nice__ - # As a convenience we define a default __nice__ for these objects - return str(len(self)) - else: - # In all other cases force the subclass to overload __nice__ - raise NotImplementedError( - f'Define the __nice__ method for {self.__class__!r}') - - def __repr__(self): - """str: the string of the module""" - try: - nice = self.__nice__() - classname = self.__class__.__name__ - return f'<{classname}({nice}) at {hex(id(self))}>' - except NotImplementedError as ex: - warnings.warn(str(ex), category=RuntimeWarning) - return object.__repr__(self) - - def __str__(self): - """str: the string of the module""" - try: - classname = self.__class__.__name__ - nice = self.__nice__() - return f'<{classname}({nice})>' - except NotImplementedError as ex: - warnings.warn(str(ex), category=RuntimeWarning) - return object.__repr__(self) diff --git a/cv/detection/co-detr/pytorch/mmdet/utils/util_random.py b/cv/detection/co-detr/pytorch/mmdet/utils/util_random.py deleted file mode 100644 index dc1ecb6c03b026156c9947cb6d356a822448be0f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/utils/util_random.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -"""Helpers for random number generators.""" -import numpy as np - - -def ensure_rng(rng=None): - """Coerces input into a random number generator. - - If the input is None, then a global random state is returned. - - If the input is a numeric value, then that is used as a seed to construct a - random state. Otherwise the input is returned as-is. - - Adapted from [1]_. - - Args: - rng (int | numpy.random.RandomState | None): - if None, then defaults to the global rng. Otherwise this can be an - integer or a RandomState class - Returns: - (numpy.random.RandomState) : rng - - a numpy random number generator - - References: - .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501 - """ - - if rng is None: - rng = np.random.mtrand._rand - elif isinstance(rng, int): - rng = np.random.RandomState(rng) - else: - rng = rng - return rng diff --git a/cv/detection/co-detr/pytorch/mmdet/version.py b/cv/detection/co-detr/pytorch/mmdet/version.py deleted file mode 100644 index 622218babfce644a07dadafbf818f52a53cd0bd4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/mmdet/version.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
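`ensure_rng` above coerces whatever it is handed into a `numpy.random.RandomState`: `None` means the global state, an int seeds a fresh generator, and an existing generator passes through unchanged. A quick usage sketch showing the three cases:

```python
import numpy as np

def ensure_rng(rng=None):
    """Same coercion rule as above: None -> global state, int -> seeded state."""
    if rng is None:
        return np.random.mtrand._rand
    if isinstance(rng, int):
        return np.random.RandomState(rng)
    return rng

a = ensure_rng(None)                       # the module-level global RandomState
b = ensure_rng(42)                         # fresh generator seeded with 42
c = ensure_rng(np.random.RandomState(7))   # passed through unchanged
print(type(a).__name__, round(b.rand(), 3), c is ensure_rng(c))  # RandomState 0.375 True
```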
- -__version__ = '2.25.3' -short_version = __version__ - - -def parse_version_info(version_str): - version_info = [] - for x in version_str.split('.'): - if x.isdigit(): - version_info.append(int(x)) - elif x.find('rc') != -1: - patch_version = x.split('rc') - version_info.append(int(patch_version[0])) - version_info.append(f'rc{patch_version[1]}') - return tuple(version_info) - - -version_info = parse_version_info(__version__) diff --git a/cv/detection/co-detr/pytorch/projects/__init__.py b/cv/detection/co-detr/pytorch/projects/__init__.py deleted file mode 100644 index aed4fa323c2c8001593fdfdcd878a21718800167..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .models import * diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/cityscapes_detection.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/cityscapes_detection.py deleted file mode 100644 index e341b59d6fa6265c2d17dc32aae2341871670a3d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/cityscapes_detection.py +++ /dev/null @@ -1,56 +0,0 @@ -# dataset settings -dataset_type = 'CityscapesDataset' -data_root = 'data/cityscapes/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=1, - workers_per_gpu=2, - train=dict( - type='RepeatDataset', - times=8, - dataset=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/instancesonly_filtered_gtFine_train.json', - img_prefix=data_root + 'leftImg8bit/train/', - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/instancesonly_filtered_gtFine_val.json', - img_prefix=data_root + 'leftImg8bit/val/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/instancesonly_filtered_gtFine_test.json', - img_prefix=data_root + 'leftImg8bit/test/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='bbox') diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/cityscapes_instance.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/cityscapes_instance.py deleted file mode 100644 index 4e3c34e2c85b4fc2ba854e1b409af70dc2c34e94..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/cityscapes_instance.py +++ /dev/null @@ -1,56 +0,0 @@ -# dataset settings -dataset_type = 'CityscapesDataset' -data_root = 'data/cityscapes/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - 
dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1024), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=1, - workers_per_gpu=2, - train=dict( - type='RepeatDataset', - times=8, - dataset=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/instancesonly_filtered_gtFine_train.json', - img_prefix=data_root + 'leftImg8bit/train/', - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/instancesonly_filtered_gtFine_val.json', - img_prefix=data_root + 'leftImg8bit/val/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/instancesonly_filtered_gtFine_test.json', - img_prefix=data_root + 'leftImg8bit/test/', - pipeline=test_pipeline)) -evaluation = dict(metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/coco_detection.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/coco_detection.py deleted file mode 100644 index 149f590bb45fa65c29fd4c005e4a237d7dd2e117..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/coco_detection.py +++ /dev/null @@ -1,49 +0,0 @@ -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='bbox') diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/coco_instance.py 
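(Note, not part of the deleted configs: the `train_pipeline`/`test_pipeline` lists above are consumed in order, each dict naming a transform that is built and applied to a running `results` dict. A behavioural sketch of that dispatch; `registry` here is a hypothetical mapping standing in for mmdet's transform registry and `Compose`:)

```
def run_pipeline(results, pipeline, registry):
    """Apply each transform named in `pipeline` to `results`, in order."""
    for cfg in pipeline:
        cfg = dict(cfg)                              # copy so the config stays untouched
        transform = registry[cfg.pop('type')](**cfg) # build the transform from its kwargs
        results = transform(results)
    return results
```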
b/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/coco_instance.py deleted file mode 100644 index 9901a858414465d19d8ec6ced316b460166176b4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/coco_instance.py +++ /dev/null @@ -1,49 +0,0 @@ -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/coco_instance_semantic.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/coco_instance_semantic.py deleted file mode 100644 index 6c8bf07b278f615e7ff5e67490d7a92068574b5b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/coco_instance_semantic.py +++ /dev/null @@ -1,54 +0,0 @@ -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='SegRescale', scale_factor=1 / 8), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 
'train2017/', - seg_prefix=data_root + 'stuffthingmaps/train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/coco_panoptic.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/coco_panoptic.py deleted file mode 100644 index dbade7c0ac20141806b93f0ea7b5ca26d748246e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/coco_panoptic.py +++ /dev/null @@ -1,59 +0,0 @@ -# dataset settings -dataset_type = 'CocoPanopticDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='LoadPanopticAnnotations', - with_bbox=True, - with_mask=True, - with_seg=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='SegRescale', scale_factor=1 / 4), - dict(type='DefaultFormatBundle'), - dict( - type='Collect', - keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/panoptic_train2017.json', - img_prefix=data_root + 'train2017/', - seg_prefix=data_root + 'annotations/panoptic_train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/panoptic_val2017.json', - img_prefix=data_root + 'val2017/', - seg_prefix=data_root + 'annotations/panoptic_val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/panoptic_val2017.json', - img_prefix=data_root + 'val2017/', - seg_prefix=data_root + 'annotations/panoptic_val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric=['PQ']) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/deepfashion.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/deepfashion.py deleted file mode 100644 index 308b4b2ac4d9e3516ba4a57e9d3b6af91e97f24b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/deepfashion.py +++ /dev/null @@ -1,53 +0,0 @@ -# dataset settings -dataset_type = 'DeepFashionDataset' -data_root = 'data/DeepFashion/In-shop/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(750, 1101), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', 
**img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(750, 1101), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - imgs_per_gpu=2, - workers_per_gpu=1, - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json', - img_prefix=data_root + 'Img/', - pipeline=train_pipeline, - data_root=data_root), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json', - img_prefix=data_root + 'Img/', - pipeline=test_pipeline, - data_root=data_root), - test=dict( - type=dataset_type, - ann_file=data_root + - 'annotations/DeepFashion_segmentation_gallery.json', - img_prefix=data_root + 'Img/', - pipeline=test_pipeline, - data_root=data_root)) -evaluation = dict(interval=5, metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/lvis_v0.5_instance.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/lvis_v0.5_instance.py deleted file mode 100644 index 207e0053c24d73e05e78c764d05e65c102675320..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/lvis_v0.5_instance.py +++ /dev/null @@ -1,24 +0,0 @@ -# dataset settings -_base_ = 'coco_instance.py' -dataset_type = 'LVISV05Dataset' -data_root = 'data/lvis_v0.5/' -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - _delete_=True, - type='ClassBalancedDataset', - oversample_thr=1e-3, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v0.5_train.json', - img_prefix=data_root + 'train2017/')), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v0.5_val.json', - img_prefix=data_root + 'val2017/'), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v0.5_val.json', - img_prefix=data_root + 'val2017/')) -evaluation = dict(metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/lvis_v1_instance.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/lvis_v1_instance.py deleted file mode 100644 index be791edd79495dce88d010eea63e33d398f242b0..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/lvis_v1_instance.py +++ /dev/null @@ -1,24 +0,0 @@ -# dataset settings -_base_ = 'coco_instance.py' -dataset_type = 'LVISV1Dataset' -data_root = 'data/lvis_v1/' -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - _delete_=True, - type='ClassBalancedDataset', - oversample_thr=1e-3, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_train.json', - img_prefix=data_root)), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_val.json', - img_prefix=data_root), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_val.json', - img_prefix=data_root)) -evaluation = dict(metric=['bbox', 'segm']) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/openimages_detection.py 
b/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/openimages_detection.py deleted file mode 100644 index a65d30634adbdc7ce21c1bd24fed6c99adc50f09..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/openimages_detection.py +++ /dev/null @@ -1,65 +0,0 @@ -# dataset settings -dataset_type = 'OpenImagesDataset' -data_root = 'data/OpenImages/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, denorm_bbox=True), - dict(type='Resize', img_scale=(1024, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1024, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ], - ), -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=0, # workers_per_gpu > 0 may occur out of memory - train=dict( - type=dataset_type, - ann_file=data_root + 'annotations/oidv6-train-annotations-bbox.csv', - img_prefix=data_root + 'OpenImages/train/', - label_file=data_root + 'annotations/class-descriptions-boxable.csv', - hierarchy_file=data_root + - 'annotations/bbox_labels_600_hierarchy.json', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root + 'annotations/validation-annotations-bbox.csv', - img_prefix=data_root + 'OpenImages/validation/', - label_file=data_root + 'annotations/class-descriptions-boxable.csv', - hierarchy_file=data_root + - 'annotations/bbox_labels_600_hierarchy.json', - meta_file=data_root + 'annotations/validation-image-metas.pkl', - image_level_ann_file=data_root + - 'annotations/validation-annotations-human-imagelabels-boxable.csv', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'annotations/validation-annotations-bbox.csv', - img_prefix=data_root + 'OpenImages/validation/', - label_file=data_root + 'annotations/class-descriptions-boxable.csv', - hierarchy_file=data_root + - 'annotations/bbox_labels_600_hierarchy.json', - meta_file=data_root + 'annotations/validation-image-metas.pkl', - image_level_ann_file=data_root + - 'annotations/validation-annotations-human-imagelabels-boxable.csv', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='mAP') diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/voc0712.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/voc0712.py deleted file mode 100644 index ae09acdd5c9580217815300abbad9f08b71b37ed..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/voc0712.py +++ /dev/null @@ -1,55 +0,0 @@ -# dataset settings -dataset_type = 'VOCDataset' -data_root = 'data/VOCdevkit/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1000, 600), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - 
dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1000, 600), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='RepeatDataset', - times=3, - dataset=dict( - type=dataset_type, - ann_file=[ - data_root + 'VOC2007/ImageSets/Main/trainval.txt', - data_root + 'VOC2012/ImageSets/Main/trainval.txt' - ], - img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'], - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', - img_prefix=data_root + 'VOC2007/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt', - img_prefix=data_root + 'VOC2007/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='mAP') diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/wider_face.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/wider_face.py deleted file mode 100644 index d1d649be42bca2955fb56a784fe80bcc2fdce4e1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/datasets/wider_face.py +++ /dev/null @@ -1,63 +0,0 @@ -# dataset settings -dataset_type = 'WIDERFaceDataset' -data_root = 'data/WIDERFace/' -img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 4)), - dict( - type='MinIoURandomCrop', - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(300, 300), keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(300, 300), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=60, - workers_per_gpu=2, - train=dict( - type='RepeatDataset', - times=2, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'train.txt', - img_prefix=data_root + 'WIDER_train/', - min_size=17, - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - ann_file=data_root + 'val.txt', - img_prefix=data_root + 'WIDER_val/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root + 'val.txt', - img_prefix=data_root + 'WIDER_val/', - pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/default_runtime.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/default_runtime.py 
deleted file mode 100644 index 5b0b1452c0a625e331be7b1e6c5cf341cc91ff64..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/default_runtime.py +++ /dev/null @@ -1,27 +0,0 @@ -checkpoint_config = dict(interval=1) -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -custom_hooks = [dict(type='NumClassCheckHook')] - -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] - -# disable opencv multithreading to avoid system being overloaded -opencv_num_threads = 0 -# set multi-process start method as `fork` to speed up the training -mp_start_method = 'fork' - -# Default setting for scaling LR automatically -# - `enable` means enable scaling LR automatically -# or not by default. -# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). -auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py deleted file mode 100644 index 2902ccae5a8ffaa6ae9c49212b68a71035c83e60..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py +++ /dev/null @@ -1,196 +0,0 @@ -# model settings -model = dict( - type='CascadeRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - roi_head=dict( - type='CascadeRoIHead', - num_stages=3, - stage_loss_weights=[1, 0.5, 0.25], - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=[ - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - 
num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ], - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_head=dict( - type='FCNMaskHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=[ - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.6, - neg_iou_thr=0.6, - min_pos_iou=0.6, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.7, - min_pos_iou=0.7, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False) - ]), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5))) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/cascade_rcnn_r50_fpn.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/models/cascade_rcnn_r50_fpn.py deleted file mode 100644 index 42f74ae748a32bdce10ab9003fd45f87721d02ff..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/cascade_rcnn_r50_fpn.py +++ /dev/null @@ -1,179 +0,0 @@ -# model settings -model = dict( - type='CascadeRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, 
.0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - roi_head=dict( - type='CascadeRoIHead', - num_stages=3, - stage_loss_weights=[1, 0.5, 0.25], - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=[ - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.05, 0.05, 0.1, 0.1]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, - loss_weight=1.0)), - dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.033, 0.033, 0.067, 0.067]), - reg_class_agnostic=True, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) - ]), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=[ - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.6, - neg_iou_thr=0.6, - min_pos_iou=0.6, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False), - dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.7, - min_pos_iou=0.7, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False) - ]), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100))) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/fast_rcnn_r50_fpn.py 
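(Note, not part of the deleted file: the three cascade stages in the config above differ only in their positive-IoU assignment threshold, box-target stds, and stage loss weight, so each stage refines progressively higher-quality proposals. The values below are copied directly from that config:)

```
stages = [
    dict(pos_iou_thr=0.5, target_stds=(0.1, 0.1, 0.2, 0.2), stage_loss_weight=1.0),
    dict(pos_iou_thr=0.6, target_stds=(0.05, 0.05, 0.1, 0.1), stage_loss_weight=0.5),
    dict(pos_iou_thr=0.7, target_stds=(0.033, 0.033, 0.067, 0.067), stage_loss_weight=0.25),
]
for i, s in enumerate(stages):
    print(f"stage {i}: positives at IoU >= {s['pos_iou_thr']}, loss weight {s['stage_loss_weight']}")
```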
b/cv/detection/co-detr/pytorch/projects/configs/_base_/models/fast_rcnn_r50_fpn.py deleted file mode 100644 index 9982fe0956d60022a2c702a824ffaff192e93e1e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/fast_rcnn_r50_fpn.py +++ /dev/null @@ -1,62 +0,0 @@ -# model settings -model = dict( - type='FastRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - test_cfg=dict( - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100))) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/faster_rcnn_r50_caffe_c4.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/models/faster_rcnn_r50_caffe_c4.py deleted file mode 100644 index dbf965afe3de8e91505cf5deeae0d32c55f93c4f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/faster_rcnn_r50_caffe_c4.py +++ /dev/null @@ -1,117 +0,0 @@ -# model settings -norm_cfg = dict(type='BN', requires_grad=False) -model = dict( - type='FasterRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=3, - strides=(1, 2, 2), - dilations=(1, 1, 1), - out_indices=(2, ), - frozen_stages=1, - norm_cfg=norm_cfg, - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - rpn_head=dict( - type='RPNHead', - in_channels=1024, - feat_channels=1024, - anchor_generator=dict( - type='AnchorGenerator', - scales=[2, 4, 8, 16, 32], - ratios=[0.5, 1.0, 2.0], - strides=[16]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - shared_head=dict( - type='ResLayer', - depth=50, - stage=3, - stride=2, - dilation=1, - style='caffe', - norm_cfg=norm_cfg, - norm_eval=True, - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=1024, - 
featmap_strides=[16]), - bbox_head=dict( - type='BBoxHead', - with_avg_pool=True, - roi_feat_size=7, - in_channels=2048, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=12000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=6000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100))) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py deleted file mode 100644 index a377a6f09664b5eca189fa77dcb47c69842fdbf2..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py +++ /dev/null @@ -1,105 +0,0 @@ -# model settings -norm_cfg = dict(type='BN', requires_grad=False) -model = dict( - type='FasterRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - strides=(1, 2, 2, 1), - dilations=(1, 1, 1, 2), - out_indices=(3, ), - frozen_stages=1, - norm_cfg=norm_cfg, - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - rpn_head=dict( - type='RPNHead', - in_channels=2048, - feat_channels=2048, - anchor_generator=dict( - type='AnchorGenerator', - scales=[2, 4, 8, 16, 32], - ratios=[0.5, 1.0, 2.0], - strides=[16]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=2048, - featmap_strides=[16]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=2048, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - 
match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=12000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms=dict(type='nms', iou_threshold=0.7), - nms_pre=6000, - max_per_img=1000, - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100))) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/faster_rcnn_r50_fpn.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/models/faster_rcnn_r50_fpn.py deleted file mode 100644 index 1ef8e7b2579504e7614429609524ae38239701cc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/faster_rcnn_r50_fpn.py +++ /dev/null @@ -1,108 +0,0 @@ -# model settings -model = dict( - type='FasterRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - 
add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100) - # soft-nms is also supported for rcnn testing - # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) - )) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/mask_rcnn_r50_caffe_c4.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/models/mask_rcnn_r50_caffe_c4.py deleted file mode 100644 index 122202e1a5d6b3367de9a8c632864cf168ca5b9d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/mask_rcnn_r50_caffe_c4.py +++ /dev/null @@ -1,125 +0,0 @@ -# model settings -norm_cfg = dict(type='BN', requires_grad=False) -model = dict( - type='MaskRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=3, - strides=(1, 2, 2), - dilations=(1, 1, 1), - out_indices=(2, ), - frozen_stages=1, - norm_cfg=norm_cfg, - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - rpn_head=dict( - type='RPNHead', - in_channels=1024, - feat_channels=1024, - anchor_generator=dict( - type='AnchorGenerator', - scales=[2, 4, 8, 16, 32], - ratios=[0.5, 1.0, 2.0], - strides=[16]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - shared_head=dict( - type='ResLayer', - depth=50, - stage=3, - stride=2, - dilation=1, - style='caffe', - norm_cfg=norm_cfg, - norm_eval=True), - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=1024, - featmap_strides=[16]), - bbox_head=dict( - type='BBoxHead', - with_avg_pool=True, - roi_feat_size=7, - in_channels=2048, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - mask_roi_extractor=None, - mask_head=dict( - type='FCNMaskHead', - num_convs=0, - in_channels=2048, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=12000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=14, - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=6000, - 
nms=dict(type='nms', iou_threshold=0.7), - max_per_img=1000, - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5))) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/mask_rcnn_r50_fpn.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/models/mask_rcnn_r50_fpn.py deleted file mode 100644 index d903e55e2d95135b1448e566d4d5ec8146597a6a..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/mask_rcnn_r50_fpn.py +++ /dev/null @@ -1,120 +0,0 @@ -# model settings -model = dict( - type='MaskRCNN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_head=dict( - type='FCNMaskHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100, - 
mask_thr_binary=0.5))) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/retinanet_r50_fpn.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/models/retinanet_r50_fpn.py deleted file mode 100644 index 56e43fa7764cb0f48510415f21888ba0df0c6eb5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/retinanet_r50_fpn.py +++ /dev/null @@ -1,60 +0,0 @@ -# model settings -model = dict( - type='RetinaNet', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_input', - num_outs=5), - bbox_head=dict( - type='RetinaHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - # model training and testing settings - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/rpn_r50_caffe_c4.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/models/rpn_r50_caffe_c4.py deleted file mode 100644 index 8b32ca99258e5ddf249d11eadcd46630d88bd55e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/rpn_r50_caffe_c4.py +++ /dev/null @@ -1,58 +0,0 @@ -# model settings -model = dict( - type='RPN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=3, - strides=(1, 2, 2), - dilations=(1, 1, 1), - out_indices=(2, ), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='caffe', - init_cfg=dict( - type='Pretrained', - checkpoint='open-mmlab://detectron2/resnet50_caffe')), - neck=None, - rpn_head=dict( - type='RPNHead', - in_channels=1024, - feat_channels=1024, - anchor_generator=dict( - type='AnchorGenerator', - scales=[2, 4, 8, 16, 32], - ratios=[0.5, 1.0, 2.0], - strides=[16]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=12000, - max_per_img=2000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0))) diff --git 
a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/rpn_r50_fpn.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/models/rpn_r50_fpn.py deleted file mode 100644 index edaf4d4b06b64b88a4ddd64419fc026e64a6af1d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/rpn_r50_fpn.py +++ /dev/null @@ -1,58 +0,0 @@ -# model settings -model = dict( - type='RPN', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=0, - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0))) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/ssd300.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/models/ssd300.py deleted file mode 100644 index f17df010069e300f9f0b6eb456f87e61b8582787..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/models/ssd300.py +++ /dev/null @@ -1,56 +0,0 @@ -# model settings -input_size = 300 -model = dict( - type='SingleStageDetector', - backbone=dict( - type='SSDVGG', - depth=16, - with_last_pool=False, - ceil_mode=True, - out_indices=(3, 4), - out_feature_indices=(22, 34), - init_cfg=dict( - type='Pretrained', checkpoint='open-mmlab://vgg16_caffe')), - neck=dict( - type='SSDNeck', - in_channels=(512, 1024), - out_channels=(512, 1024, 512, 256, 256, 256), - level_strides=(2, 2, 1, 1), - level_paddings=(1, 1, 0, 0), - l2_norm_scale=20), - bbox_head=dict( - type='SSDHead', - in_channels=(512, 1024, 512, 256, 256, 256), - num_classes=80, - anchor_generator=dict( - type='SSDAnchorGenerator', - scale_major=False, - input_size=input_size, - basesize_ratio_range=(0.15, 0.9), - strides=[8, 16, 32, 64, 100, 300], - ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2])), - # model training and testing settings - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0., - ignore_iof_thr=-1, - gt_max_assign_all=False), - smoothl1_beta=1., - allowed_border=-1, - pos_weight=-1, - neg_pos_ratio=3, - debug=False), - test_cfg=dict( - nms_pre=1000, - nms=dict(type='nms', iou_threshold=0.45), - min_bbox_size=0, - score_thr=0.02, - max_per_img=200)) -cudnn_benchmark = True diff --git 
a/cv/detection/co-detr/pytorch/projects/configs/_base_/schedules/schedule_1x.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/schedules/schedule_1x.py deleted file mode 100644 index 13b3783cbbe93b6c32bc415dc50f633dffa4aec7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/schedules/schedule_1x.py +++ /dev/null @@ -1,11 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[8, 11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/schedules/schedule_20e.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/schedules/schedule_20e.py deleted file mode 100644 index 00e859022156dcbef6501c04d03f335639f2c1f6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/schedules/schedule_20e.py +++ /dev/null @@ -1,11 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[16, 19]) -runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/cv/detection/co-detr/pytorch/projects/configs/_base_/schedules/schedule_2x.py b/cv/detection/co-detr/pytorch/projects/configs/_base_/schedules/schedule_2x.py deleted file mode 100644 index 69dc9ee8080649ce3646b5775b0ca2e9c863d0f5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/_base_/schedules/schedule_2x.py +++ /dev/null @@ -1,11 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_mask_r50_1x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_mask_r50_1x_coco.py deleted file mode 100644 index 0656d624f5b3434ad08e82deefd562da21c60017..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_mask_r50_1x_coco.py +++ /dev/null @@ -1,197 +0,0 @@ -_base_ = [ - 'co_deformable_detr_r50_1x_coco.py' -] -# model settings -num_dec_layer = 6 -lambda_2 = 2.0 - -model = dict( - roi_head=[dict( - type='CoStandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[8, 16, 32, 64], - finest_scale=112), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0*num_dec_layer*lambda_2), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0*num_dec_layer*lambda_2)), - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', 
output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[8, 16, 32, 64], - finest_scale=112), - mask_head=dict( - type='FCNMaskHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0*num_dec_layer*lambda_2)), - )], - # model training and testing settings - train_cfg=[ - dict( - assigner=dict( - type='HungarianAssigner', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), - iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))), - dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=4000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False)), - dict( - assigner=dict(type='ATSSAssigner', topk=9), - allowed_border=-1, - pos_weight=-1, - debug=False),], - test_cfg=[ - dict(max_per_img=100), - dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.0, - nms=dict(type='nms', iou_threshold=0.5), - mask_thr_binary=0.5, - max_per_img=100)), - dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.0, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100), - # soft-nms is also supported for rcnn testing - # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) - ]) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different -# from the default setting in mmdet. 
-train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='AutoAugment', - policies=[ - [ - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict( - type='Resize', - # The radio of all image in train dataset < 7 - # follow the original impl - img_scale=[(400, 4200), (500, 4200), (600, 4200)], - multiscale_mode='value', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) -] -# test_pipeline, NOTE the Pad's size_divisor is different from the default -# setting (size_divisor=32). While there is little effect on the performance -# whether we use the default setting or use size_divisor=1. -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] - -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(filter_empty_gt=False, pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_r50_1x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_r50_1x_coco.py deleted file mode 100644 index 545b03887f5656527326c571bb81dc8d3c468f6b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_r50_1x_coco.py +++ /dev/null @@ -1,314 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/default_runtime.py' -] -# model settings -num_dec_layer = 6 -lambda_2 = 2.0 - -model = dict( - type='CoDETR', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='ChannelMapper', - in_channels=[512, 1024, 2048], - kernel_size=1, - out_channels=256, - act_cfg=None, - norm_cfg=dict(type='GN', num_groups=32), - num_outs=4), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0*num_dec_layer*lambda_2), - 
loss_bbox=dict(type='L1Loss', loss_weight=1.0*num_dec_layer*lambda_2)), - query_head=dict( - type='CoDeformDETRHead', - num_query=300, - num_classes=80, - in_channels=2048, - sync_cls_avg_factor=True, - with_box_refine=True, - as_two_stage=True, - mixed_selection=True, - transformer=dict( - type='CoDeformableDetrTransformer', - num_co_heads=2, - encoder=dict( - type='DetrTransformerEncoder', - num_layers=6, - transformerlayers=dict( - type='BaseTransformerLayer', - attn_cfgs=dict( - type='MultiScaleDeformableAttention', embed_dims=256, dropout=0.0), - feedforward_channels=2048, - ffn_dropout=0.0, - operation_order=('self_attn', 'norm', 'ffn', 'norm'))), - decoder=dict( - type='CoDeformableDetrTransformerDecoder', - num_layers=num_dec_layer, - return_intermediate=True, - look_forward_twice=True, - transformerlayers=dict( - type='DetrTransformerDecoderLayer', - attn_cfgs=[ - dict( - type='MultiheadAttention', - embed_dims=256, - num_heads=8, - dropout=0.0), - dict( - type='MultiScaleDeformableAttention', - embed_dims=256, - dropout=0.0) - ], - feedforward_channels=2048, - ffn_dropout=0.0, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm')))), - positional_encoding=dict( - type='SinePositionalEncoding', - num_feats=128, - normalize=True, - offset=-0.5), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=5.0), - loss_iou=dict(type='GIoULoss', loss_weight=2.0)), - roi_head=[dict( - type='CoStandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[8, 16, 32, 64], - finest_scale=112), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0*num_dec_layer*lambda_2), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0*num_dec_layer*lambda_2)))], - bbox_head=[dict( - type='CoATSSHead', - num_classes=80, - in_channels=256, - stacked_convs=1, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0*num_dec_layer*lambda_2), - loss_bbox=dict(type='GIoULoss', loss_weight=2.0*num_dec_layer*lambda_2), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0*num_dec_layer*lambda_2)),], - # model training and testing settings - train_cfg=[ - dict( - assigner=dict( - type='HungarianAssigner', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), - iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))), - dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), 
- rpn_proposal=dict( - nms_pre=4000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - dict( - assigner=dict(type='ATSSAssigner', topk=9), - allowed_border=-1, - pos_weight=-1, - debug=False),], - test_cfg=[ - dict(max_per_img=100), - dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.0, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)), - dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.0, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100), - # soft-nms is also supported for rcnn testing - # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) - ]) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different -# from the default setting in mmdet. -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='AutoAugment', - policies=[ - [ - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict( - type='Resize', - # The radio of all image in train dataset < 7 - # follow the original impl - img_scale=[(400, 4200), (500, 4200), (600, 4200)], - multiscale_mode='value', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -# test_pipeline, NOTE the Pad's size_divisor is different from the default -# setting (size_divisor=32). While there is little effect on the performance -# whether we use the default setting or use size_divisor=1. 
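# (Illustrative sketch, not part of the deleted config above.) The NOTE refers to
# mmdet's `Pad` transform: with `size_divisor=k` it pads the bottom/right of each
# image so height and width become multiples of k, while `size_divisor=1` leaves
# the image unchanged; the only difference from the default is a small border of
# zero padding, hence the negligible effect on performance mentioned above.
# A minimal sketch of the arithmetic, using a hypothetical helper `padded_shape`:
#
#   import math
#
#   def padded_shape(h, w, size_divisor):
#       """Output shape of Pad(size_divisor=...) in an mmdet-style pipeline."""
#       return (math.ceil(h / size_divisor) * size_divisor,
#               math.ceil(w / size_divisor) * size_divisor)
#
#   padded_shape(750, 1201, 32)  # -> (768, 1216), the mmdet default (size_divisor=32)
#   padded_shape(750, 1201, 1)   # -> (750, 1201), no padding, as configured here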
-test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] - -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(filter_empty_gt=False, pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict( - type='AdamW', - lr=2e-4, - weight_decay=1e-4, - paramwise_cfg=dict( - custom_keys={ - 'backbone': dict(lr_mult=0.1), - 'sampling_offsets': dict(lr_mult=0.1), - 'reference_points': dict(lr_mult=0.1) - })) -optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) -# learning policy -lr_config = dict(policy='step', step=[11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_base_1x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_base_1x_coco.py deleted file mode 100644 index c9a4fba6300f8dda17124fe8b75ac5615b359d47..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_base_1x_coco.py +++ /dev/null @@ -1,23 +0,0 @@ -_base_ = [ - 'co_deformable_detr_r50_1x_coco.py' -] -pretrained = 'models/swin_base_patch4_window12_384_22k.pth' -# model settings -model = dict( - backbone=dict( - _delete_=True, - type='SwinTransformerV1', - embed_dim=128, - depths=[2, 2, 18, 2], - num_heads=[4, 8, 16, 32], - out_indices=(1, 2, 3), - window_size=12, - ape=False, - drop_path_rate=0.2, - patch_norm=True, - use_checkpoint=False, - pretrained=pretrained), - neck=dict(in_channels=[128*2, 128*4, 128*8])) - -# optimizer -optimizer = dict(weight_decay=0.05) diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_base_3x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_base_3x_coco.py deleted file mode 100644 index 27096d4cadfb9095c63f9310f7ea37ca52baa3b3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_base_3x_coco.py +++ /dev/null @@ -1,25 +0,0 @@ -_base_ = [ - 'co_deformable_detr_r50_1x_coco.py' -] -pretrained = 'models/swin_base_patch4_window12_384_22k.pth' -# model settings -model = dict( - backbone=dict( - _delete_=True, - type='SwinTransformerV1', - embed_dim=128, - depths=[2, 2, 18, 2], - num_heads=[4, 8, 16, 32], - out_indices=(1, 2, 3), - window_size=12, - ape=False, - drop_path_rate=0.4, - patch_norm=True, - use_checkpoint=False, - pretrained=pretrained), - neck=dict(in_channels=[128*2, 128*4, 128*8])) - -# optimizer -optimizer = dict(weight_decay=0.05) -lr_config = dict(policy='step', step=[30]) -runner = dict(type='EpochBasedRunner', max_epochs=36) \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_large_1x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_large_1x_coco.py deleted file mode 100644 index f90521fccdd430faa6c2fb7871cc9503ffc3d0f7..0000000000000000000000000000000000000000 --- 
a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_large_1x_coco.py +++ /dev/null @@ -1,23 +0,0 @@ -_base_ = [ - 'co_deformable_detr_r50_1x_coco.py' -] -pretrained = 'models/swin_large_patch4_window12_384_22k.pth' -# model settings -model = dict( - backbone=dict( - _delete_=True, - type='SwinTransformerV1', - embed_dim=192, - depths=[2, 2, 18, 2], - num_heads=[6, 12, 24, 48], - out_indices=(1, 2, 3), - window_size=12, - ape=False, - drop_path_rate=0.3, - patch_norm=True, - use_checkpoint=False, - pretrained=pretrained), - neck=dict(in_channels=[192*2, 192*4, 192*8])) - -# optimizer -optimizer = dict(weight_decay=0.05) diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_large_900q_3x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_large_900q_3x_coco.py deleted file mode 100644 index 2dc2f3abc5f0b8442628c0e880df76ea1cec4de5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_large_900q_3x_coco.py +++ /dev/null @@ -1,47 +0,0 @@ -_base_ = [ - 'co_deformable_detr_r50_1x_coco.py' -] -pretrained = 'models/swin_large_patch4_window12_384_22k.pth' -# model settings -model = dict( - backbone=dict( - _delete_=True, - type='SwinTransformerV1', - embed_dim=192, - depths=[2, 2, 18, 2], - num_heads=[6, 12, 24, 48], - out_indices=(1, 2, 3), - window_size=12, - ape=False, - drop_path_rate=0.6, - patch_norm=True, - use_checkpoint=False, - pretrained=pretrained), - neck=dict(in_channels=[192*2, 192*4, 192*8]), - query_head=dict(num_query=900), - test_cfg=[ - dict(max_per_img=300), - dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.0, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)), - dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.0, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100), - # soft-nms is also supported for rcnn testing - # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) - ]) - -# optimizer -optimizer = dict(weight_decay=0.05) -lr_config = dict(policy='step', step=[30]) -runner = dict(type='EpochBasedRunner', max_epochs=36) \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_small_1x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_small_1x_coco.py deleted file mode 100644 index 935f1e0cbbf4d50ee12d3f01fa1225e01b7356d4..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_small_1x_coco.py +++ /dev/null @@ -1,23 +0,0 @@ -_base_ = [ - 'co_deformable_detr_r50_1x_coco.py' -] -pretrained = 'models/swin_small_patch4_window7_224.pth' -# model settings -model = dict( - backbone=dict( - _delete_=True, - type='SwinTransformerV1', - embed_dim=96, - depths=[2, 2, 18, 2], - num_heads=[3, 6, 12, 24], - out_indices=(1, 2, 3), - window_size=7, - ape=False, - drop_path_rate=0.2, - patch_norm=True, - use_checkpoint=False, - pretrained=pretrained), - neck=dict(in_channels=[96*2, 96*4, 96*8])) - -# optimizer -optimizer = dict(weight_decay=0.05) diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_small_3x_coco.py 
b/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_small_3x_coco.py deleted file mode 100644 index 43c1ccc84cc583413633cd3d00615c41a7dae709..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_small_3x_coco.py +++ /dev/null @@ -1,25 +0,0 @@ -_base_ = [ - 'co_deformable_detr_r50_1x_coco.py' -] -pretrained = 'models/swin_small_patch4_window7_224.pth' -# model settings -model = dict( - backbone=dict( - _delete_=True, - type='SwinTransformerV1', - embed_dim=96, - depths=[2, 2, 18, 2], - num_heads=[3, 6, 12, 24], - out_indices=(1, 2, 3), - window_size=7, - ape=False, - drop_path_rate=0.4, - patch_norm=True, - use_checkpoint=False, - pretrained=pretrained), - neck=dict(in_channels=[96*2, 96*4, 96*8])) - -# optimizer -optimizer = dict(weight_decay=0.05) -lr_config = dict(policy='step', step=[30]) -runner = dict(type='EpochBasedRunner', max_epochs=36) \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_tiny_1x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_tiny_1x_coco.py deleted file mode 100644 index 2b1e55ca4742752a1a5beedb25634da11cb9c5db..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_tiny_1x_coco.py +++ /dev/null @@ -1,23 +0,0 @@ -_base_ = [ - 'co_deformable_detr_r50_1x_coco.py' -] -pretrained = 'models/swin_tiny_patch4_window7_224.pth' -# model settings -model = dict( - backbone=dict( - _delete_=True, - type='SwinTransformerV1', - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - out_indices=(1, 2, 3), - window_size=7, - ape=False, - drop_path_rate=0.2, - patch_norm=True, - use_checkpoint=False, - pretrained=pretrained), - neck=dict(in_channels=[96*2, 96*4, 96*8])) - -# optimizer -optimizer = dict(weight_decay=0.05) diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_tiny_3x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_tiny_3x_coco.py deleted file mode 100644 index a8f47efad700823c81874de4d6a8efa61149787d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_deformable_detr/co_deformable_detr_swin_tiny_3x_coco.py +++ /dev/null @@ -1,25 +0,0 @@ -_base_ = [ - 'co_deformable_detr_r50_1x_coco.py' -] -pretrained = 'models/swin_tiny_patch4_window7_224.pth' -# model settings -model = dict( - backbone=dict( - _delete_=True, - type='SwinTransformerV1', - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - out_indices=(1, 2, 3), - window_size=7, - ape=False, - drop_path_rate=0.3, - patch_norm=True, - use_checkpoint=False, - pretrained=pretrained), - neck=dict(in_channels=[96*2, 96*4, 96*8])) - -# optimizer -optimizer = dict(weight_decay=0.05) -lr_config = dict(policy='step', step=[30]) -runner = dict(type='EpochBasedRunner', max_epochs=36) \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_9encoder_lsj_r50_1x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_9encoder_lsj_r50_1x_coco.py deleted file mode 100644 index 966bdcbdc6780c696a3d5709cdfc4d38104d9a24..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_9encoder_lsj_r50_1x_coco.py +++ 
/dev/null @@ -1,13 +0,0 @@ -_base_ = [ - 'co_dino_5scale_lsj_r50_1x_coco.py' -] -# model settings - -model = dict( - query_head=dict( - transformer=dict( - encoder=dict( - type='DetrTransformerEncoder', - num_layers=9, - # number of layers that use checkpoint - with_cp=9)))) \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_9encoder_lsj_r50_3x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_9encoder_lsj_r50_3x_coco.py deleted file mode 100644 index 04ad721b3aa44954313504d27da9610994b463f7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_9encoder_lsj_r50_3x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - 'co_dino_5scale_9encoder_lsj_r50_1x_coco.py' -] -lr_config = dict(policy='step', step=[30]) -runner = dict(type='EpochBasedRunner', max_epochs=36) \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_r50_1x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_r50_1x_coco.py deleted file mode 100644 index 42b3ce7f5a871511688ff4a4abeedbb0fa9eb2c7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_r50_1x_coco.py +++ /dev/null @@ -1,98 +0,0 @@ -_base_ = [ - 'co_dino_5scale_r50_1x_coco.py' -] - -model = dict(with_attn_mask=False) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -image_size = (1024, 1024) -load_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=image_size, - ratio_range=(0.1, 2.0), - multiscale_mode='range', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=image_size, - recompute_bbox=True, - allow_negative_crop=True), - dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), -] -train_pipeline = [ - dict(type='CopyPaste', max_num_pasted=100), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=image_size, - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data_root = 'data/coco/' -dataset_type = 'CocoDataset' -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='MultiImageMixDataset', - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - filter_empty_gt=False, - pipeline=load_pipeline), - pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# NOTE: LSJ with copy-paste requires segmentation annotations. 
-# If your training data only contains bounding boxes, please use the following code: - -# train_pipeline = [ -# dict(type='LoadImageFromFile'), -# dict(type='LoadAnnotations', with_bbox=True), -# dict( -# type='Resize', -# img_scale=image_size, -# ratio_range=(0.1, 2.0), -# multiscale_mode='range', -# keep_ratio=True), -# dict( -# type='RandomCrop', -# crop_type='absolute_range', -# crop_size=image_size, -# recompute_bbox=True, -# allow_negative_crop=True), -# dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), -# dict(type='RandomFlip', flip_ratio=0.5), -# dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), -# dict(type='Normalize', **img_norm_cfg), -# dict(type='DefaultFormatBundle'), -# dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -# ] -# data = dict( -# samples_per_gpu=2, -# workers_per_gpu=2, -# train=dict(filter_empty_gt=False, pipeline=train_pipeline), -# val=dict(pipeline=test_pipeline), -# test=dict(pipeline=test_pipeline)) diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_r50_1x_lvis.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_r50_1x_lvis.py deleted file mode 100644 index 5680a2500d2b686bf0556e7a86949433b853253c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_r50_1x_lvis.py +++ /dev/null @@ -1,67 +0,0 @@ -_base_ = [ - 'co_dino_5scale_r50_1x_lvis.py' -] - -model = dict(with_attn_mask=False) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -image_size = (1024, 1024) -load_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=image_size, - ratio_range=(0.1, 2.0), - multiscale_mode='range', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=image_size, - recompute_bbox=True, - allow_negative_crop=True), - dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), -] -train_pipeline = [ - dict(type='CopyPaste', max_num_pasted=100), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=image_size, - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -dataset_type = 'LVISV1Dataset' -data_root = 'data/lvis_v1/' -img_data_root = 'data/coco/' -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='MultiImageMixDataset', - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_train.json', - img_prefix=img_data_root, - filter_empty_gt=False, - pipeline=load_pipeline), - pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_r50_3x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_r50_3x_coco.py deleted file mode 100644 index 
610140196b614da11576227933240dd48b360057..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_r50_3x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - 'co_dino_5scale_lsj_r50_1x_coco.py' -] -lr_config = dict(policy='step', step=[30]) -runner = dict(type='EpochBasedRunner', max_epochs=36) \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_16e_o365tolvis.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_16e_o365tolvis.py deleted file mode 100644 index 0b902cc1952c8edca3bde6d0edfc1e7a434c8c23..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_16e_o365tolvis.py +++ /dev/null @@ -1,119 +0,0 @@ -_base_ = [ - 'co_dino_5scale_lsj_swin_large_3x_lvis.py' -] - -load_from = 'models/co_dino_5scale_swin_large_22e_o365.pth' -pretrained = None -# model settings -model = dict( - query_head=dict( - dn_cfg=dict( - type='CdnQueryGenerator', - noise_scale=dict(label=0.5, box=0.4), - group_cfg=dict(dynamic=True, num_groups=None, num_dn_queries=500)), - transformer=dict( - encoder=dict( - # number of layers that use checkpoint - with_cp=6))), - test_cfg=[ - dict( - max_per_img=1000, - nms=dict(type='soft_nms', iou_threshold=0.8) - ), - dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.0, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=1000)), - dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.0, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=1000), - # soft-nms is also supported for rcnn testing - # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) - ]) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -image_size = (1536, 1536) -load_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=image_size, - ratio_range=(0.1, 2.0), - multiscale_mode='range', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=image_size, - recompute_bbox=True, - allow_negative_crop=True), - dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), -] -train_pipeline = [ - dict(type='CopyPaste', max_num_pasted=100), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=image_size, - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -dataset_type = 'LVISV1Dataset' -data_root = 'data/lvis_v1/' -img_data_root = 'data/coco/' -data = dict( - samples_per_gpu=1, - workers_per_gpu=1, - train=dict( - type='MultiImageMixDataset', - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_train.json', - img_prefix=img_data_root, - filter_empty_gt=False, - pipeline=load_pipeline), - 
pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -optimizer = dict( - type='AdamW', - lr=1e-4, - weight_decay=0.0001, - # custom_keys of sampling_offsets and reference_points in DeformDETR - paramwise_cfg=dict(custom_keys={'backbone': dict(lr_mult=0.1)})) - -optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) -# learning policy -lr_config = dict(policy='step', step=[8]) -runner = dict(type='EpochBasedRunner', max_epochs=16) -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (16 GPUs) x (1 samples per GPU) -auto_scale_lr = dict(base_batch_size=16) diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_1x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_1x_coco.py deleted file mode 100644 index 6769a4730f9e7ec05577a3d24fd246f080edc2bc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_1x_coco.py +++ /dev/null @@ -1,118 +0,0 @@ -_base_ = [ - 'co_dino_5scale_lsj_r50_1x_coco.py' -] -pretrained = 'models/swin_large_patch4_window12_384_22k.pth' -# model settings -model = dict( - backbone=dict( - _delete_=True, - type='SwinTransformerV1', - embed_dim=192, - depths=[2, 2, 18, 2], - num_heads=[6, 12, 24, 48], - out_indices=(0, 1, 2, 3), - window_size=12, - ape=False, - drop_path_rate=0.3, - patch_norm=True, - use_checkpoint=False, - pretrained=pretrained), - neck=dict(in_channels=[192, 192*2, 192*4, 192*8]), - query_head=dict( - transformer=dict( - encoder=dict( - # number of layers that use checkpoint - with_cp=6)))) - - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -image_size = (1280, 1280) -load_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=image_size, - ratio_range=(0.1, 2.0), - multiscale_mode='range', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=image_size, - recompute_bbox=True, - allow_negative_crop=True), - dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), -] -train_pipeline = [ - dict(type='CopyPaste', max_num_pasted=100), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=image_size, - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data_root = 'data/coco/' -dataset_type = 'CocoDataset' -data = dict( - samples_per_gpu=1, - workers_per_gpu=1, - train=dict( - type='MultiImageMixDataset', - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - filter_empty_gt=False, - pipeline=load_pipeline), - pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# NOTE: LSJ with copy-paste requires 
segmentation annotations. -# If your training data only contains bounding boxes, please use the following code: - -# train_pipeline = [ -# dict(type='LoadImageFromFile'), -# dict(type='LoadAnnotations', with_bbox=True), -# dict( -# type='Resize', -# img_scale=image_size, -# ratio_range=(0.1, 2.0), -# multiscale_mode='range', -# keep_ratio=True), -# dict( -# type='RandomCrop', -# crop_type='absolute_range', -# crop_size=image_size, -# recompute_bbox=True, -# allow_negative_crop=True), -# dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), -# dict(type='RandomFlip', flip_ratio=0.5), -# dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), -# dict(type='Normalize', **img_norm_cfg), -# dict(type='DefaultFormatBundle'), -# dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -# ] -# data = dict( -# samples_per_gpu=1, -# workers_per_gpu=1, -# train=dict(filter_empty_gt=False, pipeline=train_pipeline), -# val=dict(pipeline=test_pipeline), -# test=dict(pipeline=test_pipeline)) \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_2x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_2x_coco.py deleted file mode 100644 index 528514dd4c90e7edffd3a9502b092782a94d0b24..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_2x_coco.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - 'co_dino_5scale_lsj_swin_large_1x_coco.py' -] -# model settings -model = dict( - backbone=dict(drop_path_rate=0.5)) - -lr_config = dict(policy='step', step=[20]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_3x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_3x_coco.py deleted file mode 100644 index b1bf8cb77837771e8d3ea51a460961f950eb5607..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_3x_coco.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - 'co_dino_5scale_lsj_swin_large_1x_coco.py' -] -# model settings -model = dict( - backbone=dict(drop_path_rate=0.5)) - -lr_config = dict(policy='step', step=[30]) -runner = dict(type='EpochBasedRunner', max_epochs=36) \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_3x_lvis.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_3x_lvis.py deleted file mode 100644 index d9cd2f21e22477bef6760ae32661baa6d6708334..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_lsj_swin_large_3x_lvis.py +++ /dev/null @@ -1,87 +0,0 @@ -_base_ = [ - 'co_dino_5scale_lsj_r50_1x_lvis.py' -] -pretrained = 'models/co_dino_5scale_lsj_swin_large_3x_lvis.pth' -# model settings -model = dict( - backbone=dict( - _delete_=True, - type='SwinTransformerV1', - embed_dim=192, - depths=[2, 2, 18, 2], - num_heads=[6, 12, 24, 48], - out_indices=(0, 1, 2, 3), - window_size=12, - ape=False, - drop_path_rate=0.3, - patch_norm=True, - use_checkpoint=False, - pretrained=pretrained), - neck=dict(in_channels=[192, 192*2, 192*4, 192*8]), - query_head=dict( - transformer=dict( - encoder=dict( - # number of layers that use checkpoint - with_cp=6)))) - - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], 
std=[58.395, 57.12, 57.375], to_rgb=True) -image_size = (1280, 1280) -load_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='Resize', - img_scale=image_size, - ratio_range=(0.1, 2.0), - multiscale_mode='range', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=image_size, - recompute_bbox=True, - allow_negative_crop=True), - dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), -] -train_pipeline = [ - dict(type='CopyPaste', max_num_pasted=100), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=image_size, - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Pad', size=image_size, pad_val=dict(img=(114, 114, 114))), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -dataset_type = 'LVISV1Dataset' -data_root = 'data/lvis_v1/' -img_data_root = 'data/coco/' -data = dict( - samples_per_gpu=1, - workers_per_gpu=1, - train=dict( - type='MultiImageMixDataset', - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/lvis_v1_train.json', - img_prefix=img_data_root, - filter_empty_gt=False, - pipeline=load_pipeline), - pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_r50_1x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_r50_1x_coco.py deleted file mode 100644 index 9a9ade6dfbcfdcab5c6b822b7065224326dc74bc..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_r50_1x_coco.py +++ /dev/null @@ -1,327 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/default_runtime.py' -] -# model settings -num_dec_layer = 6 -lambda_2 = 2.0 - -model = dict( - type='CoDETR', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='ChannelMapper', - in_channels=[256, 512, 1024, 2048], - kernel_size=1, - out_channels=256, - act_cfg=None, - norm_cfg=dict(type='GN', num_groups=32), - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0*num_dec_layer*lambda_2), - loss_bbox=dict(type='L1Loss', loss_weight=1.0*num_dec_layer*lambda_2)), - query_head=dict( - type='CoDINOHead', - num_query=900, - num_classes=80, - num_feature_levels=5, - in_channels=2048, - sync_cls_avg_factor=True, - as_two_stage=True, - with_box_refine=True, - mixed_selection=True, - 
dn_cfg=dict( - type='CdnQueryGenerator', - noise_scale=dict(label=0.5, box=1.0), # 0.5, 0.4 for DN-DETR - group_cfg=dict(dynamic=True, num_groups=None, num_dn_queries=100)), - transformer=dict( - type='CoDinoTransformer', - with_pos_coord=True, - with_coord_feat=False, - num_co_heads=2, - num_feature_levels=5, - encoder=dict( - type='DetrTransformerEncoder', - num_layers=6, - with_cp=4, # number of layers that use checkpoint - transformerlayers=dict( - type='BaseTransformerLayer', - attn_cfgs=dict( - type='MultiScaleDeformableAttention', embed_dims=256, num_levels=5, dropout=0.0), - feedforward_channels=2048, - ffn_dropout=0.0, - operation_order=('self_attn', 'norm', 'ffn', 'norm'))), - decoder=dict( - type='DinoTransformerDecoder', - num_layers=6, - return_intermediate=True, - transformerlayers=dict( - type='DetrTransformerDecoderLayer', - attn_cfgs=[ - dict( - type='MultiheadAttention', - embed_dims=256, - num_heads=8, - dropout=0.0), - dict( - type='MultiScaleDeformableAttention', - embed_dims=256, - num_levels=5, - dropout=0.0), - ], - feedforward_channels=2048, - ffn_dropout=0.0, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm')))), - positional_encoding=dict( - type='SinePositionalEncoding', - num_feats=128, - temperature=20, - normalize=True), - loss_cls=dict( - type='QualityFocalLoss', - use_sigmoid=True, - beta=2.0, - loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=5.0), - loss_iou=dict(type='GIoULoss', loss_weight=2.0)), - roi_head=[dict( - type='CoStandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32, 64], - finest_scale=56), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0*num_dec_layer*lambda_2), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0*num_dec_layer*lambda_2)))], - bbox_head=[dict( - type='CoATSSHead', - num_classes=80, - in_channels=256, - stacked_convs=1, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[4, 8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0*num_dec_layer*lambda_2), - loss_bbox=dict(type='GIoULoss', loss_weight=2.0*num_dec_layer*lambda_2), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0*num_dec_layer*lambda_2)),], - # model training and testing settings - train_cfg=[ - dict( - assigner=dict( - type='HungarianAssigner', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), - iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))), - dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - 
rpn_proposal=dict( - nms_pre=4000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - dict( - assigner=dict(type='ATSSAssigner', topk=9), - allowed_border=-1, - pos_weight=-1, - debug=False),], - test_cfg=[ - dict( - max_per_img=300, - nms=dict(type='soft_nms', iou_threshold=0.8)), - dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.0, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)), - dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.0, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100), - # soft-nms is also supported for rcnn testing - # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) - ]) -#find_unused_parameters = True -#fp16 = dict(loss_scale=dict(init_scale=512)) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different -# from the default setting in mmdet. -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='AutoAugment', - policies=[ - [ - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict( - type='Resize', - # The radio of all image in train dataset < 7 - # follow the original impl - img_scale=[(400, 4200), (500, 4200), (600, 4200)], - multiscale_mode='value', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -# test_pipeline, NOTE the Pad's size_divisor is different from the default -# setting (size_divisor=32). While there is little effect on the performance -# whether we use the default setting or use size_divisor=1. 
-test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] - -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(filter_empty_gt=False, pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict( - type='AdamW', - lr=2e-4, - weight_decay=0.0001, - # custom_keys of sampling_offsets and reference_points in DeformDETR - paramwise_cfg=dict(custom_keys={'backbone': dict(lr_mult=0.1)})) - -optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) -# learning policy -lr_config = dict(policy='step', step=[11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (8 GPUs) x (2 samples per GPU) -auto_scale_lr = dict(base_batch_size=16) diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_r50_1x_lvis.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_r50_1x_lvis.py deleted file mode 100644 index 35e4a94b0691509e322bcd3ca831cb1ebacf7a42..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_r50_1x_lvis.py +++ /dev/null @@ -1,328 +0,0 @@ -_base_ = [ - '../_base_/datasets/lvis_v1_instance.py', - '../_base_/default_runtime.py' -] -# model settings -num_dec_layer = 6 -lambda_2 = 2.0 - -model = dict( - type='CoDETR', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='ChannelMapper', - in_channels=[256, 512, 1024, 2048], - kernel_size=1, - out_channels=256, - act_cfg=None, - norm_cfg=dict(type='GN', num_groups=32), - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0*num_dec_layer*lambda_2), - loss_bbox=dict(type='L1Loss', loss_weight=1.0*num_dec_layer*lambda_2)), - query_head=dict( - type='CoDINOHead', - num_query=900, - num_classes=1203, - num_feature_levels=5, - in_channels=2048, - sync_cls_avg_factor=True, - as_two_stage=True, - with_box_refine=True, - mixed_selection=True, - dn_cfg=dict( - type='CdnQueryGenerator', - noise_scale=dict(label=0.5, box=1.0), # 0.5, 0.4 for DN-DETR - group_cfg=dict(dynamic=True, num_groups=None, num_dn_queries=100)), - transformer=dict( - type='CoDinoTransformer', - with_pos_coord=True, - with_coord_feat=False, - num_co_heads=2, - num_feature_levels=5, - encoder=dict( - type='DetrTransformerEncoder', - num_layers=6, - with_cp=4, # number of layers that use checkpoint - transformerlayers=dict( - type='BaseTransformerLayer', - attn_cfgs=dict( - type='MultiScaleDeformableAttention', embed_dims=256, num_levels=5, 
dropout=0.0), - feedforward_channels=2048, - ffn_dropout=0.0, - operation_order=('self_attn', 'norm', 'ffn', 'norm'))), - decoder=dict( - type='DinoTransformerDecoder', - num_layers=6, - return_intermediate=True, - transformerlayers=dict( - type='DetrTransformerDecoderLayer', - attn_cfgs=[ - dict( - type='MultiheadAttention', - embed_dims=256, - num_heads=8, - dropout=0.0), - dict( - type='MultiScaleDeformableAttention', - embed_dims=256, - num_levels=5, - dropout=0.0), - ], - feedforward_channels=2048, - ffn_dropout=0.0, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm')))), - positional_encoding=dict( - type='SinePositionalEncoding', - num_feats=128, - temperature=20, - normalize=True), - loss_cls=dict( - type='QualityFocalLoss', - use_sigmoid=True, - beta=2.0, - loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=5.0), - loss_iou=dict(type='GIoULoss', loss_weight=2.0)), - roi_head=[dict( - type='CoStandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32, 64], - finest_scale=56), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=1203, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - reg_decoded_bbox=True, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0*num_dec_layer*lambda_2), - loss_bbox=dict(type='GIoULoss', loss_weight=10.0*num_dec_layer*lambda_2)))], - bbox_head=[dict( - type='CoATSSHead', - num_classes=1203, - in_channels=256, - stacked_convs=1, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[4, 8, 16, 32, 64, 128]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.1, 0.1, 0.2, 0.2]), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0*num_dec_layer*lambda_2), - loss_bbox=dict(type='GIoULoss', loss_weight=2.0*num_dec_layer*lambda_2), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0*num_dec_layer*lambda_2)),], - # model training and testing settings - train_cfg=[ - dict( - assigner=dict( - type='HungarianAssigner', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), - iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))), - dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=4000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - dict( - assigner=dict(type='ATSSAssigner', topk=9), - allowed_border=-1, - pos_weight=-1, - debug=False),], - test_cfg=[ - dict( - 
max_per_img=300, - nms=dict(type='soft_nms', iou_threshold=0.8) - ), - dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.0, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)), - dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.0, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100), - # soft-nms is also supported for rcnn testing - # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) - ]) -#find_unused_parameters = True -#fp16 = dict(loss_scale=dict(init_scale=512)) -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different -# from the default setting in mmdet. -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='AutoAugment', - policies=[ - [ - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict( - type='Resize', - # The radio of all image in train dataset < 7 - # follow the original impl - img_scale=[(400, 4200), (500, 4200), (600, 4200)], - multiscale_mode='value', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict( - type='Resize', - img_scale=[(480, 1333), (512, 1333), (544, 1333), - (576, 1333), (608, 1333), (640, 1333), - (672, 1333), (704, 1333), (736, 1333), - (768, 1333), (800, 1333)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -# test_pipeline, NOTE the Pad's size_divisor is different from the default -# setting (size_divisor=32). While there is little effect on the performance -# whether we use the default setting or use size_divisor=1. -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=1), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] - -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict(filter_empty_gt=False, pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict( - type='AdamW', - lr=2e-4, - weight_decay=0.0001, - # custom_keys of sampling_offsets and reference_points in DeformDETR - paramwise_cfg=dict(custom_keys={'backbone': dict(lr_mult=0.1)})) - -optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) -# learning policy -lr_config = dict(policy='step', step=[11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) - -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. 
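The `auto_scale_lr`/`base_batch_size` entry noted here is only consulted when LR auto-scaling is switched on at launch; the sketch below (the helper name and the 16-GPU example are illustrative, not part of this config) shows the linear scaling rule it is generally understood to apply.

```python
# Illustrative only: linear LR scaling against the config's base_batch_size.
def linearly_scaled_lr(base_lr, num_gpus, samples_per_gpu, base_batch_size=16):
    """Scale the optimizer LR by (actual total batch size / base batch size)."""
    return base_lr * (num_gpus * samples_per_gpu) / base_batch_size

# With the settings above (lr=2e-4, base_batch_size=16):
print(linearly_scaled_lr(2e-4, num_gpus=8, samples_per_gpu=2))   # 2e-4 (unchanged)
print(linearly_scaled_lr(2e-4, num_gpus=16, samples_per_gpu=2))  # 4e-4 (doubled)
```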
-# base_batch_size = (8 GPUs) x (2 samples per GPU) -auto_scale_lr = dict(base_batch_size=16) \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_swin_large_16e_o365tococo.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_swin_large_16e_o365tococo.py deleted file mode 100644 index 5f9819593e1829cc5eac6255e797b3c26e7359ca..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_swin_large_16e_o365tococo.py +++ /dev/null @@ -1,128 +0,0 @@ -_base_ = [ - 'co_dino_5scale_r50_1x_coco.py' -] - -load_from = 'models/co_dino_5scale_swin_large_22e_o365.pth' -pretrained = None -# model settings -model = dict( - backbone=dict( - _delete_=True, - type='SwinTransformerV1', - embed_dim=192, - depths=[2, 2, 18, 2], - num_heads=[6, 12, 24, 48], - out_indices=(0, 1, 2, 3), - window_size=12, - ape=False, - drop_path_rate=0.3, - patch_norm=True, - use_checkpoint=True, - pretrained=pretrained), - neck=dict(in_channels=[192, 192*2, 192*4, 192*8]), - query_head=dict( - dn_cfg=dict( - type='CdnQueryGenerator', - noise_scale=dict(label=0.5, box=0.4), - group_cfg=dict(dynamic=True, num_groups=None, num_dn_queries=500)), - transformer=dict( - encoder=dict( - # number of layers that use checkpoint - with_cp=6)))) - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='AutoAugment', - policies=[ - [ - dict( - type='Resize', - img_scale=[(480, 2048), (512, 2048), (544, 2048), (576, 2048), - (608, 2048), (640, 2048), (672, 2048), (704, 2048), - (736, 2048), (768, 2048), (800, 2048), (832, 2048), - (864, 2048), (896, 2048), (928, 2048), (960, 2048), - (992, 2048), (1024, 2048), (1056, 2048), (1088, 2048), - (1120, 2048), (1152, 2048), (1184, 2048), (1216, 2048), - (1248, 2048), (1280, 2048), (1312, 2048), (1344, 2048), - (1376, 2048), (1408, 2048), (1440, 2048), (1472, 2048), - (1504, 2048), (1536, 2048)], - multiscale_mode='value', - keep_ratio=True) - ], - [ - dict( - type='Resize', - # The radio of all image in train dataset < 7 - # follow the original impl - img_scale=[(400, 4200), (500, 4200), (600, 4200)], - multiscale_mode='value', - keep_ratio=True), - dict( - type='RandomCrop', - crop_type='absolute_range', - crop_size=(384, 600), - allow_negative_crop=True), - dict( - type='Resize', - img_scale=[(480, 2048), (512, 2048), (544, 2048), (576, 2048), - (608, 2048), (640, 2048), (672, 2048), (704, 2048), - (736, 2048), (768, 2048), (800, 2048), (832, 2048), - (864, 2048), (896, 2048), (928, 2048), (960, 2048), - (992, 2048), (1024, 2048), (1056, 2048), (1088, 2048), - (1120, 2048), (1152, 2048), (1184, 2048), (1216, 2048), - (1248, 2048), (1280, 2048), (1312, 2048), (1344, 2048), - (1376, 2048), (1408, 2048), (1440, 2048), (1472, 2048), - (1504, 2048), (1536, 2048)], - multiscale_mode='value', - override=True, - keep_ratio=True) - ] - ]), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 1280), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - 
dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - samples_per_gpu=1, - workers_per_gpu=1, - train=dict(filter_empty_gt=False, pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -# optimizer -optimizer = dict( - type='AdamW', - lr=1e-4, - weight_decay=0.0001, - # custom_keys of sampling_offsets and reference_points in DeformDETR - paramwise_cfg=dict(custom_keys={'backbone': dict(lr_mult=0.1)})) - -optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) -# learning policy -lr_config = dict(policy='step', step=[8]) -runner = dict(type='EpochBasedRunner', max_epochs=16) -# NOTE: `auto_scale_lr` is for automatically scaling LR, -# USER SHOULD NOT CHANGE ITS VALUES. -# base_batch_size = (16 GPUs) x (1 samples per GPU) -auto_scale_lr = dict(base_batch_size=16) diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_swin_large_1x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_swin_large_1x_coco.py deleted file mode 100644 index 23c257dfb2d0ddb892d98f87d5d34808498b6318..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_swin_large_1x_coco.py +++ /dev/null @@ -1,29 +0,0 @@ -_base_ = [ - 'co_dino_5scale_r50_1x_coco.py' -] -pretrained = 'models/swin_large_patch4_window12_384_22k.pth' -# model settings -model = dict( - backbone=dict( - _delete_=True, - type='SwinTransformerV1', - embed_dim=192, - depths=[2, 2, 18, 2], - num_heads=[6, 12, 24, 48], - out_indices=(0, 1, 2, 3), - window_size=12, - ape=False, - drop_path_rate=0.3, - patch_norm=True, - use_checkpoint=False, - pretrained=pretrained), - neck=dict(in_channels=[192, 192*2, 192*4, 192*8]), - query_head=dict( - transformer=dict( - encoder=dict( - # number of layers that use checkpoint - with_cp=6)))) - -data = dict( - samples_per_gpu=1, - workers_per_gpu=1) \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_swin_large_2x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_swin_large_2x_coco.py deleted file mode 100644 index 3e93268bd47627c70b329bd5052bb76b2f20b154..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_swin_large_2x_coco.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - 'co_dino_5scale_swin_large_1x_coco.py' -] -# model settings -model = dict( - backbone=dict(drop_path_rate=0.5)) - -lr_config = dict(policy='step', step=[20]) -runner = dict(type='EpochBasedRunner', max_epochs=24) \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_swin_large_3x_coco.py b/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_swin_large_3x_coco.py deleted file mode 100644 index 8bc064ef4ceee19522c20bafdf797bcecc4f4f3f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/configs/co_dino/co_dino_5scale_swin_large_3x_coco.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - 'co_dino_5scale_swin_large_1x_coco.py' -] -# model settings -model = dict( - backbone=dict(drop_path_rate=0.6)) - -lr_config = dict(policy='step', step=[30]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/cv/detection/co-detr/pytorch/projects/models/__init__.py b/cv/detection/co-detr/pytorch/projects/models/__init__.py deleted file mode 100644 index 
9e2d468c9bf4fac2595d91601bc961b745176a75..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/models/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .co_detr import * -from .co_deformable_detr_head import * -from .co_dino_head import * -from .co_atss_head import * -from .co_roi_head import * -from .transformer import * -from .query_denoising import build_dn_generator -from .swin_transformer import * diff --git a/cv/detection/co-detr/pytorch/projects/models/co_atss_head.py b/cv/detection/co-detr/pytorch/projects/models/co_atss_head.py deleted file mode 100644 index a49bc311483c79b9aab27492dbefa662de09c435..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/models/co_atss_head.py +++ /dev/null @@ -1,538 +0,0 @@ -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, Scale -from mmcv.runner import force_fp32 - -from mmdet.core import (anchor_inside_flags, build_assigner, build_sampler, - images_to_levels, multi_apply, reduce_mean, unmap) -from mmdet.models.builder import HEADS, build_loss -from mmdet.models.dense_heads.anchor_head import AnchorHead - - -@HEADS.register_module() -class CoATSSHead(AnchorHead): - """Bridging the Gap Between Anchor-based and Anchor-free Detection via - Adaptive Training Sample Selection. - - ATSS head structure is similar with FCOS, however ATSS use anchor boxes - and assign label by Adaptive Training Sample Selection instead max-iou. - - https://arxiv.org/abs/1912.02424 - """ - - def __init__(self, - num_classes, - in_channels, - stacked_convs=4, - conv_cfg=None, - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), - reg_decoded_bbox=True, - loss_centerness=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - init_cfg=dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', - name='atss_cls', - std=0.01, - bias_prob=0.01)), - **kwargs): - self.stacked_convs = stacked_convs - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - super(CoATSSHead, self).__init__( - num_classes, - in_channels, - reg_decoded_bbox=reg_decoded_bbox, - init_cfg=init_cfg, - **kwargs) - - self.sampling = False - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - # SSD sampling=False so use PseudoSampler - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - self.loss_centerness = build_loss(loss_centerness) - - - def _init_layers(self): - """Initialize layers of the head.""" - self.relu = nn.ReLU(inplace=True) - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.atss_cls = nn.Conv2d( - self.feat_channels, - self.num_anchors * self.cls_out_channels, - 3, - padding=1) - self.atss_reg = nn.Conv2d( - self.feat_channels, self.num_base_priors * 4, 3, padding=1) - self.atss_centerness = nn.Conv2d( - self.feat_channels, self.num_base_priors * 1, 3, padding=1) - self.scales = nn.ModuleList( - [Scale(1.0) for _ in self.prior_generator.strides]) - - def forward(self, feats): - """Forward features from the upstream network. 
- - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: Usually a tuple of classification scores and bbox prediction - cls_scores (list[Tensor]): Classification scores for all scale - levels, each is a 4D-tensor, the channels number is - num_anchors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale - levels, each is a 4D-tensor, the channels number is - num_anchors * 4. - """ - return multi_apply(self.forward_single, feats, self.scales) - - def forward_single(self, x, scale): - """Forward feature of a single scale level. - - Args: - x (Tensor): Features of a single scale level. - scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize - the bbox prediction. - - Returns: - tuple: - cls_score (Tensor): Cls scores for a single scale level - the channels number is num_anchors * num_classes. - bbox_pred (Tensor): Box energies / deltas for a single scale - level, the channels number is num_anchors * 4. - centerness (Tensor): Centerness for a single scale level, the - channel number is (N, num_anchors * 1, H, W). - """ - cls_feat = x - reg_feat = x - for cls_conv in self.cls_convs: - cls_feat = cls_conv(cls_feat) - for reg_conv in self.reg_convs: - reg_feat = reg_conv(reg_feat) - cls_score = self.atss_cls(cls_feat) - # we just follow atss, not apply exp in bbox_pred - bbox_pred = scale(self.atss_reg(reg_feat)).float() - centerness = self.atss_centerness(reg_feat) - return cls_score, bbox_pred, centerness - - def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels, - label_weights, bbox_targets, img_metas, num_total_samples): - """Compute loss of a single scale level. - - Args: - cls_score (Tensor): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W). - bbox_pred (Tensor): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W). - anchors (Tensor): Box reference for each scale level with shape - (N, num_total_anchors, 4). - labels (Tensor): Labels of each anchors with shape - (N, num_total_anchors). - label_weights (Tensor): Label weights of each anchor with shape - (N, num_total_anchors) - bbox_targets (Tensor): BBox regression targets of each anchor - weight shape (N, num_total_anchors, 4). - num_total_samples (int): Number os positive samples that is - reduced over all GPUs. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
- """ - anchors = anchors.reshape(-1, 4) - cls_score = cls_score.permute(0, 2, 3, 1).reshape( - -1, self.cls_out_channels).contiguous() - bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) - centerness = centerness.permute(0, 2, 3, 1).reshape(-1) - bbox_targets = bbox_targets.reshape(-1, 4) - labels = labels.reshape(-1) - label_weights = label_weights.reshape(-1) - - # classification loss - loss_cls = self.loss_cls( - cls_score, labels, label_weights, avg_factor=num_total_samples) - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - bg_class_ind = self.num_classes - pos_inds = ((labels >= 0) - & (labels < bg_class_ind)).nonzero().squeeze(1) - - if len(pos_inds) > 0: - pos_bbox_targets = bbox_targets[pos_inds] - pos_bbox_pred = bbox_pred[pos_inds] - pos_anchors = anchors[pos_inds] - pos_centerness = centerness[pos_inds] - - centerness_targets = self.centerness_target( - pos_anchors, pos_bbox_targets) - pos_decode_bbox_pred = self.bbox_coder.decode( - pos_anchors, pos_bbox_pred) - - # regression loss - loss_bbox = self.loss_bbox( - pos_decode_bbox_pred, - pos_bbox_targets, - weight=centerness_targets, - avg_factor=1.0) - - # centerness loss - loss_centerness = self.loss_centerness( - pos_centerness, - centerness_targets, - avg_factor=num_total_samples) - - else: - loss_bbox = bbox_pred.sum() * 0 - loss_centerness = centerness.sum() * 0 - centerness_targets = bbox_targets.new_tensor(0.) - - return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum() - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) - def loss(self, - cls_scores, - bbox_preds, - centernesses, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - centernesses (list[Tensor]): Centerness for each scale - level with shape (N, num_anchors * 1, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor] | None): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels) - if cls_reg_targets is None: - return None - - (anchor_list, labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg, - ori_anchors, ori_labels, ori_bbox_targets) = cls_reg_targets - num_total_samples = reduce_mean( - torch.tensor(num_total_pos, dtype=torch.float, - device=device)).item() - num_total_samples = max(num_total_samples, 1.0) - new_img_metas = [img_metas for _ in range(len(anchor_list))] - losses_cls, losses_bbox, loss_centerness,\ - bbox_avg_factor = multi_apply( - self.loss_single, - anchor_list, - cls_scores, - bbox_preds, - centernesses, - labels_list, - label_weights_list, - bbox_targets_list, - new_img_metas, - num_total_samples=num_total_samples) - - bbox_avg_factor = sum(bbox_avg_factor) - bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item() - losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox)) - - pos_coords = (ori_anchors, ori_labels, ori_bbox_targets, 'atss') - return dict( - loss_cls=losses_cls, - loss_bbox=losses_bbox, - loss_centerness=loss_centerness, - pos_coords=pos_coords) - - def centerness_target(self, anchors, gts): - # only calculate pos centerness targets, otherwise there may be nan - anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 - anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 - l_ = anchors_cx - gts[:, 0] - t_ = anchors_cy - gts[:, 1] - r_ = gts[:, 2] - anchors_cx - b_ = gts[:, 3] - anchors_cy - - left_right = torch.stack([l_, r_], dim=1) - top_bottom = torch.stack([t_, b_], dim=1) - centerness = torch.sqrt( - (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * - (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])) - assert not torch.isnan(centerness).any() - return centerness - - def get_targets(self, - anchor_list, - valid_flag_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=1, - unmap_outputs=True): - """Get targets for ATSS head. - - This method is almost the same as `AnchorHead.get_targets()`. Besides - returning the targets as the parent method does, it also returns the - anchors as the first element of the returned tuple. 
- """ - num_imgs = len(img_metas) - assert len(anchor_list) == len(valid_flag_list) == num_imgs - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - num_level_anchors_list = [num_level_anchors] * num_imgs - - # concat all level anchors and flags to a single tensor - for i in range(num_imgs): - assert len(anchor_list[i]) == len(valid_flag_list[i]) - anchor_list[i] = torch.cat(anchor_list[i]) - valid_flag_list[i] = torch.cat(valid_flag_list[i]) - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - (all_anchors, all_labels, all_label_weights, all_bbox_targets, - all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( - self._get_target_single, - anchor_list, - valid_flag_list, - num_level_anchors_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - unmap_outputs=unmap_outputs) - # no valid anchors - if any([labels is None for labels in all_labels]): - return None - # sampled anchors of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - # split targets to a list w.r.t. multiple levels - ori_anchors = all_anchors - ori_labels = all_labels - ori_bbox_targets = all_bbox_targets - anchors_list = images_to_levels(all_anchors, num_level_anchors) - labels_list = images_to_levels(all_labels, num_level_anchors) - label_weights_list = images_to_levels(all_label_weights, - num_level_anchors) - bbox_targets_list = images_to_levels(all_bbox_targets, - num_level_anchors) - bbox_weights_list = images_to_levels(all_bbox_weights, - num_level_anchors) - return (anchors_list, labels_list, label_weights_list, - bbox_targets_list, bbox_weights_list, num_total_pos, - num_total_neg, ori_anchors, ori_labels, ori_bbox_targets) - - def _get_target_single(self, - flat_anchors, - valid_flags, - num_level_anchors, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=1, - unmap_outputs=True): - """Compute regression, classification targets for anchors in a single - image. - - Args: - flat_anchors (Tensor): Multi-level anchors of the image, which are - concatenated into a single tensor of shape (num_anchors ,4) - valid_flags (Tensor): Multi level valid flags of the image, - which are concatenated into a single tensor of - shape (num_anchors,). - num_level_anchors Tensor): Number of anchors of each scale level. - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - img_meta (dict): Meta info of the image. - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: N is the number of total anchors in the image. - labels (Tensor): Labels of all anchors in the image with shape - (N,). - label_weights (Tensor): Label weights of all anchor in the - image with shape (N,). - bbox_targets (Tensor): BBox targets of all anchors in the - image with shape (N, 4). - bbox_weights (Tensor): BBox weights of all anchors in the - image with shape (N, 4) - pos_inds (Tensor): Indices of positive anchor with shape - (num_pos,). 
- neg_inds (Tensor): Indices of negative anchor with shape - (num_neg,). - """ - inside_flags = anchor_inside_flags(flat_anchors, valid_flags, - img_meta['img_shape'][:2], - self.train_cfg.allowed_border) - if not inside_flags.any(): - return (None, ) * 7 - # assign gt and sample anchors - anchors = flat_anchors[inside_flags, :] - - num_level_anchors_inside = self.get_num_level_anchors_inside( - num_level_anchors, inside_flags) - assign_result = self.assigner.assign(anchors, num_level_anchors_inside, - gt_bboxes, gt_bboxes_ignore, - gt_labels) - - sampling_result = self.sampler.sample(assign_result, anchors, - gt_bboxes) - - num_valid_anchors = anchors.shape[0] - bbox_targets = torch.zeros_like(anchors) - bbox_weights = torch.zeros_like(anchors) - labels = anchors.new_full((num_valid_anchors, ), - self.num_classes, - dtype=torch.long) - label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - if self.reg_decoded_bbox: - pos_bbox_targets = sampling_result.pos_gt_bboxes - else: - pos_bbox_targets = self.bbox_coder.encode( - sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) - - bbox_targets[pos_inds, :] = pos_bbox_targets - bbox_weights[pos_inds, :] = 1.0 - if gt_labels is None: - # Only rpn gives gt_labels as None - # Foreground is the first class since v2.5.0 - labels[pos_inds] = 0 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if self.train_cfg.pos_weight <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg.pos_weight - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_anchors.size(0) - anchors = unmap(anchors, num_total_anchors, inside_flags) - labels = unmap( - labels, num_total_anchors, inside_flags, fill=self.num_classes) - label_weights = unmap(label_weights, num_total_anchors, - inside_flags) - bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) - bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) - - return (anchors, labels, label_weights, bbox_targets, bbox_weights, - pos_inds, neg_inds) - - def get_num_level_anchors_inside(self, num_level_anchors, inside_flags): - split_inside_flags = torch.split(inside_flags, num_level_anchors) - num_level_anchors_inside = [ - int(flags.sum()) for flags in split_inside_flags - ] - return num_level_anchors_inside - - def forward_train(self, - x, - img_metas, - gt_bboxes, - gt_labels=None, - gt_bboxes_ignore=None, - proposal_cfg=None, - **kwargs): - """ - Args: - x (list[Tensor]): Features from FPN. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - proposal_cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used - - Returns: - tuple: - losses: (dict[str, Tensor]): A dictionary of loss components. - proposal_list (list[Tensor]): Proposals of each image. 
- """ - outs = self(x) - if gt_labels is None: - loss_inputs = outs + (gt_bboxes, img_metas) - else: - loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) - losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) - if proposal_cfg is None: - return losses - else: - proposal_list = self.get_bboxes( - *outs, img_metas=img_metas, cfg=proposal_cfg) - return losses, proposal_list diff --git a/cv/detection/co-detr/pytorch/projects/models/co_deformable_detr_head.py b/cv/detection/co-detr/pytorch/projects/models/co_deformable_detr_head.py deleted file mode 100644 index bbc201b19bed729e75d3a1e833eb2a57b0cd9ac6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/models/co_deformable_detr_head.py +++ /dev/null @@ -1,1101 +0,0 @@ -import copy -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import Linear, bias_init_with_prob, constant_init -from mmcv.runner import force_fp32 -from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh, - build_assigner, build_sampler, multi_apply, - reduce_mean, bbox_overlaps) -from mmdet.models.utils.transformer import inverse_sigmoid -from mmdet.models.builder import HEADS -from mmdet.models.dense_heads.detr_head import DETRHead - -import sys -import numpy as np -from mmcv.ops import batched_nms -from mmdet.core import bbox_mapping_back, merge_aug_proposals -if sys.version_info >= (3, 7): - from mmdet.utils.contextmanagers import completed - - -@HEADS.register_module() -class CoDeformDETRHead(DETRHead): - def __init__(self, - *args, - max_pos_coords=300, - lambda_1=1, - with_box_refine=False, - as_two_stage=False, - mixed_selection=False, - transformer=None, - use_zero_padding=False, - **kwargs): - self.max_pos_coords = max_pos_coords - self.lambda_1 = lambda_1 - self.with_box_refine = with_box_refine - self.as_two_stage = as_two_stage - self.mixed_selection = mixed_selection - self.use_zero_padding = use_zero_padding - if self.as_two_stage: - transformer['as_two_stage'] = self.as_two_stage - if self.mixed_selection: - transformer['mixed_selection'] = self.mixed_selection - super(CoDeformDETRHead, self).__init__( - *args, transformer=transformer, **kwargs) - - - def _init_layers(self): - """Initialize classification branch and regression branch of head.""" - self.downsample = nn.Sequential( - nn.Conv2d(self.embed_dims, self.embed_dims, kernel_size=3, stride=2, padding=1), - nn.GroupNorm(32, self.embed_dims) - ) - fc_cls = Linear(self.embed_dims, self.cls_out_channels) - reg_branch = [] - for _ in range(self.num_reg_fcs): - reg_branch.append(Linear(self.embed_dims, self.embed_dims)) - reg_branch.append(nn.ReLU()) - reg_branch.append(Linear(self.embed_dims, 4)) - reg_branch = nn.Sequential(*reg_branch) - - def _get_clones(module, N): - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - - # last reg_branch is used to generate proposal from - # encode feature map when as_two_stage is True. 
- num_pred = (self.transformer.decoder.num_layers + 1) if \ - self.as_two_stage else self.transformer.decoder.num_layers - - if self.with_box_refine: - self.cls_branches = _get_clones(fc_cls, num_pred) - self.reg_branches = _get_clones(reg_branch, num_pred) - else: - self.cls_branches = nn.ModuleList( - [fc_cls for _ in range(num_pred)]) - self.reg_branches = nn.ModuleList( - [reg_branch for _ in range(num_pred)]) - - if not self.as_two_stage: - self.query_embedding = nn.Embedding(self.num_query, - self.embed_dims * 2) - elif self.mixed_selection: - self.query_embedding = nn.Embedding(self.num_query, - self.embed_dims) - - def init_weights(self): - """Initialize weights of the DeformDETR head.""" - self.transformer.init_weights() - if self.loss_cls.use_sigmoid: - bias_init = bias_init_with_prob(0.01) - for m in self.cls_branches: - nn.init.constant_(m.bias, bias_init) - for m in self.reg_branches: - constant_init(m[-1], 0, bias=0) - nn.init.constant_(self.reg_branches[0][-1].bias.data[2:], -2.0) - if self.as_two_stage: - for m in self.reg_branches: - nn.init.constant_(m[-1].bias.data[2:], 0.0) - - def forward(self, mlvl_feats, img_metas): - """Forward function. - - Args: - mlvl_feats (tuple[Tensor]): Features from the upstream - network, each is a 4D-tensor with shape - (N, C, H, W). - img_metas (list[dict]): List of image information. - - Returns: - all_cls_scores (Tensor): Outputs from the classification head, \ - shape [nb_dec, bs, num_query, cls_out_channels]. Note \ - cls_out_channels should includes background. - all_bbox_preds (Tensor): Sigmoid outputs from the regression \ - head with normalized coordinate format (cx, cy, w, h). \ - Shape [nb_dec, bs, num_query, 4]. - enc_outputs_class (Tensor): The score of each point on encode \ - feature map, has shape (N, h*w, num_class). Only when \ - as_two_stage is True it would be returned, otherwise \ - `None` would be returned. - enc_outputs_coord (Tensor): The proposal generate from the \ - encode feature map, has shape (N, h*w, 4). Only when \ - as_two_stage is True it would be returned, otherwise \ - `None` would be returned. 
- """ - batch_size = mlvl_feats[0].size(0) - input_img_h, input_img_w = img_metas[0]['batch_input_shape'] - img_masks = mlvl_feats[0].new_ones( - (batch_size, input_img_h, input_img_w)) - for img_id in range(batch_size): - img_h, img_w, _ = img_metas[img_id]['img_shape'] - img_masks[img_id, :img_h, :img_w] = 0 - - mlvl_masks = [] - mlvl_positional_encodings = [] - for feat in mlvl_feats: - mlvl_masks.append( - F.interpolate(img_masks[None], - size=feat.shape[-2:]).to(torch.bool).squeeze(0)) - mlvl_positional_encodings.append( - self.positional_encoding(mlvl_masks[-1])) - - query_embeds = None - if not self.as_two_stage or self.mixed_selection: - query_embeds = self.query_embedding.weight - - hs, init_reference, inter_references, \ - enc_outputs_class, enc_outputs_coord, enc_outputs = self.transformer( - mlvl_feats, - mlvl_masks, - query_embeds, - mlvl_positional_encodings, - reg_branches=self.reg_branches if self.with_box_refine else None, # noqa:E501 - cls_branches=self.cls_branches if self.as_two_stage else None, # noqa:E501 - return_encoder_output=True - ) - - outs = [] - num_level = len(mlvl_feats) - start = 0 - for lvl in range(num_level): - bs, c, h, w = mlvl_feats[lvl].shape - end = start + h*w - feat = enc_outputs[start:end].permute(1, 2, 0).contiguous() - start = end - outs.append(feat.reshape(bs, c, h, w)) - outs.append(self.downsample(outs[-1])) - - hs = hs.permute(0, 2, 1, 3) - outputs_classes = [] - outputs_coords = [] - - for lvl in range(hs.shape[0]): - if lvl == 0: - reference = init_reference - else: - reference = inter_references[lvl - 1] - reference = inverse_sigmoid(reference) - outputs_class = self.cls_branches[lvl](hs[lvl]) - tmp = self.reg_branches[lvl](hs[lvl]) - if reference.shape[-1] == 4: - tmp += reference - else: - assert reference.shape[-1] == 2 - tmp[..., :2] += reference - outputs_coord = tmp.sigmoid() - outputs_classes.append(outputs_class) - outputs_coords.append(outputs_coord) - - outputs_classes = torch.stack(outputs_classes) - outputs_coords = torch.stack(outputs_coords) - - if self.as_two_stage: - return outputs_classes, outputs_coords, \ - enc_outputs_class, \ - enc_outputs_coord.sigmoid(), outs - else: - return outputs_classes, outputs_coords, \ - None, None, outs - - def forward_aux(self, mlvl_feats, img_metas, aux_targets, head_idx): - """Forward function. - - Args: - mlvl_feats (tuple[Tensor]): Features from the upstream - network, each is a 4D-tensor with shape - (N, C, H, W). - img_metas (list[dict]): List of image information. - - Returns: - all_cls_scores (Tensor): Outputs from the classification head, \ - shape [nb_dec, bs, num_query, cls_out_channels]. Note \ - cls_out_channels should includes background. - all_bbox_preds (Tensor): Sigmoid outputs from the regression \ - head with normalized coordinate format (cx, cy, w, h). \ - Shape [nb_dec, bs, num_query, 4]. - enc_outputs_class (Tensor): The score of each point on encode \ - feature map, has shape (N, h*w, num_class). Only when \ - as_two_stage is True it would be returned, otherwise \ - `None` would be returned. - enc_outputs_coord (Tensor): The proposal generate from the \ - encode feature map, has shape (N, h*w, 4). Only when \ - as_two_stage is True it would be returned, otherwise \ - `None` would be returned. 
- """ - aux_coords, aux_labels, aux_targets, aux_label_weights, aux_bbox_weights, aux_feats, attn_masks = aux_targets - batch_size = mlvl_feats[0].size(0) - input_img_h, input_img_w = img_metas[0]['batch_input_shape'] - img_masks = mlvl_feats[0].new_ones( - (batch_size, input_img_h, input_img_w)) - for img_id in range(batch_size): - img_h, img_w, _ = img_metas[img_id]['img_shape'] - img_masks[img_id, :img_h, :img_w] = 0 - - mlvl_masks = [] - mlvl_positional_encodings = [] - for feat in mlvl_feats: - mlvl_masks.append( - F.interpolate(img_masks[None], - size=feat.shape[-2:]).to(torch.bool).squeeze(0)) - mlvl_positional_encodings.append( - self.positional_encoding(mlvl_masks[-1])) - - query_embeds = None - hs, init_reference, inter_references = self.transformer.forward_aux( - mlvl_feats, - mlvl_masks, - query_embeds, - mlvl_positional_encodings, - aux_coords, - pos_feats=aux_feats, - reg_branches=self.reg_branches if self.with_box_refine else None, # noqa:E501 - cls_branches=self.cls_branches if self.as_two_stage else None, # noqa:E501 - return_encoder_output=True, - attn_masks=attn_masks, - head_idx=head_idx - ) - - hs = hs.permute(0, 2, 1, 3) - outputs_classes = [] - outputs_coords = [] - - for lvl in range(hs.shape[0]): - if lvl == 0: - reference = init_reference - else: - reference = inter_references[lvl - 1] - reference = inverse_sigmoid(reference) - outputs_class = self.cls_branches[lvl](hs[lvl]) - tmp = self.reg_branches[lvl](hs[lvl]) - if reference.shape[-1] == 4: - tmp += reference - else: - assert reference.shape[-1] == 2 - tmp[..., :2] += reference - outputs_coord = tmp.sigmoid() - outputs_classes.append(outputs_class) - outputs_coords.append(outputs_coord) - - outputs_classes = torch.stack(outputs_classes) - outputs_coords = torch.stack(outputs_coords) - - return outputs_classes, outputs_coords, \ - None, None - - def loss_single_aux(self, - cls_scores, - bbox_preds, - labels, - label_weights, - bbox_targets, - bbox_weights, - img_metas, - gt_bboxes_ignore_list=None): - """"Loss function for outputs from a single decoder layer of a single - feature level. - - Args: - cls_scores (Tensor): Box score logits from a single decoder layer - for all images. Shape [bs, num_query, cls_out_channels]. - bbox_preds (Tensor): Sigmoid outputs from a single decoder layer - for all images, with normalized coordinate (cx, cy, w, h) and - shape [bs, num_query, 4]. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - img_metas (list[dict]): List of image meta information. - gt_bboxes_ignore_list (list[Tensor], optional): Bounding - boxes which can be ignored for each image. Default None. - - Returns: - dict[str, Tensor]: A dictionary of loss components for outputs from - a single decoder layer. 
- """ - num_imgs = cls_scores.size(0) - num_q = cls_scores.size(1) - try: - labels = labels.reshape(num_imgs * num_q) - label_weights = label_weights.reshape(num_imgs * num_q) - bbox_targets = bbox_targets.reshape(num_imgs * num_q, 4) - bbox_weights = bbox_weights.reshape(num_imgs * num_q, 4) - except: - return cls_scores.mean()*0, cls_scores.mean()*0, cls_scores.mean()*0 - - bg_class_ind = self.num_classes - num_total_pos = len(((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1)) - num_total_neg = num_imgs*num_q - num_total_pos - - # classification loss - cls_scores = cls_scores.reshape(-1, self.cls_out_channels) - # construct weighted avg_factor to match with the official DETR repo - cls_avg_factor = num_total_pos * 1.0 + \ - num_total_neg * self.bg_cls_weight - if self.sync_cls_avg_factor: - cls_avg_factor = reduce_mean( - cls_scores.new_tensor([cls_avg_factor])) - cls_avg_factor = max(cls_avg_factor, 1) - loss_cls = self.loss_cls( - cls_scores, labels, label_weights, avg_factor=cls_avg_factor) - - # Compute the average number of gt boxes across all gpus, for - # normalization purposes - num_total_pos = loss_cls.new_tensor([num_total_pos]) - num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() - - # construct factors used for rescale bboxes - factors = [] - for img_meta, bbox_pred in zip(img_metas, bbox_preds): - img_h, img_w, _ = img_meta['img_shape'] - factor = bbox_pred.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0).repeat( - bbox_pred.size(0), 1) - factors.append(factor) - factors = torch.cat(factors, 0) - - # DETR regress the relative position of boxes (cxcywh) in the image, - # thus the learning target is normalized by the image size. So here - # we need to re-scale them for calculating IoU loss - bbox_preds = bbox_preds.reshape(-1, 4) - bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors - bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors - - # regression IoU loss, defaultly GIoU loss - loss_iou = self.loss_iou( - bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) - - # regression L1 loss - loss_bbox = self.loss_bbox( - bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) - return loss_cls*self.lambda_1, loss_bbox*self.lambda_1, loss_iou*self.lambda_1 - - def get_aux_targets(self, pos_coords, img_metas, mlvl_feats, head_idx): - coords, labels, targets = pos_coords[:3] - head_name = pos_coords[-1] - bs, c = len(coords), mlvl_feats[0].shape[1] - max_num_coords = 0 - all_feats = [] - for i in range(bs): - label = labels[i] - feats = [feat[i].reshape(c, -1).transpose(1, 0) for feat in mlvl_feats] - feats = torch.cat(feats, dim=0) - bg_class_ind = self.num_classes - pos_inds = ((label >= 0) - & (label < bg_class_ind)).nonzero().squeeze(1) - max_num_coords = max(max_num_coords, len(pos_inds)) - all_feats.append(feats) - max_num_coords = min(self.max_pos_coords, max_num_coords) - max_num_coords = max(9, max_num_coords) - - if self.use_zero_padding: - attn_masks = [] - label_weights = coords[0].new_zeros([bs, max_num_coords]) - else: - attn_masks = None - label_weights = coords[0].new_ones([bs, max_num_coords]) - bbox_weights = coords[0].new_zeros([bs, max_num_coords, 4]) - - aux_coords, aux_labels, aux_targets, aux_feats = [], [], [], [] - - for i in range(bs): - coord, label, target = coords[i], labels[i], targets[i] - feats = all_feats[i] - if 'rcnn' in head_name: - feats = pos_coords[-2][i] - num_coords_per_point = 1 - else: - num_coords_per_point = coord.shape[0] // feats.shape[0] - feats = feats.unsqueeze(1).repeat(1, 
num_coords_per_point, 1) - feats = feats.reshape(feats.shape[0]*num_coords_per_point, feats.shape[-1]) - img_meta = img_metas[i] - img_h, img_w, _ = img_meta['img_shape'] - factor = coord.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0) - bg_class_ind = self.num_classes - pos_inds = ((label >= 0) - & (label < bg_class_ind)).nonzero().squeeze(1) - neg_inds = ((label == bg_class_ind)).nonzero().squeeze(1) - if pos_inds.shape[0] > max_num_coords: - indices = torch.randperm(pos_inds.shape[0])[:max_num_coords].cuda() - pos_inds = pos_inds[indices] - - coord = bbox_xyxy_to_cxcywh(coord[pos_inds] / factor) - label = label[pos_inds] - target = bbox_xyxy_to_cxcywh(target[pos_inds] / factor) - feat = feats[pos_inds] - - if self.use_zero_padding: - label_weights[i][:len(label)] = 1 - bbox_weights[i][:len(label)] = 1 - attn_mask = torch.zeros([max_num_coords, max_num_coords,]).bool().to(coord.device) - else: - bbox_weights[i][:len(label)] = 1 - - if coord.shape[0] < max_num_coords: - padding_shape = max_num_coords-coord.shape[0] - if self.use_zero_padding: - padding_coord = coord.new_zeros([padding_shape, 4]) - padding_label = label.new_ones([padding_shape]) * self.num_classes - padding_target = target.new_zeros([padding_shape, 4]) - padding_feat = feat.new_zeros([padding_shape, c]) - attn_mask[coord.shape[0] :, 0 : coord.shape[0],] = True - attn_mask[:, coord.shape[0] :,] = True - else: - indices = torch.randperm(neg_inds.shape[0])[:padding_shape].cuda() - neg_inds = neg_inds[indices] - padding_coord = bbox_xyxy_to_cxcywh(coords[i][neg_inds] / factor) - padding_label = labels[i][neg_inds] - padding_target = bbox_xyxy_to_cxcywh(targets[i][neg_inds] / factor) - padding_feat = feats[neg_inds] - coord = torch.cat((coord, padding_coord), dim=0) - label = torch.cat((label, padding_label), dim=0) - target = torch.cat((target, padding_target), dim=0) - feat = torch.cat((feat, padding_feat), dim=0) - if self.use_zero_padding: - attn_masks.append(attn_mask.unsqueeze(0)) - aux_coords.append(coord.unsqueeze(0)) - aux_labels.append(label.unsqueeze(0)) - aux_targets.append(target.unsqueeze(0)) - aux_feats.append(feat.unsqueeze(0)) - - if self.use_zero_padding: - attn_masks = torch.cat(attn_masks, dim=0).unsqueeze(1).repeat(1, 8, 1, 1) - attn_masks = attn_masks.reshape(bs*8, max_num_coords, max_num_coords) - else: - attn_mask = None - - aux_coords = torch.cat(aux_coords, dim=0) - aux_labels = torch.cat(aux_labels, dim=0) - aux_targets = torch.cat(aux_targets, dim=0) - aux_feats = torch.cat(aux_feats, dim=0) - aux_label_weights = label_weights - aux_bbox_weights = bbox_weights - return (aux_coords, aux_labels, aux_targets, aux_label_weights, aux_bbox_weights, aux_feats, attn_masks) - - # over-write because img_metas are needed as inputs for bbox_head. - def forward_train_aux(self, - x, - img_metas, - gt_bboxes, - gt_labels=None, - gt_bboxes_ignore=None, - pos_coords=None, - head_idx=0, - **kwargs): - """Forward function for training mode. - - Args: - x (list[Tensor]): Features from backbone. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - proposal_cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
- """ - aux_targets = self.get_aux_targets(pos_coords, img_metas, x, head_idx) - outs = self.forward_aux(x[:-1], img_metas, aux_targets, head_idx) - outs = outs + aux_targets - if gt_labels is None: - loss_inputs = outs + (gt_bboxes, img_metas) - else: - loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) - losses = self.loss_aux(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) - return losses - - @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) - def loss_aux(self, - all_cls_scores, - all_bbox_preds, - enc_cls_scores, - enc_bbox_preds, - aux_coords, - aux_labels, - aux_targets, - aux_label_weights, - aux_bbox_weights, - aux_feats, - attn_masks, - gt_bboxes_list, - gt_labels_list, - img_metas, - gt_bboxes_ignore=None): - """"Loss function. - - Args: - all_cls_scores (Tensor): Classification score of all - decoder layers, has shape - [nb_dec, bs, num_query, cls_out_channels]. - all_bbox_preds (Tensor): Sigmoid regression - outputs of all decode layers. Each is a 4D-tensor with - normalized coordinate format (cx, cy, w, h) and shape - [nb_dec, bs, num_query, 4]. - enc_cls_scores (Tensor): Classification scores of - points on encode feature map , has shape - (N, h*w, num_classes). Only be passed when as_two_stage is - True, otherwise is None. - enc_bbox_preds (Tensor): Regression results of each points - on the encode feature map, has shape (N, h*w, 4). Only be - passed when as_two_stage is True, otherwise is None. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - img_metas (list[dict]): List of image meta information. - gt_bboxes_ignore (list[Tensor], optional): Bounding boxes - which can be ignored for each image. Default None. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - # gt_bboxes_ignore = None - # assert gt_bboxes_ignore is None, \ - # f'{self.__class__.__name__} only supports ' \ - # f'for gt_bboxes_ignore setting to None.' - - num_dec_layers = len(all_cls_scores) - all_labels = [aux_labels for _ in range(num_dec_layers)] - all_label_weights = [aux_label_weights for _ in range(num_dec_layers)] - all_bbox_targets = [aux_targets for _ in range(num_dec_layers)] - all_bbox_weights = [aux_bbox_weights for _ in range(num_dec_layers)] - img_metas_list = [img_metas for _ in range(num_dec_layers)] - all_gt_bboxes_ignore_list = [ - gt_bboxes_ignore for _ in range(num_dec_layers) - ] - - losses_cls, losses_bbox, losses_iou = multi_apply( - self.loss_single_aux, all_cls_scores, all_bbox_preds, - all_labels, all_label_weights, all_bbox_targets, - all_bbox_weights, img_metas_list, all_gt_bboxes_ignore_list) - - loss_dict = dict() - # loss of proposal generated from encode feature map. - - # loss from the last decoder layer - loss_dict['loss_cls_aux'] = losses_cls[-1] - loss_dict['loss_bbox_aux'] = losses_bbox[-1] - loss_dict['loss_iou_aux'] = losses_iou[-1] - # loss from other decoder layers - num_dec_layer = 0 - for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], - losses_bbox[:-1], - losses_iou[:-1]): - loss_dict[f'd{num_dec_layer}.loss_cls_aux'] = loss_cls_i - loss_dict[f'd{num_dec_layer}.loss_bbox_aux'] = loss_bbox_i - loss_dict[f'd{num_dec_layer}.loss_iou_aux'] = loss_iou_i - num_dec_layer += 1 - return loss_dict - - # over-write because img_metas are needed as inputs for bbox_head. 
- def forward_train(self, - x, - img_metas, - gt_bboxes, - gt_labels=None, - gt_bboxes_ignore=None, - proposal_cfg=None, - **kwargs): - """Forward function for training mode. - - Args: - x (list[Tensor]): Features from backbone. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - proposal_cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert proposal_cfg is None, '"proposal_cfg" must be None' - outs = self(x, img_metas) - if gt_labels is None: - loss_inputs = outs + (gt_bboxes, img_metas) - else: - loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) - losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) - enc_outputs = outs[-1] - return losses, enc_outputs - - @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) - def loss(self, - all_cls_scores, - all_bbox_preds, - enc_cls_scores, - enc_bbox_preds, - enc_outputs, - gt_bboxes_list, - gt_labels_list, - img_metas, - gt_bboxes_ignore=None): - """"Loss function. - - Args: - all_cls_scores (Tensor): Classification score of all - decoder layers, has shape - [nb_dec, bs, num_query, cls_out_channels]. - all_bbox_preds (Tensor): Sigmoid regression - outputs of all decode layers. Each is a 4D-tensor with - normalized coordinate format (cx, cy, w, h) and shape - [nb_dec, bs, num_query, 4]. - enc_cls_scores (Tensor): Classification scores of - points on encode feature map , has shape - (N, h*w, num_classes). Only be passed when as_two_stage is - True, otherwise is None. - enc_bbox_preds (Tensor): Regression results of each points - on the encode feature map, has shape (N, h*w, 4). Only be - passed when as_two_stage is True, otherwise is None. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - img_metas (list[dict]): List of image meta information. - gt_bboxes_ignore (list[Tensor], optional): Bounding boxes - which can be ignored for each image. Default None. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - # gt_bboxes_ignore = None - # assert gt_bboxes_ignore is None, \ - # f'{self.__class__.__name__} only supports ' \ - # f'for gt_bboxes_ignore setting to None.' - - num_dec_layers = len(all_cls_scores) - all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] - all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] - all_gt_bboxes_ignore_list = [ - gt_bboxes_ignore for _ in range(num_dec_layers) - ] - img_metas_list = [img_metas for _ in range(num_dec_layers)] - - losses_cls, losses_bbox, losses_iou = multi_apply( - self.loss_single, all_cls_scores, all_bbox_preds, - all_gt_bboxes_list, all_gt_labels_list, img_metas_list, - all_gt_bboxes_ignore_list) - - loss_dict = dict() - # loss of proposal generated from encode feature map. 
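The `enc_cls_scores` branch that follows scores the encoder's proposals class-agnostically; a minimal sketch of the label trick it relies on (the tensor values are made up):

```python
import torch

gt_labels = torch.tensor([3, 17, 42])        # per-box category indices
binary_labels = torch.zeros_like(gt_labels)  # every GT collapses to class 0
print(binary_labels.tolist())                # [0, 0, 0] -> "objectness"-style target
```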
- if enc_cls_scores is not None: - binary_labels_list = [ - torch.zeros_like(gt_labels_list[i]) - for i in range(len(img_metas)) - ] - enc_loss_cls, enc_losses_bbox, enc_losses_iou = \ - self.loss_single(enc_cls_scores, enc_bbox_preds, - gt_bboxes_list, binary_labels_list, - img_metas, gt_bboxes_ignore) - loss_dict['enc_loss_cls'] = enc_loss_cls - loss_dict['enc_loss_bbox'] = enc_losses_bbox - loss_dict['enc_loss_iou'] = enc_losses_iou - - # loss from the last decoder layer - loss_dict['loss_cls'] = losses_cls[-1] - loss_dict['loss_bbox'] = losses_bbox[-1] - loss_dict['loss_iou'] = losses_iou[-1] - # loss from other decoder layers - num_dec_layer = 0 - for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], - losses_bbox[:-1], - losses_iou[:-1]): - loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i - loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i - loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i - num_dec_layer += 1 - return loss_dict - - @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) - def get_bboxes(self, - all_cls_scores, - all_bbox_preds, - enc_cls_scores, - enc_bbox_preds, - enc_outputs, - img_metas, - rescale=False, - with_nms=False): - """Transform network outputs for a batch into bbox predictions. - - Args: - all_cls_scores (Tensor): Classification score of all - decoder layers, has shape - [nb_dec, bs, num_query, cls_out_channels]. - all_bbox_preds (Tensor): Sigmoid regression - outputs of all decode layers. Each is a 4D-tensor with - normalized coordinate format (cx, cy, w, h) and shape - [nb_dec, bs, num_query, 4]. - enc_cls_scores (Tensor): Classification scores of - points on encode feature map , has shape - (N, h*w, num_classes). Only be passed when as_two_stage is - True, otherwise is None. - enc_bbox_preds (Tensor): Regression results of each points - on the encode feature map, has shape (N, h*w, 4). Only be - passed when as_two_stage is True, otherwise is None. - img_metas (list[dict]): Meta information of each image. - rescale (bool, optional): If True, return boxes in original - image space. Default False. - - Returns: - list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \ - The first item is an (n, 5) tensor, where the first 4 columns \ - are bounding box positions (tl_x, tl_y, br_x, br_y) and the \ - 5-th column is a score between 0 and 1. The second item is a \ - (n,) tensor where each item is the predicted class label of \ - the corresponding box. - """ - cls_scores = all_cls_scores[-1] - bbox_preds = all_bbox_preds[-1] - result_list = [] - for img_id in range(len(img_metas)): - cls_score = cls_scores[img_id] - bbox_pred = bbox_preds[img_id] - img_shape = img_metas[img_id]['img_shape'] - scale_factor = img_metas[img_id]['scale_factor'] - proposals = self._get_bboxes_single(cls_score, bbox_pred, - img_shape, scale_factor, - rescale, with_nms) - result_list.append(proposals) - return result_list - - - def loss_single(self, - cls_scores, - bbox_preds, - gt_bboxes_list, - gt_labels_list, - img_metas, - gt_bboxes_ignore_list=None): - """"Loss function for outputs from a single decoder layer of a single - feature level. - - Args: - cls_scores (Tensor): Box score logits from a single decoder layer - for all images. Shape [bs, num_query, cls_out_channels]. - bbox_preds (Tensor): Sigmoid outputs from a single decoder layer - for all images, with normalized coordinate (cx, cy, w, h) and - shape [bs, num_query, 4]. 
- gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - img_metas (list[dict]): List of image meta information. - gt_bboxes_ignore_list (list[Tensor], optional): Bounding - boxes which can be ignored for each image. Default None. - - Returns: - dict[str, Tensor]: A dictionary of loss components for outputs from - a single decoder layer. - """ - num_imgs = cls_scores.size(0) - cls_scores_list = [cls_scores[i] for i in range(num_imgs)] - bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] - cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, - gt_bboxes_list, gt_labels_list, - img_metas, gt_bboxes_ignore_list) - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - labels = torch.cat(labels_list, 0) - label_weights = torch.cat(label_weights_list, 0) - bbox_targets = torch.cat(bbox_targets_list, 0) - bbox_weights = torch.cat(bbox_weights_list, 0) - - # classification loss - cls_scores = cls_scores.reshape(-1, self.cls_out_channels) - # construct weighted avg_factor to match with the official DETR repo - cls_avg_factor = num_total_pos * 1.0 + \ - num_total_neg * self.bg_cls_weight - if self.sync_cls_avg_factor: - cls_avg_factor = reduce_mean( - cls_scores.new_tensor([cls_avg_factor])) - cls_avg_factor = max(cls_avg_factor, 1) - loss_cls = self.loss_cls( - cls_scores, labels, label_weights, avg_factor=cls_avg_factor) - - # Compute the average number of gt boxes across all gpus, for - # normalization purposes - num_total_pos = loss_cls.new_tensor([num_total_pos]) - num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() - - # construct factors used for rescale bboxes - factors = [] - for img_meta, bbox_pred in zip(img_metas, bbox_preds): - img_h, img_w, _ = img_meta['img_shape'] - factor = bbox_pred.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0).repeat( - bbox_pred.size(0), 1) - factors.append(factor) - factors = torch.cat(factors, 0) - - # DETR regress the relative position of boxes (cxcywh) in the image, - # thus the learning target is normalized by the image size. So here - # we need to re-scale them for calculating IoU loss - bbox_preds = bbox_preds.reshape(-1, 4) - bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors - bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors - - # regression IoU loss, defaultly GIoU loss - loss_iou = self.loss_iou( - bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) - - # regression L1 loss - loss_bbox = self.loss_bbox( - bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) - return loss_cls, loss_bbox, loss_iou - - def get_targets(self, - cls_scores_list, - bbox_preds_list, - gt_bboxes_list, - gt_labels_list, - img_metas, - gt_bboxes_ignore_list=None): - """"Compute regression and classification targets for a batch image. - - Outputs from a single decoder layer of a single feature level are used. - - Args: - cls_scores_list (list[Tensor]): Box score logits from a single - decoder layer for each image with shape [num_query, - cls_out_channels]. - bbox_preds_list (list[Tensor]): Sigmoid outputs from a single - decoder layer for each image, with normalized coordinate - (cx, cy, w, h) and shape [num_query, 4]. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
- gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - img_metas (list[dict]): List of image meta information. - gt_bboxes_ignore_list (list[Tensor], optional): Bounding - boxes which can be ignored for each image. Default None. - - Returns: - tuple: a tuple containing the following targets. - - - labels_list (list[Tensor]): Labels for all images. - - label_weights_list (list[Tensor]): Label weights for all \ - images. - - bbox_targets_list (list[Tensor]): BBox targets for all \ - images. - - bbox_weights_list (list[Tensor]): BBox weights for all \ - images. - - num_total_pos (int): Number of positive samples in all \ - images. - - num_total_neg (int): Number of negative samples in all \ - images. - """ - # assert gt_bboxes_ignore_list is None, \ - # 'Only supports for gt_bboxes_ignore setting to None.' - num_imgs = len(cls_scores_list) - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [ - gt_bboxes_ignore_list for _ in range(num_imgs) - ] - - (labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply( - self._get_target_single, cls_scores_list, bbox_preds_list, - gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list) - num_total_pos = sum((inds.numel() for inds in pos_inds_list)) - num_total_neg = sum((inds.numel() for inds in neg_inds_list)) - return (labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) - - def _get_target_single(self, - cls_score, - bbox_pred, - gt_bboxes, - gt_labels, - img_meta, - gt_bboxes_ignore=None): - """"Compute regression and classification targets for one image. - - Outputs from a single decoder layer of a single feature level are used. - - Args: - cls_score (Tensor): Box score logits from a single decoder layer - for one image. Shape [num_query, cls_out_channels]. - bbox_pred (Tensor): Sigmoid outputs from a single decoder layer - for one image, with normalized coordinate (cx, cy, w, h) and - shape [num_query, 4]. - gt_bboxes (Tensor): Ground truth bboxes for one image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (Tensor): Ground truth class indices for one image - with shape (num_gts, ). - img_meta (dict): Meta information for one image. - gt_bboxes_ignore (Tensor, optional): Bounding boxes - which can be ignored. Default None. - - Returns: - tuple[Tensor]: a tuple containing the following for one image. - - - labels (Tensor): Labels of each image. - - label_weights (Tensor]): Label weights of each image. - - bbox_targets (Tensor): BBox targets of each image. - - bbox_weights (Tensor): BBox weights of each image. - - pos_inds (Tensor): Sampled positive indices for each image. - - neg_inds (Tensor): Sampled negative indices for each image. 
- """ - - num_bboxes = bbox_pred.size(0) - ori_gt_bboxes_ignore = gt_bboxes_ignore - gt_bboxes_ignore = None - # assigner and sampler - assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes, - gt_labels, img_meta, - gt_bboxes_ignore) - sampling_result = self.sampler.sample(assign_result, bbox_pred, - gt_bboxes) - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - - # label targets - labels = gt_bboxes.new_full((num_bboxes, ), - self.num_classes, - dtype=torch.long) - labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] - label_weights = gt_bboxes.new_ones(num_bboxes) - - # bbox targets - bbox_targets = torch.zeros_like(bbox_pred) - bbox_weights = torch.zeros_like(bbox_pred) - bbox_weights[pos_inds] = 1.0 - img_h, img_w, _ = img_meta['img_shape'] - - # DETR regress the relative position of boxes (cxcywh) in the image. - # Thus the learning target should be normalized by the image size, also - # the box format should be converted from defaultly x1y1x2y2 to cxcywh. - factor = bbox_pred.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0) - pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor - pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized) - bbox_targets[pos_inds] = pos_gt_bboxes_targets - - return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, - neg_inds) - - def _get_bboxes_single(self, - cls_score, - bbox_pred, - img_shape, - scale_factor, - rescale=False, - with_nms=False): - """Transform outputs from the last decoder layer into bbox predictions - for each image. - - Args: - cls_score (Tensor): Box score logits from the last decoder layer - for each image. Shape [num_query, cls_out_channels]. - bbox_pred (Tensor): Sigmoid outputs from the last decoder layer - for each image, with coordinate format (cx, cy, w, h) and - shape [num_query, 4]. - img_shape (tuple[int]): Shape of input image, (height, width, 3). - scale_factor (ndarray, optional): Scale factor of the image arange - as (w_scale, h_scale, w_scale, h_scale). - rescale (bool, optional): If True, return boxes in original image - space. Default False. - - Returns: - tuple[Tensor]: Results of detected bboxes and labels. - - - det_bboxes: Predicted bboxes with shape [num_query, 5], \ - where the first 4 columns are bounding box positions \ - (tl_x, tl_y, br_x, br_y) and the 5-th column are scores \ - between 0 and 1. - - det_labels: Predicted labels of the corresponding box with \ - shape [num_query]. 
- """ - assert len(cls_score) == len(bbox_pred) - max_per_img = self.test_cfg.get('max_per_img', self.num_query) - score_thr = self.test_cfg.get('score_thr', 0) - if with_nms: - max_per_img = self.num_query - # exclude background - if self.loss_cls.use_sigmoid: - cls_score = cls_score.sigmoid() - scores, indexes = cls_score.view(-1).topk(max_per_img) - det_labels = indexes % self.num_classes - bbox_index = indexes // self.num_classes - bbox_pred = bbox_pred[bbox_index] - else: - scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1) - scores, bbox_index = scores.topk(max_per_img) - bbox_pred = bbox_pred[bbox_index] - det_labels = det_labels[bbox_index] - - valid_mask = scores > score_thr - scores = scores[valid_mask] - bbox_pred = bbox_pred[valid_mask] - det_labels = det_labels[valid_mask] - - det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred) - det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1] - det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0] - det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1]) - det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0]) - if rescale: - det_bboxes /= det_bboxes.new_tensor(scale_factor) - - if with_nms: - cfg = self.test_cfg - det_bboxes, keep_idxs = batched_nms(det_bboxes, scores, det_labels, cfg.nms) - det_bboxes = det_bboxes[:cfg.max_per_img] - det_labels = det_labels[keep_idxs][:cfg.max_per_img] - return det_bboxes, det_labels - - det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1) - - return det_bboxes, det_labels - - def aug_test_bboxes(self, feats, img_metas, rescale=False): - """Test det bboxes with test-time augmentation. - Args: - feats (list[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains features for all images in the batch. - img_metas (list[list[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. each dict has image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - Returns: - list[ndarray]: bbox results of each class - """ - raise ValueError('Not implemented') - - def simple_test_bboxes(self, feats, img_metas, rescale=False): - """Test det bboxes without test-time augmentation. - - Args: - feats (tuple[torch.Tensor]): Multi-level features from the - upstream network, each is a 4D-tensor. - img_metas (list[dict]): List of image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is ``bboxes`` with shape (n, 5), - where 5 represent (tl_x, tl_y, br_x, br_y, score). 
- The shape of the second tensor in the tuple is ``labels`` - with shape (n,) - """ - # forward of this head requires img_metas - with_nms = self.test_cfg.get('nms', None) - with_nms = True if with_nms is not None else False - outs = self.forward(feats, img_metas) - results_list = self.get_bboxes(*outs, img_metas, rescale=rescale, with_nms=with_nms) - return results_list \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/models/co_detr.py b/cv/detection/co-detr/pytorch/projects/models/co_detr.py deleted file mode 100644 index e55e406935b775fd1e94b1ff5442a1e1e6a62350..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/models/co_detr.py +++ /dev/null @@ -1,415 +0,0 @@ -import warnings - -import torch -import torch.nn as nn - -from mmdet.core import bbox2result -from mmdet.models.builder import DETECTORS, build_backbone, build_head, build_neck -from mmdet.models.detectors.base import BaseDetector - - -@DETECTORS.register_module() -class CoDETR(BaseDetector): - def __init__(self, - backbone, - neck=None, - query_head=None, - rpn_head=None, - roi_head=[None], - bbox_head=[None], - train_cfg=[None, None], - test_cfg=[None, None], - pretrained=[None, None], - init_cfg=None, - with_pos_coord=True, - with_attn_mask=True, - eval_module='detr', - eval_index=0): - super(CoDETR, self).__init__(init_cfg) - self.with_pos_coord = with_pos_coord - self.with_attn_mask = with_attn_mask - # Module for evaluation, ['detr', 'one-stage', 'two-stage'] - self.eval_module = eval_module - # Module index for evaluation - self.eval_index = eval_index - self.backbone = build_backbone(backbone) - - head_idx = 0 - - if neck is not None: - self.neck = build_neck(neck) - - if query_head is not None: - query_head.update(train_cfg=train_cfg[head_idx] if (train_cfg is not None and train_cfg[head_idx] is not None) else None) - query_head.update(test_cfg=test_cfg[head_idx]) - self.query_head = build_head(query_head) - self.query_head.init_weights() - head_idx += 1 - - if rpn_head is not None: - rpn_train_cfg = train_cfg[head_idx].rpn if (train_cfg is not None and train_cfg[head_idx] is not None) else None - rpn_head_ = rpn_head.copy() - rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg[head_idx].rpn) - self.rpn_head = build_head(rpn_head_) - self.rpn_head.init_weights() - - self.roi_head = nn.ModuleList() - for i in range(len(roi_head)): - if roi_head[i]: - rcnn_train_cfg = train_cfg[i+head_idx].rcnn if (train_cfg and train_cfg[i+head_idx] is not None) else None - roi_head[i].update(train_cfg=rcnn_train_cfg) - roi_head[i].update(test_cfg=test_cfg[i+head_idx].rcnn) - self.roi_head.append(build_head(roi_head[i])) - self.roi_head[-1].init_weights() - - self.bbox_head = nn.ModuleList() - for i in range(len(bbox_head)): - if bbox_head[i]: - bbox_head[i].update(train_cfg=train_cfg[i+head_idx+len(self.roi_head)] if (train_cfg and train_cfg[i+head_idx+len(self.roi_head)] is not None) else None) - bbox_head[i].update(test_cfg=test_cfg[i+head_idx+len(self.roi_head)]) - self.bbox_head.append(build_head(bbox_head[i])) - self.bbox_head[-1].init_weights() - - self.head_idx = head_idx - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - @property - def with_rpn(self): - """bool: whether the detector has RPN""" - return hasattr(self, 'rpn_head') and self.rpn_head is not None - - @property - def with_query_head(self): - """bool: whether the detector has a RoI head""" - return hasattr(self, 'query_head') and self.query_head is not None - - @property - def 
with_roi_head(self): - """bool: whether the detector has a RoI head""" - return hasattr(self, 'roi_head') and self.roi_head is not None and len(self.roi_head)>0 - - @property - def with_shared_head(self): - """bool: whether the detector has a shared head in the RoI Head""" - return hasattr(self, 'roi_head') and self.roi_head[0].with_shared_head - - @property - def with_bbox(self): - """bool: whether the detector has a bbox head""" - return ((hasattr(self, 'roi_head') and self.roi_head is not None and len(self.roi_head)>0) - or (hasattr(self, 'bbox_head') and self.bbox_head is not None and len(self.bbox_head)>0)) - - @property - def with_mask(self): - """bool: whether the detector has a mask head""" - return (hasattr(self, 'roi_head') and self.roi_head is not None and len(self.roi_head)>0 and self.roi_head[0].with_mask) - - def extract_feat(self, img, img_metas=None): - """Directly extract features from the backbone+neck.""" - x = self.backbone(img) - if self.with_neck: - x = self.neck(x) - return x - - # over-write `forward_dummy` because: - # the forward of bbox_head requires img_metas - def forward_dummy(self, img): - """Used for computing network flops. - - See `mmdetection/tools/analysis_tools/get_flops.py` - """ - warnings.warn('Warning! MultiheadAttention in DETR does not ' - 'support flops computation! Do not use the ' - 'results in your papers!') - - batch_size, _, height, width = img.shape - dummy_img_metas = [ - dict( - batch_input_shape=(height, width), - img_shape=(height, width, 3)) for _ in range(batch_size) - ] - x = self.extract_feat(img) - outs = self.query_head(x, dummy_img_metas) - return outs - - def forward_train(self, - img, - img_metas, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None, - proposals=None, - **kwargs): - """ - Args: - img (Tensor): of shape (N, C, H, W) encoding input images. - Typically these should be mean centered and std scaled. - - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - - gt_labels (list[Tensor]): class indices corresponding to each box - - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - gt_masks (None | Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. - - proposals : override rpn proposals with custom proposals. Use when - `with_rpn` is False. 
- - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - batch_input_shape = tuple(img[0].size()[-2:]) - for img_meta in img_metas: - img_meta['batch_input_shape'] = batch_input_shape - - if not self.with_attn_mask: # remove attn mask for LSJ - for i in range(len(img_metas)): - input_img_h, input_img_w = img_metas[i]['batch_input_shape'] - img_metas[i]['img_shape'] = [input_img_h, input_img_w, 3] - - x = self.extract_feat(img, img_metas) - - losses = dict() - def upd_loss(losses, idx, weight=1): - new_losses = dict() - for k,v in losses.items(): - new_k = '{}{}'.format(k,idx) - if isinstance(v,list) or isinstance(v,tuple): - new_losses[new_k] = [i*weight for i in v] - else:new_losses[new_k] = v*weight - return new_losses - - # DETR encoder and decoder forward - if self.with_query_head: - bbox_losses, x = self.query_head.forward_train(x, img_metas, gt_bboxes, - gt_labels, gt_bboxes_ignore) - losses.update(bbox_losses) - - - # RPN forward and loss - if self.with_rpn: - proposal_cfg = self.train_cfg[self.head_idx].get('rpn_proposal', - self.test_cfg[self.head_idx].rpn) - rpn_losses, proposal_list = self.rpn_head.forward_train( - x, - img_metas, - gt_bboxes, - gt_labels=None, - gt_bboxes_ignore=gt_bboxes_ignore, - proposal_cfg=proposal_cfg, - **kwargs) - losses.update(rpn_losses) - else: - proposal_list = proposals - - positive_coords = [] - for i in range(len(self.roi_head)): - roi_losses = self.roi_head[i].forward_train(x, img_metas, proposal_list, - gt_bboxes, gt_labels, - gt_bboxes_ignore, gt_masks, - **kwargs) - if self.with_pos_coord: - positive_coords.append(roi_losses.pop('pos_coords')) - else: - if 'pos_coords' in roi_losses.keys(): - tmp = roi_losses.pop('pos_coords') - roi_losses = upd_loss(roi_losses, idx=i) - losses.update(roi_losses) - - for i in range(len(self.bbox_head)): - bbox_losses = self.bbox_head[i].forward_train(x, img_metas, gt_bboxes, - gt_labels, gt_bboxes_ignore) - if self.with_pos_coord: - pos_coords = bbox_losses.pop('pos_coords') - positive_coords.append(pos_coords) - else: - if 'pos_coords' in bbox_losses.keys(): - tmp = bbox_losses.pop('pos_coords') - bbox_losses = upd_loss(bbox_losses, idx=i+len(self.roi_head)) - losses.update(bbox_losses) - - if self.with_pos_coord and len(positive_coords)>0: - for i in range(len(positive_coords)): - bbox_losses = self.query_head.forward_train_aux(x, img_metas, gt_bboxes, - gt_labels, gt_bboxes_ignore, positive_coords[i], i) - bbox_losses = upd_loss(bbox_losses, idx=i) - losses.update(bbox_losses) - - return losses - - - def simple_test_roi_head(self, img, img_metas, proposals=None, rescale=False): - """Test without augmentation.""" - - assert self.with_bbox, 'Bbox head must be implemented.' 
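`forward_train` above merges losses from several parallel heads into a single dict, so the local `upd_loss` helper suffixes every key with the head index (and can also re-weight the values). A standalone sketch of the same renaming, using made-up loss dicts:

```
import torch

def upd_loss(losses, idx, weight=1.0):
    """Suffix each loss key with a head index and optionally re-weight it."""
    new_losses = {}
    for k, v in losses.items():
        new_k = f'{k}{idx}'
        if isinstance(v, (list, tuple)):
            new_losses[new_k] = [item * weight for item in v]
        else:
            new_losses[new_k] = v * weight
    return new_losses

losses = {}
# e.g. two auxiliary heads producing identically named terms
for head_idx in range(2):
    head_losses = {'loss_cls': torch.tensor(0.7), 'loss_bbox': torch.tensor(0.3)}
    losses.update(upd_loss(head_losses, idx=head_idx))
print(sorted(losses))  # ['loss_bbox0', 'loss_bbox1', 'loss_cls0', 'loss_cls1']
```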
- batch_input_shape = tuple(img[0].size()[-2:]) - for img_meta in img_metas: - img_meta['batch_input_shape'] = batch_input_shape - if not self.with_attn_mask: # remove attn mask for LSJ - for i in range(len(img_metas)): - input_img_h, input_img_w = img_metas[i]['batch_input_shape'] - img_metas[i]['img_shape'] = [input_img_h, input_img_w, 3] - - x = self.extract_feat(img, img_metas) - if self.with_query_head: - results = self.query_head.forward(x, img_metas) - x = results[-1] - if proposals is None: - proposal_list = self.rpn_head.simple_test_rpn(x, img_metas) - else: - proposal_list = proposals - - return self.roi_head[self.eval_index].simple_test( - x, proposal_list, img_metas, rescale=rescale) - - def simple_test_query_head(self, img, img_metas, proposals=None, rescale=False): - """Test function without test-time augmentation. - - Args: - img (torch.Tensor): Images with shape (N, C, H, W). - img_metas (list[dict]): List of image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[list[np.ndarray]]: BBox results of each image and classes. - The outer list corresponds to each image. The inner list - corresponds to each class. - """ - index = 0 - batch_input_shape = tuple(img[0].size()[-2:]) - for img_meta in img_metas: - img_meta['batch_input_shape'] = batch_input_shape - if not self.with_attn_mask: # remove attn mask for LSJ - for i in range(len(img_metas)): - input_img_h, input_img_w = img_metas[i]['batch_input_shape'] - img_metas[i]['img_shape'] = [input_img_h, input_img_w, 3] - - x = self.extract_feat(img, img_metas) - results_list = self.query_head.simple_test( - x, img_metas, rescale=rescale) - bbox_results = [ - bbox2result(det_bboxes, det_labels, self.query_head.num_classes) - for det_bboxes, det_labels in results_list - ] - return bbox_results - - def simple_test_bbox_head(self, img, img_metas, proposals=None, rescale=False): - """Test function without test-time augmentation. - - Args: - img (torch.Tensor): Images with shape (N, C, H, W). - img_metas (list[dict]): List of image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[list[np.ndarray]]: BBox results of each image and classes. - The outer list corresponds to each image. The inner list - corresponds to each class. 
- """ - batch_input_shape = tuple(img[0].size()[-2:]) - for img_meta in img_metas: - img_meta['batch_input_shape'] = batch_input_shape - if not self.with_attn_mask: # remove attn mask for LSJ - for i in range(len(img_metas)): - input_img_h, input_img_w = img_metas[i]['batch_input_shape'] - img_metas[i]['img_shape'] = [input_img_h, input_img_w, 3] - - x = self.extract_feat(img, img_metas) - if self.with_query_head: - results = self.query_head.forward(x, img_metas) - x = results[-1] - results_list = self.bbox_head[self.eval_index].simple_test( - x, img_metas, rescale=rescale) - bbox_results = [ - bbox2result(det_bboxes, det_labels, self.bbox_head[self.eval_index].num_classes) - for det_bboxes, det_labels in results_list - ] - return bbox_results - - def simple_test(self, img, img_metas, proposals=None, rescale=False): - """Test without augmentation.""" - assert self.eval_module in ['detr', 'one-stage', 'two-stage'] - if self.with_bbox and self.eval_module=='one-stage': - return self.simple_test_bbox_head(img, img_metas, proposals, rescale) - if self.with_roi_head and self.eval_module=='two-stage': - return self.simple_test_roi_head(img, img_metas, proposals, rescale) - return self.simple_test_query_head(img, img_metas, proposals, rescale) - - def aug_test(self, imgs, img_metas, rescale=False): - """Test function with test time augmentation. - - Args: - imgs (list[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains all images in the batch. - img_metas (list[list[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. each dict has image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[list[np.ndarray]]: BBox results of each image and classes. - The outer list corresponds to each image. The inner list - corresponds to each class. - """ - assert hasattr(self.query_head, 'aug_test'), \ - f'{self.query_head.__class__.__name__}' \ - ' does not support test-time augmentation' - - feats = self.extract_feats(imgs) - results_list = self.query_head.aug_test( - feats, img_metas, rescale=rescale) - bbox_results = [ - bbox2result(det_bboxes, det_labels, self.query_head.num_classes) - for det_bboxes, det_labels in results_list - ] - return bbox_results - - def onnx_export(self, img, img_metas, with_nms=True): - """Test function without test time augmentation. - - Args: - img (torch.Tensor): input images. - img_metas (list[dict]): List of image information. - - Returns: - tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] - and class labels of shape [N, num_det]. 
- """ - x = self.extract_feat(img) - outs = self.query_head.forward_onnx(x, img_metas)[:2] - # get origin input shape to support onnx dynamic shape - - # get shape as tensor - img_shape = torch._shape_as_tensor(img)[2:] - img_metas[0]['img_shape_for_onnx'] = img_shape - # get pad input shape to support onnx dynamic shape for exporting - # `CornerNet` and `CentripetalNet`, which 'pad_shape' is used - # for inference - img_metas[0]['pad_shape_for_onnx'] = img_shape - - if len(outs) == 2: - # add dummy score_factor - outs = (*outs, None) - # TODO Can we change to `get_bboxes` when `onnx_export` fail - # TODO support NMS - # det_bboxes, det_labels = self.query_head.onnx_export( - # *outs, img_metas, with_nms=with_nms) - det_bboxes, det_labels = self.query_head.onnx_export(*outs, img_metas) - - return det_bboxes, det_labels \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/models/co_dino_head.py b/cv/detection/co-detr/pytorch/projects/models/co_dino_head.py deleted file mode 100644 index aee9700425915ba48aebe033d5b8aeaab9db32d6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/models/co_dino_head.py +++ /dev/null @@ -1,683 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F - -from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh, multi_apply, - reduce_mean, bbox_overlaps) -from mmdet.models.utils.transformer import inverse_sigmoid -from mmdet.models.builder import HEADS -from mmcv.ops import batched_nms -from projects.models import CoDeformDETRHead -from projects.models.query_denoising import build_dn_generator - -@HEADS.register_module() -class CoDINOHead(CoDeformDETRHead): - - def __init__(self, - *args, - num_query=900, - dn_cfg=None, - transformer=None, - **kwargs): - - if 'two_stage_num_proposals' in transformer: - assert transformer['two_stage_num_proposals'] == num_query, \ - 'two_stage_num_proposals must be equal to num_query for DINO' - else: - transformer['two_stage_num_proposals'] = num_query - super(CoDINOHead, self).__init__( - *args, num_query=num_query, transformer=transformer, **kwargs) - - assert self.as_two_stage, \ - 'as_two_stage must be True for DINO' - assert self.with_box_refine, \ - 'with_box_refine must be True for DINO' - self._init_layers() - self.init_denoising(dn_cfg) - - def _init_layers(self): - super()._init_layers() - self.query_embedding = None - # NOTE The original repo of DINO set the num_embeddings 92 for coco, - # 91 (0~90) of which represents target classes and the 92 (91) - # indicates [Unknown] class. 
However, the embedding of unknown class - # is not used in the original DINO - self.label_embedding = nn.Embedding(self.cls_out_channels, - self.embed_dims) - self.downsample = nn.Sequential( - nn.Conv2d(self.embed_dims, self.embed_dims, kernel_size=3, stride=2, padding=1), - nn.GroupNorm(32, self.embed_dims) - ) - - def init_denoising(self, dn_cfg): - if dn_cfg is not None: - dn_cfg['num_classes'] = self.num_classes - dn_cfg['num_queries'] = self.num_query - dn_cfg['hidden_dim'] = self.embed_dims - self.dn_generator = build_dn_generator(dn_cfg) - - def forward_train(self, - x, - img_metas, - gt_bboxes, - gt_labels=None, - gt_bboxes_ignore=None, - proposal_cfg=None, - **kwargs): - assert proposal_cfg is None, '"proposal_cfg" must be None' - assert self.dn_generator is not None, '"dn_cfg" must be set' - dn_label_query, dn_bbox_query, attn_mask, dn_meta = \ - self.dn_generator(gt_bboxes, gt_labels, - self.label_embedding, img_metas) - outs = self(x, img_metas, dn_label_query, dn_bbox_query, attn_mask) - if gt_labels is None: - loss_inputs = outs + (gt_bboxes, img_metas, dn_meta) - else: - loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, dn_meta) - losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) - enc_outputs = outs[-1] - return losses, enc_outputs - - def forward(self, - mlvl_feats, - img_metas, - dn_label_query=None, - dn_bbox_query=None, - attn_mask=None): - batch_size = mlvl_feats[0].size(0) - input_img_h, input_img_w = img_metas[0]['batch_input_shape'] - img_masks = mlvl_feats[0].new_ones( - (batch_size, input_img_h, input_img_w)) - for img_id in range(batch_size): - img_h, img_w, _ = img_metas[img_id]['img_shape'] - img_masks[img_id, :img_h, :img_w] = 0 - - mlvl_masks = [] - mlvl_positional_encodings = [] - for feat in mlvl_feats: - mlvl_masks.append( - F.interpolate(img_masks[None], - size=feat.shape[-2:]).to(torch.bool).squeeze(0)) - mlvl_positional_encodings.append( - self.positional_encoding(mlvl_masks[-1])) - - query_embeds = None - hs, inter_references, topk_score, topk_anchor, enc_outputs = \ - self.transformer( - mlvl_feats, - mlvl_masks, - query_embeds, - mlvl_positional_encodings, - dn_label_query, - dn_bbox_query, - attn_mask, - reg_branches=self.reg_branches if self.with_box_refine else None, # noqa:E501 - cls_branches=self.cls_branches if self.as_two_stage else None # noqa:E501 - ) - outs = [] - num_level = len(mlvl_feats) - start = 0 - for lvl in range(num_level): - bs, c, h, w = mlvl_feats[lvl].shape - end = start + h*w - feat = enc_outputs[start:end].permute(1, 2, 0).contiguous() - start = end - outs.append(feat.reshape(bs, c, h, w)) - outs.append(self.downsample(outs[-1])) - - hs = hs.permute(0, 2, 1, 3) - - if dn_label_query is not None and dn_label_query.size(1) == 0: - # NOTE: If there is no target in the image, the parameters of - # label_embedding won't be used in producing loss, which raises - # RuntimeError when using distributed mode. 
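The comment here refers to a common workaround: when the denoising query group is empty, the label embedding receives no gradient, and `DistributedDataParallel` complains about parameters that never participate in the graph. Adding the parameter multiplied by zero keeps it in the autograd graph without changing the output. A minimal sketch of the idea, using a hypothetical module rather than the original head:

```
import torch
import torch.nn as nn

class TinyHead(nn.Module):
    def __init__(self, num_classes=80, embed_dims=256):
        super().__init__()
        self.label_embedding = nn.Embedding(num_classes, embed_dims)
        self.proj = nn.Linear(embed_dims, num_classes)

    def forward(self, feats, use_denoising):
        out = self.proj(feats)
        if not use_denoising:
            # Keep label_embedding in the graph even when no denoising query
            # exists, so DDP does not report an unused parameter.
            out = out + self.label_embedding.weight[0, 0] * 0.0
        return out

head = TinyHead()
loss = head(torch.randn(2, 256), use_denoising=False).sum()
loss.backward()
print(head.label_embedding.weight.grad is not None)  # True (all-zero gradient)
```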
- hs[0] += self.label_embedding.weight[0, 0] * 0.0 - - outputs_classes = [] - outputs_coords = [] - - for lvl in range(hs.shape[0]): - reference = inter_references[lvl] - reference = inverse_sigmoid(reference, eps=1e-3) - outputs_class = self.cls_branches[lvl](hs[lvl]) - tmp = self.reg_branches[lvl](hs[lvl]) - if reference.shape[-1] == 4: - tmp += reference - else: - assert reference.shape[-1] == 2 - tmp[..., :2] += reference - outputs_coord = tmp.sigmoid() - outputs_classes.append(outputs_class) - outputs_coords.append(outputs_coord) - - outputs_classes = torch.stack(outputs_classes) - outputs_coords = torch.stack(outputs_coords) - - return outputs_classes, outputs_coords, topk_score, topk_anchor, outs - - def loss(self, - all_cls_scores, - all_bbox_preds, - enc_topk_scores, - enc_topk_anchors, - enc_outputs, - gt_bboxes_list, - gt_labels_list, - img_metas, - dn_meta=None, - gt_bboxes_ignore=None): - # assert gt_bboxes_ignore is None, \ - # f'{self.__class__.__name__} only supports ' \ - # f'for gt_bboxes_ignore setting to None.' - - loss_dict = dict() - - # extract denoising and matching part of outputs - all_cls_scores, all_bbox_preds, dn_cls_scores, dn_bbox_preds = \ - self.extract_dn_outputs(all_cls_scores, all_bbox_preds, dn_meta) - - if enc_topk_scores is not None: - enc_loss_cls, enc_losses_bbox, enc_losses_iou = \ - self.loss_single(enc_topk_scores, enc_topk_anchors, - gt_bboxes_list, gt_labels_list, - img_metas, gt_bboxes_ignore) - - # collate loss from encode feature maps - loss_dict['enc_loss_cls'] = enc_loss_cls - loss_dict['enc_loss_bbox'] = enc_losses_bbox - loss_dict['enc_loss_iou'] = enc_losses_iou - - # calculate loss from all decoder layers - num_dec_layers = len(all_cls_scores) - all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] - all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] - all_gt_bboxes_ignore_list = [ - gt_bboxes_ignore for _ in range(num_dec_layers) - ] - img_metas_list = [img_metas for _ in range(num_dec_layers)] - losses_cls, losses_bbox, losses_iou = multi_apply( - self.loss_single, all_cls_scores, all_bbox_preds, - all_gt_bboxes_list, all_gt_labels_list, img_metas_list, - all_gt_bboxes_ignore_list) - - # collate loss from the last decoder layer - loss_dict['loss_cls'] = losses_cls[-1] - loss_dict['loss_bbox'] = losses_bbox[-1] - loss_dict['loss_iou'] = losses_iou[-1] - - # collate loss from other decoder layers - num_dec_layer = 0 - for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], - losses_bbox[:-1], - losses_iou[:-1]): - loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i - loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i - loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i - num_dec_layer += 1 - - if dn_cls_scores is not None: - # calculate denoising loss from all decoder layers - dn_meta = [dn_meta for _ in img_metas] - dn_losses_cls, dn_losses_bbox, dn_losses_iou = self.loss_dn( - dn_cls_scores, dn_bbox_preds, gt_bboxes_list, gt_labels_list, - img_metas, dn_meta) - # collate denoising loss - loss_dict['dn_loss_cls'] = dn_losses_cls[-1] - loss_dict['dn_loss_bbox'] = dn_losses_bbox[-1] - loss_dict['dn_loss_iou'] = dn_losses_iou[-1] - num_dec_layer = 0 - for loss_cls_i, loss_bbox_i, loss_iou_i in zip( - dn_losses_cls[:-1], dn_losses_bbox[:-1], - dn_losses_iou[:-1]): - loss_dict[f'd{num_dec_layer}.dn_loss_cls'] = loss_cls_i - loss_dict[f'd{num_dec_layer}.dn_loss_bbox'] = loss_bbox_i - loss_dict[f'd{num_dec_layer}.dn_loss_iou'] = loss_iou_i - num_dec_layer += 1 - - return loss_dict - - def 
loss_dn(self, dn_cls_scores, dn_bbox_preds, gt_bboxes_list, - gt_labels_list, img_metas, dn_meta): - num_dec_layers = len(dn_cls_scores) - all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] - all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] - img_metas_list = [img_metas for _ in range(num_dec_layers)] - dn_meta_list = [dn_meta for _ in range(num_dec_layers)] - return multi_apply(self.loss_dn_single, dn_cls_scores, dn_bbox_preds, - all_gt_bboxes_list, all_gt_labels_list, - img_metas_list, dn_meta_list) - - def loss_dn_single(self, dn_cls_scores, dn_bbox_preds, gt_bboxes_list, - gt_labels_list, img_metas, dn_meta): - num_imgs = dn_cls_scores.size(0) - bbox_preds_list = [dn_bbox_preds[i] for i in range(num_imgs)] - cls_reg_targets = self.get_dn_target(bbox_preds_list, gt_bboxes_list, - gt_labels_list, img_metas, - dn_meta) - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - labels = torch.cat(labels_list, 0) - label_weights = torch.cat(label_weights_list, 0) - bbox_targets = torch.cat(bbox_targets_list, 0) - bbox_weights = torch.cat(bbox_weights_list, 0) - - # classification loss - cls_scores = dn_cls_scores.reshape(-1, self.cls_out_channels) - # construct weighted avg_factor to match with the official DETR repo - cls_avg_factor = \ - num_total_pos * 1.0 + num_total_neg * self.bg_cls_weight - if self.sync_cls_avg_factor: - cls_avg_factor = reduce_mean( - cls_scores.new_tensor([cls_avg_factor])) - cls_avg_factor = max(cls_avg_factor, 1) - - if len(cls_scores) > 0: - bg_class_ind = self.num_classes - pos_inds = ((labels >= 0) - & (labels < bg_class_ind)).nonzero().squeeze(1) - scores = label_weights.new_zeros(labels.shape) - pos_bbox_targets = bbox_targets[pos_inds] - pos_decode_bbox_targets = bbox_cxcywh_to_xyxy(pos_bbox_targets) - pos_bbox_pred = dn_bbox_preds.reshape(-1, 4)[pos_inds] - pos_decode_bbox_pred = bbox_cxcywh_to_xyxy(pos_bbox_pred) - scores[pos_inds] = bbox_overlaps( - pos_decode_bbox_pred.detach(), - pos_decode_bbox_targets, - is_aligned=True) - loss_cls = self.loss_cls( - cls_scores, (labels, scores), - weight=label_weights, - avg_factor=cls_avg_factor) - else: - loss_cls = torch.zeros( # TODO: How to better return zero loss - 1, - dtype=cls_scores.dtype, - device=cls_scores.device) - - # Compute the average number of gt boxes across all gpus, for - # normalization purposes - num_total_pos = loss_cls.new_tensor([num_total_pos]) - num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() - - # construct factors used for rescale bboxes - factors = [] - for img_meta, bbox_pred in zip(img_metas, dn_bbox_preds): - img_h, img_w, _ = img_meta['img_shape'] - factor = bbox_pred.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0).repeat( - bbox_pred.size(0), 1) - factors.append(factor) - factors = torch.cat(factors, 0) - - # DETR regress the relative position of boxes (cxcywh) in the image, - # thus the learning target is normalized by the image size. 
So here - # we need to re-scale them for calculating IoU loss - bbox_preds = dn_bbox_preds.reshape(-1, 4) - bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors - bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors - - # regression IoU loss, defaultly GIoU loss - loss_iou = self.loss_iou( - bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) - - # regression L1 loss - loss_bbox = self.loss_bbox( - bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) - return loss_cls, loss_bbox, loss_iou - - def get_dn_target(self, dn_bbox_preds_list, gt_bboxes_list, gt_labels_list, - img_metas, dn_meta): - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - pos_inds_list, - neg_inds_list) = multi_apply(self._get_dn_target_single, - dn_bbox_preds_list, gt_bboxes_list, - gt_labels_list, img_metas, dn_meta) - num_total_pos = sum((inds.numel() for inds in pos_inds_list)) - num_total_neg = sum((inds.numel() for inds in neg_inds_list)) - return (labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) - - def _get_dn_target_single(self, dn_bbox_pred, gt_bboxes, gt_labels, - img_meta, dn_meta): - num_groups = dn_meta['num_dn_group'] - pad_size = dn_meta['pad_size'] - assert pad_size % num_groups == 0 - single_pad = pad_size // num_groups - num_bboxes = dn_bbox_pred.size(0) - - if len(gt_labels) > 0: - t = torch.range(0, len(gt_labels) - 1).long().cuda() - t = t.unsqueeze(0).repeat(num_groups, 1) - pos_assigned_gt_inds = t.flatten() - pos_inds = (torch.tensor(range(num_groups)) * - single_pad).long().cuda().unsqueeze(1) + t - pos_inds = pos_inds.flatten() - else: - pos_inds = pos_assigned_gt_inds = torch.tensor([]).long().cuda() - neg_inds = pos_inds + single_pad // 2 - - # label targets - labels = gt_bboxes.new_full((num_bboxes, ), - self.num_classes, - dtype=torch.long) - labels[pos_inds] = gt_labels[pos_assigned_gt_inds] - label_weights = gt_bboxes.new_ones(num_bboxes) - - # bbox targets - bbox_targets = torch.zeros_like(dn_bbox_pred) - bbox_weights = torch.zeros_like(dn_bbox_pred) - bbox_weights[pos_inds] = 1.0 - img_h, img_w, _ = img_meta['img_shape'] - - # DETR regress the relative position of boxes (cxcywh) in the image. - # Thus the learning target should be normalized by the image size, also - # the box format should be converted from defaultly x1y1x2y2 to cxcywh. - factor = dn_bbox_pred.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0) - gt_bboxes_normalized = gt_bboxes / factor - gt_bboxes_targets = bbox_xyxy_to_cxcywh(gt_bboxes_normalized) - bbox_targets[pos_inds] = gt_bboxes_targets.repeat([num_groups, 1]) - - return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, - neg_inds) - - @staticmethod - def extract_dn_outputs(all_cls_scores, all_bbox_preds, dn_meta): - if dn_meta is not None: - denoising_cls_scores = all_cls_scores[:, :, : - dn_meta['pad_size'], :] - denoising_bbox_preds = all_bbox_preds[:, :, : - dn_meta['pad_size'], :] - matching_cls_scores = all_cls_scores[:, :, dn_meta['pad_size']:, :] - matching_bbox_preds = all_bbox_preds[:, :, dn_meta['pad_size']:, :] - else: - denoising_cls_scores = None - denoising_bbox_preds = None - matching_cls_scores = all_cls_scores - matching_bbox_preds = all_bbox_preds - return (matching_cls_scores, matching_bbox_preds, denoising_cls_scores, - denoising_bbox_preds) - - def forward_aux(self, mlvl_feats, img_metas, aux_targets, head_idx): - """Forward function. 
- - Args: - mlvl_feats (tuple[Tensor]): Features from the upstream - network, each is a 4D-tensor with shape - (N, C, H, W). - img_metas (list[dict]): List of image information. - - Returns: - all_cls_scores (Tensor): Outputs from the classification head, \ - shape [nb_dec, bs, num_query, cls_out_channels]. Note \ - cls_out_channels should includes background. - all_bbox_preds (Tensor): Sigmoid outputs from the regression \ - head with normalized coordinate format (cx, cy, w, h). \ - Shape [nb_dec, bs, num_query, 4]. - enc_outputs_class (Tensor): The score of each point on encode \ - feature map, has shape (N, h*w, num_class). Only when \ - as_two_stage is True it would be returned, otherwise \ - `None` would be returned. - enc_outputs_coord (Tensor): The proposal generate from the \ - encode feature map, has shape (N, h*w, 4). Only when \ - as_two_stage is True it would be returned, otherwise \ - `None` would be returned. - """ - aux_coords, aux_labels, aux_targets, aux_label_weights, aux_bbox_weights, aux_feats, attn_masks = aux_targets - batch_size = mlvl_feats[0].size(0) - input_img_h, input_img_w = img_metas[0]['batch_input_shape'] - img_masks = mlvl_feats[0].new_ones( - (batch_size, input_img_h, input_img_w)) - for img_id in range(batch_size): - img_h, img_w, _ = img_metas[img_id]['img_shape'] - img_masks[img_id, :img_h, :img_w] = 0 - - mlvl_masks = [] - mlvl_positional_encodings = [] - for feat in mlvl_feats: - mlvl_masks.append( - F.interpolate(img_masks[None], - size=feat.shape[-2:]).to(torch.bool).squeeze(0)) - mlvl_positional_encodings.append( - self.positional_encoding(mlvl_masks[-1])) - - query_embeds = None - hs, inter_references = self.transformer.forward_aux( - mlvl_feats, - mlvl_masks, - query_embeds, - mlvl_positional_encodings, - aux_coords, - pos_feats=aux_feats, - reg_branches=self.reg_branches if self.with_box_refine else None, # noqa:E501 - cls_branches=self.cls_branches if self.as_two_stage else None, # noqa:E501 - return_encoder_output=True, - attn_masks=attn_masks, - head_idx=head_idx - ) - - hs = hs.permute(0, 2, 1, 3) - outputs_classes = [] - outputs_coords = [] - - for lvl in range(hs.shape[0]): - reference = inter_references[lvl] - reference = inverse_sigmoid(reference, eps=1e-3) - outputs_class = self.cls_branches[lvl](hs[lvl]) - tmp = self.reg_branches[lvl](hs[lvl]) - if reference.shape[-1] == 4: - tmp += reference - else: - assert reference.shape[-1] == 2 - tmp[..., :2] += reference - outputs_coord = tmp.sigmoid() - outputs_classes.append(outputs_class) - outputs_coords.append(outputs_coord) - - outputs_classes = torch.stack(outputs_classes) - outputs_coords = torch.stack(outputs_coords) - - return outputs_classes, outputs_coords, \ - None, None - - def loss_single(self, - cls_scores, - bbox_preds, - gt_bboxes_list, - gt_labels_list, - img_metas, - gt_bboxes_ignore_list=None): - """"Loss function for outputs from a single decoder layer of a single - feature level. - - Args: - cls_scores (Tensor): Box score logits from a single decoder layer - for all images. Shape [bs, num_query, cls_out_channels]. - bbox_preds (Tensor): Sigmoid outputs from a single decoder layer - for all images, with normalized coordinate (cx, cy, w, h) and - shape [bs, num_query, 4]. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - img_metas (list[dict]): List of image meta information. 
- gt_bboxes_ignore_list (list[Tensor], optional): Bounding - boxes which can be ignored for each image. Default None. - - Returns: - dict[str, Tensor]: A dictionary of loss components for outputs from - a single decoder layer. - """ - num_imgs = cls_scores.size(0) - cls_scores_list = [cls_scores[i] for i in range(num_imgs)] - bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] - cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, - gt_bboxes_list, gt_labels_list, - img_metas, gt_bboxes_ignore_list) - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - labels = torch.cat(labels_list, 0) - label_weights = torch.cat(label_weights_list, 0) - bbox_targets = torch.cat(bbox_targets_list, 0) - bbox_weights = torch.cat(bbox_weights_list, 0) - - # classification loss - cls_scores = cls_scores.reshape(-1, self.cls_out_channels) - # construct weighted avg_factor to match with the official DETR repo - cls_avg_factor = num_total_pos * 1.0 + \ - num_total_neg * self.bg_cls_weight - if self.sync_cls_avg_factor: - cls_avg_factor = reduce_mean( - cls_scores.new_tensor([cls_avg_factor])) - cls_avg_factor = max(cls_avg_factor, 1) - - bg_class_ind = self.num_classes - pos_inds = ((labels >= 0) - & (labels < bg_class_ind)).nonzero().squeeze(1) - scores = label_weights.new_zeros(labels.shape) - pos_bbox_targets = bbox_targets[pos_inds] - pos_decode_bbox_targets = bbox_cxcywh_to_xyxy(pos_bbox_targets) - pos_bbox_pred = bbox_preds.reshape(-1, 4)[pos_inds] - pos_decode_bbox_pred = bbox_cxcywh_to_xyxy(pos_bbox_pred) - scores[pos_inds] = bbox_overlaps( - pos_decode_bbox_pred.detach(), - pos_decode_bbox_targets, - is_aligned=True) - loss_cls = self.loss_cls( - cls_scores, (labels, scores), - weight=label_weights, - avg_factor=cls_avg_factor) - - # Compute the average number of gt boxes across all gpus, for - # normalization purposes - num_total_pos = loss_cls.new_tensor([num_total_pos]) - num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() - - # construct factors used for rescale bboxes - factors = [] - for img_meta, bbox_pred in zip(img_metas, bbox_preds): - img_h, img_w, _ = img_meta['img_shape'] - factor = bbox_pred.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0).repeat( - bbox_pred.size(0), 1) - factors.append(factor) - factors = torch.cat(factors, 0) - - # DETR regress the relative position of boxes (cxcywh) in the image, - # thus the learning target is normalized by the image size. So here - # we need to re-scale them for calculating IoU loss - bbox_preds = bbox_preds.reshape(-1, 4) - bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors - bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors - - # regression IoU loss, defaultly GIoU loss - loss_iou = self.loss_iou( - bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) - - # regression L1 loss - loss_bbox = self.loss_bbox( - bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) - return loss_cls, loss_bbox, loss_iou - - def loss_single_aux(self, - cls_scores, - bbox_preds, - labels, - label_weights, - bbox_targets, - bbox_weights, - img_metas, - gt_bboxes_ignore_list=None): - """"Loss function for outputs from a single decoder layer of a single - feature level. - - Args: - cls_scores (Tensor): Box score logits from a single decoder layer - for all images. Shape [bs, num_query, cls_out_channels]. 
- bbox_preds (Tensor): Sigmoid outputs from a single decoder layer - for all images, with normalized coordinate (cx, cy, w, h) and - shape [bs, num_query, 4]. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - img_metas (list[dict]): List of image meta information. - gt_bboxes_ignore_list (list[Tensor], optional): Bounding - boxes which can be ignored for each image. Default None. - - Returns: - dict[str, Tensor]: A dictionary of loss components for outputs from - a single decoder layer. - """ - num_imgs = cls_scores.size(0) - num_q = cls_scores.size(1) - try: - labels = labels.reshape(num_imgs * num_q) - label_weights = label_weights.reshape(num_imgs * num_q) - bbox_targets = bbox_targets.reshape(num_imgs * num_q, 4) - bbox_weights = bbox_weights.reshape(num_imgs * num_q, 4) - except: - return cls_scores.mean()*0, cls_scores.mean()*0, cls_scores.mean()*0 - - bg_class_ind = self.num_classes - num_total_pos = len(((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1)) - num_total_neg = num_imgs*num_q - num_total_pos - - # classification loss - cls_scores = cls_scores.reshape(-1, self.cls_out_channels) - # construct weighted avg_factor to match with the official DETR repo - cls_avg_factor = num_total_pos * 1.0 + \ - num_total_neg * self.bg_cls_weight - if self.sync_cls_avg_factor: - cls_avg_factor = reduce_mean( - cls_scores.new_tensor([cls_avg_factor])) - cls_avg_factor = max(cls_avg_factor, 1) - - bg_class_ind = self.num_classes - pos_inds = ((labels >= 0) - & (labels < bg_class_ind)).nonzero().squeeze(1) - scores = label_weights.new_zeros(labels.shape) - pos_bbox_targets = bbox_targets[pos_inds] - pos_decode_bbox_targets = bbox_cxcywh_to_xyxy(pos_bbox_targets) - pos_bbox_pred = bbox_preds.reshape(-1, 4)[pos_inds] - pos_decode_bbox_pred = bbox_cxcywh_to_xyxy(pos_bbox_pred) - scores[pos_inds] = bbox_overlaps( - pos_decode_bbox_pred.detach(), - pos_decode_bbox_targets, - is_aligned=True) - loss_cls = self.loss_cls( - cls_scores, (labels, scores), - weight=label_weights, - avg_factor=cls_avg_factor) - - # Compute the average number of gt boxes across all gpus, for - # normalization purposes - num_total_pos = loss_cls.new_tensor([num_total_pos]) - num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() - - # construct factors used for rescale bboxes - factors = [] - for img_meta, bbox_pred in zip(img_metas, bbox_preds): - img_h, img_w, _ = img_meta['img_shape'] - factor = bbox_pred.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0).repeat( - bbox_pred.size(0), 1) - factors.append(factor) - factors = torch.cat(factors, 0) - - # DETR regress the relative position of boxes (cxcywh) in the image, - # thus the learning target is normalized by the image size. 
So here - # we need to re-scale them for calculating IoU loss - bbox_preds = bbox_preds.reshape(-1, 4) - bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors - bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors - - # regression IoU loss, defaultly GIoU loss - loss_iou = self.loss_iou( - bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) - - # regression L1 loss - loss_bbox = self.loss_bbox( - bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) - return loss_cls*self.lambda_1, loss_bbox*self.lambda_1, loss_iou*self.lambda_1 diff --git a/cv/detection/co-detr/pytorch/projects/models/co_roi_head.py b/cv/detection/co-detr/pytorch/projects/models/co_roi_head.py deleted file mode 100644 index cf7fd361c58251b09f6262f54899fff5f5702e5b..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/models/co_roi_head.py +++ /dev/null @@ -1,421 +0,0 @@ -import torch -from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler -from mmdet.models.builder import HEADS, build_head, build_roi_extractor -from mmdet.models.roi_heads.base_roi_head import BaseRoIHead -from mmdet.models.roi_heads.test_mixins import BBoxTestMixin, MaskTestMixin - - -@HEADS.register_module() -class CoStandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): - """Simplest base roi head including one bbox head and one mask head.""" - - def init_assigner_sampler(self): - """Initialize assigner and sampler.""" - self.bbox_assigner = None - self.bbox_sampler = None - if self.train_cfg: - self.bbox_assigner = build_assigner(self.train_cfg.assigner) - self.bbox_sampler = build_sampler( - self.train_cfg.sampler, context=self) - - def init_bbox_head(self, bbox_roi_extractor, bbox_head): - """Initialize ``bbox_head``""" - self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor) - self.bbox_head = build_head(bbox_head) - - def init_mask_head(self, mask_roi_extractor, mask_head): - """Initialize ``mask_head``""" - if mask_roi_extractor is not None: - self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) - self.share_roi_extractor = False - else: - self.share_roi_extractor = True - self.mask_roi_extractor = self.bbox_roi_extractor - self.mask_head = build_head(mask_head) - - def forward_dummy(self, x, proposals): - """Dummy forward function.""" - # bbox head - outs = () - rois = bbox2roi([proposals]) - if self.with_bbox: - bbox_results = self._bbox_forward(x, rois) - outs = outs + (bbox_results['cls_score'], - bbox_results['bbox_pred']) - # mask head - if self.with_mask: - mask_rois = rois[:100] - mask_results = self._mask_forward(x, mask_rois) - outs = outs + (mask_results['mask_pred'], ) - return outs - - def forward_train(self, - x, - img_metas, - proposal_list, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None, - **kwargs): - """ - Args: - x (list[Tensor]): list of multi-level img features. - img_metas (list[dict]): list of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - proposals (list[Tensors]): list of region proposals. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. 
- gt_masks (None | Tensor) : true segmentation masks for each box - used if the architecture supports a segmentation task. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - # assign gts and sample proposals - if self.with_bbox or self.with_mask: - num_imgs = len(img_metas) - if gt_bboxes_ignore is None: - gt_bboxes_ignore = [None for _ in range(num_imgs)] - sampling_results = [] - for i in range(num_imgs): - assign_result = self.bbox_assigner.assign( - proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], - gt_labels[i]) - sampling_result = self.bbox_sampler.sample( - assign_result, - proposal_list[i], - gt_bboxes[i], - gt_labels[i], - feats=[lvl_feat[i][None] for lvl_feat in x]) - sampling_results.append(sampling_result) - - losses = dict() - # bbox head forward and loss - if self.with_bbox: - bbox_results = self._bbox_forward_train(x, sampling_results, - gt_bboxes, gt_labels, - img_metas) - losses.update(bbox_results['loss_bbox']) - - bbox_targets = bbox_results['bbox_targets'] - num_imgs = len(img_metas) - max_proposal = 2000 - for res in sampling_results: - max_proposal = min(max_proposal, res.bboxes.shape[0]) - ori_coords = bbox2roi([res.bboxes for res in sampling_results]) - ori_proposals, ori_labels, ori_bbox_targets, ori_bbox_feats = [], [], [], [] - for i in range(num_imgs): - idx = (ori_coords[:,0]==i).nonzero().squeeze(1) - idx = idx[:max_proposal] - ori_proposal = ori_coords[idx][:, 1:].unsqueeze(0) - ori_label = bbox_targets[0][idx].unsqueeze(0) - ori_bbox_target = bbox_targets[2][idx].unsqueeze(0) - ori_bbox_feat = bbox_results['bbox_feats'].mean(-1).mean(-1) - ori_bbox_feat = ori_bbox_feat[idx].unsqueeze(0) - ori_proposals.append(ori_proposal) - ori_labels.append(ori_label) - ori_bbox_targets.append(ori_bbox_target) - ori_bbox_feats.append(ori_bbox_feat) - ori_coords = torch.cat(ori_proposals, dim=0) - ori_labels = torch.cat(ori_labels, dim=0) - ori_bbox_targets = torch.cat(ori_bbox_targets, dim=0) - ori_bbox_feats = torch.cat(ori_bbox_feats, dim=0) - pos_coords = (ori_coords, ori_labels, ori_bbox_targets, ori_bbox_feats, 'rcnn') - losses.update(pos_coords=pos_coords) - - # mask head forward and loss - if self.with_mask: - mask_results = self._mask_forward_train(x, sampling_results, - bbox_results['bbox_feats'], - gt_masks, img_metas) - losses.update(mask_results['loss_mask']) - - return losses - - def _bbox_forward(self, x, rois): - """Box head forward function used in both training and testing.""" - # TODO: a more flexible way to decide which feature maps to use - bbox_feats = self.bbox_roi_extractor( - x[:self.bbox_roi_extractor.num_inputs], rois) - if self.with_shared_head: - bbox_feats = self.shared_head(bbox_feats) - cls_score, bbox_pred = self.bbox_head(bbox_feats) - - bbox_results = dict( - cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) - return bbox_results - - def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, - img_metas): - """Run forward function and calculate loss for box head in training.""" - rois = bbox2roi([res.bboxes for res in sampling_results]) - bbox_results = self._bbox_forward(x, rois) - - bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, - gt_labels, self.train_cfg) - loss_bbox = self.bbox_head.loss(bbox_results['cls_score'], - bbox_results['bbox_pred'], rois, - *bbox_targets) - bbox_results.update(loss_bbox=loss_bbox) - bbox_results.update(bbox_targets=bbox_targets) - return bbox_results - - def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, - 
img_metas): - """Run forward function and calculate loss for mask head in - training.""" - if not self.share_roi_extractor: - pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) - mask_results = self._mask_forward(x, pos_rois) - else: - pos_inds = [] - device = bbox_feats.device - for res in sampling_results: - pos_inds.append( - torch.ones( - res.pos_bboxes.shape[0], - device=device, - dtype=torch.uint8)) - pos_inds.append( - torch.zeros( - res.neg_bboxes.shape[0], - device=device, - dtype=torch.uint8)) - pos_inds = torch.cat(pos_inds) - - mask_results = self._mask_forward( - x, pos_inds=pos_inds, bbox_feats=bbox_feats) - - mask_targets = self.mask_head.get_targets(sampling_results, gt_masks, - self.train_cfg) - pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) - loss_mask = self.mask_head.loss(mask_results['mask_pred'], - mask_targets, pos_labels) - - mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets) - return mask_results - - def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None): - """Mask head forward function used in both training and testing.""" - assert ((rois is not None) ^ - (pos_inds is not None and bbox_feats is not None)) - if rois is not None: - mask_feats = self.mask_roi_extractor( - x[:self.mask_roi_extractor.num_inputs], rois) - if self.with_shared_head: - mask_feats = self.shared_head(mask_feats) - else: - assert bbox_feats is not None - mask_feats = bbox_feats[pos_inds] - - mask_pred = self.mask_head(mask_feats) - mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats) - return mask_results - - async def async_simple_test(self, - x, - proposal_list, - img_metas, - proposals=None, - rescale=False): - """Async test without augmentation.""" - assert self.with_bbox, 'Bbox head must be implemented.' - - det_bboxes, det_labels = await self.async_test_bboxes( - x, img_metas, proposal_list, self.test_cfg, rescale=rescale) - bbox_results = bbox2result(det_bboxes, det_labels, - self.bbox_head.num_classes) - if not self.with_mask: - return bbox_results - else: - segm_results = await self.async_test_mask( - x, - img_metas, - det_bboxes, - det_labels, - rescale=rescale, - mask_test_cfg=self.test_cfg.get('mask')) - return bbox_results, segm_results - - def simple_test(self, - x, - proposal_list, - img_metas, - proposals=None, - rescale=False): - """Test without augmentation. - - Args: - x (tuple[Tensor]): Features from upstream network. Each - has shape (batch_size, c, h, w). - proposal_list (list(Tensor)): Proposals from rpn head. - Each has shape (num_proposals, 5), last dimension - 5 represent (x1, y1, x2, y2, score). - img_metas (list[dict]): Meta information of images. - rescale (bool): Whether to rescale the results to - the original image. Default: True. - - Returns: - list[list[np.ndarray]] or list[tuple]: When no mask branch, - it is bbox results of each image and classes with type - `list[list[np.ndarray]]`. The outer list - corresponds to each image. The inner list - corresponds to each class. When the model has mask branch, - it contains bbox results and mask results. - The outer list corresponds to each image, and first element - of tuple is bbox results, second element is mask results. - """ - assert self.with_bbox, 'Bbox head must be implemented.' 
- - det_bboxes, det_labels = self.simple_test_bboxes( - x, img_metas, proposal_list, self.test_cfg, rescale=rescale) - - bbox_results = [ - bbox2result(det_bboxes[i], det_labels[i], - self.bbox_head.num_classes) - for i in range(len(det_bboxes)) - ] - - if not self.with_mask: - return bbox_results - else: - segm_results = self.simple_test_mask( - x, img_metas, det_bboxes, det_labels, rescale=rescale) - return list(zip(bbox_results, segm_results)) - - def aug_test(self, x, proposal_list, img_metas, rescale=False): - """Test with augmentations. - - If rescale is False, then returned bboxes and masks will fit the scale - of imgs[0]. - """ - det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas, - proposal_list, - self.test_cfg) - if rescale: - _det_bboxes = det_bboxes - else: - _det_bboxes = det_bboxes.clone() - _det_bboxes[:, :4] *= det_bboxes.new_tensor( - img_metas[0][0]['scale_factor']) - bbox_results = bbox2result(_det_bboxes, det_labels, - self.bbox_head.num_classes) - - # det_bboxes always keep the original scale - if self.with_mask: - segm_results = self.aug_test_mask(x, img_metas, det_bboxes, - det_labels) - return [(bbox_results, segm_results)] - else: - return [bbox_results] - - def onnx_export(self, x, proposals, img_metas, rescale=False): - """Test without augmentation.""" - assert self.with_bbox, 'Bbox head must be implemented.' - det_bboxes, det_labels = self.bbox_onnx_export( - x, img_metas, proposals, self.test_cfg, rescale=rescale) - - if not self.with_mask: - return det_bboxes, det_labels - else: - segm_results = self.mask_onnx_export( - x, img_metas, det_bboxes, det_labels, rescale=rescale) - return det_bboxes, det_labels, segm_results - - def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs): - """Export mask branch to onnx which supports batch inference. - - Args: - x (tuple[Tensor]): Feature maps of all scale level. - img_metas (list[dict]): Image meta info. - det_bboxes (Tensor): Bboxes and corresponding scores. - has shape [N, num_bboxes, 5]. - det_labels (Tensor): class labels of - shape [N, num_bboxes]. - - Returns: - Tensor: The segmentation results of shape [N, num_bboxes, - image_height, image_width]. - """ - # image shapes of images in the batch - - if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes): - raise RuntimeError('[ONNX Error] Can not record MaskHead ' - 'as it has not been executed this time') - batch_size = det_bboxes.size(0) - # if det_bboxes is rescaled to the original image size, we need to - # rescale it back to the testing scale to obtain RoIs. - det_bboxes = det_bboxes[..., :4] - batch_index = torch.arange( - det_bboxes.size(0), device=det_bboxes.device).float().view( - -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1) - mask_rois = torch.cat([batch_index, det_bboxes], dim=-1) - mask_rois = mask_rois.view(-1, 5) - mask_results = self._mask_forward(x, mask_rois) - mask_pred = mask_results['mask_pred'] - max_shape = img_metas[0]['img_shape_for_onnx'] - num_det = det_bboxes.shape[1] - det_bboxes = det_bboxes.reshape(-1, 4) - det_labels = det_labels.reshape(-1) - segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes, - det_labels, self.test_cfg, - max_shape) - segm_results = segm_results.reshape(batch_size, num_det, max_shape[0], - max_shape[1]) - return segm_results - - def bbox_onnx_export(self, x, img_metas, proposals, rcnn_test_cfg, - **kwargs): - """Export bbox branch to onnx which supports batch inference. - - Args: - x (tuple[Tensor]): Feature maps of all scale level. 
- img_metas (list[dict]): Image meta info. - proposals (Tensor): Region proposals with - batch dimension, has shape [N, num_bboxes, 5]. - rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. - - Returns: - tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5] - and class labels of shape [N, num_bboxes]. - """ - # get origin input shape to support onnx dynamic input shape - assert len( - img_metas - ) == 1, 'Only support one input image while in exporting to ONNX' - img_shapes = img_metas[0]['img_shape_for_onnx'] - - rois = proposals - - batch_index = torch.arange( - rois.size(0), device=rois.device).float().view(-1, 1, 1).expand( - rois.size(0), rois.size(1), 1) - - rois = torch.cat([batch_index, rois[..., :4]], dim=-1) - batch_size = rois.shape[0] - num_proposals_per_img = rois.shape[1] - - # Eliminate the batch dimension - rois = rois.view(-1, 5) - bbox_results = self._bbox_forward(x, rois) - cls_score = bbox_results['cls_score'] - bbox_pred = bbox_results['bbox_pred'] - - # Recover the batch dimension - rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1)) - cls_score = cls_score.reshape(batch_size, num_proposals_per_img, - cls_score.size(-1)) - - bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, - bbox_pred.size(-1)) - det_bboxes, det_labels = self.bbox_head.onnx_export( - rois, cls_score, bbox_pred, img_shapes, cfg=rcnn_test_cfg) - - return det_bboxes, det_labels diff --git a/cv/detection/co-detr/pytorch/projects/models/query_denoising.py b/cv/detection/co-detr/pytorch/projects/models/query_denoising.py deleted file mode 100644 index 03a5d657007cdb24915a3ce8d01905a6a0dd5037..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/models/query_denoising.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from mmdet.core import bbox_xyxy_to_cxcywh -from .transformer import inverse_sigmoid - - -class DnQueryGenerator: - - def __init__(self, - num_queries, - hidden_dim, - num_classes, - noise_scale=dict(label=0.5, box=0.4), - group_cfg=dict( - dynamic=True, num_groups=None, num_dn_queries=None)): - super(DnQueryGenerator, self).__init__() - self.num_queries = num_queries - self.hidden_dim = hidden_dim - self.num_classes = num_classes - self.label_noise_scale = noise_scale['label'] - self.box_noise_scale = noise_scale['box'] - self.dynamic_dn_groups = group_cfg.get('dynamic', False) - if self.dynamic_dn_groups: - assert 'num_dn_queries' in group_cfg, \ - 'num_dn_queries should be set when using ' \ - 'dynamic dn groups' - self.num_dn = group_cfg['num_dn_queries'] - else: - assert 'num_groups' in group_cfg, \ - 'num_groups should be set when using ' \ - 'static dn groups' - self.num_dn = group_cfg['num_groups'] - assert isinstance(self.num_dn, int) and self.num_dn >= 1, \ - f'Expected the num in group_cfg to have type int. ' \ - f'Found {type(self.num_dn)} ' - - def get_num_groups(self, group_queries=None): - """ - Args: - group_queries (int): Number of dn queries in one group. 
- """ - if self.dynamic_dn_groups: - assert group_queries is not None, \ - 'group_queries should be provided when using ' \ - 'dynamic dn groups' - if group_queries == 0: - num_groups = 1 - else: - num_groups = self.num_dn // group_queries - else: - num_groups = self.num_dn - if num_groups < 1: - num_groups = 1 - return int(num_groups) - - def __call__(self, - gt_bboxes, - gt_labels=None, - label_enc=None, - img_metas=None): - """ - - Args: - gt_bboxes (List[Tensor]): List of ground truth bboxes - of the image, shape of each (num_gts, 4). - gt_labels (List[Tensor]): List of ground truth labels - of the image, shape of each (num_gts,), if None, - TODO:noisy_label would be None. - - Returns: - TODO - """ - # TODO: temp only support for CDN - # TODO: temp assert gt_labels is not None and label_enc is not None - if gt_labels is not None: - assert len(gt_bboxes) == len(gt_labels), \ - f'the length of provided gt_labels ' \ - f'{len(gt_labels)} should be equal to' \ - f' that of gt_bboxes {len(gt_bboxes)}' - assert gt_labels is not None \ - and label_enc is not None \ - and img_metas is not None # TODO: adjust args - batch_size = len(gt_bboxes) - - # convert bbox - gt_bboxes_list = [] - for img_meta, bboxes in zip(img_metas, gt_bboxes): - img_h, img_w, _ = img_meta['img_shape'] - factor = bboxes.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0) - bboxes_normalized = bbox_xyxy_to_cxcywh(bboxes) / factor - gt_bboxes_list.append(bboxes_normalized) - gt_bboxes = gt_bboxes_list - - known = [torch.ones_like(labels) for labels in gt_labels] - known_num = [sum(k) for k in known] - - num_groups = self.get_num_groups(int(max(known_num))) - - unmask_bbox = unmask_label = torch.cat(known) - labels = torch.cat(gt_labels) - boxes = torch.cat(gt_bboxes) - batch_idx = torch.cat( - [torch.full_like(t.long(), i) for i, t in enumerate(gt_labels)]) - - known_indice = torch.nonzero(unmask_label + unmask_bbox) - known_indice = known_indice.view(-1) - - known_indice = known_indice.repeat(2 * num_groups, 1).view(-1) - known_labels = labels.repeat(2 * num_groups, 1).view(-1) - known_bid = batch_idx.repeat(2 * num_groups, 1).view(-1) - known_bboxs = boxes.repeat(2 * num_groups, 1) - known_labels_expand = known_labels.clone() - known_bbox_expand = known_bboxs.clone() - - if self.label_noise_scale > 0: - p = torch.rand_like(known_labels_expand.float()) - chosen_indice = torch.nonzero( - p < (self.label_noise_scale * 0.5)).view(-1) - new_label = torch.randint_like(chosen_indice, 0, self.num_classes) - known_labels_expand.scatter_(0, chosen_indice, new_label) - single_pad = int(max(known_num)) # TODO - - pad_size = int(single_pad * 2 * num_groups) - positive_idx = torch.tensor(range( - len(boxes))).long().cuda().unsqueeze(0).repeat(num_groups, 1) - positive_idx += (torch.tensor(range(num_groups)) * len(boxes) * - 2).long().cuda().unsqueeze(1) - positive_idx = positive_idx.flatten() - negative_idx = positive_idx + len(boxes) - if self.box_noise_scale > 0: - known_bbox_ = torch.zeros_like(known_bboxs) - known_bbox_[:, : 2] = \ - known_bboxs[:, : 2] - known_bboxs[:, 2:] / 2 - known_bbox_[:, 2:] = \ - known_bboxs[:, :2] + known_bboxs[:, 2:] / 2 - - diff = torch.zeros_like(known_bboxs) - diff[:, :2] = known_bboxs[:, 2:] / 2 - diff[:, 2:] = known_bboxs[:, 2:] / 2 - - rand_sign = torch.randint_like( - known_bboxs, low=0, high=2, dtype=torch.float32) - rand_sign = rand_sign * 2.0 - 1.0 - rand_part = torch.rand_like(known_bboxs) - rand_part[negative_idx] += 1.0 - rand_part *= rand_sign - known_bbox_ += \ - torch.mul(rand_part, 
diff).cuda() * self.box_noise_scale - known_bbox_ = known_bbox_.clamp(min=0.0, max=1.0) - known_bbox_expand[:, :2] = \ - (known_bbox_[:, :2] + known_bbox_[:, 2:]) / 2 - known_bbox_expand[:, 2:] = \ - known_bbox_[:, 2:] - known_bbox_[:, :2] - - m = known_labels_expand.long().to('cuda') - input_label_embed = label_enc(m) - input_bbox_embed = inverse_sigmoid(known_bbox_expand, eps=1e-3) - - padding_label = torch.zeros(pad_size, self.hidden_dim).cuda() - padding_bbox = torch.zeros(pad_size, 4).cuda() - - input_query_label = padding_label.repeat(batch_size, 1, 1) - input_query_bbox = padding_bbox.repeat(batch_size, 1, 1) - - map_known_indice = torch.tensor([]).to('cuda') - if len(known_num): - map_known_indice = torch.cat( - [torch.tensor(range(num)) for num in known_num]) - map_known_indice = torch.cat([ - map_known_indice + single_pad * i - for i in range(2 * num_groups) - ]).long() - if len(known_bid): - input_query_label[(known_bid.long(), - map_known_indice)] = input_label_embed - input_query_bbox[(known_bid.long(), - map_known_indice)] = input_bbox_embed - - tgt_size = pad_size + self.num_queries - attn_mask = torch.ones(tgt_size, tgt_size).to('cuda') < 0 - # match query cannot see the reconstruct - attn_mask[pad_size:, :pad_size] = True - # reconstruct cannot see each other - for i in range(num_groups): - if i == 0: - attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), - single_pad * 2 * (i + 1):pad_size] = True - if i == num_groups - 1: - attn_mask[single_pad * 2 * i:single_pad * 2 * - (i + 1), :single_pad * i * 2] = True - else: - attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), - single_pad * 2 * (i + 1):pad_size] = True - attn_mask[single_pad * 2 * i:single_pad * 2 * - (i + 1), :single_pad * 2 * i] = True - - dn_meta = { - 'pad_size': pad_size, - 'num_dn_group': num_groups, - } - return input_query_label, input_query_bbox, attn_mask, dn_meta - - -class CdnQueryGenerator(DnQueryGenerator): - - def __init__(self, *args, **kwargs): - super(CdnQueryGenerator, self).__init__(*args, **kwargs) - - -def build_dn_generator(dn_args): - """ - - Args: - dn_args (dict): - - Returns: - - """ - if dn_args is None: - return None - type = dn_args.pop('type') - if type == 'DnQueryGenerator': - return DnQueryGenerator(**dn_args) - elif type == 'CdnQueryGenerator': - return CdnQueryGenerator(**dn_args) - else: - raise NotImplementedError(f'{type} is not supported yet') \ No newline at end of file diff --git a/cv/detection/co-detr/pytorch/projects/models/swin_transformer.py b/cv/detection/co-detr/pytorch/projects/models/swin_transformer.py deleted file mode 100644 index 2cb6c549e6e7216f11fc6f0e953230641b641b90..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/models/swin_transformer.py +++ /dev/null @@ -1,648 +0,0 @@ -# -------------------------------------------------------- -# Swin Transformer -# Copyright (c) 2021 Microsoft -# Licensed under The MIT License [see LICENSE for details] -# Written by Ze Liu, Yutong Lin, Yixuan Wei -# -------------------------------------------------------- - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as checkpoint -import numpy as np -from timm.models.layers import DropPath, to_2tuple, trunc_normal_ - -from mmcv_custom import load_checkpoint -from mmdet.utils import get_root_logger -from mmdet.models.builder import BACKBONES - -from mmcv.runner import BaseModule - -class Mlp(nn.Module): - """ Multilayer perceptron.""" - - def __init__(self, in_features, 
hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class WindowAttention(nn.Module): - """ Window based multi-head self attention (W-MSA) module with relative position bias. - It supports both of shifted and non-shifted window. - Args: - dim (int): Number of input channels. - window_size (tuple[int]): The height and width of the window. - num_heads (int): Number of attention heads. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set - attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 - proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 - """ - - def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim ** -0.5 - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - trunc_normal_(self.relative_position_bias_table, std=.02) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - """ Forward function. - Args: - x: input features with shape of (num_windows*B, N, C) - mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None - """ - B_, N, C = x.shape - qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = (q @ k.transpose(-2, -1)) - - relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class SwinTransformerBlock(nn.Module): - """ Swin Transformer Block. - Args: - dim (int): Number of input channels. - num_heads (int): Number of attention heads. - window_size (int): Window size. - shift_size (int): Shift size for SW-MSA. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. Default: 0.0 - act_layer (nn.Module, optional): Activation layer. 
Default: nn.GELU - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, dim, num_heads, window_size=7, shift_size=0, - mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., - act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, - qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - self.H = None - self.W = None - - def forward(self, x, mask_matrix): - """ Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. - mask_matrix: Attention mask for cyclic shift. - """ - B, L, C = x.shape - H, W = self.H, self.W - assert L == H * W, "input feature has wrong size" - - shortcut = x - x = self.norm1(x) - x = x.view(B, H, W, C) - - # pad feature maps to multiples of window size - pad_l = pad_t = 0 - pad_r = (self.window_size - W % self.window_size) % self.window_size - pad_b = (self.window_size - H % self.window_size) % self.window_size - x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) - _, Hp, Wp, _ = x.shape - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - attn_mask = mask_matrix - else: - shifted_x = x - attn_mask = None - - # partition windows - x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - - if pad_r > 0 or pad_b > 0: - x = x[:, :H, :W, :].contiguous() - - x = x.view(B, H * W, C) - - # FFN - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - - return x - - -class PatchMerging(nn.Module): - """ Patch Merging Layer - Args: - dim (int): Number of input channels. - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - def __init__(self, dim, norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) - self.norm = norm_layer(4 * dim) - - def forward(self, x, H, W): - """ Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. 
- """ - B, L, C = x.shape - assert L == H * W, "input feature has wrong size" - - x = x.view(B, H, W, C) - - # padding - pad_input = (H % 2 == 1) or (W % 2 == 1) - if pad_input: - x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) - - x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C - x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C - x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C - x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C - x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C - x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C - - x = self.norm(x) - x = self.reduction(x) - - return x - - -class BasicLayer(nn.Module): - """ A basic Swin Transformer layer for one stage. - Args: - dim (int): Number of feature channels - depth (int): Depths of this stage. - num_heads (int): Number of attention head. - window_size (int): Local window size. Default: 7. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - """ - - def __init__(self, - dim, - depth, - num_heads, - window_size=7, - mlp_ratio=4., - qkv_bias=True, - qk_scale=None, - drop=0., - attn_drop=0., - drop_path=0., - norm_layer=nn.LayerNorm, - downsample=None, - use_checkpoint=False): - super().__init__() - self.window_size = window_size - self.shift_size = window_size // 2 - self.depth = depth - self.use_checkpoint = use_checkpoint - - # build blocks - self.blocks = nn.ModuleList([ - SwinTransformerBlock( - dim=dim, - num_heads=num_heads, - window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop, - attn_drop=attn_drop, - drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, - norm_layer=norm_layer) - for i in range(depth)]) - - # patch merging layer - if downsample is not None: - self.downsample = downsample(dim=dim, norm_layer=norm_layer) - else: - self.downsample = None - - def forward(self, x, H, W): - """ Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. 
- """ - - # calculate attention mask for SW-MSA - Hp = int(np.ceil(H / self.window_size)) * self.window_size - Wp = int(np.ceil(W / self.window_size)) * self.window_size - img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1 - h_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - w_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - - for blk in self.blocks: - blk.H, blk.W = H, W - if self.use_checkpoint: - x = checkpoint.checkpoint(blk, x, attn_mask) - else: - x = blk(x, attn_mask) - if self.downsample is not None: - x_down = self.downsample(x, H, W) - Wh, Ww = (H + 1) // 2, (W + 1) // 2 - return x, H, W, x_down, Wh, Ww - else: - return x, H, W, x, H, W - - -class PatchEmbed(nn.Module): - """ Image to Patch Embedding - Args: - patch_size (int): Patch token size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Module, optional): Normalization layer. Default: None - """ - - def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): - super().__init__() - patch_size = to_2tuple(patch_size) - self.patch_size = patch_size - - self.in_chans = in_chans - self.embed_dim = embed_dim - - self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) - if norm_layer is not None: - self.norm = norm_layer(embed_dim) - else: - self.norm = None - - def forward(self, x): - """Forward function.""" - # padding - _, _, H, W = x.size() - if W % self.patch_size[1] != 0: - x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1])) - if H % self.patch_size[0] != 0: - x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0])) - - x = self.proj(x) # B C Wh Ww - if self.norm is not None: - Wh, Ww = x.size(2), x.size(3) - x = x.flatten(2).transpose(1, 2) - x = self.norm(x) - x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww) - - return x - - -@BACKBONES.register_module() -class SwinTransformerV1(BaseModule): - """ Swin Transformer backbone. - A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - - https://arxiv.org/pdf/2103.14030 - Args: - pretrain_img_size (int): Input image size for training the pretrained model, - used in absolute postion embedding. Default 224. - patch_size (int | tuple(int)): Patch size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - depths (tuple[int]): Depths of each Swin Transformer stage. - num_heads (tuple[int]): Number of attention head of each stage. - window_size (int): Window size. Default: 7. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. - qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. - drop_rate (float): Dropout rate. 
- attn_drop_rate (float): Attention dropout rate. Default: 0. - drop_path_rate (float): Stochastic depth rate. Default: 0.2. - norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. - ape (bool): If True, add absolute position embedding to the patch embedding. Default: False. - patch_norm (bool): If True, add normalization after patch embedding. Default: True. - out_indices (Sequence[int]): Output from which stages. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - pretrained (str, optional): model pretrained path. Default: None. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. - """ - - def __init__(self, - pretrain_img_size=224, - patch_size=4, - in_chans=3, - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4., - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.2, - norm_layer=nn.LayerNorm, - ape=False, - patch_norm=True, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - use_checkpoint=False, - pretrained=None, - init_cfg=None): - assert init_cfg is None, 'To prevent abnormal initialization ' \ - 'behavior, init_cfg is not allowed to be set' - super().__init__(init_cfg=init_cfg) - - self.pretrain_img_size = pretrain_img_size - self.num_layers = len(depths) - self.embed_dim = embed_dim - self.ape = ape - self.patch_norm = patch_norm - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.pretrained = pretrained - - # split image into non-overlapping patches - self.patch_embed = PatchEmbed( - patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, - norm_layer=norm_layer if self.patch_norm else None) - - # absolute position embedding - if self.ape: - pretrain_img_size = to_2tuple(pretrain_img_size) - patch_size = to_2tuple(patch_size) - patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]] - - self.absolute_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1])) - trunc_normal_(self.absolute_pos_embed, std=.02) - - self.pos_drop = nn.Dropout(p=drop_rate) - - # stochastic depth - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule - - # build layers - self.layers = nn.ModuleList() - for i_layer in range(self.num_layers): - layer = BasicLayer( - dim=int(embed_dim * 2 ** i_layer), - depth=depths[i_layer], - num_heads=num_heads[i_layer], - window_size=window_size, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop_rate, - attn_drop=attn_drop_rate, - drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], - norm_layer=norm_layer, - downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, - use_checkpoint=use_checkpoint) - self.layers.append(layer) - - num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] - self.num_features = num_features - - # add a norm layer for each output - for i_layer in out_indices: - layer = norm_layer(num_features[i_layer]) - layer_name = f'norm{i_layer}' - self.add_module(layer_name, layer) - - self._freeze_stages() - - def _freeze_stages(self): - if self.frozen_stages >= 0: - self.patch_embed.eval() - for param in self.patch_embed.parameters(): - param.requires_grad = False - - if self.frozen_stages >= 1 and self.ape: - 
self.absolute_pos_embed.requires_grad = False - - if self.frozen_stages >= 2: - self.pos_drop.eval() - for i in range(0, self.frozen_stages - 1): - m = self.layers[i] - m.eval() - for param in m.parameters(): - param.requires_grad = False - - # def init_weights(self, pretrained=None): - # """Initialize the weights in backbone. - - # Args: - # pretrained (str, optional): Path to pre-trained weights. - # Defaults to None. - # """ - - # def _init_weights(m): - # if isinstance(m, nn.Linear): - # trunc_normal_(m.weight, std=.02) - # if isinstance(m, nn.Linear) and m.bias is not None: - # nn.init.constant_(m.bias, 0) - # elif isinstance(m, nn.LayerNorm): - # nn.init.constant_(m.bias, 0) - # nn.init.constant_(m.weight, 1.0) - - # if isinstance(pretrained, str): - # self.apply(_init_weights) - # logger = get_root_logger() - # load_checkpoint(self, pretrained, strict=False, logger=logger) - # elif pretrained is None: - # self.apply(_init_weights) - # else: - # raise TypeError('pretrained must be a str or None') - - def init_weights(self): - """Initialize the weights in backbone.""" - - def _init_weights(m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - if isinstance(self.pretrained, str): - self.apply(_init_weights) - logger = get_root_logger() - load_checkpoint(self, self.pretrained, strict=False, logger=logger) - elif self.pretrained is None: - self.apply(_init_weights) - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - """Forward function.""" - x = self.patch_embed(x) - - Wh, Ww = x.size(2), x.size(3) - if self.ape: - # interpolate the position embedding to the corresponding size - absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic') - x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C - else: - x = x.flatten(2).transpose(1, 2) - x = self.pos_drop(x) - - outs = [] - for i in range(self.num_layers): - layer = self.layers[i] - x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) - - if i in self.out_indices: - norm_layer = getattr(self, f'norm{i}') - x_out = norm_layer(x_out) - - out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() - outs.append(out) - - return tuple(outs) - - def train(self, mode=True): - """Convert the model into training mode while keep layers freezed.""" - super(SwinTransformerV1, self).train(mode) - self._freeze_stages() diff --git a/cv/detection/co-detr/pytorch/projects/models/transformer.py b/cv/detection/co-detr/pytorch/projects/models/transformer.py deleted file mode 100644 index a56cd0458c42cf494f11a49967b5dda9233be8d9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/projects/models/transformer.py +++ /dev/null @@ -1,778 +0,0 @@ -import math -import warnings - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from mmcv.cnn import xavier_init -from mmcv.cnn.bricks.registry import TRANSFORMER_LAYER_SEQUENCE -from mmcv.cnn.bricks.transformer import TransformerLayerSequence - -from mmdet.models.utils.transformer import Transformer, DeformableDetrTransformer, DeformableDetrTransformerDecoder -from mmdet.models.utils.builder import TRANSFORMER - - -def inverse_sigmoid(x, eps=1e-5): - """Inverse function of sigmoid. - - Args: - x (Tensor): The tensor to do the - inverse. 
- eps (float): EPS avoid numerical - overflow. Defaults 1e-5. - Returns: - Tensor: The x has passed the inverse - function of sigmoid, has same - shape with input. - """ - x = x.clamp(min=0, max=1) - x1 = x.clamp(min=eps) - x2 = (1 - x).clamp(min=eps) - return torch.log(x1 / x2) - - -@TRANSFORMER_LAYER_SEQUENCE.register_module() -class CoDeformableDetrTransformerDecoder(TransformerLayerSequence): - """Implements the decoder in DETR transformer. - - Args: - return_intermediate (bool): Whether to return intermediate outputs. - coder_norm_cfg (dict): Config of last normalization layer. Default: - `LN`. - """ - - def __init__(self, *args, return_intermediate=False, look_forward_twice=False, **kwargs): - - super(CoDeformableDetrTransformerDecoder, self).__init__(*args, **kwargs) - self.return_intermediate = return_intermediate - self.look_forward_twice = look_forward_twice - - def forward(self, - query, - *args, - reference_points=None, - valid_ratios=None, - reg_branches=None, - **kwargs): - """Forward function for `TransformerDecoder`. - - Args: - query (Tensor): Input query with shape - `(num_query, bs, embed_dims)`. - reference_points (Tensor): The reference - points of offset. has shape - (bs, num_query, 4) when as_two_stage, - otherwise has shape ((bs, num_query, 2). - valid_ratios (Tensor): The radios of valid - points on the feature map, has shape - (bs, num_levels, 2) - reg_branch: (obj:`nn.ModuleList`): Used for - refining the regression results. Only would - be passed when with_box_refine is True, - otherwise would be passed a `None`. - - Returns: - Tensor: Results with shape [1, num_query, bs, embed_dims] when - return_intermediate is `False`, otherwise it has shape - [num_layers, num_query, bs, embed_dims]. - """ - output = query - intermediate = [] - intermediate_reference_points = [] - for lid, layer in enumerate(self.layers): - if reference_points.shape[-1] == 4: - reference_points_input = reference_points[:, :, None] * \ - torch.cat([valid_ratios, valid_ratios], -1)[:, None] - else: - assert reference_points.shape[-1] == 2 - reference_points_input = reference_points[:, :, None] * \ - valid_ratios[:, None] - output = layer( - output, - *args, - reference_points=reference_points_input, - **kwargs) - output = output.permute(1, 0, 2) - - if reg_branches is not None: - tmp = reg_branches[lid](output) - if reference_points.shape[-1] == 4: - new_reference_points = tmp + inverse_sigmoid( - reference_points) - new_reference_points = new_reference_points.sigmoid() - else: - assert reference_points.shape[-1] == 2 - new_reference_points = tmp - new_reference_points[..., :2] = tmp[ - ..., :2] + inverse_sigmoid(reference_points) - new_reference_points = new_reference_points.sigmoid() - reference_points = new_reference_points.detach() - - output = output.permute(1, 0, 2) - if self.return_intermediate: - intermediate.append(output) - intermediate_reference_points.append( - new_reference_points - if self.look_forward_twice - else reference_points - ) - if self.return_intermediate: - return torch.stack(intermediate), torch.stack( - intermediate_reference_points) - - return output, reference_points - - -@TRANSFORMER.register_module() -class CoDeformableDetrTransformer(DeformableDetrTransformer): - """Implements the DeformableDETR transformer. - - Args: - as_two_stage (bool): Generate query from encoder features. - Default: False. - num_feature_levels (int): Number of feature maps from FPN: - Default: 4. - two_stage_num_proposals (int): Number of proposals when set - `as_two_stage` as True. 
Default: 300. - """ - - def __init__(self, - mixed_selection=True, - with_pos_coord=True, - with_coord_feat=True, - num_co_heads=1, - **kwargs): - self.mixed_selection = mixed_selection - self.with_pos_coord = with_pos_coord - self.with_coord_feat = with_coord_feat - self.num_co_heads = num_co_heads - super(CoDeformableDetrTransformer, self).__init__(**kwargs) - self._init_layers() - - def _init_layers(self): - """Initialize layers of the DeformableDetrTransformer.""" - if self.with_pos_coord: - if self.num_co_heads > 0: - # bug: this code should be 'self.head_pos_embed = nn.Embedding(self.num_co_heads, self.embed_dims)', we keep this bug for reproducing our results with ResNet-50. - # You can fix this bug when reproducing results with swin transformer. - self.head_pos_embed = nn.Embedding(self.num_co_heads, 1, 1, self.embed_dims) - self.aux_pos_trans = nn.ModuleList() - self.aux_pos_trans_norm = nn.ModuleList() - self.pos_feats_trans = nn.ModuleList() - self.pos_feats_norm = nn.ModuleList() - for i in range(self.num_co_heads): - self.aux_pos_trans.append(nn.Linear(self.embed_dims*2, self.embed_dims*2)) - self.aux_pos_trans_norm.append(nn.LayerNorm(self.embed_dims*2)) - if self.with_coord_feat: - self.pos_feats_trans.append(nn.Linear(self.embed_dims, self.embed_dims)) - self.pos_feats_norm.append(nn.LayerNorm(self.embed_dims)) - - def get_proposal_pos_embed(self, - proposals, - num_pos_feats=128, - temperature=10000): - """Get the position embedding of proposal.""" - num_pos_feats = self.embed_dims // 2 - scale = 2 * math.pi - dim_t = torch.arange( - num_pos_feats, dtype=torch.float32, device=proposals.device) - dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats) - # N, L, 4 - proposals = proposals.sigmoid() * scale - # N, L, 4, 128 - pos = proposals[:, :, :, None] / dim_t - # N, L, 4, 64, 2 - pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), - dim=4).flatten(2) - return pos - - def forward(self, - mlvl_feats, - mlvl_masks, - query_embed, - mlvl_pos_embeds, - reg_branches=None, - cls_branches=None, - return_encoder_output=False, - attn_masks=None, - **kwargs): - """Forward function for `Transformer`. - - Args: - mlvl_feats (list(Tensor)): Input queries from - different level. Each element has shape - [bs, embed_dims, h, w]. - mlvl_masks (list(Tensor)): The key_padding_mask from - different level used for encoder and decoder, - each element has shape [bs, h, w]. - query_embed (Tensor): The query embedding for decoder, - with shape [num_query, c]. - mlvl_pos_embeds (list(Tensor)): The positional encoding - of feats from different level, has the shape - [bs, embed_dims, h, w]. - reg_branches (obj:`nn.ModuleList`): Regression heads for - feature maps from each decoder layer. Only would - be passed when - `with_box_refine` is True. Default to None. - cls_branches (obj:`nn.ModuleList`): Classification heads - for feature maps from each decoder layer. Only would - be passed when `as_two_stage` - is True. Default to None. - - - Returns: - tuple[Tensor]: results of decoder containing the following tensor. - - - inter_states: Outputs from decoder. If - return_intermediate_dec is True output has shape \ - (num_dec_layers, bs, num_query, embed_dims), else has \ - shape (1, bs, num_query, embed_dims). - - init_reference_out: The initial value of reference \ - points, has shape (bs, num_queries, 4). 
- - inter_references_out: The internal value of reference \ - points in decoder, has shape \ - (num_dec_layers, bs,num_query, embed_dims) - - enc_outputs_class: The classification score of \ - proposals generated from \ - encoder's feature maps, has shape \ - (batch, h*w, num_classes). \ - Only would be returned when `as_two_stage` is True, \ - otherwise None. - - enc_outputs_coord_unact: The regression results \ - generated from encoder's feature maps., has shape \ - (batch, h*w, 4). Only would \ - be returned when `as_two_stage` is True, \ - otherwise None. - """ - assert self.as_two_stage or query_embed is not None - - feat_flatten = [] - mask_flatten = [] - lvl_pos_embed_flatten = [] - spatial_shapes = [] - for lvl, (feat, mask, pos_embed) in enumerate( - zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): - bs, c, h, w = feat.shape - spatial_shape = (h, w) - spatial_shapes.append(spatial_shape) - feat = feat.flatten(2).transpose(1, 2) - mask = mask.flatten(1) - pos_embed = pos_embed.flatten(2).transpose(1, 2) - lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1) - lvl_pos_embed_flatten.append(lvl_pos_embed) - feat_flatten.append(feat) - mask_flatten.append(mask) - feat_flatten = torch.cat(feat_flatten, 1) - mask_flatten = torch.cat(mask_flatten, 1) - lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) - spatial_shapes = torch.as_tensor( - spatial_shapes, dtype=torch.long, device=feat_flatten.device) - level_start_index = torch.cat((spatial_shapes.new_zeros( - (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) - valid_ratios = torch.stack( - [self.get_valid_ratio(m) for m in mlvl_masks], 1) - - reference_points = \ - self.get_reference_points(spatial_shapes, - valid_ratios, - device=feat.device) - - feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) - lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute( - 1, 0, 2) # (H*W, bs, embed_dims) - memory = self.encoder( - query=feat_flatten, - key=None, - value=None, - query_pos=lvl_pos_embed_flatten, - query_key_padding_mask=mask_flatten, - spatial_shapes=spatial_shapes, - reference_points=reference_points, - level_start_index=level_start_index, - valid_ratios=valid_ratios, - **kwargs) - - memory = memory.permute(1, 0, 2) - bs, _, c = memory.shape - if self.as_two_stage: - output_memory, output_proposals = \ - self.gen_encoder_output_proposals( - memory, mask_flatten, spatial_shapes) - enc_outputs_class = cls_branches[self.decoder.num_layers]( - output_memory) - enc_outputs_coord_unact = \ - reg_branches[ - self.decoder.num_layers](output_memory) + output_proposals - - topk = self.two_stage_num_proposals - topk = query_embed.shape[0] - topk_proposals = torch.topk( - enc_outputs_class[..., 0], topk, dim=1)[1] - topk_coords_unact = torch.gather( - enc_outputs_coord_unact, 1, - topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) - topk_coords_unact = topk_coords_unact.detach() - reference_points = topk_coords_unact.sigmoid() - init_reference_out = reference_points - pos_trans_out = self.pos_trans_norm( - self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))) - - if not self.mixed_selection: - query_pos, query = torch.split(pos_trans_out, c, dim=2) - else: - # query_embed here is the content embed for deformable DETR - query = query_embed.unsqueeze(0).expand(bs, -1, -1) - query_pos, _ = torch.split(pos_trans_out, c, dim=2) - else: - query_pos, query = torch.split(query_embed, c, dim=1) - query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) - query = query.unsqueeze(0).expand(bs, -1, -1) - reference_points = 
self.reference_points(query_pos).sigmoid() - init_reference_out = reference_points - - # decoder - query = query.permute(1, 0, 2) - memory = memory.permute(1, 0, 2) - query_pos = query_pos.permute(1, 0, 2) - inter_states, inter_references = self.decoder( - query=query, - key=None, - value=memory, - query_pos=query_pos, - key_padding_mask=mask_flatten, - reference_points=reference_points, - spatial_shapes=spatial_shapes, - level_start_index=level_start_index, - valid_ratios=valid_ratios, - reg_branches=reg_branches, - attn_masks=attn_masks, - **kwargs) - - inter_references_out = inter_references - if self.as_two_stage: - if return_encoder_output: - return inter_states, init_reference_out,\ - inter_references_out, enc_outputs_class,\ - enc_outputs_coord_unact, memory - return inter_states, init_reference_out,\ - inter_references_out, enc_outputs_class,\ - enc_outputs_coord_unact - if return_encoder_output: - return inter_states, init_reference_out, \ - inter_references_out, None, None, memory - return inter_states, init_reference_out, \ - inter_references_out, None, None - - def forward_aux(self, - mlvl_feats, - mlvl_masks, - query_embed, - mlvl_pos_embeds, - pos_anchors, - pos_feats=None, - reg_branches=None, - cls_branches=None, - return_encoder_output=False, - attn_masks=None, - head_idx=0, - **kwargs): - feat_flatten = [] - mask_flatten = [] - spatial_shapes = [] - for lvl, (feat, mask, pos_embed) in enumerate( - zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): - bs, c, h, w = feat.shape - spatial_shape = (h, w) - spatial_shapes.append(spatial_shape) - feat = feat.flatten(2).transpose(1, 2) - mask = mask.flatten(1) - feat_flatten.append(feat) - mask_flatten.append(mask) - feat_flatten = torch.cat(feat_flatten, 1) - mask_flatten = torch.cat(mask_flatten, 1) - spatial_shapes = torch.as_tensor( - spatial_shapes, dtype=torch.long, device=feat_flatten.device) - level_start_index = torch.cat((spatial_shapes.new_zeros( - (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) - valid_ratios = torch.stack( - [self.get_valid_ratio(m) for m in mlvl_masks], 1) - - feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) - - memory = feat_flatten - memory = memory.permute(1, 0, 2) - bs, _, c = memory.shape - - topk = pos_anchors.shape[1] - topk_coords_unact = inverse_sigmoid((pos_anchors)) - reference_points = pos_anchors - init_reference_out = reference_points - if self.num_co_heads > 0: - pos_trans_out = self.aux_pos_trans_norm[head_idx]( - self.aux_pos_trans[head_idx](self.get_proposal_pos_embed(topk_coords_unact))) - query_pos, query = torch.split(pos_trans_out, c, dim=2) - if self.with_coord_feat: - query = query + self.pos_feats_norm[head_idx](self.pos_feats_trans[head_idx](pos_feats)) - query_pos = query_pos + self.head_pos_embed.weight[head_idx] - - # decoder - query = query.permute(1, 0, 2) - memory = memory.permute(1, 0, 2) - query_pos = query_pos.permute(1, 0, 2) - inter_states, inter_references = self.decoder( - query=query, - key=None, - value=memory, - query_pos=query_pos, - key_padding_mask=mask_flatten, - reference_points=reference_points, - spatial_shapes=spatial_shapes, - level_start_index=level_start_index, - valid_ratios=valid_ratios, - reg_branches=reg_branches, - attn_masks=attn_masks, - **kwargs) - - inter_references_out = inter_references - return inter_states, init_reference_out, \ - inter_references_out - - -def build_MLP(input_dim, hidden_dim, output_dim, num_layers): - # TODO: It can be implemented by add an out_channel arg of - # mmcv.cnn.bricks.transformer.FFN - 
assert num_layers > 1, \ - f'num_layers should be greater than 1 but got {num_layers}' - h = [hidden_dim] * (num_layers - 1) - layers = list() - for n, k in zip([input_dim] + h[:-1], h): - layers.extend((nn.Linear(n, k), nn.ReLU())) - # Note that the relu func of MLP in original DETR repo is set - # 'inplace=False', however the ReLU cfg of FFN in mmdet is set - # 'inplace=True' by default. - layers.append(nn.Linear(hidden_dim, output_dim)) - return nn.Sequential(*layers) - -@TRANSFORMER_LAYER_SEQUENCE.register_module() -class DinoTransformerDecoder(DeformableDetrTransformerDecoder): - - def __init__(self, *args, **kwargs): - super(DinoTransformerDecoder, self).__init__(*args, **kwargs) - self._init_layers() - - def _init_layers(self): - self.ref_point_head = build_MLP(self.embed_dims * 2, self.embed_dims, - self.embed_dims, 2) - self.norm = nn.LayerNorm(self.embed_dims) - - @staticmethod - def gen_sineembed_for_position(pos_tensor, pos_feat): - # n_query, bs, _ = pos_tensor.size() - # sineembed_tensor = torch.zeros(n_query, bs, 256) - scale = 2 * math.pi - dim_t = torch.arange( - pos_feat, dtype=torch.float32, device=pos_tensor.device) - dim_t = 10000**(2 * (dim_t // 2) / pos_feat) - x_embed = pos_tensor[:, :, 0] * scale - y_embed = pos_tensor[:, :, 1] * scale - pos_x = x_embed[:, :, None] / dim_t - pos_y = y_embed[:, :, None] / dim_t - pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), - dim=3).flatten(2) - pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), - dim=3).flatten(2) - if pos_tensor.size(-1) == 2: - pos = torch.cat((pos_y, pos_x), dim=2) - elif pos_tensor.size(-1) == 4: - w_embed = pos_tensor[:, :, 2] * scale - pos_w = w_embed[:, :, None] / dim_t - pos_w = torch.stack( - (pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), - dim=3).flatten(2) - - h_embed = pos_tensor[:, :, 3] * scale - pos_h = h_embed[:, :, None] / dim_t - pos_h = torch.stack( - (pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), - dim=3).flatten(2) - - pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2) - else: - raise ValueError('Unknown pos_tensor shape(-1):{}'.format( - pos_tensor.size(-1))) - return pos - - def forward(self, - query, - *args, - reference_points=None, - valid_ratios=None, - reg_branches=None, - **kwargs): - output = query - intermediate = [] - intermediate_reference_points = [reference_points] - for lid, layer in enumerate(self.layers): - if reference_points.shape[-1] == 4: - reference_points_input = \ - reference_points[:, :, None] * torch.cat( - [valid_ratios, valid_ratios], -1)[:, None] - else: - assert reference_points.shape[-1] == 2 - reference_points_input = \ - reference_points[:, :, None] * valid_ratios[:, None] - - query_sine_embed = self.gen_sineembed_for_position( - reference_points_input[:, :, 0, :], self.embed_dims//2) - query_pos = self.ref_point_head(query_sine_embed) - - query_pos = query_pos.permute(1, 0, 2) - output = layer( - output, - *args, - query_pos=query_pos, - reference_points=reference_points_input, - **kwargs) - output = output.permute(1, 0, 2) - - if reg_branches is not None: - tmp = reg_branches[lid](output) - assert reference_points.shape[-1] == 4 - # TODO: should do earlier - new_reference_points = tmp + inverse_sigmoid( - reference_points, eps=1e-3) - new_reference_points = new_reference_points.sigmoid() - reference_points = new_reference_points.detach() - - output = output.permute(1, 0, 2) - if self.return_intermediate: - intermediate.append(self.norm(output)) - 
intermediate_reference_points.append(new_reference_points) - # NOTE this is for the "Look Forward Twice" module, - # in the DeformDETR, reference_points was appended. - - if self.return_intermediate: - return torch.stack(intermediate), torch.stack( - intermediate_reference_points) - - return output, reference_points - -@TRANSFORMER.register_module() -class CoDinoTransformer(CoDeformableDetrTransformer): - - def __init__(self, *args, **kwargs): - super(CoDinoTransformer, self).__init__(*args, **kwargs) - - def init_layers(self): - """Initialize layers of the DinoTransformer.""" - self.level_embeds = nn.Parameter( - torch.Tensor(self.num_feature_levels, self.embed_dims)) - self.enc_output = nn.Linear(self.embed_dims, self.embed_dims) - self.enc_output_norm = nn.LayerNorm(self.embed_dims) - self.query_embed = nn.Embedding(self.two_stage_num_proposals, - self.embed_dims) - - def _init_layers(self): - if self.with_pos_coord: - if self.num_co_heads > 0: - self.aux_pos_trans = nn.ModuleList() - self.aux_pos_trans_norm = nn.ModuleList() - self.pos_feats_trans = nn.ModuleList() - self.pos_feats_norm = nn.ModuleList() - for i in range(self.num_co_heads): - self.aux_pos_trans.append(nn.Linear(self.embed_dims*2, self.embed_dims)) - self.aux_pos_trans_norm.append(nn.LayerNorm(self.embed_dims)) - if self.with_coord_feat: - self.pos_feats_trans.append(nn.Linear(self.embed_dims, self.embed_dims)) - self.pos_feats_norm.append(nn.LayerNorm(self.embed_dims)) - - def init_weights(self): - super().init_weights() - nn.init.normal_(self.query_embed.weight.data) - - def forward(self, - mlvl_feats, - mlvl_masks, - query_embed, - mlvl_pos_embeds, - dn_label_query, - dn_bbox_query, - attn_mask, - reg_branches=None, - cls_branches=None, - **kwargs): - assert self.as_two_stage and query_embed is None, \ - 'as_two_stage must be True for DINO' - - feat_flatten = [] - mask_flatten = [] - lvl_pos_embed_flatten = [] - spatial_shapes = [] - for lvl, (feat, mask, pos_embed) in enumerate( - zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): - bs, c, h, w = feat.shape - spatial_shape = (h, w) - spatial_shapes.append(spatial_shape) - feat = feat.flatten(2).transpose(1, 2) - mask = mask.flatten(1) - pos_embed = pos_embed.flatten(2).transpose(1, 2) - lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1) - lvl_pos_embed_flatten.append(lvl_pos_embed) - feat_flatten.append(feat) - mask_flatten.append(mask) - feat_flatten = torch.cat(feat_flatten, 1) - mask_flatten = torch.cat(mask_flatten, 1) - lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) - spatial_shapes = torch.as_tensor( - spatial_shapes, dtype=torch.long, device=feat_flatten.device) - level_start_index = torch.cat((spatial_shapes.new_zeros( - (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) - valid_ratios = torch.stack( - [self.get_valid_ratio(m) for m in mlvl_masks], 1) - - reference_points = self.get_reference_points( - spatial_shapes, valid_ratios, device=feat.device) - - feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) - lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute( - 1, 0, 2) # (H*W, bs, embed_dims) - memory = self.encoder( - query=feat_flatten, - key=None, - value=None, - query_pos=lvl_pos_embed_flatten, - query_key_padding_mask=mask_flatten, - spatial_shapes=spatial_shapes, - reference_points=reference_points, - level_start_index=level_start_index, - valid_ratios=valid_ratios, - **kwargs) - memory = memory.permute(1, 0, 2) - bs, _, c = memory.shape - - output_memory, output_proposals = self.gen_encoder_output_proposals( - 
memory, mask_flatten, spatial_shapes) - enc_outputs_class = cls_branches[self.decoder.num_layers]( - output_memory) - enc_outputs_coord_unact = reg_branches[self.decoder.num_layers]( - output_memory) + output_proposals - cls_out_features = cls_branches[self.decoder.num_layers].out_features - topk = self.two_stage_num_proposals - # NOTE In DeformDETR, enc_outputs_class[..., 0] is used for topk TODO - topk_indices = torch.topk(enc_outputs_class.max(-1)[0], topk, dim=1)[1] - - topk_score = torch.gather( - enc_outputs_class, 1, - topk_indices.unsqueeze(-1).repeat(1, 1, cls_out_features)) - topk_coords_unact = torch.gather( - enc_outputs_coord_unact, 1, - topk_indices.unsqueeze(-1).repeat(1, 1, 4)) - topk_anchor = topk_coords_unact.sigmoid() - topk_coords_unact = topk_coords_unact.detach() - - query = self.query_embed.weight[:, None, :].repeat(1, bs, - 1).transpose(0, 1) - # NOTE the query_embed here is not spatial query as in DETR. - # It is actually content query, which is named tgt in other - # DETR-like models - if dn_label_query is not None: - query = torch.cat([dn_label_query, query], dim=1) - if dn_bbox_query is not None: - reference_points = torch.cat([dn_bbox_query, topk_coords_unact], - dim=1) - else: - reference_points = topk_coords_unact - reference_points = reference_points.sigmoid() - # decoder - query = query.permute(1, 0, 2) - memory = memory.permute(1, 0, 2) - inter_states, inter_references = self.decoder( - query=query, - key=None, - value=memory, - attn_masks=attn_mask, - key_padding_mask=mask_flatten, - reference_points=reference_points, - spatial_shapes=spatial_shapes, - level_start_index=level_start_index, - valid_ratios=valid_ratios, - reg_branches=reg_branches, - **kwargs) - - inter_references_out = inter_references - - return inter_states, inter_references_out, topk_score, topk_anchor, memory - - - def forward_aux(self, - mlvl_feats, - mlvl_masks, - query_embed, - mlvl_pos_embeds, - pos_anchors, - pos_feats=None, - reg_branches=None, - cls_branches=None, - return_encoder_output=False, - attn_masks=None, - head_idx=0, - **kwargs): - feat_flatten = [] - mask_flatten = [] - spatial_shapes = [] - for lvl, (feat, mask, pos_embed) in enumerate( - zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): - bs, c, h, w = feat.shape - spatial_shape = (h, w) - spatial_shapes.append(spatial_shape) - feat = feat.flatten(2).transpose(1, 2) - mask = mask.flatten(1) - feat_flatten.append(feat) - mask_flatten.append(mask) - feat_flatten = torch.cat(feat_flatten, 1) - mask_flatten = torch.cat(mask_flatten, 1) - spatial_shapes = torch.as_tensor( - spatial_shapes, dtype=torch.long, device=feat_flatten.device) - level_start_index = torch.cat((spatial_shapes.new_zeros( - (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) - valid_ratios = torch.stack( - [self.get_valid_ratio(m) for m in mlvl_masks], 1) - - feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) - - memory = feat_flatten - #enc_inter = [feat.permute(1, 2, 0) for feat in enc_inter] - memory = memory.permute(1, 0, 2) - bs, _, c = memory.shape - - topk = pos_anchors.shape[1] - topk_coords_unact = inverse_sigmoid((pos_anchors)) - reference_points = (pos_anchors) - init_reference_out = reference_points - if self.num_co_heads > 0: - pos_trans_out = self.aux_pos_trans_norm[head_idx]( - self.aux_pos_trans[head_idx](self.get_proposal_pos_embed(topk_coords_unact))) - query = pos_trans_out - if self.with_coord_feat: - query = query + self.pos_feats_norm[head_idx](self.pos_feats_trans[head_idx](pos_feats)) - - # decoder - query = 
query.permute(1, 0, 2) - memory = memory.permute(1, 0, 2) - inter_states, inter_references = self.decoder( - query=query, - key=None, - value=memory, - attn_masks=None, - key_padding_mask=mask_flatten, - reference_points=reference_points, - spatial_shapes=spatial_shapes, - level_start_index=level_start_index, - valid_ratios=valid_ratios, - reg_branches=reg_branches, - **kwargs) - - inter_references_out = inter_references - - return inter_states, inter_references_out diff --git a/cv/detection/co-detr/pytorch/tools/analysis_tools/analyze_logs.py b/cv/detection/co-detr/pytorch/tools/analysis_tools/analyze_logs.py deleted file mode 100644 index ca13ea806fdcfb43b3c906e85638130d6aaca15e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/analysis_tools/analyze_logs.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import json -from collections import defaultdict - -import matplotlib.pyplot as plt -import numpy as np -import seaborn as sns - - -def cal_train_time(log_dicts, args): - for i, log_dict in enumerate(log_dicts): - print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}') - all_times = [] - for epoch in log_dict.keys(): - if args.include_outliers: - all_times.append(log_dict[epoch]['time']) - else: - all_times.append(log_dict[epoch]['time'][1:]) - if not all_times: - raise KeyError( - 'Please reduce the log interval in the config so that' - 'interval is less than iterations of one epoch.') - all_times = np.array(all_times) - epoch_ave_time = all_times.mean(-1) - slowest_epoch = epoch_ave_time.argmax() - fastest_epoch = epoch_ave_time.argmin() - std_over_epoch = epoch_ave_time.std() - print(f'slowest epoch {slowest_epoch + 1}, ' - f'average time is {epoch_ave_time[slowest_epoch]:.4f}') - print(f'fastest epoch {fastest_epoch + 1}, ' - f'average time is {epoch_ave_time[fastest_epoch]:.4f}') - print(f'time std over epochs is {std_over_epoch:.4f}') - print(f'average iter time: {np.mean(all_times):.4f} s/iter') - print() - - -def plot_curve(log_dicts, args): - if args.backend is not None: - plt.switch_backend(args.backend) - sns.set_style(args.style) - # if legend is None, use {filename}_{key} as legend - legend = args.legend - if legend is None: - legend = [] - for json_log in args.json_logs: - for metric in args.keys: - legend.append(f'{json_log}_{metric}') - assert len(legend) == (len(args.json_logs) * len(args.keys)) - metrics = args.keys - - num_metrics = len(metrics) - for i, log_dict in enumerate(log_dicts): - epochs = list(log_dict.keys()) - for j, metric in enumerate(metrics): - print(f'plot curve of {args.json_logs[i]}, metric is {metric}') - if metric not in log_dict[epochs[int(args.eval_interval) - 1]]: - if 'mAP' in metric: - raise KeyError( - f'{args.json_logs[i]} does not contain metric ' - f'{metric}. Please check if "--no-validate" is ' - 'specified when you trained the model.') - raise KeyError( - f'{args.json_logs[i]} does not contain metric {metric}. 
' - 'Please reduce the log interval in the config so that ' - 'interval is less than iterations of one epoch.') - - if 'mAP' in metric: - xs = [] - ys = [] - for epoch in epochs: - ys += log_dict[epoch][metric] - if 'val' in log_dict[epoch]['mode']: - xs.append(epoch) - plt.xlabel('epoch') - plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o') - else: - xs = [] - ys = [] - num_iters_per_epoch = log_dict[epochs[0]]['iter'][-2] - for epoch in epochs: - iters = log_dict[epoch]['iter'] - if log_dict[epoch]['mode'][-1] == 'val': - iters = iters[:-1] - xs.append( - np.array(iters) + (epoch - 1) * num_iters_per_epoch) - ys.append(np.array(log_dict[epoch][metric][:len(iters)])) - xs = np.concatenate(xs) - ys = np.concatenate(ys) - plt.xlabel('iter') - plt.plot( - xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) - plt.legend() - if args.title is not None: - plt.title(args.title) - if args.out is None: - plt.show() - else: - print(f'save curve to: {args.out}') - plt.savefig(args.out) - plt.cla() - - -def add_plot_parser(subparsers): - parser_plt = subparsers.add_parser( - 'plot_curve', help='parser for plotting curves') - parser_plt.add_argument( - 'json_logs', - type=str, - nargs='+', - help='path of train log in json format') - parser_plt.add_argument( - '--keys', - type=str, - nargs='+', - default=['bbox_mAP'], - help='the metric that you want to plot') - parser_plt.add_argument( - '--start-epoch', - type=str, - default='1', - help='the epoch that you want to start') - parser_plt.add_argument( - '--eval-interval', - type=str, - default='1', - help='the eval interval when training') - parser_plt.add_argument('--title', type=str, help='title of figure') - parser_plt.add_argument( - '--legend', - type=str, - nargs='+', - default=None, - help='legend of each plot') - parser_plt.add_argument( - '--backend', type=str, default=None, help='backend of plt') - parser_plt.add_argument( - '--style', type=str, default='dark', help='style of plt') - parser_plt.add_argument('--out', type=str, default=None) - - -def add_time_parser(subparsers): - parser_time = subparsers.add_parser( - 'cal_train_time', - help='parser for computing the average time per training iteration') - parser_time.add_argument( - 'json_logs', - type=str, - nargs='+', - help='path of train log in json format') - parser_time.add_argument( - '--include-outliers', - action='store_true', - help='include the first value of every epoch when computing ' - 'the average time') - - -def parse_args(): - parser = argparse.ArgumentParser(description='Analyze Json Log') - # currently only support plot curve and calculate average train time - subparsers = parser.add_subparsers(dest='task', help='task parser') - add_plot_parser(subparsers) - add_time_parser(subparsers) - args = parser.parse_args() - return args - - -def load_json_logs(json_logs): - # load and convert json_logs to log_dict, key is epoch, value is a sub dict - # keys of sub dict is different metrics, e.g. 
memory, bbox_mAP - # value of sub dict is a list of corresponding values of all iterations - log_dicts = [dict() for _ in json_logs] - for json_log, log_dict in zip(json_logs, log_dicts): - with open(json_log, 'r') as log_file: - for i, line in enumerate(log_file): - log = json.loads(line.strip()) - # skip the first training info line - if i == 0: - continue - # skip lines without `epoch` field - if 'epoch' not in log: - continue - epoch = log.pop('epoch') - if epoch not in log_dict: - log_dict[epoch] = defaultdict(list) - for k, v in log.items(): - log_dict[epoch][k].append(v) - return log_dicts - - -def main(): - args = parse_args() - - json_logs = args.json_logs - for json_log in json_logs: - assert json_log.endswith('.json') - - log_dicts = load_json_logs(json_logs) - - eval(args.task)(log_dicts, args) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/analysis_tools/analyze_results.py b/cv/detection/co-detr/pytorch/tools/analysis_tools/analyze_results.py deleted file mode 100644 index 4d8b60c96da3fee1d62db0617b1baab686fa5313..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/analysis_tools/analyze_results.py +++ /dev/null @@ -1,369 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import os.path as osp -from multiprocessing import Pool - -import mmcv -import numpy as np -from mmcv import Config, DictAction - -from mmdet.core.evaluation import eval_map -from mmdet.core.visualization import imshow_gt_det_bboxes -from mmdet.datasets import build_dataset, get_loading_pipeline -from mmdet.datasets.api_wrappers import pq_compute_single_core -from mmdet.utils import replace_cfg_vals, update_data_root - - -def bbox_map_eval(det_result, annotation, nproc=4): - """Evaluate mAP of single image det result. - - Args: - det_result (list[list]): [[cls1_det, cls2_det, ...], ...]. - The outer list indicates images, and the inner list indicates - per-class detected bboxes. - annotation (dict): Ground truth annotations where keys of - annotations are: - - - bboxes: numpy array of shape (n, 4) - - labels: numpy array of shape (n, ) - - bboxes_ignore (optional): numpy array of shape (k, 4) - - labels_ignore (optional): numpy array of shape (k, ) - - nproc (int): Processes used for computing mAP. - Default: 4. - - Returns: - float: mAP - """ - - # use only bbox det result - if isinstance(det_result, tuple): - bbox_det_result = [det_result[0]] - else: - bbox_det_result = [det_result] - # mAP - iou_thrs = np.linspace( - .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) - - processes = [] - workers = Pool(processes=nproc) - for thr in iou_thrs: - p = workers.apply_async(eval_map, (bbox_det_result, [annotation]), { - 'iou_thr': thr, - 'logger': 'silent', - 'nproc': 1 - }) - processes.append(p) - - workers.close() - workers.join() - - mean_aps = [] - for p in processes: - mean_aps.append(p.get()[0]) - - return sum(mean_aps) / len(mean_aps) - - -class ResultVisualizer: - """Display and save evaluation results. - - Args: - show (bool): Whether to show the image. Default: True. - wait_time (float): Value of waitKey param. Default: 0. - score_thr (float): Minimum score of bboxes to be shown. - Default: 0. - overlay_gt_pred (bool): Whether to plot gts and predictions on the - same image. If False, predictions and gts will be plotted on two - same image which will be concatenated in vertical direction. - The image above is drawn with gt, and the image below is drawn - with the prediction result. Default: False. 
-    """
-
-    def __init__(self,
-                 show=False,
-                 wait_time=0,
-                 score_thr=0,
-                 overlay_gt_pred=False):
-        self.show = show
-        self.wait_time = wait_time
-        self.score_thr = score_thr
-        self.overlay_gt_pred = overlay_gt_pred
-
-    def _save_image_gts_results(self,
-                                dataset,
-                                results,
-                                performances,
-                                out_dir=None):
-        """Display or save image with ground truths and predictions from a
-        model.
-
-        Args:
-            dataset (Dataset): A PyTorch dataset.
-            results (list): Object detection or panoptic segmentation
-                results from test results pkl file.
-            performances (dict): A dict that contains samples' indices
-                in the dataset and the model's performance on them.
-            out_dir (str, optional): The directory to save the images.
-                Default: None.
-        """
-        mmcv.mkdir_or_exist(out_dir)
-
-        for performance_info in performances:
-            index, performance = performance_info
-            data_info = dataset.prepare_train_img(index)
-
-            # calc save file path
-            filename = data_info['filename']
-            if data_info['img_prefix'] is not None:
-                filename = osp.join(data_info['img_prefix'], filename)
-            else:
-                filename = data_info['filename']
-            fname, name = osp.splitext(osp.basename(filename))
-            save_filename = fname + '_' + str(round(performance, 3)) + name
-            out_file = osp.join(out_dir, save_filename)
-            imshow_gt_det_bboxes(
-                data_info['img'],
-                data_info,
-                results[index],
-                dataset.CLASSES,
-                gt_bbox_color=dataset.PALETTE,
-                gt_text_color=(200, 200, 200),
-                gt_mask_color=dataset.PALETTE,
-                det_bbox_color=dataset.PALETTE,
-                det_text_color=(200, 200, 200),
-                det_mask_color=dataset.PALETTE,
-                show=self.show,
-                score_thr=self.score_thr,
-                wait_time=self.wait_time,
-                out_file=out_file,
-                overlay_gt_pred=self.overlay_gt_pred)
-
-    def evaluate_and_show(self,
-                          dataset,
-                          results,
-                          topk=20,
-                          show_dir='work_dir'):
-        """Evaluate and show results.
-
-        Args:
-            dataset (Dataset): A PyTorch dataset.
-            results (list): Object detection or panoptic segmentation
-                results from test results pkl file.
-            topk (int): Number of the highest topk and
-                lowest topk after evaluation index sorting. Default: 20.
-            show_dir (str, optional): The directory to save the images.
-                Default: 'work_dir'.
-        """
-
-        assert topk > 0
-        if (topk * 2) > len(dataset):
-            topk = len(dataset) // 2
-
-        if isinstance(results[0], dict):
-            good_samples, bad_samples = self.panoptic_evaluate(
-                dataset, results, topk=topk)
-        elif isinstance(results[0], list):
-            good_samples, bad_samples = self.detection_evaluate(
-                dataset, results, topk=topk)
-        elif isinstance(results[0], tuple):
-            results_ = [result[0] for result in results]
-            good_samples, bad_samples = self.detection_evaluate(
-                dataset, results_, topk=topk)
-        else:
-            raise TypeError('The format of result is not supported yet. '
-                            'Current dict for panoptic segmentation and list '
-                            'or tuple for object detection are supported.')
-
-        good_dir = osp.abspath(osp.join(show_dir, 'good'))
-        bad_dir = osp.abspath(osp.join(show_dir, 'bad'))
-        self._save_image_gts_results(dataset, results, good_samples, good_dir)
-        self._save_image_gts_results(dataset, results, bad_samples, bad_dir)
-
-    def detection_evaluate(self, dataset, results, topk=20, eval_fn=None):
-        """Evaluation for object detection.
-
-        Args:
-            dataset (Dataset): A PyTorch dataset.
-            results (list): Object detection results from test
-                results pkl file.
-            topk (int): Number of the highest topk and
-                lowest topk after evaluation index sorting. Default: 20.
-            eval_fn (callable, optional): Eval function. Default: None.
- - Returns: - tuple: A tuple contains good samples and bad samples. - good_mAPs (dict[int, float]): A dict contains good - samples's indices in dataset and model's - performance on them. - bad_mAPs (dict[int, float]): A dict contains bad - samples's indices in dataset and model's - performance on them. - """ - if eval_fn is None: - eval_fn = bbox_map_eval - else: - assert callable(eval_fn) - - prog_bar = mmcv.ProgressBar(len(results)) - _mAPs = {} - for i, (result, ) in enumerate(zip(results)): - # self.dataset[i] should not call directly - # because there is a risk of mismatch - data_info = dataset.prepare_train_img(i) - mAP = eval_fn(result, data_info['ann_info']) - _mAPs[i] = mAP - prog_bar.update() - # descending select topk image - _mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1])) - good_mAPs = _mAPs[-topk:] - bad_mAPs = _mAPs[:topk] - - return good_mAPs, bad_mAPs - - def panoptic_evaluate(self, dataset, results, topk=20): - """Evaluation for panoptic segmentation. - - Args: - dataset (Dataset): A PyTorch dataset. - results (list): Panoptic segmentation results from test - results pkl file. - topk (int): Number of the highest topk and - lowest topk after evaluation index sorting. Default: 20. - - Returns: - tuple: A tuple contains good samples and bad samples. - good_pqs (dict[int, float]): A dict contains good - samples's indices in dataset and model's - performance on them. - bad_pqs (dict[int, float]): A dict contains bad - samples's indices in dataset and model's - performance on them. - """ - # image to annotations - gt_json = dataset.coco.img_ann_map - - result_files, tmp_dir = dataset.format_results(results) - pred_json = mmcv.load(result_files['panoptic'])['annotations'] - pred_folder = osp.join(tmp_dir.name, 'panoptic') - gt_folder = dataset.seg_prefix - - pqs = {} - prog_bar = mmcv.ProgressBar(len(results)) - for i in range(len(results)): - data_info = dataset.prepare_train_img(i) - image_id = data_info['img_info']['id'] - gt_ann = { - 'image_id': image_id, - 'segments_info': gt_json[image_id], - 'file_name': data_info['img_info']['segm_file'] - } - pred_ann = pred_json[i] - pq_stat = pq_compute_single_core( - i, [(gt_ann, pred_ann)], - gt_folder, - pred_folder, - dataset.categories, - dataset.file_client, - print_log=False) - pq_results, classwise_results = pq_stat.pq_average( - dataset.categories, isthing=None) - pqs[i] = pq_results['pq'] - prog_bar.update() - - if tmp_dir is not None: - tmp_dir.cleanup() - - # descending select topk image - pqs = list(sorted(pqs.items(), key=lambda kv: kv[1])) - good_pqs = pqs[-topk:] - bad_pqs = pqs[:topk] - - return good_pqs, bad_pqs - - -def parse_args(): - parser = argparse.ArgumentParser( - description='MMDet eval image prediction result for each') - parser.add_argument('config', help='test config file path') - parser.add_argument( - 'prediction_path', help='prediction path where test pkl result') - parser.add_argument( - 'show_dir', help='directory where painted images will be saved') - parser.add_argument('--show', action='store_true', help='show results') - parser.add_argument( - '--wait-time', - type=float, - default=0, - help='the interval of show (s), 0 is block') - parser.add_argument( - '--topk', - default=20, - type=int, - help='saved Number of the highest topk ' - 'and lowest topk after index sorting') - parser.add_argument( - '--show-score-thr', - type=float, - default=0, - help='score threshold (default: 0.)') - parser.add_argument( - '--overlay-gt-pred', - action='store_true', - help='whether to plot gts and 
predictions on the same image.' - 'If False, predictions and gts will be plotted on two same' - 'image which will be concatenated in vertical direction.' - 'The image above is drawn with gt, and the image below is' - 'drawn with the prediction result.') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - - mmcv.check_file_exist(args.prediction_path) - - cfg = Config.fromfile(args.config) - - # replace the ${key} with the value of cfg.key - cfg = replace_cfg_vals(cfg) - - # update data root according to MMDET_DATASETS - update_data_root(cfg) - - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - cfg.data.test.test_mode = True - - cfg.data.test.pop('samples_per_gpu', 0) - if cfg.data.train.type in ('MultiImageMixDataset', 'ClassBalancedDataset', - 'RepeatDataset', 'ConcatDataset'): - cfg.data.test.pipeline = get_loading_pipeline( - cfg.data.train.dataset.pipeline) - else: - cfg.data.test.pipeline = get_loading_pipeline(cfg.data.train.pipeline) - - dataset = build_dataset(cfg.data.test) - outputs = mmcv.load(args.prediction_path) - - result_visualizer = ResultVisualizer(args.show, args.wait_time, - args.show_score_thr, - args.overlay_gt_pred) - result_visualizer.evaluate_and_show( - dataset, outputs, topk=args.topk, show_dir=args.show_dir) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/analysis_tools/benchmark.py b/cv/detection/co-detr/pytorch/tools/analysis_tools/benchmark.py deleted file mode 100644 index c956968beed9d0239a64b11d86d6dbebf99dd948..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/analysis_tools/benchmark.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import argparse -import copy -import os -import time - -import torch -from mmcv import Config, DictAction -from mmcv.cnn import fuse_conv_bn -from mmcv.parallel import MMDistributedDataParallel -from mmcv.runner import init_dist, load_checkpoint, wrap_fp16_model - -from mmdet.datasets import (build_dataloader, build_dataset, - replace_ImageToTensor) -from mmdet.models import build_detector -from mmdet.utils import replace_cfg_vals, update_data_root - - -def parse_args(): - parser = argparse.ArgumentParser(description='MMDet benchmark a model') - parser.add_argument('config', help='test config file path') - parser.add_argument('checkpoint', help='checkpoint file') - parser.add_argument( - '--repeat-num', - type=int, - default=1, - help='number of repeat times of measurement for averaging the results') - parser.add_argument( - '--max-iter', type=int, default=2000, help='num of max iter') - parser.add_argument( - '--log-interval', type=int, default=50, help='interval of logging') - parser.add_argument( - '--fuse-conv-bn', - action='store_true', - help='Whether to fuse conv and bn, this will slightly increase' - 'the inference speed') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - parser.add_argument( - '--launcher', - choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', - help='job launcher') - parser.add_argument('--local_rank', type=int, default=0) - args = parser.parse_args() - if 'LOCAL_RANK' not in os.environ: - os.environ['LOCAL_RANK'] = str(args.local_rank) - return args - - -def measure_inference_speed(cfg, checkpoint, max_iter, log_interval, - is_fuse_conv_bn): - # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): - torch.backends.cudnn.benchmark = True - cfg.model.pretrained = None - cfg.data.test.test_mode = True - - # build the dataloader - samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1) - if samples_per_gpu > 1: - # Replace 'ImageToTensor' to 'DefaultFormatBundle' - cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) - dataset = build_dataset(cfg.data.test) - data_loader = build_dataloader( - dataset, - samples_per_gpu=1, - # Because multiple processes will occupy additional CPU resources, - # FPS statistics will be more unstable when workers_per_gpu is not 0. - # It is reasonable to set workers_per_gpu to 0. 
- workers_per_gpu=0, - dist=True, - shuffle=False) - - # build the model and load checkpoint - cfg.model.train_cfg = None - model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) - fp16_cfg = cfg.get('fp16', None) - if fp16_cfg is not None: - wrap_fp16_model(model) - load_checkpoint(model, checkpoint, map_location='cpu') - if is_fuse_conv_bn: - model = fuse_conv_bn(model) - - model = MMDistributedDataParallel( - model.cuda(), - device_ids=[torch.cuda.current_device()], - broadcast_buffers=False) - model.eval() - - # the first several iterations may be very slow so skip them - num_warmup = 5 - pure_inf_time = 0 - fps = 0 - - # benchmark with 2000 image and take the average - for i, data in enumerate(data_loader): - - torch.cuda.synchronize() - start_time = time.perf_counter() - - with torch.no_grad(): - model(return_loss=False, rescale=True, **data) - - torch.cuda.synchronize() - elapsed = time.perf_counter() - start_time - - if i >= num_warmup: - pure_inf_time += elapsed - if (i + 1) % log_interval == 0: - fps = (i + 1 - num_warmup) / pure_inf_time - print( - f'Done image [{i + 1:<3}/ {max_iter}], ' - f'fps: {fps:.1f} img / s, ' - f'times per image: {1000 / fps:.1f} ms / img', - flush=True) - - if (i + 1) == max_iter: - fps = (i + 1 - num_warmup) / pure_inf_time - print( - f'Overall fps: {fps:.1f} img / s, ' - f'times per image: {1000 / fps:.1f} ms / img', - flush=True) - break - return fps - - -def repeat_measure_inference_speed(cfg, - checkpoint, - max_iter, - log_interval, - is_fuse_conv_bn, - repeat_num=1): - assert repeat_num >= 1 - - fps_list = [] - - for _ in range(repeat_num): - # - cp_cfg = copy.deepcopy(cfg) - - fps_list.append( - measure_inference_speed(cp_cfg, checkpoint, max_iter, log_interval, - is_fuse_conv_bn)) - - if repeat_num > 1: - fps_list_ = [round(fps, 1) for fps in fps_list] - times_pre_image_list_ = [round(1000 / fps, 1) for fps in fps_list] - mean_fps_ = sum(fps_list_) / len(fps_list_) - mean_times_pre_image_ = sum(times_pre_image_list_) / len( - times_pre_image_list_) - print( - f'Overall fps: {fps_list_}[{mean_fps_:.1f}] img / s, ' - f'times per image: ' - f'{times_pre_image_list_}[{mean_times_pre_image_:.1f}] ms / img', - flush=True) - return fps_list - - return fps_list[0] - - -def main(): - args = parse_args() - - cfg = Config.fromfile(args.config) - - # replace the ${key} with the value of cfg.key - cfg = replace_cfg_vals(cfg) - - # update data root according to MMDET_DATASETS - update_data_root(cfg) - - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - - if args.launcher == 'none': - raise NotImplementedError('Only supports distributed mode') - else: - init_dist(args.launcher, **cfg.dist_params) - - repeat_measure_inference_speed(cfg, args.checkpoint, args.max_iter, - args.log_interval, args.fuse_conv_bn, - args.repeat_num) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/analysis_tools/coco_error_analysis.py b/cv/detection/co-detr/pytorch/tools/analysis_tools/coco_error_analysis.py deleted file mode 100644 index 102ea4ebb294114199ad2574cde2a374ec4374c9..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/analysis_tools/coco_error_analysis.py +++ /dev/null @@ -1,339 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import copy -import os -from argparse import ArgumentParser -from multiprocessing import Pool - -import matplotlib.pyplot as plt -import numpy as np -from pycocotools.coco import COCO -from pycocotools.cocoeval import COCOeval - - -def makeplot(rs, ps, outDir, class_name, iou_type): - cs = np.vstack([ - np.ones((2, 3)), - np.array([0.31, 0.51, 0.74]), - np.array([0.75, 0.31, 0.30]), - np.array([0.36, 0.90, 0.38]), - np.array([0.50, 0.39, 0.64]), - np.array([1, 0.6, 0]), - ]) - areaNames = ['allarea', 'small', 'medium', 'large'] - types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN'] - for i in range(len(areaNames)): - area_ps = ps[..., i, 0] - figure_title = iou_type + '-' + class_name + '-' + areaNames[i] - aps = [ps_.mean() for ps_ in area_ps] - ps_curve = [ - ps_.mean(axis=1) if ps_.ndim > 1 else ps_ for ps_ in area_ps - ] - ps_curve.insert(0, np.zeros(ps_curve[0].shape)) - fig = plt.figure() - ax = plt.subplot(111) - for k in range(len(types)): - ax.plot(rs, ps_curve[k + 1], color=[0, 0, 0], linewidth=0.5) - ax.fill_between( - rs, - ps_curve[k], - ps_curve[k + 1], - color=cs[k], - label=str(f'[{aps[k]:.3f}]' + types[k]), - ) - plt.xlabel('recall') - plt.ylabel('precision') - plt.xlim(0, 1.0) - plt.ylim(0, 1.0) - plt.title(figure_title) - plt.legend() - # plt.show() - fig.savefig(outDir + f'/{figure_title}.png') - plt.close(fig) - - -def autolabel(ax, rects): - """Attach a text label above each bar in *rects*, displaying its height.""" - for rect in rects: - height = rect.get_height() - if height > 0 and height <= 1: # for percent values - text_label = '{:2.0f}'.format(height * 100) - else: - text_label = '{:2.0f}'.format(height) - ax.annotate( - text_label, - xy=(rect.get_x() + rect.get_width() / 2, height), - xytext=(0, 3), # 3 points vertical offset - textcoords='offset points', - ha='center', - va='bottom', - fontsize='x-small', - ) - - -def makebarplot(rs, ps, outDir, class_name, iou_type): - areaNames = ['allarea', 'small', 'medium', 'large'] - types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN'] - fig, ax = plt.subplots() - x = np.arange(len(areaNames)) # the areaNames locations - width = 0.60 # the width of the bars - rects_list = [] - figure_title = iou_type + '-' + class_name + '-' + 'ap bar plot' - for i in range(len(types) - 1): - type_ps = ps[i, ..., 0] - aps = [ps_.mean() for ps_ in type_ps.T] - rects_list.append( - ax.bar( - x - width / 2 + (i + 1) * width / len(types), - aps, - width / len(types), - label=types[i], - )) - - # Add some text for labels, title and custom x-axis tick labels, etc. 
- ax.set_ylabel('Mean Average Precision (mAP)') - ax.set_title(figure_title) - ax.set_xticks(x) - ax.set_xticklabels(areaNames) - ax.legend() - - # Add score texts over bars - for rects in rects_list: - autolabel(ax, rects) - - # Save plot - fig.savefig(outDir + f'/{figure_title}.png') - plt.close(fig) - - -def get_gt_area_group_numbers(cocoEval): - areaRng = cocoEval.params.areaRng - areaRngStr = [str(aRng) for aRng in areaRng] - areaRngLbl = cocoEval.params.areaRngLbl - areaRngStr2areaRngLbl = dict(zip(areaRngStr, areaRngLbl)) - areaRngLbl2Number = dict.fromkeys(areaRngLbl, 0) - for evalImg in cocoEval.evalImgs: - if evalImg: - for gtIgnore in evalImg['gtIgnore']: - if not gtIgnore: - aRngLbl = areaRngStr2areaRngLbl[str(evalImg['aRng'])] - areaRngLbl2Number[aRngLbl] += 1 - return areaRngLbl2Number - - -def make_gt_area_group_numbers_plot(cocoEval, outDir, verbose=True): - areaRngLbl2Number = get_gt_area_group_numbers(cocoEval) - areaRngLbl = areaRngLbl2Number.keys() - if verbose: - print('number of annotations per area group:', areaRngLbl2Number) - - # Init figure - fig, ax = plt.subplots() - x = np.arange(len(areaRngLbl)) # the areaNames locations - width = 0.60 # the width of the bars - figure_title = 'number of annotations per area group' - - rects = ax.bar(x, areaRngLbl2Number.values(), width) - - # Add some text for labels, title and custom x-axis tick labels, etc. - ax.set_ylabel('Number of annotations') - ax.set_title(figure_title) - ax.set_xticks(x) - ax.set_xticklabels(areaRngLbl) - - # Add score texts over bars - autolabel(ax, rects) - - # Save plot - fig.tight_layout() - fig.savefig(outDir + f'/{figure_title}.png') - plt.close(fig) - - -def make_gt_area_histogram_plot(cocoEval, outDir): - n_bins = 100 - areas = [ann['area'] for ann in cocoEval.cocoGt.anns.values()] - - # init figure - figure_title = 'gt annotation areas histogram plot' - fig, ax = plt.subplots() - - # Set the number of bins - ax.hist(np.sqrt(areas), bins=n_bins) - - # Add some text for labels, title and custom x-axis tick labels, etc. 
- ax.set_xlabel('Squareroot Area') - ax.set_ylabel('Number of annotations') - ax.set_title(figure_title) - - # Save plot - fig.tight_layout() - fig.savefig(outDir + f'/{figure_title}.png') - plt.close(fig) - - -def analyze_individual_category(k, - cocoDt, - cocoGt, - catId, - iou_type, - areas=None): - nm = cocoGt.loadCats(catId)[0] - print(f'--------------analyzing {k + 1}-{nm["name"]}---------------') - ps_ = {} - dt = copy.deepcopy(cocoDt) - nm = cocoGt.loadCats(catId)[0] - imgIds = cocoGt.getImgIds() - dt_anns = dt.dataset['annotations'] - select_dt_anns = [] - for ann in dt_anns: - if ann['category_id'] == catId: - select_dt_anns.append(ann) - dt.dataset['annotations'] = select_dt_anns - dt.createIndex() - # compute precision but ignore superclass confusion - gt = copy.deepcopy(cocoGt) - child_catIds = gt.getCatIds(supNms=[nm['supercategory']]) - for idx, ann in enumerate(gt.dataset['annotations']): - if ann['category_id'] in child_catIds and ann['category_id'] != catId: - gt.dataset['annotations'][idx]['ignore'] = 1 - gt.dataset['annotations'][idx]['iscrowd'] = 1 - gt.dataset['annotations'][idx]['category_id'] = catId - cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type) - cocoEval.params.imgIds = imgIds - cocoEval.params.maxDets = [100] - cocoEval.params.iouThrs = [0.1] - cocoEval.params.useCats = 1 - if areas: - cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]], - [areas[0], areas[1]], [areas[1], areas[2]]] - cocoEval.evaluate() - cocoEval.accumulate() - ps_supercategory = cocoEval.eval['precision'][0, :, k, :, :] - ps_['ps_supercategory'] = ps_supercategory - # compute precision but ignore any class confusion - gt = copy.deepcopy(cocoGt) - for idx, ann in enumerate(gt.dataset['annotations']): - if ann['category_id'] != catId: - gt.dataset['annotations'][idx]['ignore'] = 1 - gt.dataset['annotations'][idx]['iscrowd'] = 1 - gt.dataset['annotations'][idx]['category_id'] = catId - cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type) - cocoEval.params.imgIds = imgIds - cocoEval.params.maxDets = [100] - cocoEval.params.iouThrs = [0.1] - cocoEval.params.useCats = 1 - if areas: - cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]], - [areas[0], areas[1]], [areas[1], areas[2]]] - cocoEval.evaluate() - cocoEval.accumulate() - ps_allcategory = cocoEval.eval['precision'][0, :, k, :, :] - ps_['ps_allcategory'] = ps_allcategory - return k, ps_ - - -def analyze_results(res_file, - ann_file, - res_types, - out_dir, - extraplots=None, - areas=None): - for res_type in res_types: - assert res_type in ['bbox', 'segm'] - if areas: - assert len(areas) == 3, '3 integers should be specified as areas, \ - representing 3 area regions' - - directory = os.path.dirname(out_dir + '/') - if not os.path.exists(directory): - print(f'-------------create {out_dir}-----------------') - os.makedirs(directory) - - cocoGt = COCO(ann_file) - cocoDt = cocoGt.loadRes(res_file) - imgIds = cocoGt.getImgIds() - for res_type in res_types: - res_out_dir = out_dir + '/' + res_type + '/' - res_directory = os.path.dirname(res_out_dir) - if not os.path.exists(res_directory): - print(f'-------------create {res_out_dir}-----------------') - os.makedirs(res_directory) - iou_type = res_type - cocoEval = COCOeval( - copy.deepcopy(cocoGt), copy.deepcopy(cocoDt), iou_type) - cocoEval.params.imgIds = imgIds - cocoEval.params.iouThrs = [0.75, 0.5, 0.1] - cocoEval.params.maxDets = [100] - if areas: - cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]], - [areas[0], areas[1]], - [areas[1], areas[2]]] - 
cocoEval.evaluate() - cocoEval.accumulate() - ps = cocoEval.eval['precision'] - ps = np.vstack([ps, np.zeros((4, *ps.shape[1:]))]) - catIds = cocoGt.getCatIds() - recThrs = cocoEval.params.recThrs - with Pool(processes=48) as pool: - args = [(k, cocoDt, cocoGt, catId, iou_type, areas) - for k, catId in enumerate(catIds)] - analyze_results = pool.starmap(analyze_individual_category, args) - for k, catId in enumerate(catIds): - nm = cocoGt.loadCats(catId)[0] - print(f'--------------saving {k + 1}-{nm["name"]}---------------') - analyze_result = analyze_results[k] - assert k == analyze_result[0] - ps_supercategory = analyze_result[1]['ps_supercategory'] - ps_allcategory = analyze_result[1]['ps_allcategory'] - # compute precision but ignore superclass confusion - ps[3, :, k, :, :] = ps_supercategory - # compute precision but ignore any class confusion - ps[4, :, k, :, :] = ps_allcategory - # fill in background and false negative errors and plot - ps[ps == -1] = 0 - ps[5, :, k, :, :] = ps[4, :, k, :, :] > 0 - ps[6, :, k, :, :] = 1.0 - makeplot(recThrs, ps[:, :, k], res_out_dir, nm['name'], iou_type) - if extraplots: - makebarplot(recThrs, ps[:, :, k], res_out_dir, nm['name'], - iou_type) - makeplot(recThrs, ps, res_out_dir, 'allclass', iou_type) - if extraplots: - makebarplot(recThrs, ps, res_out_dir, 'allclass', iou_type) - make_gt_area_group_numbers_plot( - cocoEval=cocoEval, outDir=res_out_dir, verbose=True) - make_gt_area_histogram_plot(cocoEval=cocoEval, outDir=res_out_dir) - - -def main(): - parser = ArgumentParser(description='COCO Error Analysis Tool') - parser.add_argument('result', help='result file (json format) path') - parser.add_argument('out_dir', help='dir to save analyze result images') - parser.add_argument( - '--ann', - default='data/coco/annotations/instances_val2017.json', - help='annotation file path') - parser.add_argument( - '--types', type=str, nargs='+', default=['bbox'], help='result types') - parser.add_argument( - '--extraplots', - action='store_true', - help='export extra bar/stat plots') - parser.add_argument( - '--areas', - type=int, - nargs='+', - default=[1024, 9216, 10000000000], - help='area regions') - args = parser.parse_args() - analyze_results( - args.result, - args.ann, - args.types, - out_dir=args.out_dir, - extraplots=args.extraplots, - areas=args.areas) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/analysis_tools/confusion_matrix.py b/cv/detection/co-detr/pytorch/tools/analysis_tools/confusion_matrix.py deleted file mode 100644 index 5b52ea4c0ff4321ad2e26f1baa3c66a027c9615f..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/analysis_tools/confusion_matrix.py +++ /dev/null @@ -1,273 +0,0 @@ -import argparse -import os - -import matplotlib.pyplot as plt -import mmcv -import numpy as np -from matplotlib.ticker import MultipleLocator -from mmcv import Config, DictAction -from mmcv.ops import nms - -from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps -from mmdet.datasets import build_dataset -from mmdet.utils import replace_cfg_vals, update_data_root - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Generate confusion matrix from detection results') - parser.add_argument('config', help='test config file path') - parser.add_argument( - 'prediction_path', help='prediction path where test .pkl result') - parser.add_argument( - 'save_dir', help='directory where confusion matrix will be saved') - parser.add_argument( - '--show', action='store_true', 
help='show confusion matrix') - parser.add_argument( - '--color-theme', - default='plasma', - help='theme of the matrix color map') - parser.add_argument( - '--score-thr', - type=float, - default=0.3, - help='score threshold to filter detection bboxes') - parser.add_argument( - '--tp-iou-thr', - type=float, - default=0.5, - help='IoU threshold to be considered as matched') - parser.add_argument( - '--nms-iou-thr', - type=float, - default=None, - help='nms IoU threshold, only applied when users want to change the' - 'nms IoU threshold.') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - args = parser.parse_args() - return args - - -def calculate_confusion_matrix(dataset, - results, - score_thr=0, - nms_iou_thr=None, - tp_iou_thr=0.5): - """Calculate the confusion matrix. - - Args: - dataset (Dataset): Test or val dataset. - results (list[ndarray]): A list of detection results in each image. - score_thr (float|optional): Score threshold to filter bboxes. - Default: 0. - nms_iou_thr (float|optional): nms IoU threshold, the detection results - have done nms in the detector, only applied when users want to - change the nms IoU threshold. Default: None. - tp_iou_thr (float|optional): IoU threshold to be considered as matched. - Default: 0.5. - """ - num_classes = len(dataset.CLASSES) - confusion_matrix = np.zeros(shape=[num_classes + 1, num_classes + 1]) - assert len(dataset) == len(results) - prog_bar = mmcv.ProgressBar(len(results)) - for idx, per_img_res in enumerate(results): - if isinstance(per_img_res, tuple): - res_bboxes, _ = per_img_res - else: - res_bboxes = per_img_res - ann = dataset.get_ann_info(idx) - gt_bboxes = ann['bboxes'] - labels = ann['labels'] - analyze_per_img_dets(confusion_matrix, gt_bboxes, labels, res_bboxes, - score_thr, tp_iou_thr, nms_iou_thr) - prog_bar.update() - return confusion_matrix - - -def analyze_per_img_dets(confusion_matrix, - gt_bboxes, - gt_labels, - result, - score_thr=0, - tp_iou_thr=0.5, - nms_iou_thr=None): - """Analyze detection results on each image. - - Args: - confusion_matrix (ndarray): The confusion matrix, - has shape (num_classes + 1, num_classes + 1). - gt_bboxes (ndarray): Ground truth bboxes, has shape (num_gt, 4). - gt_labels (ndarray): Ground truth labels, has shape (num_gt). - result (ndarray): Detection results, has shape - (num_classes, num_bboxes, 5). - score_thr (float): Score threshold to filter bboxes. - Default: 0. - tp_iou_thr (float): IoU threshold to be considered as matched. - Default: 0.5. - nms_iou_thr (float|optional): nms IoU threshold, the detection results - have done nms in the detector, only applied when users want to - change the nms IoU threshold. Default: None. 
- """ - true_positives = np.zeros_like(gt_labels) - for det_label, det_bboxes in enumerate(result): - if nms_iou_thr: - det_bboxes, _ = nms( - det_bboxes[:, :4], - det_bboxes[:, -1], - nms_iou_thr, - score_threshold=score_thr) - ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes) - for i, det_bbox in enumerate(det_bboxes): - score = det_bbox[4] - det_match = 0 - if score >= score_thr: - for j, gt_label in enumerate(gt_labels): - if ious[i, j] >= tp_iou_thr: - det_match += 1 - if gt_label == det_label: - true_positives[j] += 1 # TP - confusion_matrix[gt_label, det_label] += 1 - if det_match == 0: # BG FP - confusion_matrix[-1, det_label] += 1 - for num_tp, gt_label in zip(true_positives, gt_labels): - if num_tp == 0: # FN - confusion_matrix[gt_label, -1] += 1 - - -def plot_confusion_matrix(confusion_matrix, - labels, - save_dir=None, - show=True, - title='Normalized Confusion Matrix', - color_theme='plasma'): - """Draw confusion matrix with matplotlib. - - Args: - confusion_matrix (ndarray): The confusion matrix. - labels (list[str]): List of class names. - save_dir (str|optional): If set, save the confusion matrix plot to the - given path. Default: None. - show (bool): Whether to show the plot. Default: True. - title (str): Title of the plot. Default: `Normalized Confusion Matrix`. - color_theme (str): Theme of the matrix color map. Default: `plasma`. - """ - # normalize the confusion matrix - per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis] - confusion_matrix = \ - confusion_matrix.astype(np.float32) / per_label_sums * 100 - - num_classes = len(labels) - fig, ax = plt.subplots( - figsize=(0.5 * num_classes, 0.5 * num_classes * 0.8), dpi=180) - cmap = plt.get_cmap(color_theme) - im = ax.imshow(confusion_matrix, cmap=cmap) - plt.colorbar(mappable=im, ax=ax) - - title_font = {'weight': 'bold', 'size': 12} - ax.set_title(title, fontdict=title_font) - label_font = {'size': 10} - plt.ylabel('Ground Truth Label', fontdict=label_font) - plt.xlabel('Prediction Label', fontdict=label_font) - - # draw locator - xmajor_locator = MultipleLocator(1) - xminor_locator = MultipleLocator(0.5) - ax.xaxis.set_major_locator(xmajor_locator) - ax.xaxis.set_minor_locator(xminor_locator) - ymajor_locator = MultipleLocator(1) - yminor_locator = MultipleLocator(0.5) - ax.yaxis.set_major_locator(ymajor_locator) - ax.yaxis.set_minor_locator(yminor_locator) - - # draw grid - ax.grid(True, which='minor', linestyle='-') - - # draw label - ax.set_xticks(np.arange(num_classes)) - ax.set_yticks(np.arange(num_classes)) - ax.set_xticklabels(labels) - ax.set_yticklabels(labels) - - ax.tick_params( - axis='x', bottom=False, top=True, labelbottom=False, labeltop=True) - plt.setp( - ax.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor') - - # draw confution matrix value - for i in range(num_classes): - for j in range(num_classes): - ax.text( - j, - i, - '{}%'.format( - int(confusion_matrix[ - i, - j]) if not np.isnan(confusion_matrix[i, j]) else -1), - ha='center', - va='center', - color='w', - size=7) - - ax.set_ylim(len(confusion_matrix) - 0.5, -0.5) # matplotlib>3.1.1 - - fig.tight_layout() - if save_dir is not None: - plt.savefig( - os.path.join(save_dir, 'confusion_matrix.png'), format='png') - if show: - plt.show() - - -def main(): - args = parse_args() - - cfg = Config.fromfile(args.config) - - # replace the ${key} with the value of cfg.key - cfg = replace_cfg_vals(cfg) - - # update data root according to MMDET_DATASETS - update_data_root(cfg) - - if args.cfg_options is not None: - 
cfg.merge_from_dict(args.cfg_options) - - results = mmcv.load(args.prediction_path) - assert isinstance(results, list) - if isinstance(results[0], list): - pass - elif isinstance(results[0], tuple): - results = [result[0] for result in results] - else: - raise TypeError('invalid type of prediction results') - - if isinstance(cfg.data.test, dict): - cfg.data.test.test_mode = True - elif isinstance(cfg.data.test, list): - for ds_cfg in cfg.data.test: - ds_cfg.test_mode = True - dataset = build_dataset(cfg.data.test) - - confusion_matrix = calculate_confusion_matrix(dataset, results, - args.score_thr, - args.nms_iou_thr, - args.tp_iou_thr) - plot_confusion_matrix( - confusion_matrix, - dataset.CLASSES + ('background', ), - save_dir=args.save_dir, - show=args.show, - color_theme=args.color_theme) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/analysis_tools/eval_metric.py b/cv/detection/co-detr/pytorch/tools/analysis_tools/eval_metric.py deleted file mode 100644 index 7caafe99df0f08c92e160c9878667d0e6b22b690..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/analysis_tools/eval_metric.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse - -import mmcv -from mmcv import Config, DictAction - -from mmdet.datasets import build_dataset -from mmdet.utils import replace_cfg_vals, update_data_root - - -def parse_args(): - parser = argparse.ArgumentParser(description='Evaluate metric of the ' - 'results saved in pkl format') - parser.add_argument('config', help='Config of the model') - parser.add_argument('pkl_results', help='Results in pickle format') - parser.add_argument( - '--format-only', - action='store_true', - help='Format the output results without perform evaluation. It is' - 'useful when you want to format the result to a specific format and ' - 'submit it to the test server') - parser.add_argument( - '--eval', - type=str, - nargs='+', - help='Evaluation metrics, which depends on the dataset, e.g., "bbox",' - ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - parser.add_argument( - '--eval-options', - nargs='+', - action=DictAction, - help='custom options for evaluation, the key-value pair in xxx=yyy ' - 'format will be kwargs for dataset.evaluate() function') - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - - cfg = Config.fromfile(args.config) - - # replace the ${key} with the value of cfg.key - cfg = replace_cfg_vals(cfg) - - # update data root according to MMDET_DATASETS - update_data_root(cfg) - - assert args.eval or args.format_only, ( - 'Please specify at least one operation (eval/format the results) with ' - 'the argument "--eval", "--format-only"') - if args.eval and args.format_only: - raise ValueError('--eval and --format_only cannot be both specified') - - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - cfg.data.test.test_mode = True - - dataset = build_dataset(cfg.data.test) - outputs = mmcv.load(args.pkl_results) - - kwargs = {} if args.eval_options is None else args.eval_options - if args.format_only: - dataset.format_results(outputs, **kwargs) - if args.eval: - eval_kwargs = cfg.get('evaluation', {}).copy() - # hard-code way to remove EvalHook args - for key in [ - 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', - 'rule' - ]: - eval_kwargs.pop(key, None) - eval_kwargs.update(dict(metric=args.eval, **kwargs)) - print(dataset.evaluate(outputs, **eval_kwargs)) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/analysis_tools/get_flops.py b/cv/detection/co-detr/pytorch/tools/analysis_tools/get_flops.py deleted file mode 100644 index 4df87323b4aa85fd437f3dc13e12c35817b40616..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/analysis_tools/get_flops.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse - -import numpy as np -import torch -from mmcv import Config, DictAction - -from mmdet.models import build_detector - -try: - from mmcv.cnn import get_model_complexity_info -except ImportError: - raise ImportError('Please upgrade mmcv to >0.6.2') - - -def parse_args(): - parser = argparse.ArgumentParser(description='Train a detector') - parser.add_argument('config', help='train config file path') - parser.add_argument( - '--shape', - type=int, - nargs='+', - default=[1280, 800], - help='input image size') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" '
-        'Note that the quotation marks are necessary and that no white space '
-        'is allowed.')
-    parser.add_argument(
-        '--size-divisor',
-        type=int,
-        default=32,
-        help='Pad the input image, the minimum size that is divisible '
-        'by size_divisor, -1 means do not pad the image.')
-    args = parser.parse_args()
-    return args
-
-
-def main():
-
-    args = parse_args()
-
-    if len(args.shape) == 1:
-        h = w = args.shape[0]
-    elif len(args.shape) == 2:
-        h, w = args.shape
-    else:
-        raise ValueError('invalid input shape')
-    ori_shape = (3, h, w)
-    divisor = args.size_divisor
-    if divisor > 0:
-        h = int(np.ceil(h / divisor)) * divisor
-        w = int(np.ceil(w / divisor)) * divisor
-
-    input_shape = (3, h, w)
-
-    cfg = Config.fromfile(args.config)
-    if args.cfg_options is not None:
-        cfg.merge_from_dict(args.cfg_options)
-
-    model = build_detector(
-        cfg.model,
-        train_cfg=cfg.get('train_cfg'),
-        test_cfg=cfg.get('test_cfg'))
-    if torch.cuda.is_available():
-        model.cuda()
-    model.eval()
-
-    if hasattr(model, 'forward_dummy'):
-        model.forward = model.forward_dummy
-    else:
-        raise NotImplementedError(
-            'FLOPs counter is currently not supported with {}'.
-            format(model.__class__.__name__))
-
-    flops, params = get_model_complexity_info(model, input_shape)
-    split_line = '=' * 30
-
-    if divisor > 0 and \
-            input_shape != ori_shape:
-        print(f'{split_line}\nUse size divisor to set input shape '
-              f'from {ori_shape} to {input_shape}\n')
-    print(f'{split_line}\nInput shape: {input_shape}\n'
-          f'Flops: {flops}\nParams: {params}\n{split_line}')
-    print('!!!Please be cautious if you use the results in papers. '
-          'You may need to check if all ops are supported and verify that the '
-          'flops computation is correct.')
-
-
-if __name__ == '__main__':
-    main()
diff --git a/cv/detection/co-detr/pytorch/tools/analysis_tools/optimize_anchors.py b/cv/detection/co-detr/pytorch/tools/analysis_tools/optimize_anchors.py
deleted file mode 100644
index 421998f945ddcc44b6c1a8666fa84ce786631a26..0000000000000000000000000000000000000000
--- a/cv/detection/co-detr/pytorch/tools/analysis_tools/optimize_anchors.py
+++ /dev/null
@@ -1,376 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-"""Optimize anchor settings on a specific dataset.
-
-This script provides two methods to optimize YOLO anchors: k-means anchor
-clustering and differential evolution. You can use ``--algorithm k-means``
-and ``--algorithm differential_evolution`` to switch between the two methods.
- -Example: - Use k-means anchor cluster:: - - python tools/analysis_tools/optimize_anchors.py ${CONFIG} \ - --algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \ - --output-dir ${OUTPUT_DIR} - Use differential evolution to optimize anchors:: - - python tools/analysis_tools/optimize_anchors.py ${CONFIG} \ - --algorithm differential_evolution \ - --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \ - --output-dir ${OUTPUT_DIR} -""" -import argparse -import os.path as osp - -import mmcv -import numpy as np -import torch -from mmcv import Config -from scipy.optimize import differential_evolution - -from mmdet.core import bbox_cxcywh_to_xyxy, bbox_overlaps, bbox_xyxy_to_cxcywh -from mmdet.datasets import build_dataset -from mmdet.utils import get_root_logger, replace_cfg_vals, update_data_root - - -def parse_args(): - parser = argparse.ArgumentParser(description='Optimize anchor parameters.') - parser.add_argument('config', help='Train config file path.') - parser.add_argument( - '--device', default='cuda:0', help='Device used for calculating.') - parser.add_argument( - '--input-shape', - type=int, - nargs='+', - default=[608, 608], - help='input image size') - parser.add_argument( - '--algorithm', - default='differential_evolution', - help='Algorithm used for anchor optimizing.' - 'Support k-means and differential_evolution for YOLO.') - parser.add_argument( - '--iters', - default=1000, - type=int, - help='Maximum iterations for optimizer.') - parser.add_argument( - '--output-dir', - default=None, - type=str, - help='Path to save anchor optimize result.') - - args = parser.parse_args() - return args - - -class BaseAnchorOptimizer: - """Base class for anchor optimizer. - - Args: - dataset (obj:`Dataset`): Dataset object. - input_shape (list[int]): Input image shape of the model. - Format in [width, height]. - logger (obj:`logging.Logger`): The logger for logging. - device (str, optional): Device used for calculating. - Default: 'cuda:0' - out_dir (str, optional): Path to save anchor optimize result. - Default: None - """ - - def __init__(self, - dataset, - input_shape, - logger, - device='cuda:0', - out_dir=None): - self.dataset = dataset - self.input_shape = input_shape - self.logger = logger - self.device = device - self.out_dir = out_dir - bbox_whs, img_shapes = self.get_whs_and_shapes() - ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape]) - - # resize to input shape - self.bbox_whs = bbox_whs / ratios - - def get_whs_and_shapes(self): - """Get widths and heights of bboxes and shapes of images. - - Returns: - tuple[np.ndarray]: Array of bbox shapes and array of image - shapes with shape (num_bboxes, 2) in [width, height] format. - """ - self.logger.info('Collecting bboxes from annotation...') - bbox_whs = [] - img_shapes = [] - prog_bar = mmcv.ProgressBar(len(self.dataset)) - for idx in range(len(self.dataset)): - ann = self.dataset.get_ann_info(idx) - data_info = self.dataset.data_infos[idx] - img_shape = np.array([data_info['width'], data_info['height']]) - gt_bboxes = ann['bboxes'] - for bbox in gt_bboxes: - wh = bbox[2:4] - bbox[0:2] - img_shapes.append(img_shape) - bbox_whs.append(wh) - prog_bar.update() - print('\n') - bbox_whs = np.array(bbox_whs) - img_shapes = np.array(img_shapes) - self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.') - return bbox_whs, img_shapes - - def get_zero_center_bbox_tensor(self): - """Get a tensor of bboxes centered at (0, 0). 
- - Returns: - Tensor: Tensor of bboxes with shape (num_bboxes, 4) - in [xmin, ymin, xmax, ymax] format. - """ - whs = torch.from_numpy(self.bbox_whs).to( - self.device, dtype=torch.float32) - bboxes = bbox_cxcywh_to_xyxy( - torch.cat([torch.zeros_like(whs), whs], dim=1)) - return bboxes - - def optimize(self): - raise NotImplementedError - - def save_result(self, anchors, path=None): - anchor_results = [] - for w, h in anchors: - anchor_results.append([round(w), round(h)]) - self.logger.info(f'Anchor optimize result:{anchor_results}') - if path: - json_path = osp.join(path, 'anchor_optimize_result.json') - mmcv.dump(anchor_results, json_path) - self.logger.info(f'Result saved in {json_path}') - - -class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer): - r"""YOLO anchor optimizer using k-means. Code refer to `AlexeyAB/darknet. - `_. - - Args: - num_anchors (int) : Number of anchors. - iters (int): Maximum iterations for k-means. - """ - - def __init__(self, num_anchors, iters, **kwargs): - - super(YOLOKMeansAnchorOptimizer, self).__init__(**kwargs) - self.num_anchors = num_anchors - self.iters = iters - - def optimize(self): - anchors = self.kmeans_anchors() - self.save_result(anchors, self.out_dir) - - def kmeans_anchors(self): - self.logger.info( - f'Start cluster {self.num_anchors} YOLO anchors with K-means...') - bboxes = self.get_zero_center_bbox_tensor() - cluster_center_idx = torch.randint( - 0, bboxes.shape[0], (self.num_anchors, )).to(self.device) - - assignments = torch.zeros((bboxes.shape[0], )).to(self.device) - cluster_centers = bboxes[cluster_center_idx] - if self.num_anchors == 1: - cluster_centers = self.kmeans_maximization(bboxes, assignments, - cluster_centers) - anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy() - anchors = sorted(anchors, key=lambda x: x[0] * x[1]) - return anchors - - prog_bar = mmcv.ProgressBar(self.iters) - for i in range(self.iters): - converged, assignments = self.kmeans_expectation( - bboxes, assignments, cluster_centers) - if converged: - self.logger.info(f'K-means process has converged at iter {i}.') - break - cluster_centers = self.kmeans_maximization(bboxes, assignments, - cluster_centers) - prog_bar.update() - print('\n') - avg_iou = bbox_overlaps(bboxes, - cluster_centers).max(1)[0].mean().item() - - anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy() - anchors = sorted(anchors, key=lambda x: x[0] * x[1]) - self.logger.info(f'Anchor cluster finish. Average IOU: {avg_iou}') - - return anchors - - def kmeans_maximization(self, bboxes, assignments, centers): - """Maximization part of EM algorithm(Expectation-Maximization)""" - new_centers = torch.zeros_like(centers) - for i in range(centers.shape[0]): - mask = (assignments == i) - if mask.sum(): - new_centers[i, :] = bboxes[mask].mean(0) - return new_centers - - def kmeans_expectation(self, bboxes, assignments, centers): - """Expectation part of EM algorithm(Expectation-Maximization)""" - ious = bbox_overlaps(bboxes, centers) - closest = ious.argmax(1) - converged = (closest == assignments).all() - return converged, closest - - -class YOLODEAnchorOptimizer(BaseAnchorOptimizer): - """YOLO anchor optimizer using differential evolution algorithm. - - Args: - num_anchors (int) : Number of anchors. - iters (int): Maximum iterations for k-means. - strategy (str): The differential evolution strategy to use. 
- Should be one of: - - - 'best1bin' - - 'best1exp' - - 'rand1exp' - - 'randtobest1exp' - - 'currenttobest1exp' - - 'best2exp' - - 'rand2exp' - - 'randtobest1bin' - - 'currenttobest1bin' - - 'best2bin' - - 'rand2bin' - - 'rand1bin' - - Default: 'best1bin'. - population_size (int): Total population size of evolution algorithm. - Default: 15. - convergence_thr (float): Tolerance for convergence, the - optimizing stops when ``np.std(pop) <= abs(convergence_thr) - + convergence_thr * np.abs(np.mean(population_energies))``, - respectively. Default: 0.0001. - mutation (tuple[float]): Range of dithering randomly changes the - mutation constant. Default: (0.5, 1). - recombination (float): Recombination constant of crossover probability. - Default: 0.7. - """ - - def __init__(self, - num_anchors, - iters, - strategy='best1bin', - population_size=15, - convergence_thr=0.0001, - mutation=(0.5, 1), - recombination=0.7, - **kwargs): - - super(YOLODEAnchorOptimizer, self).__init__(**kwargs) - - self.num_anchors = num_anchors - self.iters = iters - self.strategy = strategy - self.population_size = population_size - self.convergence_thr = convergence_thr - self.mutation = mutation - self.recombination = recombination - - def optimize(self): - anchors = self.differential_evolution() - self.save_result(anchors, self.out_dir) - - def differential_evolution(self): - bboxes = self.get_zero_center_bbox_tensor() - - bounds = [] - for i in range(self.num_anchors): - bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])]) - - result = differential_evolution( - func=self.avg_iou_cost, - bounds=bounds, - args=(bboxes, ), - strategy=self.strategy, - maxiter=self.iters, - popsize=self.population_size, - tol=self.convergence_thr, - mutation=self.mutation, - recombination=self.recombination, - updating='immediate', - disp=True) - self.logger.info( - f'Anchor evolution finish. Average IOU: {1 - result.fun}') - anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])] - anchors = sorted(anchors, key=lambda x: x[0] * x[1]) - return anchors - - @staticmethod - def avg_iou_cost(anchor_params, bboxes): - assert len(anchor_params) % 2 == 0 - anchor_whs = torch.tensor( - [[w, h] - for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to( - bboxes.device, dtype=bboxes.dtype) - anchor_boxes = bbox_cxcywh_to_xyxy( - torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1)) - ious = bbox_overlaps(bboxes, anchor_boxes) - max_ious, _ = ious.max(1) - cost = 1 - max_ious.mean().item() - return cost - - -def main(): - logger = get_root_logger() - args = parse_args() - cfg = args.config - cfg = Config.fromfile(cfg) - - # replace the ${key} with the value of cfg.key - cfg = replace_cfg_vals(cfg) - - # update data root according to MMDET_DATASETS - update_data_root(cfg) - - input_shape = args.input_shape - assert len(input_shape) == 2 - - anchor_type = cfg.model.bbox_head.anchor_generator.type - assert anchor_type == 'YOLOAnchorGenerator', \ - f'Only support optimize YOLOAnchor, but get {anchor_type}.' 
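# --- Editor's illustrative sketch (not part of the deleted file above): a minimal,
# --- self-contained version of IoU-based k-means anchor clustering on (width, height)
# --- pairs. Function and variable names here are hypothetical; `whs` is assumed to be
# --- an (N, 2) float array of ground-truth box sizes already rescaled to the input shape.
import numpy as np


def iou_wh(whs, centers):
    # IoU between zero-centred boxes described only by width/height.
    inter = np.minimum(whs[:, None, 0], centers[None, :, 0]) * \
        np.minimum(whs[:, None, 1], centers[None, :, 1])
    union = (whs[:, 0] * whs[:, 1])[:, None] + \
        (centers[:, 0] * centers[:, 1])[None, :] - inter
    return inter / union


def kmeans_anchors(whs, num_anchors, iters=1000, seed=0):
    # Cluster box sizes with IoU as the similarity measure (E-M style loop).
    rng = np.random.default_rng(seed)
    centers = whs[rng.choice(len(whs), num_anchors, replace=False)].astype(float)
    assignments = np.full(len(whs), -1)
    for _ in range(iters):
        closest = iou_wh(whs, centers).argmax(1)      # expectation: nearest anchor
        if (closest == assignments).all():
            break                                     # converged
        assignments = closest
        for k in range(num_anchors):                  # maximization: recentre anchors
            if (assignments == k).any():
                centers[k] = whs[assignments == k].mean(0)
    return sorted(centers.tolist(), key=lambda wh: wh[0] * wh[1])

# Example: kmeans_anchors(np.array([[12., 18.], [30., 44.], [90., 120.], [16., 20.]]), 2)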
- - base_sizes = cfg.model.bbox_head.anchor_generator.base_sizes - num_anchors = sum([len(sizes) for sizes in base_sizes]) - - train_data_cfg = cfg.data.train - while 'dataset' in train_data_cfg: - train_data_cfg = train_data_cfg['dataset'] - dataset = build_dataset(train_data_cfg) - - if args.algorithm == 'k-means': - optimizer = YOLOKMeansAnchorOptimizer( - dataset=dataset, - input_shape=input_shape, - device=args.device, - num_anchors=num_anchors, - iters=args.iters, - logger=logger, - out_dir=args.output_dir) - elif args.algorithm == 'differential_evolution': - optimizer = YOLODEAnchorOptimizer( - dataset=dataset, - input_shape=input_shape, - device=args.device, - num_anchors=num_anchors, - iters=args.iters, - logger=logger, - out_dir=args.output_dir) - else: - raise NotImplementedError( - f'Only support k-means and differential_evolution, ' - f'but get {args.algorithm}') - - optimizer.optimize() - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/analysis_tools/robustness_eval.py b/cv/detection/co-detr/pytorch/tools/analysis_tools/robustness_eval.py deleted file mode 100644 index da5ec289243b8740605909338fd2503d30858ceb..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/analysis_tools/robustness_eval.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from argparse import ArgumentParser - -import mmcv -import numpy as np - - -def print_coco_results(results): - - def _print(result, ap=1, iouThr=None, areaRng='all', maxDets=100): - titleStr = 'Average Precision' if ap == 1 else 'Average Recall' - typeStr = '(AP)' if ap == 1 else '(AR)' - iouStr = '0.50:0.95' \ - if iouThr is None else f'{iouThr:0.2f}' - iStr = f' {titleStr:<18} {typeStr} @[ IoU={iouStr:<9} | ' - iStr += f'area={areaRng:>6s} | maxDets={maxDets:>3d} ] = {result:0.3f}' - print(iStr) - - stats = np.zeros((12, )) - stats[0] = _print(results[0], 1) - stats[1] = _print(results[1], 1, iouThr=.5) - stats[2] = _print(results[2], 1, iouThr=.75) - stats[3] = _print(results[3], 1, areaRng='small') - stats[4] = _print(results[4], 1, areaRng='medium') - stats[5] = _print(results[5], 1, areaRng='large') - stats[6] = _print(results[6], 0, maxDets=1) - stats[7] = _print(results[7], 0, maxDets=10) - stats[8] = _print(results[8], 0) - stats[9] = _print(results[9], 0, areaRng='small') - stats[10] = _print(results[10], 0, areaRng='medium') - stats[11] = _print(results[11], 0, areaRng='large') - - -def get_coco_style_results(filename, - task='bbox', - metric=None, - prints='mPC', - aggregate='benchmark'): - - assert aggregate in ['benchmark', 'all'] - - if prints == 'all': - prints = ['P', 'mPC', 'rPC'] - elif isinstance(prints, str): - prints = [prints] - for p in prints: - assert p in ['P', 'mPC', 'rPC'] - - if metric is None: - metrics = [ - 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100', - 'ARs', 'ARm', 'ARl' - ] - elif isinstance(metric, list): - metrics = metric - else: - metrics = [metric] - - for metric_name in metrics: - assert metric_name in [ - 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100', - 'ARs', 'ARm', 'ARl' - ] - - eval_output = mmcv.load(filename) - - num_distortions = len(list(eval_output.keys())) - results = np.zeros((num_distortions, 6, len(metrics)), dtype='float32') - - for corr_i, distortion in enumerate(eval_output): - for severity in eval_output[distortion]: - for metric_j, metric_name in enumerate(metrics): - mAP = 
eval_output[distortion][severity][task][metric_name] - results[corr_i, severity, metric_j] = mAP - - P = results[0, 0, :] - if aggregate == 'benchmark': - mPC = np.mean(results[:15, 1:, :], axis=(0, 1)) - else: - mPC = np.mean(results[:, 1:, :], axis=(0, 1)) - rPC = mPC / P - - print(f'\nmodel: {osp.basename(filename)}') - if metric is None: - if 'P' in prints: - print(f'Performance on Clean Data [P] ({task})') - print_coco_results(P) - if 'mPC' in prints: - print(f'Mean Performance under Corruption [mPC] ({task})') - print_coco_results(mPC) - if 'rPC' in prints: - print(f'Relative Performance under Corruption [rPC] ({task})') - print_coco_results(rPC) - else: - if 'P' in prints: - print(f'Performance on Clean Data [P] ({task})') - for metric_i, metric_name in enumerate(metrics): - print(f'{metric_name:5} = {P[metric_i]:0.3f}') - if 'mPC' in prints: - print(f'Mean Performance under Corruption [mPC] ({task})') - for metric_i, metric_name in enumerate(metrics): - print(f'{metric_name:5} = {mPC[metric_i]:0.3f}') - if 'rPC' in prints: - print(f'Relative Performance under Corruption [rPC] ({task})') - for metric_i, metric_name in enumerate(metrics): - print(f'{metric_name:5} => {rPC[metric_i] * 100:0.1f} %') - - return results - - -def get_voc_style_results(filename, prints='mPC', aggregate='benchmark'): - - assert aggregate in ['benchmark', 'all'] - - if prints == 'all': - prints = ['P', 'mPC', 'rPC'] - elif isinstance(prints, str): - prints = [prints] - for p in prints: - assert p in ['P', 'mPC', 'rPC'] - - eval_output = mmcv.load(filename) - - num_distortions = len(list(eval_output.keys())) - results = np.zeros((num_distortions, 6, 20), dtype='float32') - - for i, distortion in enumerate(eval_output): - for severity in eval_output[distortion]: - mAP = [ - eval_output[distortion][severity][j]['ap'] - for j in range(len(eval_output[distortion][severity])) - ] - results[i, severity, :] = mAP - - P = results[0, 0, :] - if aggregate == 'benchmark': - mPC = np.mean(results[:15, 1:, :], axis=(0, 1)) - else: - mPC = np.mean(results[:, 1:, :], axis=(0, 1)) - rPC = mPC / P - - print(f'\nmodel: {osp.basename(filename)}') - if 'P' in prints: - print(f'Performance on Clean Data [P] in AP50 = {np.mean(P):0.3f}') - if 'mPC' in prints: - print('Mean Performance under Corruption [mPC] in AP50 = ' - f'{np.mean(mPC):0.3f}') - if 'rPC' in prints: - print('Relative Performance under Corruption [rPC] in % = ' - f'{np.mean(rPC) * 100:0.1f}') - - return np.mean(results, axis=2, keepdims=True) - - -def get_results(filename, - dataset='coco', - task='bbox', - metric=None, - prints='mPC', - aggregate='benchmark'): - assert dataset in ['coco', 'voc', 'cityscapes'] - - if dataset in ['coco', 'cityscapes']: - results = get_coco_style_results( - filename, - task=task, - metric=metric, - prints=prints, - aggregate=aggregate) - elif dataset == 'voc': - if task != 'bbox': - print('Only bbox analysis is supported for Pascal VOC') - print('Will report bbox results\n') - if metric not in [None, ['AP'], ['AP50']]: - print('Only the AP50 metric is supported for Pascal VOC') - print('Will report AP50 metric\n') - results = get_voc_style_results( - filename, prints=prints, aggregate=aggregate) - - return results - - -def get_distortions_from_file(filename): - - eval_output = mmcv.load(filename) - - return get_distortions_from_results(eval_output) - - -def get_distortions_from_results(eval_output): - distortions = [] - for i, distortion in enumerate(eval_output): - distortions.append(distortion.replace('_', ' ')) - return 
distortions - - -def main(): - parser = ArgumentParser(description='Corruption Result Analysis') - parser.add_argument('filename', help='result file path') - parser.add_argument( - '--dataset', - type=str, - choices=['coco', 'voc', 'cityscapes'], - default='coco', - help='dataset type') - parser.add_argument( - '--task', - type=str, - nargs='+', - choices=['bbox', 'segm'], - default=['bbox'], - help='task to report') - parser.add_argument( - '--metric', - nargs='+', - choices=[ - None, 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', - 'AR100', 'ARs', 'ARm', 'ARl' - ], - default=None, - help='metric to report') - parser.add_argument( - '--prints', - type=str, - nargs='+', - choices=['P', 'mPC', 'rPC'], - default='mPC', - help='corruption benchmark metric to print') - parser.add_argument( - '--aggregate', - type=str, - choices=['all', 'benchmark'], - default='benchmark', - help='aggregate all results or only those \ - for benchmark corruptions') - - args = parser.parse_args() - - for task in args.task: - get_results( - args.filename, - dataset=args.dataset, - task=task, - metric=args.metric, - prints=args.prints, - aggregate=args.aggregate) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/analysis_tools/test_robustness.py b/cv/detection/co-detr/pytorch/tools/analysis_tools/test_robustness.py deleted file mode 100644 index 0c1ddbeec545297544b5c29c5eca49a777367f79..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/analysis_tools/test_robustness.py +++ /dev/null @@ -1,387 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import copy -import os -import os.path as osp - -import mmcv -import torch -from mmcv import DictAction -from mmcv.parallel import MMDataParallel, MMDistributedDataParallel -from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, - wrap_fp16_model) -from pycocotools.coco import COCO -from pycocotools.cocoeval import COCOeval - -from mmdet import datasets -from mmdet.apis import multi_gpu_test, set_random_seed, single_gpu_test -from mmdet.core import eval_map -from mmdet.datasets import build_dataloader, build_dataset -from mmdet.models import build_detector -from tools.analysis_tools.robustness_eval import get_results - - -def coco_eval_with_return(result_files, - result_types, - coco, - max_dets=(100, 300, 1000)): - for res_type in result_types: - assert res_type in ['proposal', 'bbox', 'segm', 'keypoints'] - - if mmcv.is_str(coco): - coco = COCO(coco) - assert isinstance(coco, COCO) - - eval_results = {} - for res_type in result_types: - result_file = result_files[res_type] - assert result_file.endswith('.json') - - coco_dets = coco.loadRes(result_file) - img_ids = coco.getImgIds() - iou_type = 'bbox' if res_type == 'proposal' else res_type - cocoEval = COCOeval(coco, coco_dets, iou_type) - cocoEval.params.imgIds = img_ids - if res_type == 'proposal': - cocoEval.params.useCats = 0 - cocoEval.params.maxDets = list(max_dets) - cocoEval.evaluate() - cocoEval.accumulate() - cocoEval.summarize() - if res_type == 'segm' or res_type == 'bbox': - metric_names = [ - 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', - 'AR100', 'ARs', 'ARm', 'ARl' - ] - eval_results[res_type] = { - metric_names[i]: cocoEval.stats[i] - for i in range(len(metric_names)) - } - else: - eval_results[res_type] = cocoEval.stats - - return eval_results - - -def voc_eval_with_return(result_file, - dataset, - iou_thr=0.5, - logger='print', - only_ap=True): - det_results = 
mmcv.load(result_file) - annotations = [dataset.get_ann_info(i) for i in range(len(dataset))] - if hasattr(dataset, 'year') and dataset.year == 2007: - dataset_name = 'voc07' - else: - dataset_name = dataset.CLASSES - mean_ap, eval_results = eval_map( - det_results, - annotations, - scale_ranges=None, - iou_thr=iou_thr, - dataset=dataset_name, - logger=logger) - - if only_ap: - eval_results = [{ - 'ap': eval_results[i]['ap'] - } for i in range(len(eval_results))] - - return mean_ap, eval_results - - -def parse_args(): - parser = argparse.ArgumentParser(description='MMDet test detector') - parser.add_argument('config', help='test config file path') - parser.add_argument('checkpoint', help='checkpoint file') - parser.add_argument('--out', help='output result file') - parser.add_argument( - '--corruptions', - type=str, - nargs='+', - default='benchmark', - choices=[ - 'all', 'benchmark', 'noise', 'blur', 'weather', 'digital', - 'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise', - 'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow', - 'frost', 'fog', 'brightness', 'contrast', 'elastic_transform', - 'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur', - 'spatter', 'saturate' - ], - help='corruptions') - parser.add_argument( - '--severities', - type=int, - nargs='+', - default=[0, 1, 2, 3, 4, 5], - help='corruption severity levels') - parser.add_argument( - '--eval', - type=str, - nargs='+', - choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'], - help='eval types') - parser.add_argument( - '--iou-thr', - type=float, - default=0.5, - help='IoU threshold for pascal voc evaluation') - parser.add_argument( - '--summaries', - type=bool, - default=False, - help='Print summaries for every corruption and severity') - parser.add_argument( - '--workers', type=int, default=32, help='workers per gpu') - parser.add_argument('--show', action='store_true', help='show results') - parser.add_argument( - '--show-dir', help='directory where painted images will be saved') - parser.add_argument( - '--show-score-thr', - type=float, - default=0.3, - help='score threshold (default: 0.3)') - parser.add_argument('--tmpdir', help='tmp dir for writing some results') - parser.add_argument('--seed', type=int, default=None, help='random seed') - parser.add_argument( - '--launcher', - choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', - help='job launcher') - parser.add_argument('--local_rank', type=int, default=0) - parser.add_argument( - '--final-prints', - type=str, - nargs='+', - choices=['P', 'mPC', 'rPC'], - default='mPC', - help='corruption benchmark metric to print at the end') - parser.add_argument( - '--final-prints-aggregate', - type=str, - choices=['all', 'benchmark'], - default='benchmark', - help='aggregate all results or only those for benchmark corruptions') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - args = parser.parse_args() - if 'LOCAL_RANK' not in os.environ: - os.environ['LOCAL_RANK'] = str(args.local_rank) - return args - - -def main(): - args = parse_args() - - assert args.out or args.show or args.show_dir, \ - ('Please specify at least one operation (save or show the results) ' - 'with the argument "--out", "--show" or "show-dir"') - - if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): - raise ValueError('The output file must be a pkl file.') - - cfg = mmcv.Config.fromfile(args.config) - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): - torch.backends.cudnn.benchmark = True - cfg.model.pretrained = None - cfg.data.test.test_mode = True - if args.workers == 0: - args.workers = cfg.data.workers_per_gpu - - # init distributed env first, since logger depends on the dist info. - if args.launcher == 'none': - distributed = False - else: - distributed = True - init_dist(args.launcher, **cfg.dist_params) - - # set random seeds - if args.seed is not None: - set_random_seed(args.seed) - - if 'all' in args.corruptions: - corruptions = [ - 'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur', - 'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog', - 'brightness', 'contrast', 'elastic_transform', 'pixelate', - 'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter', - 'saturate' - ] - elif 'benchmark' in args.corruptions: - corruptions = [ - 'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur', - 'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog', - 'brightness', 'contrast', 'elastic_transform', 'pixelate', - 'jpeg_compression' - ] - elif 'noise' in args.corruptions: - corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise'] - elif 'blur' in args.corruptions: - corruptions = [ - 'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur' - ] - elif 'weather' in args.corruptions: - corruptions = ['snow', 'frost', 'fog', 'brightness'] - elif 'digital' in args.corruptions: - corruptions = [ - 'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression' - ] - elif 'holdout' in args.corruptions: - corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate'] - elif 'None' in args.corruptions: - corruptions = ['None'] - args.severities = [0] - else: - corruptions = args.corruptions - - rank, _ = get_dist_info() - aggregated_results = {} - for corr_i, corruption in enumerate(corruptions): - aggregated_results[corruption] = {} - for sev_i, corruption_severity in enumerate(args.severities): - # evaluate severity 0 (= no corruption) only once - if corr_i > 0 and corruption_severity == 0: - aggregated_results[corruption][0] = \ - aggregated_results[corruptions[0]][0] - continue - - test_data_cfg = copy.deepcopy(cfg.data.test) - # assign corruption and severity - if corruption_severity > 0: - corruption_trans = dict( - type='Corrupt', - corruption=corruption, - severity=corruption_severity) - # TODO: hard coded "1", we assume that the first step is - # loading images, which needs to be fixed in the future - test_data_cfg['pipeline'].insert(1, corruption_trans) - - # print info - print(f'\nTesting {corruption} at severity {corruption_severity}') - - # build the dataloader - # TODO: support multiple images per gpu - # (only minor changes are needed) - dataset = build_dataset(test_data_cfg) - data_loader = 
build_dataloader( - dataset, - samples_per_gpu=1, - workers_per_gpu=args.workers, - dist=distributed, - shuffle=False) - - # build the model and load checkpoint - cfg.model.train_cfg = None - model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) - fp16_cfg = cfg.get('fp16', None) - if fp16_cfg is not None: - wrap_fp16_model(model) - checkpoint = load_checkpoint( - model, args.checkpoint, map_location='cpu') - # old versions did not save class info in checkpoints, - # this walkaround is for backward compatibility - if 'CLASSES' in checkpoint.get('meta', {}): - model.CLASSES = checkpoint['meta']['CLASSES'] - else: - model.CLASSES = dataset.CLASSES - - if not distributed: - model = MMDataParallel(model, device_ids=[0]) - show_dir = args.show_dir - if show_dir is not None: - show_dir = osp.join(show_dir, corruption) - show_dir = osp.join(show_dir, str(corruption_severity)) - if not osp.exists(show_dir): - osp.makedirs(show_dir) - outputs = single_gpu_test(model, data_loader, args.show, - show_dir, args.show_score_thr) - else: - model = MMDistributedDataParallel( - model.cuda(), - device_ids=[torch.cuda.current_device()], - broadcast_buffers=False) - outputs = multi_gpu_test(model, data_loader, args.tmpdir) - - if args.out and rank == 0: - eval_results_filename = ( - osp.splitext(args.out)[0] + '_results' + - osp.splitext(args.out)[1]) - mmcv.dump(outputs, args.out) - eval_types = args.eval - if cfg.dataset_type == 'VOCDataset': - if eval_types: - for eval_type in eval_types: - if eval_type == 'bbox': - test_dataset = mmcv.runner.obj_from_dict( - cfg.data.test, datasets) - logger = 'print' if args.summaries else None - mean_ap, eval_results = \ - voc_eval_with_return( - args.out, test_dataset, - args.iou_thr, logger) - aggregated_results[corruption][ - corruption_severity] = eval_results - else: - print('\nOnly "bbox" evaluation \ - is supported for pascal voc') - else: - if eval_types: - print(f'Starting evaluate {" and ".join(eval_types)}') - if eval_types == ['proposal_fast']: - result_file = args.out - else: - if not isinstance(outputs[0], dict): - result_files = dataset.results2json( - outputs, args.out) - else: - for name in outputs[0]: - print(f'\nEvaluating {name}') - outputs_ = [out[name] for out in outputs] - result_file = args.out - + f'.{name}' - result_files = dataset.results2json( - outputs_, result_file) - eval_results = coco_eval_with_return( - result_files, eval_types, dataset.coco) - aggregated_results[corruption][ - corruption_severity] = eval_results - else: - print('\nNo task was selected for evaluation;' - '\nUse --eval to select a task') - - # save results after each evaluation - mmcv.dump(aggregated_results, eval_results_filename) - - if rank == 0: - # print final results - print('\nAggregated results:') - prints = args.final_prints - aggregate = args.final_prints_aggregate - - if cfg.dataset_type == 'VOCDataset': - get_results( - eval_results_filename, - dataset='voc', - prints=prints, - aggregate=aggregate) - else: - get_results( - eval_results_filename, - dataset='coco', - prints=prints, - aggregate=aggregate) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/dataset_converters/cityscapes.py b/cv/detection/co-detr/pytorch/tools/dataset_converters/cityscapes.py deleted file mode 100644 index c8e44b96c4ee89be68b60b57b823b6cb5e0de534..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/dataset_converters/cityscapes.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (c) OpenMMLab. 
All rights reserved. -import argparse -import glob -import os.path as osp - -import cityscapesscripts.helpers.labels as CSLabels -import mmcv -import numpy as np -import pycocotools.mask as maskUtils - - -def collect_files(img_dir, gt_dir): - suffix = 'leftImg8bit.png' - files = [] - for img_file in glob.glob(osp.join(img_dir, '**/*.png')): - assert img_file.endswith(suffix), img_file - inst_file = gt_dir + img_file[ - len(img_dir):-len(suffix)] + 'gtFine_instanceIds.png' - # Note that labelIds are not converted to trainId for seg map - segm_file = gt_dir + img_file[ - len(img_dir):-len(suffix)] + 'gtFine_labelIds.png' - files.append((img_file, inst_file, segm_file)) - assert len(files), f'No images found in {img_dir}' - print(f'Loaded {len(files)} images from {img_dir}') - - return files - - -def collect_annotations(files, nproc=1): - print('Loading annotation images') - if nproc > 1: - images = mmcv.track_parallel_progress( - load_img_info, files, nproc=nproc) - else: - images = mmcv.track_progress(load_img_info, files) - - return images - - -def load_img_info(files): - img_file, inst_file, segm_file = files - inst_img = mmcv.imread(inst_file, 'unchanged') - # ids < 24 are stuff labels (filtering them first is about 5% faster) - unique_inst_ids = np.unique(inst_img[inst_img >= 24]) - anno_info = [] - for inst_id in unique_inst_ids: - # For non-crowd annotations, inst_id // 1000 is the label_id - # Crowd annotations have <1000 instance ids - label_id = inst_id // 1000 if inst_id >= 1000 else inst_id - label = CSLabels.id2label[label_id] - if not label.hasInstances or label.ignoreInEval: - continue - - category_id = label.id - iscrowd = int(inst_id < 1000) - mask = np.asarray(inst_img == inst_id, dtype=np.uint8, order='F') - mask_rle = maskUtils.encode(mask[:, :, None])[0] - - area = maskUtils.area(mask_rle) - # convert to COCO style XYWH format - bbox = maskUtils.toBbox(mask_rle) - - # for json encoding - mask_rle['counts'] = mask_rle['counts'].decode() - - anno = dict( - iscrowd=iscrowd, - category_id=category_id, - bbox=bbox.tolist(), - area=area.tolist(), - segmentation=mask_rle) - anno_info.append(anno) - video_name = osp.basename(osp.dirname(img_file)) - img_info = dict( - # remove img_prefix for filename - file_name=osp.join(video_name, osp.basename(img_file)), - height=inst_img.shape[0], - width=inst_img.shape[1], - anno_info=anno_info, - segm_file=osp.join(video_name, osp.basename(segm_file))) - - return img_info - - -def cvt_annotations(image_infos, out_json_name): - out_json = dict() - img_id = 0 - ann_id = 0 - out_json['images'] = [] - out_json['categories'] = [] - out_json['annotations'] = [] - for image_info in image_infos: - image_info['id'] = img_id - anno_infos = image_info.pop('anno_info') - out_json['images'].append(image_info) - for anno_info in anno_infos: - anno_info['image_id'] = img_id - anno_info['id'] = ann_id - out_json['annotations'].append(anno_info) - ann_id += 1 - img_id += 1 - for label in CSLabels.labels: - if label.hasInstances and not label.ignoreInEval: - cat = dict(id=label.id, name=label.name) - out_json['categories'].append(cat) - - if len(out_json['annotations']) == 0: - out_json.pop('annotations') - - mmcv.dump(out_json, out_json_name) - return out_json - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Convert Cityscapes annotations to COCO format') - parser.add_argument('cityscapes_path', help='cityscapes data path') - parser.add_argument('--img-dir', default='leftImg8bit', type=str) - parser.add_argument('--gt-dir', 
default='gtFine', type=str) - parser.add_argument('-o', '--out-dir', help='output path') - parser.add_argument( - '--nproc', default=1, type=int, help='number of process') - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - cityscapes_path = args.cityscapes_path - out_dir = args.out_dir if args.out_dir else cityscapes_path - mmcv.mkdir_or_exist(out_dir) - - img_dir = osp.join(cityscapes_path, args.img_dir) - gt_dir = osp.join(cityscapes_path, args.gt_dir) - - set_name = dict( - train='instancesonly_filtered_gtFine_train.json', - val='instancesonly_filtered_gtFine_val.json', - test='instancesonly_filtered_gtFine_test.json') - - for split, json_name in set_name.items(): - print(f'Converting {split} into {json_name}') - with mmcv.Timer( - print_tmpl='It took {}s to convert Cityscapes annotation'): - files = collect_files( - osp.join(img_dir, split), osp.join(gt_dir, split)) - image_infos = collect_annotations(files, nproc=args.nproc) - cvt_annotations(image_infos, osp.join(out_dir, json_name)) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/dataset_converters/images2coco.py b/cv/detection/co-detr/pytorch/tools/dataset_converters/images2coco.py deleted file mode 100644 index 1c4e2f14a23adc9c76faa87ed7da154917e188a7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/dataset_converters/images2coco.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import os - -import mmcv -from PIL import Image - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Convert images to coco format without annotations') - parser.add_argument('img_path', help='The root path of images') - parser.add_argument( - 'classes', type=str, help='The text file name of storage class list') - parser.add_argument( - 'out', - type=str, - help='The output annotation json file name, The save dir is in the ' - 'same directory as img_path') - parser.add_argument( - '-e', - '--exclude-extensions', - type=str, - nargs='+', - help='The suffix of images to be excluded, such as "png" and "bmp"') - args = parser.parse_args() - return args - - -def collect_image_infos(path, exclude_extensions=None): - img_infos = [] - - images_generator = mmcv.scandir(path, recursive=True) - for image_path in mmcv.track_iter_progress(list(images_generator)): - if exclude_extensions is None or ( - exclude_extensions is not None - and not image_path.lower().endswith(exclude_extensions)): - image_path = os.path.join(path, image_path) - img_pillow = Image.open(image_path) - img_info = { - 'filename': image_path, - 'width': img_pillow.width, - 'height': img_pillow.height, - } - img_infos.append(img_info) - return img_infos - - -def cvt_to_coco_json(img_infos, classes): - image_id = 0 - coco = dict() - coco['images'] = [] - coco['type'] = 'instance' - coco['categories'] = [] - coco['annotations'] = [] - image_set = set() - - for category_id, name in enumerate(classes): - category_item = dict() - category_item['supercategory'] = str('none') - category_item['id'] = int(category_id) - category_item['name'] = str(name) - coco['categories'].append(category_item) - - for img_dict in img_infos: - file_name = img_dict['filename'] - assert file_name not in image_set - image_item = dict() - image_item['id'] = int(image_id) - image_item['file_name'] = str(file_name) - image_item['height'] = int(img_dict['height']) - image_item['width'] = int(img_dict['width']) - coco['images'].append(image_item) - 
image_set.add(file_name) - - image_id += 1 - return coco - - -def main(): - args = parse_args() - assert args.out.endswith( - 'json'), 'The output file name must be json suffix' - - # 1 load image list info - img_infos = collect_image_infos(args.img_path, args.exclude_extensions) - - # 2 convert to coco format data - classes = mmcv.list_from_file(args.classes) - coco_info = cvt_to_coco_json(img_infos, classes) - - # 3 dump - save_dir = os.path.join(args.img_path, '..', 'annotations') - mmcv.mkdir_or_exist(save_dir) - save_path = os.path.join(save_dir, args.out) - mmcv.dump(coco_info, save_path) - print(f'save json file: {save_path}') - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/dataset_converters/pascal_voc.py b/cv/detection/co-detr/pytorch/tools/dataset_converters/pascal_voc.py deleted file mode 100644 index 20f88019f7dffd9fb4a91c002a44825299680f7e..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/dataset_converters/pascal_voc.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import os.path as osp -import xml.etree.ElementTree as ET - -import mmcv -import numpy as np - -from mmdet.core import voc_classes - -label_ids = {name: i for i, name in enumerate(voc_classes())} - - -def parse_xml(args): - xml_path, img_path = args - tree = ET.parse(xml_path) - root = tree.getroot() - size = root.find('size') - w = int(size.find('width').text) - h = int(size.find('height').text) - bboxes = [] - labels = [] - bboxes_ignore = [] - labels_ignore = [] - for obj in root.findall('object'): - name = obj.find('name').text - label = label_ids[name] - difficult = int(obj.find('difficult').text) - bnd_box = obj.find('bndbox') - bbox = [ - int(bnd_box.find('xmin').text), - int(bnd_box.find('ymin').text), - int(bnd_box.find('xmax').text), - int(bnd_box.find('ymax').text) - ] - if difficult: - bboxes_ignore.append(bbox) - labels_ignore.append(label) - else: - bboxes.append(bbox) - labels.append(label) - if not bboxes: - bboxes = np.zeros((0, 4)) - labels = np.zeros((0, )) - else: - bboxes = np.array(bboxes, ndmin=2) - 1 - labels = np.array(labels) - if not bboxes_ignore: - bboxes_ignore = np.zeros((0, 4)) - labels_ignore = np.zeros((0, )) - else: - bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1 - labels_ignore = np.array(labels_ignore) - annotation = { - 'filename': img_path, - 'width': w, - 'height': h, - 'ann': { - 'bboxes': bboxes.astype(np.float32), - 'labels': labels.astype(np.int64), - 'bboxes_ignore': bboxes_ignore.astype(np.float32), - 'labels_ignore': labels_ignore.astype(np.int64) - } - } - return annotation - - -def cvt_annotations(devkit_path, years, split, out_file): - if not isinstance(years, list): - years = [years] - annotations = [] - for year in years: - filelist = osp.join(devkit_path, - f'VOC{year}/ImageSets/Main/{split}.txt') - if not osp.isfile(filelist): - print(f'filelist does not exist: {filelist}, ' - f'skip voc{year} {split}') - return - img_names = mmcv.list_from_file(filelist) - xml_paths = [ - osp.join(devkit_path, f'VOC{year}/Annotations/{img_name}.xml') - for img_name in img_names - ] - img_paths = [ - f'VOC{year}/JPEGImages/{img_name}.jpg' for img_name in img_names - ] - part_annotations = mmcv.track_progress(parse_xml, - list(zip(xml_paths, img_paths))) - annotations.extend(part_annotations) - if out_file.endswith('json'): - annotations = cvt_to_coco_json(annotations) - mmcv.dump(annotations, out_file) - return annotations - - -def 
cvt_to_coco_json(annotations): - image_id = 0 - annotation_id = 0 - coco = dict() - coco['images'] = [] - coco['type'] = 'instance' - coco['categories'] = [] - coco['annotations'] = [] - image_set = set() - - def addAnnItem(annotation_id, image_id, category_id, bbox, difficult_flag): - annotation_item = dict() - annotation_item['segmentation'] = [] - - seg = [] - # bbox[] is x1,y1,x2,y2 - # left_top - seg.append(int(bbox[0])) - seg.append(int(bbox[1])) - # left_bottom - seg.append(int(bbox[0])) - seg.append(int(bbox[3])) - # right_bottom - seg.append(int(bbox[2])) - seg.append(int(bbox[3])) - # right_top - seg.append(int(bbox[2])) - seg.append(int(bbox[1])) - - annotation_item['segmentation'].append(seg) - - xywh = np.array( - [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]) - annotation_item['area'] = int(xywh[2] * xywh[3]) - if difficult_flag == 1: - annotation_item['ignore'] = 0 - annotation_item['iscrowd'] = 1 - else: - annotation_item['ignore'] = 0 - annotation_item['iscrowd'] = 0 - annotation_item['image_id'] = int(image_id) - annotation_item['bbox'] = xywh.astype(int).tolist() - annotation_item['category_id'] = int(category_id) - annotation_item['id'] = int(annotation_id) - coco['annotations'].append(annotation_item) - return annotation_id + 1 - - for category_id, name in enumerate(voc_classes()): - category_item = dict() - category_item['supercategory'] = str('none') - category_item['id'] = int(category_id) - category_item['name'] = str(name) - coco['categories'].append(category_item) - - for ann_dict in annotations: - file_name = ann_dict['filename'] - ann = ann_dict['ann'] - assert file_name not in image_set - image_item = dict() - image_item['id'] = int(image_id) - image_item['file_name'] = str(file_name) - image_item['height'] = int(ann_dict['height']) - image_item['width'] = int(ann_dict['width']) - coco['images'].append(image_item) - image_set.add(file_name) - - bboxes = ann['bboxes'][:, :4] - labels = ann['labels'] - for bbox_id in range(len(bboxes)): - bbox = bboxes[bbox_id] - label = labels[bbox_id] - annotation_id = addAnnItem( - annotation_id, image_id, label, bbox, difficult_flag=0) - - bboxes_ignore = ann['bboxes_ignore'][:, :4] - labels_ignore = ann['labels_ignore'] - for bbox_id in range(len(bboxes_ignore)): - bbox = bboxes_ignore[bbox_id] - label = labels_ignore[bbox_id] - annotation_id = addAnnItem( - annotation_id, image_id, label, bbox, difficult_flag=1) - - image_id += 1 - - return coco - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Convert PASCAL VOC annotations to mmdetection format') - parser.add_argument('devkit_path', help='pascal voc devkit path') - parser.add_argument('-o', '--out-dir', help='output path') - parser.add_argument( - '--out-format', - default='pkl', - choices=('pkl', 'coco'), - help='output format, "coco" indicates coco annotation format') - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - devkit_path = args.devkit_path - out_dir = args.out_dir if args.out_dir else devkit_path - mmcv.mkdir_or_exist(out_dir) - - years = [] - if osp.isdir(osp.join(devkit_path, 'VOC2007')): - years.append('2007') - if osp.isdir(osp.join(devkit_path, 'VOC2012')): - years.append('2012') - if '2007' in years and '2012' in years: - years.append(['2007', '2012']) - if not years: - raise IOError(f'The devkit path {devkit_path} contains neither ' - '"VOC2007" nor "VOC2012" subfolder') - out_fmt = f'.{args.out_format}' - if args.out_format == 'coco': - out_fmt = '.json' - for year in years: - if year 
== '2007': - prefix = 'voc07' - elif year == '2012': - prefix = 'voc12' - elif year == ['2007', '2012']: - prefix = 'voc0712' - for split in ['train', 'val', 'trainval']: - dataset_name = prefix + '_' + split - print(f'processing {dataset_name} ...') - cvt_annotations(devkit_path, year, split, - osp.join(out_dir, dataset_name + out_fmt)) - if not isinstance(year, list): - dataset_name = prefix + '_test' - print(f'processing {dataset_name} ...') - cvt_annotations(devkit_path, year, 'test', - osp.join(out_dir, dataset_name + out_fmt)) - print('Done!') - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/deployment/mmdet2torchserve.py b/cv/detection/co-detr/pytorch/tools/deployment/mmdet2torchserve.py deleted file mode 100644 index 70a081a24644c6cfacf384c6780aa4b537fb96e1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/deployment/mmdet2torchserve.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from argparse import ArgumentParser, Namespace -from pathlib import Path -from tempfile import TemporaryDirectory - -import mmcv - -try: - from model_archiver.model_packaging import package_model - from model_archiver.model_packaging_utils import ModelExportUtils -except ImportError: - package_model = None - - -def mmdet2torchserve( - config_file: str, - checkpoint_file: str, - output_folder: str, - model_name: str, - model_version: str = '1.0', - force: bool = False, -): - """Converts MMDetection model (config + checkpoint) to TorchServe `.mar`. - - Args: - config_file: - In MMDetection config format. - The contents vary for each task repository. - checkpoint_file: - In MMDetection checkpoint format. - The contents vary for each task repository. - output_folder: - Folder where `{model_name}.mar` will be created. - The file created will be in TorchServe archive format. - model_name: - If not None, used for naming the `{model_name}.mar` file - that will be created under `output_folder`. - If None, `{Path(checkpoint_file).stem}` will be used. - model_version: - Model's version. - force: - If True, if there is an existing `{model_name}.mar` - file under `output_folder` it will be overwritten. - """ - mmcv.mkdir_or_exist(output_folder) - - config = mmcv.Config.fromfile(config_file) - - with TemporaryDirectory() as tmpdir: - config.dump(f'{tmpdir}/config.py') - - args = Namespace( - **{ - 'model_file': f'{tmpdir}/config.py', - 'serialized_file': checkpoint_file, - 'handler': f'{Path(__file__).parent}/mmdet_handler.py', - 'model_name': model_name or Path(checkpoint_file).stem, - 'version': model_version, - 'export_path': output_folder, - 'force': force, - 'requirements_file': None, - 'extra_files': None, - 'runtime': 'python', - 'archive_format': 'default' - }) - manifest = ModelExportUtils.generate_manifest_json(args) - package_model(args, manifest) - - -def parse_args(): - parser = ArgumentParser( - description='Convert MMDetection models to TorchServe `.mar` format.') - parser.add_argument('config', type=str, help='config file path') - parser.add_argument('checkpoint', type=str, help='checkpoint file path') - parser.add_argument( - '--output-folder', - type=str, - required=True, - help='Folder where `{model_name}.mar` will be created.') - parser.add_argument( - '--model-name', - type=str, - default=None, - help='If not None, used for naming the `{model_name}.mar`' - 'file that will be created under `output_folder`.' 
- 'If None, `{Path(checkpoint_file).stem}` will be used.') - parser.add_argument( - '--model-version', - type=str, - default='1.0', - help='Number used for versioning.') - parser.add_argument( - '-f', - '--force', - action='store_true', - help='overwrite the existing `{model_name}.mar`') - args = parser.parse_args() - - return args - - -if __name__ == '__main__': - args = parse_args() - - if package_model is None: - raise ImportError('`torch-model-archiver` is required.' - 'Try: pip install torch-model-archiver') - - mmdet2torchserve(args.config, args.checkpoint, args.output_folder, - args.model_name, args.model_version, args.force) diff --git a/cv/detection/co-detr/pytorch/tools/deployment/mmdet_handler.py b/cv/detection/co-detr/pytorch/tools/deployment/mmdet_handler.py deleted file mode 100644 index 18fc23017d1eac9ad75e5ba96a1876d278d93982..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/deployment/mmdet_handler.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import base64 -import os - -import mmcv -import torch -from ts.torch_handler.base_handler import BaseHandler - -from mmdet.apis import inference_detector, init_detector - - -class MMdetHandler(BaseHandler): - threshold = 0.5 - - def initialize(self, context): - properties = context.system_properties - self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu' - self.device = torch.device(self.map_location + ':' + - str(properties.get('gpu_id')) if torch.cuda. - is_available() else self.map_location) - self.manifest = context.manifest - - model_dir = properties.get('model_dir') - serialized_file = self.manifest['model']['serializedFile'] - checkpoint = os.path.join(model_dir, serialized_file) - self.config_file = os.path.join(model_dir, 'config.py') - - self.model = init_detector(self.config_file, checkpoint, self.device) - self.initialized = True - - def preprocess(self, data): - images = [] - - for row in data: - image = row.get('data') or row.get('body') - if isinstance(image, str): - image = base64.b64decode(image) - image = mmcv.imfrombytes(image) - images.append(image) - - return images - - def inference(self, data, *args, **kwargs): - results = inference_detector(self.model, data) - return results - - def postprocess(self, data): - # Format output following the example ObjectDetectionHandler format - output = [] - for image_index, image_result in enumerate(data): - output.append([]) - if isinstance(image_result, tuple): - bbox_result, segm_result = image_result - if isinstance(segm_result, tuple): - segm_result = segm_result[0] # ms rcnn - else: - bbox_result, segm_result = image_result, None - - for class_index, class_result in enumerate(bbox_result): - class_name = self.model.CLASSES[class_index] - for bbox in class_result: - bbox_coords = bbox[:-1].tolist() - score = float(bbox[-1]) - if score >= self.threshold: - output[image_index].append({ - 'class_name': class_name, - 'bbox': bbox_coords, - 'score': score - }) - - return output diff --git a/cv/detection/co-detr/pytorch/tools/deployment/onnx2tensorrt.py b/cv/detection/co-detr/pytorch/tools/deployment/onnx2tensorrt.py deleted file mode 100644 index b59e52ae1992ff4fca2bdaeff63d6c35ee3646df..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/deployment/onnx2tensorrt.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
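# --- Editor's illustrative sketch (not part of the deleted files above): packaging a
# --- checkpoint with mmdet2torchserve() and querying the endpoint served through the
# --- MMdetHandler defined above. The config/checkpoint paths, the model name
# --- 'retinanet' and the default port 8080 are assumptions, not taken from this repo.
#
#   mmdet2torchserve('configs/retinanet_r50_fpn_1x_coco.py',
#                    'checkpoints/retinanet_r50_fpn_1x_coco.pth',
#                    output_folder='model_store',
#                    model_name='retinanet', force=True)
#   torchserve --start --model-store model_store --models retinanet=retinanet.mar
#
# A minimal client; each returned item mirrors MMdetHandler.postprocess(), i.e.
# {'class_name': ..., 'bbox': [x1, y1, x2, y2], 'score': ...}.
import requests

with open('demo.jpg', 'rb') as f:                        # hypothetical test image
    detections = requests.post(
        'http://127.0.0.1:8080/predictions/retinanet',   # TorchServe inference API
        data=f).json()
for det in detections:
    if det['score'] >= 0.5:
        print(det['class_name'], det['bbox'], det['score'])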
-import argparse -import os -import os.path as osp -import warnings - -import numpy as np -import onnx -import torch -from mmcv import Config -from mmcv.tensorrt import is_tensorrt_plugin_loaded, onnx2trt, save_trt_engine - -from mmdet.core.export import preprocess_example_input -from mmdet.core.export.model_wrappers import (ONNXRuntimeDetector, - TensorRTDetector) -from mmdet.datasets import DATASETS - - -def get_GiB(x: int): - """return x GiB.""" - return x * (1 << 30) - - -def onnx2tensorrt(onnx_file, - trt_file, - input_config, - verify=False, - show=False, - workspace_size=1, - verbose=False): - import tensorrt as trt - onnx_model = onnx.load(onnx_file) - max_shape = input_config['max_shape'] - min_shape = input_config['min_shape'] - opt_shape = input_config['opt_shape'] - fp16_mode = False - # create trt engine and wrapper - opt_shape_dict = {'input': [min_shape, opt_shape, max_shape]} - max_workspace_size = get_GiB(workspace_size) - trt_engine = onnx2trt( - onnx_model, - opt_shape_dict, - log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR, - fp16_mode=fp16_mode, - max_workspace_size=max_workspace_size) - save_dir, _ = osp.split(trt_file) - if save_dir: - os.makedirs(save_dir, exist_ok=True) - save_trt_engine(trt_engine, trt_file) - print(f'Successfully created TensorRT engine: {trt_file}') - - if verify: - # prepare input - one_img, one_meta = preprocess_example_input(input_config) - img_list, img_meta_list = [one_img], [[one_meta]] - img_list = [_.cuda().contiguous() for _ in img_list] - - # wrap ONNX and TensorRT model - onnx_model = ONNXRuntimeDetector(onnx_file, CLASSES, device_id=0) - trt_model = TensorRTDetector(trt_file, CLASSES, device_id=0) - - # inference with wrapped model - with torch.no_grad(): - onnx_results = onnx_model( - img_list, img_metas=img_meta_list, return_loss=False)[0] - trt_results = trt_model( - img_list, img_metas=img_meta_list, return_loss=False)[0] - - if show: - out_file_ort, out_file_trt = None, None - else: - out_file_ort, out_file_trt = 'show-ort.png', 'show-trt.png' - show_img = one_meta['show_img'] - score_thr = 0.3 - onnx_model.show_result( - show_img, - onnx_results, - score_thr=score_thr, - show=True, - win_name='ONNXRuntime', - out_file=out_file_ort) - trt_model.show_result( - show_img, - trt_results, - score_thr=score_thr, - show=True, - win_name='TensorRT', - out_file=out_file_trt) - with_mask = trt_model.with_masks - # compare a part of result - if with_mask: - compare_pairs = list(zip(onnx_results, trt_results)) - else: - compare_pairs = [(onnx_results, trt_results)] - err_msg = 'The numerical values are different between Pytorch' + \ - ' and ONNX, but it does not necessarily mean the' + \ - ' exported ONNX model is problematic.' 
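# --- Editor's illustrative sketch (not part of the deleted file above): the kind of
# --- `input_config` handed to onnx2tensorrt(). All values are hypothetical examples;
# --- each shape is (N, C, H, W), and min/opt/max bound the dynamic shapes the TensorRT
# --- engine will accept. The mean/std values simply mirror this tool's CLI defaults.
example_input_config = {
    'min_shape': (1, 3, 320, 320),    # smallest input the engine must handle
    'opt_shape': (1, 3, 800, 1216),   # shape TensorRT optimises its kernels for
    'max_shape': (1, 3, 1344, 1344),  # largest input the engine must handle
    'input_shape': (1, 3, 800, 1216),
    'input_path': 'demo/demo.jpg',    # hypothetical verification image
    'normalize_cfg': dict(mean=[123.675, 116.28, 103.53],
                          std=[58.395, 57.12, 57.375]),
}
# A 4 GiB build workspace corresponds to get_GiB(4) == 4 * (1 << 30) bytes.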
- # check the numerical value - for onnx_res, pytorch_res in compare_pairs: - for o_res, p_res in zip(onnx_res, pytorch_res): - np.testing.assert_allclose( - o_res, p_res, rtol=1e-03, atol=1e-05, err_msg=err_msg) - print('The numerical values are the same between Pytorch and ONNX') - - -def parse_normalize_cfg(test_pipeline): - transforms = None - for pipeline in test_pipeline: - if 'transforms' in pipeline: - transforms = pipeline['transforms'] - break - assert transforms is not None, 'Failed to find `transforms`' - norm_config_li = [_ for _ in transforms if _['type'] == 'Normalize'] - assert len(norm_config_li) == 1, '`norm_config` should only have one' - norm_config = norm_config_li[0] - return norm_config - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Convert MMDetection models from ONNX to TensorRT') - parser.add_argument('config', help='test config file path') - parser.add_argument('model', help='Filename of input ONNX model') - parser.add_argument( - '--trt-file', - type=str, - default='tmp.trt', - help='Filename of output TensorRT engine') - parser.add_argument( - '--input-img', type=str, default='', help='Image for test') - parser.add_argument( - '--show', action='store_true', help='Whether to show output results') - parser.add_argument( - '--dataset', - type=str, - default='coco', - help='Dataset name. This argument is deprecated and will be \ - removed in future releases.') - parser.add_argument( - '--verify', - action='store_true', - help='Verify the outputs of ONNXRuntime and TensorRT') - parser.add_argument( - '--verbose', - action='store_true', - help='Whether to verbose logging messages while creating \ - TensorRT engine. Defaults to False.') - parser.add_argument( - '--to-rgb', - action='store_false', - help='Feed model with RGB or BGR image. Default is RGB. This \ - argument is deprecated and will be removed in future releases.') - parser.add_argument( - '--shape', - type=int, - nargs='+', - default=[400, 600], - help='Input size of the model') - parser.add_argument( - '--mean', - type=float, - nargs='+', - default=[123.675, 116.28, 103.53], - help='Mean value used for preprocess input data. This argument \ - is deprecated and will be removed in future releases.') - parser.add_argument( - '--std', - type=float, - nargs='+', - default=[58.395, 57.12, 57.375], - help='Variance value used for preprocess input data. \ - This argument is deprecated and will be removed in future releases.') - parser.add_argument( - '--min-shape', - type=int, - nargs='+', - default=None, - help='Minimum input size of the model in TensorRT') - parser.add_argument( - '--max-shape', - type=int, - nargs='+', - default=None, - help='Maximum input size of the model in TensorRT') - parser.add_argument( - '--workspace-size', - type=int, - default=1, - help='Max workspace size in GiB') - - args = parser.parse_args() - return args - - -if __name__ == '__main__': - - assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.' 
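# --- Editor's illustrative note (not part of the deleted file above): parse_normalize_cfg()
# --- walks the test pipeline and returns its single Normalize transform. A typical pipeline
# --- it would match is sketched below; the exact entries are an assumption shown only as an
# --- example, with the mean/std values repeated from this tool's CLI defaults.
example_test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='MultiScaleFlipAug',
         img_scale=(1333, 800),
         flip=False,
         transforms=[
             dict(type='Resize', keep_ratio=True),
             dict(type='Normalize',
                  mean=[123.675, 116.28, 103.53],
                  std=[58.395, 57.12, 57.375],
                  to_rgb=True),
             dict(type='Pad', size_divisor=32),
             dict(type='ImageToTensor', keys=['img']),
             dict(type='Collect', keys=['img']),
         ])
]
# parse_normalize_cfg(example_test_pipeline) returns the dict whose type is 'Normalize'.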
- args = parse_args() - warnings.warn( - 'Arguments like `--to-rgb`, `--mean`, `--std`, `--dataset` would be \ - parsed directly from config file and are deprecated and will be \ - removed in future releases.') - if not args.input_img: - args.input_img = osp.join(osp.dirname(__file__), '../../demo/demo.jpg') - - cfg = Config.fromfile(args.config) - - def parse_shape(shape): - if len(shape) == 1: - shape = (1, 3, shape[0], shape[0]) - elif len(args.shape) == 2: - shape = (1, 3) + tuple(shape) - else: - raise ValueError('invalid input shape') - return shape - - if args.shape: - input_shape = parse_shape(args.shape) - else: - img_scale = cfg.test_pipeline[1]['img_scale'] - input_shape = (1, 3, img_scale[1], img_scale[0]) - - if not args.max_shape: - max_shape = input_shape - else: - max_shape = parse_shape(args.max_shape) - - if not args.min_shape: - min_shape = input_shape - else: - min_shape = parse_shape(args.min_shape) - - dataset = DATASETS.get(cfg.data.test['type']) - assert (dataset is not None) - CLASSES = dataset.CLASSES - normalize_cfg = parse_normalize_cfg(cfg.test_pipeline) - - input_config = { - 'min_shape': min_shape, - 'opt_shape': input_shape, - 'max_shape': max_shape, - 'input_shape': input_shape, - 'input_path': args.input_img, - 'normalize_cfg': normalize_cfg - } - # Create TensorRT engine - onnx2tensorrt( - args.model, - args.trt_file, - input_config, - verify=args.verify, - show=args.show, - workspace_size=args.workspace_size, - verbose=args.verbose) - - # Following strings of text style are from colorama package - bright_style, reset_style = '\x1b[1m', '\x1b[0m' - red_text, blue_text = '\x1b[31m', '\x1b[34m' - white_background = '\x1b[107m' - - msg = white_background + bright_style + red_text - msg += 'DeprecationWarning: This tool will be deprecated in future. ' - msg += blue_text + 'Welcome to use the unified model deployment toolbox ' - msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' - msg += reset_style - warnings.warn(msg) diff --git a/cv/detection/co-detr/pytorch/tools/deployment/pytorch2onnx.py b/cv/detection/co-detr/pytorch/tools/deployment/pytorch2onnx.py deleted file mode 100644 index ee856ccb646872cf2ffa657b6faf61c3ba37054d..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/deployment/pytorch2onnx.py +++ /dev/null @@ -1,343 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import argparse -import os.path as osp -import warnings -from functools import partial - -import numpy as np -import onnx -import torch -from mmcv import Config, DictAction - -from mmdet.core.export import build_model_from_cfg, preprocess_example_input -from mmdet.core.export.model_wrappers import ONNXRuntimeDetector - - -def pytorch2onnx(model, - input_img, - input_shape, - normalize_cfg, - opset_version=11, - show=False, - output_file='tmp.onnx', - verify=False, - test_img=None, - do_simplify=False, - dynamic_export=None, - skip_postprocess=False): - - input_config = { - 'input_shape': input_shape, - 'input_path': input_img, - 'normalize_cfg': normalize_cfg - } - # prepare input - one_img, one_meta = preprocess_example_input(input_config) - img_list, img_meta_list = [one_img], [[one_meta]] - - if skip_postprocess: - warnings.warn('Not all models support export onnx without post ' - 'process, especially two stage detectors!') - model.forward = model.forward_dummy - torch.onnx.export( - model, - one_img, - output_file, - input_names=['input'], - export_params=True, - keep_initializers_as_inputs=True, - do_constant_folding=True, - verbose=show, - opset_version=opset_version) - - print(f'Successfully exported ONNX model without ' - f'post process: {output_file}') - return - - # replace original forward function - origin_forward = model.forward - model.forward = partial( - model.forward, - img_metas=img_meta_list, - return_loss=False, - rescale=False) - - output_names = ['dets', 'labels'] - if model.with_mask: - output_names.append('masks') - input_name = 'input' - dynamic_axes = None - if dynamic_export: - dynamic_axes = { - input_name: { - 0: 'batch', - 2: 'height', - 3: 'width' - }, - 'dets': { - 0: 'batch', - 1: 'num_dets', - }, - 'labels': { - 0: 'batch', - 1: 'num_dets', - }, - } - if model.with_mask: - dynamic_axes['masks'] = {0: 'batch', 1: 'num_dets'} - - torch.onnx.export( - model, - img_list, - output_file, - input_names=[input_name], - output_names=output_names, - export_params=True, - keep_initializers_as_inputs=True, - do_constant_folding=True, - verbose=show, - opset_version=opset_version, - dynamic_axes=dynamic_axes) - - model.forward = origin_forward - - if do_simplify: - import onnxsim - - from mmdet import digit_version - - min_required_version = '0.4.0' - assert digit_version(onnxsim.__version__) >= digit_version( - min_required_version - ), f'Requires to install onnxsim>={min_required_version}' - - model_opt, check_ok = onnxsim.simplify(output_file) - if check_ok: - onnx.save(model_opt, output_file) - print(f'Successfully simplified ONNX model: {output_file}') - else: - warnings.warn('Failed to simplify ONNX model.') - print(f'Successfully exported ONNX model: {output_file}') - - if verify: - # check by onnx - onnx_model = onnx.load(output_file) - onnx.checker.check_model(onnx_model) - - # wrap onnx model - onnx_model = ONNXRuntimeDetector(output_file, model.CLASSES, 0) - if dynamic_export: - # scale up to test dynamic shape - h, w = [int((_ * 1.5) // 32 * 32) for _ in input_shape[2:]] - h, w = min(1344, h), min(1344, w) - input_config['input_shape'] = (1, 3, h, w) - - if test_img is None: - input_config['input_path'] = input_img - - # prepare input once again - one_img, one_meta = preprocess_example_input(input_config) - img_list, img_meta_list = [one_img], [[one_meta]] - - # get pytorch output - with torch.no_grad(): - pytorch_results = model( - img_list, - img_metas=img_meta_list, - return_loss=False, - rescale=True)[0] - - img_list = [_.cuda().contiguous() for _ in 
img_list] - if dynamic_export: - img_list = img_list + [_.flip(-1).contiguous() for _ in img_list] - img_meta_list = img_meta_list * 2 - # get onnx output - onnx_results = onnx_model( - img_list, img_metas=img_meta_list, return_loss=False)[0] - # visualize predictions - score_thr = 0.3 - if show: - out_file_ort, out_file_pt = None, None - else: - out_file_ort, out_file_pt = 'show-ort.png', 'show-pt.png' - - show_img = one_meta['show_img'] - model.show_result( - show_img, - pytorch_results, - score_thr=score_thr, - show=True, - win_name='PyTorch', - out_file=out_file_pt) - onnx_model.show_result( - show_img, - onnx_results, - score_thr=score_thr, - show=True, - win_name='ONNXRuntime', - out_file=out_file_ort) - - # compare a part of result - if model.with_mask: - compare_pairs = list(zip(onnx_results, pytorch_results)) - else: - compare_pairs = [(onnx_results, pytorch_results)] - err_msg = 'The numerical values are different between Pytorch' + \ - ' and ONNX, but it does not necessarily mean the' + \ - ' exported ONNX model is problematic.' - # check the numerical value - for onnx_res, pytorch_res in compare_pairs: - for o_res, p_res in zip(onnx_res, pytorch_res): - np.testing.assert_allclose( - o_res, p_res, rtol=1e-03, atol=1e-05, err_msg=err_msg) - print('The numerical values are the same between Pytorch and ONNX') - - -def parse_normalize_cfg(test_pipeline): - transforms = None - for pipeline in test_pipeline: - if 'transforms' in pipeline: - transforms = pipeline['transforms'] - break - assert transforms is not None, 'Failed to find `transforms`' - norm_config_li = [_ for _ in transforms if _['type'] == 'Normalize'] - assert len(norm_config_li) == 1, '`norm_config` should only have one' - norm_config = norm_config_li[0] - return norm_config - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Convert MMDetection models to ONNX') - parser.add_argument('config', help='test config file path') - parser.add_argument('checkpoint', help='checkpoint file') - parser.add_argument('--input-img', type=str, help='Images for input') - parser.add_argument( - '--show', - action='store_true', - help='Show onnx graph and detection outputs') - parser.add_argument('--output-file', type=str, default='tmp.onnx') - parser.add_argument('--opset-version', type=int, default=11) - parser.add_argument( - '--test-img', type=str, default=None, help='Images for test') - parser.add_argument( - '--dataset', - type=str, - default='coco', - help='Dataset name. This argument is deprecated and will be removed \ - in future releases.') - parser.add_argument( - '--verify', - action='store_true', - help='verify the onnx model output against pytorch output') - parser.add_argument( - '--simplify', - action='store_true', - help='Whether to simplify onnx model.') - parser.add_argument( - '--shape', - type=int, - nargs='+', - default=[800, 1216], - help='input image size') - parser.add_argument( - '--mean', - type=float, - nargs='+', - default=[123.675, 116.28, 103.53], - help='mean value used for preprocess input data.This argument \ - is deprecated and will be removed in future releases.') - parser.add_argument( - '--std', - type=float, - nargs='+', - default=[58.395, 57.12, 57.375], - help='variance value used for preprocess input data. 
' - 'This argument is deprecated and will be removed in future releases.') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='Override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - parser.add_argument( - '--dynamic-export', - action='store_true', - help='Whether to export onnx with dynamic axis.') - parser.add_argument( - '--skip-postprocess', - action='store_true', - help='Whether to export model without post process. Experimental ' - 'option. We do not guarantee the correctness of the exported ' - 'model.') - args = parser.parse_args() - return args - - -if __name__ == '__main__': - args = parse_args() - warnings.warn('Arguments like `--mean`, `--std`, `--dataset` would be \ - parsed directly from config file and are deprecated and \ - will be removed in future releases.') - - assert args.opset_version == 11, 'MMDet only support opset 11 now' - - try: - from mmcv.onnx.symbolic import register_extra_symbolics - except ModuleNotFoundError: - raise NotImplementedError('please update mmcv to version>=v1.0.4') - register_extra_symbolics(args.opset_version) - - cfg = Config.fromfile(args.config) - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - - if args.shape is None: - img_scale = cfg.test_pipeline[1]['img_scale'] - input_shape = (1, 3, img_scale[1], img_scale[0]) - elif len(args.shape) == 1: - input_shape = (1, 3, args.shape[0], args.shape[0]) - elif len(args.shape) == 2: - input_shape = (1, 3) + tuple(args.shape) - else: - raise ValueError('invalid input shape') - - # build the model and load checkpoint - model = build_model_from_cfg(args.config, args.checkpoint, - args.cfg_options) - - if not args.input_img: - args.input_img = osp.join(osp.dirname(__file__), '../../demo/demo.jpg') - - normalize_cfg = parse_normalize_cfg(cfg.test_pipeline) - - # convert model to onnx file - pytorch2onnx( - model, - args.input_img, - input_shape, - normalize_cfg, - opset_version=args.opset_version, - show=args.show, - output_file=args.output_file, - verify=args.verify, - test_img=args.test_img, - do_simplify=args.simplify, - dynamic_export=args.dynamic_export, - skip_postprocess=args.skip_postprocess) - - # Following strings of text style are from colorama package - bright_style, reset_style = '\x1b[1m', '\x1b[0m' - red_text, blue_text = '\x1b[31m', '\x1b[34m' - white_background = '\x1b[107m' - - msg = white_background + bright_style + red_text - msg += 'DeprecationWarning: This tool will be deprecated in future. ' - msg += blue_text + 'Welcome to use the unified model deployment toolbox ' - msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' - msg += reset_style - warnings.warn(msg) diff --git a/cv/detection/co-detr/pytorch/tools/deployment/test.py b/cv/detection/co-detr/pytorch/tools/deployment/test.py deleted file mode 100644 index db8d696a3e8bc2957fdf14933bbeac41c251fa56..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/deployment/test.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
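The removed deployment/test.py below runs an exported ONNX or TensorRT model through MMDet's dataloader and evaluation. A rough standalone sketch of the underlying ONNX Runtime call, independent of the `ONNXRuntimeDetector` wrapper, assuming an exported file such as the `tmp.onnx` from the sketch above exists:
```
# Illustrative only: plain onnxruntime inference, not the ONNXRuntimeDetector
# wrapper; 'tmp.onnx' and the random input are stand-ins for real data.
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('tmp.onnx', providers=['CPUExecutionProvider'])
input_name = sess.get_inputs()[0].name
img = np.random.rand(1, 3, 224, 224).astype(np.float32)
outputs = sess.run(None, {input_name: img})
print([o.shape for o in outputs])
```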
-import argparse -import warnings - -import mmcv -from mmcv import Config, DictAction -from mmcv.parallel import MMDataParallel - -from mmdet.apis import single_gpu_test -from mmdet.datasets import (build_dataloader, build_dataset, - replace_ImageToTensor) -from mmdet.utils import compat_cfg - - -def parse_args(): - parser = argparse.ArgumentParser( - description='MMDet test (and eval) an ONNX model using ONNXRuntime') - parser.add_argument('config', help='test config file path') - parser.add_argument('model', help='Input model file') - parser.add_argument('--out', help='output result file in pickle format') - parser.add_argument( - '--format-only', - action='store_true', - help='Format the output results without perform evaluation. It is' - 'useful when you want to format the result to a specific format and ' - 'submit it to the test server') - parser.add_argument( - '--backend', - required=True, - choices=['onnxruntime', 'tensorrt'], - help='Backend for input model to run. ') - parser.add_argument( - '--eval', - type=str, - nargs='+', - help='evaluation metrics, which depends on the dataset, e.g., "bbox",' - ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') - parser.add_argument('--show', action='store_true', help='show results') - parser.add_argument( - '--show-dir', help='directory where painted images will be saved') - parser.add_argument( - '--show-score-thr', - type=float, - default=0.3, - help='score threshold (default: 0.3)') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - parser.add_argument( - '--eval-options', - nargs='+', - action=DictAction, - help='custom options for evaluation, the key-value pair in xxx=yyy ' - 'format will be kwargs for dataset.evaluate() function') - - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - - assert args.out or args.eval or args.format_only or args.show \ - or args.show_dir, \ - ('Please specify at least one operation (save/eval/format/show the ' - 'results / save the results) with the argument "--out", "--eval"' - ', "--format-only", "--show" or "--show-dir"') - - if args.eval and args.format_only: - raise ValueError('--eval and --format_only cannot be both specified') - - if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): - raise ValueError('The output file must be a pkl file.') - - cfg = Config.fromfile(args.config) - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - cfg = compat_cfg(cfg) - # in case the test dataset is concatenated - samples_per_gpu = 1 - if isinstance(cfg.data.test, dict): - cfg.data.test.test_mode = True - samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1) - if samples_per_gpu > 1: - # Replace 'ImageToTensor' to 'DefaultFormatBundle' - cfg.data.test.pipeline = replace_ImageToTensor( - cfg.data.test.pipeline) - elif isinstance(cfg.data.test, list): - for ds_cfg in cfg.data.test: - ds_cfg.test_mode = True - samples_per_gpu = max( - [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test]) - if samples_per_gpu > 1: - for ds_cfg in cfg.data.test: - ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline) - - # build the dataloader - dataset = build_dataset(cfg.data.test) - data_loader = build_dataloader( - dataset, - samples_per_gpu=samples_per_gpu, - workers_per_gpu=cfg.data.workers_per_gpu, - dist=False, - shuffle=False) - - if args.backend == 'onnxruntime': - from mmdet.core.export.model_wrappers import ONNXRuntimeDetector - model = ONNXRuntimeDetector( - args.model, class_names=dataset.CLASSES, device_id=0) - elif args.backend == 'tensorrt': - from mmdet.core.export.model_wrappers import TensorRTDetector - model = TensorRTDetector( - args.model, class_names=dataset.CLASSES, device_id=0) - - model = MMDataParallel(model, device_ids=[0]) - outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, - args.show_score_thr) - - if args.out: - print(f'\nwriting results to {args.out}') - mmcv.dump(outputs, args.out) - kwargs = {} if args.eval_options is None else args.eval_options - if args.format_only: - dataset.format_results(outputs, **kwargs) - if args.eval: - eval_kwargs = cfg.get('evaluation', {}).copy() - # hard-code way to remove EvalHook args - for key in [ - 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', - 'rule' - ]: - eval_kwargs.pop(key, None) - eval_kwargs.update(dict(metric=args.eval, **kwargs)) - print(dataset.evaluate(outputs, **eval_kwargs)) - - -if __name__ == '__main__': - main() - - # Following strings of text style are from colorama package - bright_style, reset_style = '\x1b[1m', '\x1b[0m' - red_text, blue_text = '\x1b[31m', '\x1b[34m' - white_background = '\x1b[107m' - - msg = white_background + bright_style + red_text - msg += 'DeprecationWarning: This tool will be deprecated in future. 
' - msg += blue_text + 'Welcome to use the unified model deployment toolbox ' - msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' - msg += reset_style - warnings.warn(msg) diff --git a/cv/detection/co-detr/pytorch/tools/deployment/test_torchserver.py b/cv/detection/co-detr/pytorch/tools/deployment/test_torchserver.py deleted file mode 100644 index dd45234bed927632a71f323cd5dc38f05aee0f96..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/deployment/test_torchserver.py +++ /dev/null @@ -1,74 +0,0 @@ -from argparse import ArgumentParser - -import numpy as np -import requests - -from mmdet.apis import inference_detector, init_detector, show_result_pyplot -from mmdet.core import bbox2result - - -def parse_args(): - parser = ArgumentParser() - parser.add_argument('img', help='Image file') - parser.add_argument('config', help='Config file') - parser.add_argument('checkpoint', help='Checkpoint file') - parser.add_argument('model_name', help='The model name in the server') - parser.add_argument( - '--inference-addr', - default='127.0.0.1:8080', - help='Address and port of the inference server') - parser.add_argument( - '--device', default='cuda:0', help='Device used for inference') - parser.add_argument( - '--score-thr', type=float, default=0.5, help='bbox score threshold') - args = parser.parse_args() - return args - - -def parse_result(input, model_class): - bbox = [] - label = [] - score = [] - for anchor in input: - bbox.append(anchor['bbox']) - label.append(model_class.index(anchor['class_name'])) - score.append([anchor['score']]) - bboxes = np.append(bbox, score, axis=1) - labels = np.array(label) - result = bbox2result(bboxes, labels, len(model_class)) - return result - - -def main(args): - # build the model from a config file and a checkpoint file - model = init_detector(args.config, args.checkpoint, device=args.device) - # test a single image - model_result = inference_detector(model, args.img) - for i, anchor_set in enumerate(model_result): - anchor_set = anchor_set[anchor_set[:, 4] >= 0.5] - model_result[i] = anchor_set - # show the results - show_result_pyplot( - model, - args.img, - model_result, - score_thr=args.score_thr, - title='pytorch_result') - url = 'http://' + args.inference_addr + '/predictions/' + args.model_name - with open(args.img, 'rb') as image: - response = requests.post(url, image) - server_result = parse_result(response.json(), model.CLASSES) - show_result_pyplot( - model, - args.img, - server_result, - score_thr=args.score_thr, - title='server_result') - - for i in range(len(model.CLASSES)): - assert np.allclose(model_result[i], server_result[i]) - - -if __name__ == '__main__': - args = parse_args() - main(args) diff --git a/cv/detection/co-detr/pytorch/tools/dist_test.sh b/cv/detection/co-detr/pytorch/tools/dist_test.sh deleted file mode 100644 index 6c6b681fdeb08ea319a74646dd765375a652d5b5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/dist_test.sh +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) 2023, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. -# All Rights Reserved. 
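dist_test.sh below only wraps tools/test.py in `torch.distributed.launch`. A hedged sketch of the per-process setup such a launched script performs; `RANK`, `WORLD_SIZE`, and `MASTER_ADDR` come from the launcher, not from this snippet, which simply skips initialization when they are absent:
```
# Sketch of a worker started by `python -m torch.distributed.launch`; the
# launcher (not this snippet) sets RANK, WORLD_SIZE, MASTER_ADDR and friends.
import os
import torch
import torch.distributed as dist

if 'WORLD_SIZE' in os.environ:
    local_rank = int(os.environ.get('LOCAL_RANK', 0))
    if torch.cuda.is_available():
        torch.cuda.set_device(local_rank)
    dist.init_process_group(backend='nccl' if torch.cuda.is_available() else 'gloo')
    print(f'rank {dist.get_rank()} of {dist.get_world_size()}')
else:
    print('not started by a distributed launcher; skipping init_process_group')
```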
- -CONFIG=$1 -CHECKPOINT=$2 -GPUS=$3 -PORT=${PORT:-29500} - -PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ -python3 -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ - $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} diff --git a/cv/detection/co-detr/pytorch/tools/dist_train.sh b/cv/detection/co-detr/pytorch/tools/dist_train.sh deleted file mode 100644 index 76a5aa7012db772c851d95fa8ccd834c22c401f7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/dist_train.sh +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) 2023, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. -# All Rights Reserved. - -CONFIG=$1 -GPUS=$2 -WORKDIR=$3 - -PORT=${PORT:-29500} - -PYTHONPATH="$(dirname $0)/..":$PYTHONPATH -echo $PYTHONPATH -python3 -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ - $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:4} --work-dir $WORKDIR diff --git a/cv/detection/co-detr/pytorch/tools/misc/browse_dataset.py b/cv/detection/co-detr/pytorch/tools/misc/browse_dataset.py deleted file mode 100644 index d9fb285122024785f204f0215368707440791453..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/misc/browse_dataset.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import os -from collections import Sequence -from pathlib import Path - -import mmcv -import numpy as np -from mmcv import Config, DictAction - -from mmdet.core.utils import mask2ndarray -from mmdet.core.visualization import imshow_det_bboxes -from mmdet.datasets.builder import build_dataset -from mmdet.utils import replace_cfg_vals, update_data_root - - -def parse_args(): - parser = argparse.ArgumentParser(description='Browse a dataset') - parser.add_argument('config', help='train config file path') - parser.add_argument( - '--skip-type', - type=str, - nargs='+', - default=['DefaultFormatBundle', 'Normalize', 'Collect'], - help='skip some useless pipeline') - parser.add_argument( - '--output-dir', - default=None, - type=str, - help='If there is no display interface, you can save it') - parser.add_argument('--not-show', default=False, action='store_true') - parser.add_argument( - '--show-interval', - type=float, - default=2, - help='the interval of show (s)') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - args = parser.parse_args() - return args - - -def retrieve_data_cfg(config_path, skip_type, cfg_options): - - def skip_pipeline_steps(config): - config['pipeline'] = [ - x for x in config.pipeline if x['type'] not in skip_type - ] - - cfg = Config.fromfile(config_path) - - # replace the ${key} with the value of cfg.key - cfg = replace_cfg_vals(cfg) - - # update data root according to MMDET_DATASETS - update_data_root(cfg) - - if cfg_options is not None: - cfg.merge_from_dict(cfg_options) - train_data_cfg = cfg.data.train - while 'dataset' in train_data_cfg and train_data_cfg[ - 'type'] != 'MultiImageMixDataset': - train_data_cfg = train_data_cfg['dataset'] - - if isinstance(train_data_cfg, Sequence): - [skip_pipeline_steps(c) for c in train_data_cfg] - else: - skip_pipeline_steps(train_data_cfg) - - return cfg - - -def main(): - args = parse_args() - cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options) - - if 'gt_semantic_seg' in cfg.train_pipeline[-1]['keys']: - cfg.data.train.pipeline = [ - p for p in cfg.data.train.pipeline if p['type'] != 'SegRescale' - ] - dataset = build_dataset(cfg.data.train) - - progress_bar = mmcv.ProgressBar(len(dataset)) - - for item in dataset: - filename = os.path.join(args.output_dir, - Path(item['filename']).name - ) if args.output_dir is not None else None - - gt_bboxes = item['gt_bboxes'] - gt_labels = item['gt_labels'] - gt_masks = item.get('gt_masks', None) - if gt_masks is not None: - gt_masks = mask2ndarray(gt_masks) - - gt_seg = item.get('gt_semantic_seg', None) - if gt_seg is not None: - pad_value = 255 # the padding value of gt_seg - sem_labels = np.unique(gt_seg) - all_labels = np.concatenate((gt_labels, sem_labels), axis=0) - all_labels, counts = np.unique(all_labels, return_counts=True) - stuff_labels = all_labels[np.logical_and(counts < 2, - all_labels != pad_value)] - stuff_masks = gt_seg[None] == stuff_labels[:, None, None] - gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0) - gt_masks = np.concatenate((gt_masks, stuff_masks.astype(np.uint8)), - axis=0) - # If you need to show the bounding boxes, - # please comment the following line - gt_bboxes = None - - imshow_det_bboxes( - item['img'], - gt_bboxes, - gt_labels, - gt_masks, - class_names=dataset.CLASSES, - show=not args.not_show, - wait_time=args.show_interval, - out_file=filename, - bbox_color=dataset.PALETTE, - text_color=(200, 200, 200), - mask_color=dataset.PALETTE) - - progress_bar.update() - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/misc/download_dataset.py b/cv/detection/co-detr/pytorch/tools/misc/download_dataset.py deleted file mode 100644 index 09c777db36903a63c0c0572db60deac26d1cb24c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/misc/download_dataset.py +++ /dev/null @@ -1,102 +0,0 @@ -import argparse -from itertools import repeat -from multiprocessing.pool import ThreadPool -from pathlib import Path -from tarfile import TarFile -from zipfile import ZipFile - -import torch - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Download datasets for training') - parser.add_argument( - '--dataset-name', type=str, help='dataset name', default='coco2017') - parser.add_argument( - '--save-dir', - type=str, - help='the dir to save dataset', - default='data/coco') - parser.add_argument( - '--unzip', - action='store_true', - help='whether unzip 
dataset or not, zipped files will be saved') - parser.add_argument( - '--delete', - action='store_true', - help='delete the download zipped files') - parser.add_argument( - '--threads', type=int, help='number of threading', default=4) - args = parser.parse_args() - return args - - -def download(url, dir, unzip=True, delete=False, threads=1): - - def download_one(url, dir): - f = dir / Path(url).name - if Path(url).is_file(): - Path(url).rename(f) - elif not f.exists(): - print('Downloading {} to {}'.format(url, f)) - torch.hub.download_url_to_file(url, f, progress=True) - if unzip and f.suffix in ('.zip', '.tar'): - print('Unzipping {}'.format(f.name)) - if f.suffix == '.zip': - ZipFile(f).extractall(path=dir) - elif f.suffix == '.tar': - TarFile(f).extractall(path=dir) - if delete: - f.unlink() - print('Delete {}'.format(f)) - - dir = Path(dir) - if threads > 1: - pool = ThreadPool(threads) - pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) - pool.close() - pool.join() - else: - for u in [url] if isinstance(url, (str, Path)) else url: - download_one(u, dir) - - -def main(): - args = parse_args() - path = Path(args.save_dir) - if not path.exists(): - path.mkdir(parents=True, exist_ok=True) - data2url = dict( - # TODO: Support for downloading Panoptic Segmentation of COCO - coco2017=[ - 'http://images.cocodataset.org/zips/train2017.zip', - 'http://images.cocodataset.org/zips/val2017.zip', - 'http://images.cocodataset.org/zips/test2017.zip', - 'http://images.cocodataset.org/annotations/' + - 'annotations_trainval2017.zip' - ], - lvis=[ - 'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa - 'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa - ], - voc2007=[ - 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', # noqa - 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', # noqa - 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar', # noqa - ], - ) - url = data2url.get(args.dataset_name, None) - if url is None: - print('Only support COCO, VOC, and LVIS now!') - return - download( - url, - dir=path, - unzip=args.unzip, - delete=args.delete, - threads=args.threads) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/misc/gen_coco_panoptic_test_info.py b/cv/detection/co-detr/pytorch/tools/misc/gen_coco_panoptic_test_info.py deleted file mode 100644 index 5ad315dcbf63a1298a76283d92fef9149729abde..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/misc/gen_coco_panoptic_test_info.py +++ /dev/null @@ -1,34 +0,0 @@ -import argparse -import os.path as osp - -import mmcv - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Generate COCO test image information ' - 'for COCO panoptic segmentation.') - parser.add_argument('data_root', help='Path to COCO annotation directory.') - args = parser.parse_args() - - return args - - -def main(): - args = parse_args() - data_root = args.data_root - val_info = mmcv.load(osp.join(data_root, 'panoptic_val2017.json')) - test_old_info = mmcv.load( - osp.join(data_root, 'image_info_test-dev2017.json')) - - # replace categories from image_info_test-dev2017.json - # with categories from panoptic_val2017.json which - # has attribute `isthing`. 
- test_info = test_old_info - test_info.update({'categories': val_info['categories']}) - mmcv.dump(test_info, - osp.join(data_root, 'panoptic_image_info_test-dev2017.json')) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/misc/get_image_metas.py b/cv/detection/co-detr/pytorch/tools/misc/get_image_metas.py deleted file mode 100644 index a9957d9d856c52f80f82e91155c80f632609f6ed..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/misc/get_image_metas.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -"""Get test image metas on a specific dataset. - -Here is an example to run this script. - -Example: - python tools/misc/get_image_metas.py ${CONFIG} \ - --out ${OUTPUT FILE NAME} -""" -import argparse -import csv -import os.path as osp -from multiprocessing import Pool - -import mmcv -from mmcv import Config - - -def parse_args(): - parser = argparse.ArgumentParser(description='Collect image metas') - parser.add_argument('config', help='Config file path') - parser.add_argument( - '--out', - default='validation-image-metas.pkl', - help='The output image metas file name. The save dir is in the ' - 'same directory as `dataset.ann_file` path') - parser.add_argument( - '--nproc', - default=4, - type=int, - help='Processes used for get image metas') - args = parser.parse_args() - return args - - -def get_metas_from_csv_style_ann_file(ann_file): - data_infos = [] - cp_filename = None - with open(ann_file, 'r') as f: - reader = csv.reader(f) - for i, line in enumerate(reader): - if i == 0: - continue - img_id = line[0] - filename = f'{img_id}.jpg' - if filename != cp_filename: - data_infos.append(dict(filename=filename)) - cp_filename = filename - return data_infos - - -def get_metas_from_txt_style_ann_file(ann_file): - with open(ann_file) as f: - lines = f.readlines() - i = 0 - data_infos = [] - while i < len(lines): - filename = lines[i].rstrip() - data_infos.append(dict(filename=filename)) - skip_lines = int(lines[i + 2]) + 3 - i += skip_lines - return data_infos - - -def get_image_metas(data_info, img_prefix): - file_client = mmcv.FileClient(backend='disk') - filename = data_info.get('filename', None) - if filename is not None: - if img_prefix is not None: - filename = osp.join(img_prefix, filename) - img_bytes = file_client.get(filename) - img = mmcv.imfrombytes(img_bytes, flag='color') - meta = dict(filename=filename, ori_shape=img.shape) - else: - raise NotImplementedError('Missing `filename` in data_info') - return meta - - -def main(): - args = parse_args() - assert args.out.endswith('pkl'), 'The output file name must be pkl suffix' - - # load config files - cfg = Config.fromfile(args.config) - ann_file = cfg.data.test.ann_file - img_prefix = cfg.data.test.img_prefix - - print(f'{"-" * 5} Start Processing {"-" * 5}') - if ann_file.endswith('csv'): - data_infos = get_metas_from_csv_style_ann_file(ann_file) - elif ann_file.endswith('txt'): - data_infos = get_metas_from_txt_style_ann_file(ann_file) - else: - shuffix = ann_file.split('.')[-1] - raise NotImplementedError('File name must be csv or txt suffix but ' - f'get {shuffix}') - - print(f'Successfully load annotation file from {ann_file}') - print(f'Processing {len(data_infos)} images...') - pool = Pool(args.nproc) - # get image metas with multiple processes - image_metas = pool.starmap( - get_image_metas, - zip(data_infos, [img_prefix for _ in range(len(data_infos))]), - ) - pool.close() - - # save image metas - root_path = 
cfg.data.test.ann_file.rsplit('/', 1)[0] - save_path = osp.join(root_path, args.out) - mmcv.dump(image_metas, save_path) - print(f'Image meta file save to: {save_path}') - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/misc/print_config.py b/cv/detection/co-detr/pytorch/tools/misc/print_config.py deleted file mode 100644 index f10f5384a6a04867a349ae782da7dc2852230fe7..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/misc/print_config.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import warnings - -from mmcv import Config, DictAction - -from mmdet.utils import replace_cfg_vals, update_data_root - - -def parse_args(): - parser = argparse.ArgumentParser(description='Print the whole config') - parser.add_argument('config', help='config file path') - parser.add_argument( - '--options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file (deprecate), ' - 'change to --cfg-options instead.') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - args = parser.parse_args() - - if args.options and args.cfg_options: - raise ValueError( - '--options and --cfg-options cannot be both ' - 'specified, --options is deprecated in favor of --cfg-options') - if args.options: - warnings.warn('--options is deprecated in favor of --cfg-options') - args.cfg_options = args.options - - return args - - -def main(): - args = parse_args() - - cfg = Config.fromfile(args.config) - - # replace the ${key} with the value of cfg.key - cfg = replace_cfg_vals(cfg) - - # update data root according to MMDET_DATASETS - update_data_root(cfg) - - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - print(f'Config:\n{cfg.pretty_text}') - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/misc/split_coco.py b/cv/detection/co-detr/pytorch/tools/misc/split_coco.py deleted file mode 100644 index 78cc655034d88cd67d5a69581369b8df94bfb912..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/misc/split_coco.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import os.path as osp - -import mmcv -import numpy as np - -prog_description = '''K-Fold coco split. 
- -To split coco data for semi-supervised object detection: - python tools/misc/split_coco.py -''' - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - '--data-root', - type=str, - help='The data root of coco dataset.', - default='./data/coco/') - parser.add_argument( - '--out-dir', - type=str, - help='The output directory of coco semi-supervised annotations.', - default='./data/coco_semi_annos/') - parser.add_argument( - '--labeled-percent', - type=float, - nargs='+', - help='The percentage of labeled data in the training set.', - default=[1, 2, 5, 10]) - parser.add_argument( - '--fold', - type=int, - help='K-fold cross validation for semi-supervised object detection.', - default=5) - args = parser.parse_args() - return args - - -def split_coco(data_root, out_dir, percent, fold): - """Split COCO data for Semi-supervised object detection. - - Args: - data_root (str): The data root of coco dataset. - out_dir (str): The output directory of coco semi-supervised - annotations. - percent (float): The percentage of labeled data in the training set. - fold (int): The fold of dataset and set as random seed for data split. - """ - - def save_anns(name, images, annotations): - sub_anns = dict() - sub_anns['images'] = images - sub_anns['annotations'] = annotations - sub_anns['licenses'] = anns['licenses'] - sub_anns['categories'] = anns['categories'] - sub_anns['info'] = anns['info'] - - mmcv.mkdir_or_exist(out_dir) - mmcv.dump(sub_anns, f'{out_dir}/{name}.json') - - # set random seed with the fold - np.random.seed(fold) - ann_file = osp.join(data_root, 'annotations/instances_train2017.json') - anns = mmcv.load(ann_file) - - image_list = anns['images'] - labeled_total = int(percent / 100. * len(image_list)) - labeled_inds = set( - np.random.choice(range(len(image_list)), size=labeled_total)) - labeled_ids, labeled_images, unlabeled_images = [], [], [] - - for i in range(len(image_list)): - if i in labeled_inds: - labeled_images.append(image_list[i]) - labeled_ids.append(image_list[i]['id']) - else: - unlabeled_images.append(image_list[i]) - - # get all annotations of labeled images - labeled_ids = set(labeled_ids) - labeled_annotations, unlabeled_annotations = [], [] - - for ann in anns['annotations']: - if ann['image_id'] in labeled_ids: - labeled_annotations.append(ann) - else: - unlabeled_annotations.append(ann) - - # save labeled and unlabeled - labeled_name = f'instances_train2017.{fold}@{percent}' - unlabeled_name = f'instances_train2017.{fold}@{percent}-unlabeled' - - save_anns(labeled_name, labeled_images, labeled_annotations) - save_anns(unlabeled_name, unlabeled_images, unlabeled_annotations) - - -def multi_wrapper(args): - return split_coco(*args) - - -if __name__ == '__main__': - args = parse_args() - arguments_list = [(args.data_root, args.out_dir, p, f) - for f in range(1, args.fold + 1) - for p in args.labeled_percent] - mmcv.track_parallel_progress(multi_wrapper, arguments_list, args.fold) diff --git a/cv/detection/co-detr/pytorch/tools/model_converters/detectron2pytorch.py b/cv/detection/co-detr/pytorch/tools/model_converters/detectron2pytorch.py deleted file mode 100644 index b7264d53d24b2602ec420546d37dabbd31cb46b6..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/model_converters/detectron2pytorch.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
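The removed detectron2pytorch.py below maps Detectron (Caffe2) ResNet blobs onto PyTorch `state_dict` keys. A toy illustration of the same blob-to-state_dict pattern, with invented keys and shapes rather than the real Detectron naming scheme the script handles:
```
# Toy blob -> state_dict conversion; keys and shapes are made up and do not
# follow the real Detectron naming handled by the converter below.
from collections import OrderedDict

import numpy as np
import torch

blobs = {
    'conv1_w': np.random.randn(8, 3, 3, 3).astype(np.float32),
    'conv1_b': np.random.randn(8).astype(np.float32),
}
state_dict = OrderedDict()
state_dict['conv1.weight'] = torch.from_numpy(blobs['conv1_w'])
state_dict['conv1.bias'] = torch.from_numpy(blobs['conv1_b'])
torch.save({'state_dict': state_dict}, 'converted.pth')
```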
-import argparse -from collections import OrderedDict - -import mmcv -import torch - -arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)} - - -def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names): - # detectron replace bn with affine channel layer - state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name + - '_b']) - state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name + - '_s']) - bn_size = state_dict[torch_name + '.weight'].size() - state_dict[torch_name + '.running_mean'] = torch.zeros(bn_size) - state_dict[torch_name + '.running_var'] = torch.ones(bn_size) - converted_names.add(caffe_name + '_b') - converted_names.add(caffe_name + '_s') - - -def convert_conv_fc(blobs, state_dict, caffe_name, torch_name, - converted_names): - state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name + - '_w']) - converted_names.add(caffe_name + '_w') - if caffe_name + '_b' in blobs: - state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name + - '_b']) - converted_names.add(caffe_name + '_b') - - -def convert(src, dst, depth): - """Convert keys in detectron pretrained ResNet models to pytorch style.""" - # load arch_settings - if depth not in arch_settings: - raise ValueError('Only support ResNet-50 and ResNet-101 currently') - block_nums = arch_settings[depth] - # load caffe model - caffe_model = mmcv.load(src, encoding='latin1') - blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model - # convert to pytorch style - state_dict = OrderedDict() - converted_names = set() - convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names) - convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names) - for i in range(1, len(block_nums) + 1): - for j in range(block_nums[i - 1]): - if j == 0: - convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1', - f'layer{i}.{j}.downsample.0', converted_names) - convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn', - f'layer{i}.{j}.downsample.1', converted_names) - for k, letter in enumerate(['a', 'b', 'c']): - convert_conv_fc(blobs, state_dict, - f'res{i + 1}_{j}_branch2{letter}', - f'layer{i}.{j}.conv{k+1}', converted_names) - convert_bn(blobs, state_dict, - f'res{i + 1}_{j}_branch2{letter}_bn', - f'layer{i}.{j}.bn{k + 1}', converted_names) - # check if all layers are converted - for key in blobs: - if key not in converted_names: - print(f'Not Convert: {key}') - # save checkpoint - checkpoint = dict() - checkpoint['state_dict'] = state_dict - torch.save(checkpoint, dst) - - -def main(): - parser = argparse.ArgumentParser(description='Convert model keys') - parser.add_argument('src', help='src detectron model path') - parser.add_argument('dst', help='save path') - parser.add_argument('depth', type=int, help='ResNet model depth') - args = parser.parse_args() - convert(args.src, args.dst, args.depth) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/model_converters/publish_model.py b/cv/detection/co-detr/pytorch/tools/model_converters/publish_model.py deleted file mode 100644 index 219fcdf3c568f2a330b46d93b08b0aa2cb7bd5a3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/model_converters/publish_model.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
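publish_model.py below strips the optimizer state and renames the checkpoint with a SHA-256 prefix via the external `sha256sum` binary. A hedged, self-contained variant of the same idea using `hashlib`; the file names are placeholders and a dummy checkpoint is created first so the snippet runs on its own:
```
# Hedged variant: same publish steps, but hashing with hashlib instead of the
# sha256sum binary; 'in.pth'/'out.pth' are placeholder names.
import hashlib

import torch

torch.save({'state_dict': {'w': torch.zeros(1)}, 'optimizer': {'lr': 0.1}}, 'in.pth')

ckpt = torch.load('in.pth', map_location='cpu')
ckpt.pop('optimizer', None)          # drop optimizer state for a smaller file
torch.save(ckpt, 'out.pth')

with open('out.pth', 'rb') as f:
    sha = hashlib.sha256(f.read()).hexdigest()
print('suggested final name:', f'out-{sha[:8]}.pth')
```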
-import argparse -import subprocess - -import torch - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Process a checkpoint to be published') - parser.add_argument('in_file', help='input checkpoint filename') - parser.add_argument('out_file', help='output checkpoint filename') - args = parser.parse_args() - return args - - -def process_checkpoint(in_file, out_file): - checkpoint = torch.load(in_file, map_location='cpu') - # remove optimizer for smaller file size - if 'optimizer' in checkpoint: - del checkpoint['optimizer'] - # if it is necessary to remove some sensitive data in checkpoint['meta'], - # add the code here. - if torch.__version__ >= '1.6': - torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False) - else: - torch.save(checkpoint, out_file) - sha = subprocess.check_output(['sha256sum', out_file]).decode() - if out_file.endswith('.pth'): - out_file_name = out_file[:-4] - else: - out_file_name = out_file - final_file = out_file_name + f'-{sha[:8]}.pth' - subprocess.Popen(['mv', out_file, final_file]) - - -def main(): - args = parse_args() - process_checkpoint(args.in_file, args.out_file) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/model_converters/regnet2mmdet.py b/cv/detection/co-detr/pytorch/tools/model_converters/regnet2mmdet.py deleted file mode 100644 index fbf8c8f33a90839fef055aea0a775e76ff84afd3..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/model_converters/regnet2mmdet.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -from collections import OrderedDict - -import torch - - -def convert_stem(model_key, model_weight, state_dict, converted_names): - new_key = model_key.replace('stem.conv', 'conv1') - new_key = new_key.replace('stem.bn', 'bn1') - state_dict[new_key] = model_weight - converted_names.add(model_key) - print(f'Convert {model_key} to {new_key}') - - -def convert_head(model_key, model_weight, state_dict, converted_names): - new_key = model_key.replace('head.fc', 'fc') - state_dict[new_key] = model_weight - converted_names.add(model_key) - print(f'Convert {model_key} to {new_key}') - - -def convert_reslayer(model_key, model_weight, state_dict, converted_names): - split_keys = model_key.split('.') - layer, block, module = split_keys[:3] - block_id = int(block[1:]) - layer_name = f'layer{int(layer[1:])}' - block_name = f'{block_id - 1}' - - if block_id == 1 and module == 'bn': - new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}' - elif block_id == 1 and module == 'proj': - new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}' - elif module == 'f': - if split_keys[3] == 'a_bn': - module_name = 'bn1' - elif split_keys[3] == 'b_bn': - module_name = 'bn2' - elif split_keys[3] == 'c_bn': - module_name = 'bn3' - elif split_keys[3] == 'a': - module_name = 'conv1' - elif split_keys[3] == 'b': - module_name = 'conv2' - elif split_keys[3] == 'c': - module_name = 'conv3' - new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}' - else: - raise ValueError(f'Unsupported conversion of key {model_key}') - print(f'Convert {model_key} to {new_key}') - state_dict[new_key] = model_weight - converted_names.add(model_key) - - -def convert(src, dst): - """Convert keys in pycls pretrained RegNet models to mmdet style.""" - # load caffe model - regnet_model = torch.load(src) - blobs = regnet_model['model_state'] - # convert to pytorch style - state_dict = OrderedDict() - 
converted_names = set() - for key, weight in blobs.items(): - if 'stem' in key: - convert_stem(key, weight, state_dict, converted_names) - elif 'head' in key: - convert_head(key, weight, state_dict, converted_names) - elif key.startswith('s'): - convert_reslayer(key, weight, state_dict, converted_names) - - # check if all layers are converted - for key in blobs: - if key not in converted_names: - print(f'not converted: {key}') - # save checkpoint - checkpoint = dict() - checkpoint['state_dict'] = state_dict - torch.save(checkpoint, dst) - - -def main(): - parser = argparse.ArgumentParser(description='Convert model keys') - parser.add_argument('src', help='src detectron model path') - parser.add_argument('dst', help='save path') - args = parser.parse_args() - convert(args.src, args.dst) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/model_converters/selfsup2mmdet.py b/cv/detection/co-detr/pytorch/tools/model_converters/selfsup2mmdet.py deleted file mode 100644 index bc8cce1bd1cde22d09bd200b813bf67b4d066892..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/model_converters/selfsup2mmdet.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -from collections import OrderedDict - -import torch - - -def moco_convert(src, dst): - """Convert keys in pycls pretrained moco models to mmdet style.""" - # load caffe model - moco_model = torch.load(src) - blobs = moco_model['state_dict'] - # convert to pytorch style - state_dict = OrderedDict() - for k, v in blobs.items(): - if not k.startswith('module.encoder_q.'): - continue - old_k = k - k = k.replace('module.encoder_q.', '') - state_dict[k] = v - print(old_k, '->', k) - # save checkpoint - checkpoint = dict() - checkpoint['state_dict'] = state_dict - torch.save(checkpoint, dst) - - -def main(): - parser = argparse.ArgumentParser(description='Convert model keys') - parser.add_argument('src', help='src detectron model path') - parser.add_argument('dst', help='save path') - parser.add_argument( - '--selfsup', type=str, choices=['moco', 'swav'], help='save path') - args = parser.parse_args() - if args.selfsup == 'moco': - moco_convert(args.src, args.dst) - elif args.selfsup == 'swav': - print('SWAV does not need to convert the keys') - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/model_converters/upgrade_model_version.py b/cv/detection/co-detr/pytorch/tools/model_converters/upgrade_model_version.py deleted file mode 100644 index 36ee607ce78726cf180c4f638ec13b3acd75d3a5..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/model_converters/upgrade_model_version.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
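The removed upgrade_model_version.py below rewrites old checkpoints, mainly by rotating the first classification channel to the end (`reorder_cls_channel`) and truncating class-aware regression channels. A toy demonstration of the rotation on a made-up bias vector:
```
# Toy demo of the rotation in reorder_cls_channel below: the first
# classification channel moves to the end; the size 4 is arbitrary.
import torch

fc_cls_bias = torch.arange(4, dtype=torch.float32)           # [0, 1, 2, 3]
reordered = torch.cat((fc_cls_bias[1:], fc_cls_bias[:1]), dim=0)
print(fc_cls_bias.tolist(), '->', reordered.tolist())         # -> [1, 2, 3, 0]
```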
-import argparse -import re -import tempfile -from collections import OrderedDict - -import torch -from mmcv import Config - - -def is_head(key): - valid_head_list = [ - 'bbox_head', 'mask_head', 'semantic_head', 'grid_head', 'mask_iou_head' - ] - - return any(key.startswith(h) for h in valid_head_list) - - -def parse_config(config_strings): - temp_file = tempfile.NamedTemporaryFile() - config_path = f'{temp_file.name}.py' - with open(config_path, 'w') as f: - f.write(config_strings) - - config = Config.fromfile(config_path) - is_two_stage = True - is_ssd = False - is_retina = False - reg_cls_agnostic = False - if 'rpn_head' not in config.model: - is_two_stage = False - # check whether it is SSD - if config.model.bbox_head.type == 'SSDHead': - is_ssd = True - elif config.model.bbox_head.type == 'RetinaHead': - is_retina = True - elif isinstance(config.model['bbox_head'], list): - reg_cls_agnostic = True - elif 'reg_class_agnostic' in config.model.bbox_head: - reg_cls_agnostic = config.model.bbox_head \ - .reg_class_agnostic - temp_file.close() - return is_two_stage, is_ssd, is_retina, reg_cls_agnostic - - -def reorder_cls_channel(val, num_classes=81): - # bias - if val.dim() == 1: - new_val = torch.cat((val[1:], val[:1]), dim=0) - # weight - else: - out_channels, in_channels = val.shape[:2] - # conv_cls for softmax output - if out_channels != num_classes and out_channels % num_classes == 0: - new_val = val.reshape(-1, num_classes, in_channels, *val.shape[2:]) - new_val = torch.cat((new_val[:, 1:], new_val[:, :1]), dim=1) - new_val = new_val.reshape(val.size()) - # fc_cls - elif out_channels == num_classes: - new_val = torch.cat((val[1:], val[:1]), dim=0) - # agnostic | retina_cls | rpn_cls - else: - new_val = val - - return new_val - - -def truncate_cls_channel(val, num_classes=81): - - # bias - if val.dim() == 1: - if val.size(0) % num_classes == 0: - new_val = val[:num_classes - 1] - else: - new_val = val - # weight - else: - out_channels, in_channels = val.shape[:2] - # conv_logits - if out_channels % num_classes == 0: - new_val = val.reshape(num_classes, in_channels, *val.shape[2:])[1:] - new_val = new_val.reshape(-1, *val.shape[1:]) - # agnostic - else: - new_val = val - - return new_val - - -def truncate_reg_channel(val, num_classes=81): - # bias - if val.dim() == 1: - # fc_reg | rpn_reg - if val.size(0) % num_classes == 0: - new_val = val.reshape(num_classes, -1)[:num_classes - 1] - new_val = new_val.reshape(-1) - # agnostic - else: - new_val = val - # weight - else: - out_channels, in_channels = val.shape[:2] - # fc_reg | rpn_reg - if out_channels % num_classes == 0: - new_val = val.reshape(num_classes, -1, in_channels, - *val.shape[2:])[1:] - new_val = new_val.reshape(-1, *val.shape[1:]) - # agnostic - else: - new_val = val - - return new_val - - -def convert(in_file, out_file, num_classes): - """Convert keys in checkpoints. - - There can be some breaking changes during the development of mmdetection, - and this tool is used for upgrading checkpoints trained with old versions - to the latest one. 
- """ - checkpoint = torch.load(in_file) - in_state_dict = checkpoint.pop('state_dict') - out_state_dict = OrderedDict() - meta_info = checkpoint['meta'] - is_two_stage, is_ssd, is_retina, reg_cls_agnostic = parse_config( - '#' + meta_info['config']) - if meta_info['mmdet_version'] <= '0.5.3' and is_retina: - upgrade_retina = True - else: - upgrade_retina = False - - # MMDetection v2.5.0 unifies the class order in RPN - # if the model is trained in version=2.5.0 - if meta_info['mmdet_version'] < '2.5.0': - upgrade_rpn = True - else: - upgrade_rpn = False - - for key, val in in_state_dict.items(): - new_key = key - new_val = val - if is_two_stage and is_head(key): - new_key = 'roi_head.{}'.format(key) - - # classification - if upgrade_rpn: - m = re.search( - r'(conv_cls|retina_cls|rpn_cls|fc_cls|fcos_cls|' - r'fovea_cls).(weight|bias)', new_key) - else: - m = re.search( - r'(conv_cls|retina_cls|fc_cls|fcos_cls|' - r'fovea_cls).(weight|bias)', new_key) - if m is not None: - print(f'reorder cls channels of {new_key}') - new_val = reorder_cls_channel(val, num_classes) - - # regression - if upgrade_rpn: - m = re.search(r'(fc_reg).(weight|bias)', new_key) - else: - m = re.search(r'(fc_reg|rpn_reg).(weight|bias)', new_key) - if m is not None and not reg_cls_agnostic: - print(f'truncate regression channels of {new_key}') - new_val = truncate_reg_channel(val, num_classes) - - # mask head - m = re.search(r'(conv_logits).(weight|bias)', new_key) - if m is not None: - print(f'truncate mask prediction channels of {new_key}') - new_val = truncate_cls_channel(val, num_classes) - - m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key) - # Legacy issues in RetinaNet since V1.x - # Use ConvModule instead of nn.Conv2d in RetinaNet - # cls_convs.0.weight -> cls_convs.0.conv.weight - if m is not None and upgrade_retina: - param = m.groups()[1] - new_key = key.replace(param, f'conv.{param}') - out_state_dict[new_key] = val - print(f'rename the name of {key} to {new_key}') - continue - - m = re.search(r'(cls_convs).\d.(weight|bias)', key) - if m is not None and is_ssd: - print(f'reorder cls channels of {new_key}') - new_val = reorder_cls_channel(val, num_classes) - - out_state_dict[new_key] = new_val - checkpoint['state_dict'] = out_state_dict - torch.save(checkpoint, out_file) - - -def main(): - parser = argparse.ArgumentParser(description='Upgrade model version') - parser.add_argument('in_file', help='input checkpoint file') - parser.add_argument('out_file', help='output checkpoint file') - parser.add_argument( - '--num-classes', - type=int, - default=81, - help='number of classes of the original model') - args = parser.parse_args() - convert(args.in_file, args.out_file, args.num_classes) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/model_converters/upgrade_ssd_version.py b/cv/detection/co-detr/pytorch/tools/model_converters/upgrade_ssd_version.py deleted file mode 100644 index befff455183e4a867bce638bcb98ea6ccd0bdb5c..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/model_converters/upgrade_ssd_version.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
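upgrade_ssd_version.py below is essentially a key-renaming pass over an old SSD `state_dict`. A generic sketch of that pattern; the mapping here is invented purely for illustration and is not the real SSD layout:
```
# Generic key-renaming pass of the kind the script below performs; the mapping
# is a made-up example, not the actual SSD key scheme.
from collections import OrderedDict

import torch

old = OrderedDict([('extra.0.weight', torch.zeros(2)), ('l2_norm.gamma', torch.ones(2))])
rename = {
    'extra.0.weight': 'neck.extra_layers.0.0.conv.weight',
    'l2_norm.gamma': 'neck.l2_norm.weight',
}
new = OrderedDict((rename.get(k, k), v) for k, v in old.items())
torch.save({'state_dict': new}, 'upgraded.pth')
print(list(new))
```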
-import argparse -import tempfile -from collections import OrderedDict - -import torch -from mmcv import Config - - -def parse_config(config_strings): - temp_file = tempfile.NamedTemporaryFile() - config_path = f'{temp_file.name}.py' - with open(config_path, 'w') as f: - f.write(config_strings) - - config = Config.fromfile(config_path) - # check whether it is SSD - if config.model.bbox_head.type != 'SSDHead': - raise AssertionError('This is not a SSD model.') - - -def convert(in_file, out_file): - checkpoint = torch.load(in_file) - in_state_dict = checkpoint.pop('state_dict') - out_state_dict = OrderedDict() - meta_info = checkpoint['meta'] - parse_config('#' + meta_info['config']) - for key, value in in_state_dict.items(): - if 'extra' in key: - layer_idx = int(key.split('.')[2]) - new_key = 'neck.extra_layers.{}.{}.conv.'.format( - layer_idx // 2, layer_idx % 2) + key.split('.')[-1] - elif 'l2_norm' in key: - new_key = 'neck.l2_norm.weight' - elif 'bbox_head' in key: - new_key = key[:21] + '.0' + key[21:] - else: - new_key = key - out_state_dict[new_key] = value - checkpoint['state_dict'] = out_state_dict - - if torch.__version__ >= '1.6': - torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False) - else: - torch.save(checkpoint, out_file) - - -def main(): - parser = argparse.ArgumentParser(description='Upgrade SSD version') - parser.add_argument('in_file', help='input checkpoint file') - parser.add_argument('out_file', help='output checkpoint file') - - args = parser.parse_args() - convert(args.in_file, args.out_file) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/slurm_test.sh b/cv/detection/co-detr/pytorch/tools/slurm_test.sh deleted file mode 100644 index 6dd67e57442b741fc30f26102eb5afe16139edb1..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/slurm_test.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash - -set -x - -PARTITION=$1 -JOB_NAME=$2 -CONFIG=$3 -CHECKPOINT=$4 -GPUS=${GPUS:-8} -GPUS_PER_NODE=${GPUS_PER_NODE:-8} -CPUS_PER_TASK=${CPUS_PER_TASK:-5} -PY_ARGS=${@:5} -SRUN_ARGS=${SRUN_ARGS:-""} - -PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ -srun -p ${PARTITION} \ - --job-name=${JOB_NAME} \ - --gres=gpu:${GPUS_PER_NODE} \ - --ntasks=${GPUS} \ - --ntasks-per-node=${GPUS_PER_NODE} \ - --cpus-per-task=${CPUS_PER_TASK} \ - --kill-on-bad-exit=1 \ - ${SRUN_ARGS} \ - python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} diff --git a/cv/detection/co-detr/pytorch/tools/slurm_train.sh b/cv/detection/co-detr/pytorch/tools/slurm_train.sh deleted file mode 100644 index b3feb3d9c7a6c33d82739cdf5ee10365673aaded..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/slurm_train.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash - -set -x - -PARTITION=$1 -JOB_NAME=$2 -CONFIG=$3 -WORK_DIR=$4 -GPUS=${GPUS:-8} -GPUS_PER_NODE=${GPUS_PER_NODE:-8} -CPUS_PER_TASK=${CPUS_PER_TASK:-5} -SRUN_ARGS=${SRUN_ARGS:-""} -PY_ARGS=${@:5} - -PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ -srun -p ${PARTITION} \ - --job-name=${JOB_NAME} \ - --gres=gpu:${GPUS_PER_NODE} \ - --ntasks=${GPUS} \ - --ntasks-per-node=${GPUS_PER_NODE} \ - --cpus-per-task=${CPUS_PER_TASK} \ - --kill-on-bad-exit=1 \ - ${SRUN_ARGS} \ - python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} diff --git a/cv/detection/co-detr/pytorch/tools/test.py b/cv/detection/co-detr/pytorch/tools/test.py deleted file mode 100644 index 
7bba9ae40d93de33c2b36344dcd28bcbba4a9aef..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/test.py +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import os -import os.path as osp -import time -import warnings - -import mmcv -import torch -from mmcv import Config, DictAction -from mmcv.cnn import fuse_conv_bn -from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, - wrap_fp16_model) - -from mmdet.apis import multi_gpu_test, single_gpu_test -from mmdet.datasets import (build_dataloader, build_dataset, - replace_ImageToTensor) -from mmdet.models import build_detector -from mmdet.utils import (build_ddp, build_dp, compat_cfg, get_device, - replace_cfg_vals, setup_multi_processes, - update_data_root) -from projects import * - - -def parse_args(): - parser = argparse.ArgumentParser( - description='MMDet test (and eval) a model') - parser.add_argument('config', help='test config file path') - parser.add_argument('checkpoint', help='checkpoint file') - parser.add_argument( - '--work-dir', - help='the directory to save the file containing evaluation metrics') - parser.add_argument('--out', help='output result file in pickle format') - parser.add_argument( - '--fuse-conv-bn', - action='store_true', - help='Whether to fuse conv and bn, this will slightly increase' - 'the inference speed') - parser.add_argument( - '--gpu-ids', - type=int, - nargs='+', - help='(Deprecated, please use --gpu-id) ids of gpus to use ' - '(only applicable to non-distributed training)') - parser.add_argument( - '--gpu-id', - type=int, - default=0, - help='id of gpu to use ' - '(only applicable to non-distributed testing)') - parser.add_argument( - '--format-only', - action='store_true', - help='Format the output results without perform evaluation. It is' - 'useful when you want to format the result to a specific format and ' - 'submit it to the test server') - parser.add_argument( - '--eval', - type=str, - nargs='+', - help='evaluation metrics, which depends on the dataset, e.g., "bbox",' - ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') - parser.add_argument('--show', action='store_true', help='show results') - parser.add_argument( - '--show-dir', help='directory where painted images will be saved') - parser.add_argument( - '--show-score-thr', - type=float, - default=0.3, - help='score threshold (default: 0.3)') - parser.add_argument( - '--gpu-collect', - action='store_true', - help='whether to use gpu to collect results.') - parser.add_argument( - '--tmpdir', - help='tmp directory used for collecting results from multiple ' - 'workers, available when gpu-collect is not specified') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - parser.add_argument( - '--options', - nargs='+', - action=DictAction, - help='custom options for evaluation, the key-value pair in xxx=yyy ' - 'format will be kwargs for dataset.evaluate() function (deprecate), ' - 'change to --eval-options instead.') - parser.add_argument( - '--eval-options', - nargs='+', - action=DictAction, - help='custom options for evaluation, the key-value pair in xxx=yyy ' - 'format will be kwargs for dataset.evaluate() function') - parser.add_argument( - '--launcher', - choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', - help='job launcher') - parser.add_argument('--local_rank', type=int, default=0) - args = parser.parse_args() - if 'LOCAL_RANK' not in os.environ: - os.environ['LOCAL_RANK'] = str(args.local_rank) - - if args.options and args.eval_options: - raise ValueError( - '--options and --eval-options cannot be both ' - 'specified, --options is deprecated in favor of --eval-options') - if args.options: - warnings.warn('--options is deprecated in favor of --eval-options') - args.eval_options = args.options - return args - - -def main(): - args = parse_args() - - assert args.out or args.eval or args.format_only or args.show \ - or args.show_dir, \ - ('Please specify at least one operation (save/eval/format/show the ' - 'results / save the results) with the argument "--out", "--eval"' - ', "--format-only", "--show" or "--show-dir"') - - if args.eval and args.format_only: - raise ValueError('--eval and --format_only cannot be both specified') - - if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): - raise ValueError('The output file must be a pkl file.') - - cfg = Config.fromfile(args.config) - - # replace the ${key} with the value of cfg.key - cfg = replace_cfg_vals(cfg) - - # update data root according to MMDET_DATASETS - update_data_root(cfg) - - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - - cfg = compat_cfg(cfg) - - # set multi-process settings - setup_multi_processes(cfg) - - # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): - torch.backends.cudnn.benchmark = True - - if 'pretrained' in cfg.model: - cfg.model.pretrained = None - elif 'init_cfg' in cfg.model.backbone: - cfg.model.backbone.init_cfg = None - - if cfg.model.get('neck'): - if isinstance(cfg.model.neck, list): - for neck_cfg in cfg.model.neck: - if neck_cfg.get('rfp_backbone'): - if neck_cfg.rfp_backbone.get('pretrained'): - neck_cfg.rfp_backbone.pretrained = None - elif cfg.model.neck.get('rfp_backbone'): - if cfg.model.neck.rfp_backbone.get('pretrained'): - cfg.model.neck.rfp_backbone.pretrained = None - - if args.gpu_ids is not None: - cfg.gpu_ids = args.gpu_ids[0:1] - warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. ' - 'Because we only support single GPU mode in ' - 'non-distributed testing. Use the first GPU ' - 'in `gpu_ids` now.') - else: - cfg.gpu_ids = [args.gpu_id] - cfg.device = get_device() - # init distributed env first, since logger depends on the dist info. 
- if args.launcher == 'none': - distributed = False - else: - distributed = True - init_dist(args.launcher, **cfg.dist_params) - - test_dataloader_default_args = dict( - samples_per_gpu=1, workers_per_gpu=2, dist=distributed, shuffle=False) - - # in case the test dataset is concatenated - if isinstance(cfg.data.test, dict): - cfg.data.test.test_mode = True - if cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1: - # Replace 'ImageToTensor' to 'DefaultFormatBundle' - cfg.data.test.pipeline = replace_ImageToTensor( - cfg.data.test.pipeline) - elif isinstance(cfg.data.test, list): - for ds_cfg in cfg.data.test: - ds_cfg.test_mode = True - if cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1: - for ds_cfg in cfg.data.test: - ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline) - - test_loader_cfg = { - **test_dataloader_default_args, - **cfg.data.get('test_dataloader', {}) - } - - rank, _ = get_dist_info() - # allows not to create - if args.work_dir is not None and rank == 0: - mmcv.mkdir_or_exist(osp.abspath(args.work_dir)) - timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) - json_file = osp.join(args.work_dir, f'eval_{timestamp}.json') - - # build the dataloader - dataset = build_dataset(cfg.data.test) - data_loader = build_dataloader(dataset, **test_loader_cfg) - - # build the model and load checkpoint - cfg.model.train_cfg = None - model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) - fp16_cfg = cfg.get('fp16', None) - if fp16_cfg is not None: - wrap_fp16_model(model) - checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') - if args.fuse_conv_bn: - model = fuse_conv_bn(model) - # old versions did not save class info in checkpoints, this walkaround is - # for backward compatibility - if 'CLASSES' in checkpoint.get('meta', {}): - model.CLASSES = checkpoint['meta']['CLASSES'] - else: - model.CLASSES = dataset.CLASSES - - if not distributed: - model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids) - outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, - args.show_score_thr) - else: - model = build_ddp( - model, - cfg.device, - device_ids=[int(os.environ['LOCAL_RANK'])], - broadcast_buffers=False) - outputs = multi_gpu_test( - model, data_loader, args.tmpdir, args.gpu_collect - or cfg.evaluation.get('gpu_collect', False)) - - rank, _ = get_dist_info() - if rank == 0: - if args.out: - print(f'\nwriting results to {args.out}') - mmcv.dump(outputs, args.out) - kwargs = {} if args.eval_options is None else args.eval_options - if args.format_only: - dataset.format_results(outputs, **kwargs) - if args.eval: - eval_kwargs = cfg.get('evaluation', {}).copy() - # hard-code way to remove EvalHook args - for key in [ - 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', - 'rule', 'dynamic_intervals' - ]: - eval_kwargs.pop(key, None) - eval_kwargs.update(dict(metric=args.eval, **kwargs)) - metric = dataset.evaluate(outputs, **eval_kwargs) - print(metric) - metric_dict = dict(config=args.config, metric=metric) - if args.work_dir is not None and rank == 0: - mmcv.dump(metric_dict, json_file) - - -if __name__ == '__main__': - main() diff --git a/cv/detection/co-detr/pytorch/tools/train.py b/cv/detection/co-detr/pytorch/tools/train.py deleted file mode 100644 index 22d965b68bc26bb5efc11822868f3903603edada..0000000000000000000000000000000000000000 --- a/cv/detection/co-detr/pytorch/tools/train.py +++ /dev/null @@ -1,245 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import argparse -import copy -import os -import os.path as osp -import time -import warnings - -import mmcv -import torch -import torch.distributed as dist -from mmcv import Config, DictAction -from mmcv.runner import get_dist_info, init_dist -from mmcv.utils import get_git_hash - -from mmdet import __version__ -from mmdet.apis import init_random_seed, set_random_seed, train_detector -from mmdet.datasets import build_dataset -from mmdet.models import build_detector -from mmdet.utils import (collect_env, get_device, get_root_logger, - replace_cfg_vals, setup_multi_processes, - update_data_root) -from projects import * - - -def parse_args(): - parser = argparse.ArgumentParser(description='Train a detector') - parser.add_argument('config', help='train config file path') - parser.add_argument('--work-dir', help='the dir to save logs and models') - parser.add_argument( - '--resume-from', help='the checkpoint file to resume from') - parser.add_argument( - '--auto-resume', - action='store_true', - help='resume from the latest checkpoint automatically') - parser.add_argument( - '--no-validate', - action='store_true', - help='whether not to evaluate the checkpoint during training') - group_gpus = parser.add_mutually_exclusive_group() - group_gpus.add_argument( - '--gpus', - type=int, - help='(Deprecated, please use --gpu-id) number of gpus to use ' - '(only applicable to non-distributed training)') - group_gpus.add_argument( - '--gpu-ids', - type=int, - nargs='+', - help='(Deprecated, please use --gpu-id) ids of gpus to use ' - '(only applicable to non-distributed training)') - group_gpus.add_argument( - '--gpu-id', - type=int, - default=0, - help='id of gpu to use ' - '(only applicable to non-distributed training)') - parser.add_argument('--seed', type=int, default=None, help='random seed') - parser.add_argument( - '--diff-seed', - action='store_true', - help='Whether or not set different seeds for different ranks') - parser.add_argument( - '--deterministic', - action='store_true', - help='whether to set deterministic options for CUDNN backend.') - parser.add_argument( - '--options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file (deprecate), ' - 'change to --cfg-options instead.') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - parser.add_argument( - '--launcher', - choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', - help='job launcher') - parser.add_argument('--local_rank', type=int, default=0) - parser.add_argument( - '--auto-scale-lr', - action='store_true', - help='enable automatically scaling LR.') - args = parser.parse_args() - if 'LOCAL_RANK' not in os.environ: - os.environ['LOCAL_RANK'] = str(args.local_rank) - - if args.options and args.cfg_options: - raise ValueError( - '--options and --cfg-options cannot be both ' - 'specified, --options is deprecated in favor of --cfg-options') - if args.options: - warnings.warn('--options is deprecated in favor of --cfg-options') - args.cfg_options = args.options - - return args - - -def main(): - args = parse_args() - - cfg = Config.fromfile(args.config) - - # replace the ${key} with the value of cfg.key - cfg = replace_cfg_vals(cfg) - - # update data root according to MMDET_DATASETS - update_data_root(cfg) - - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - - if args.auto_scale_lr: - if 'auto_scale_lr' in cfg and \ - 'enable' in cfg.auto_scale_lr and \ - 'base_batch_size' in cfg.auto_scale_lr: - cfg.auto_scale_lr.enable = True - else: - warnings.warn('Can not find "auto_scale_lr" or ' - '"auto_scale_lr.enable" or ' - '"auto_scale_lr.base_batch_size" in your' - ' configuration file. Please update all the ' - 'configuration files to mmdet >= 2.24.1.') - - # set multi-process settings - setup_multi_processes(cfg) - - # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): - torch.backends.cudnn.benchmark = True - - # work_dir is determined in this priority: CLI > segment in file > filename - if args.work_dir is not None: - # update configs according to CLI args if args.work_dir is not None - cfg.work_dir = args.work_dir - elif cfg.get('work_dir', None) is None: - # use config filename as default work_dir if cfg.work_dir is None - cfg.work_dir = osp.join('./work_dirs', - osp.splitext(osp.basename(args.config))[0]) - - if args.resume_from is not None: - cfg.resume_from = args.resume_from - cfg.auto_resume = args.auto_resume - if args.gpus is not None: - cfg.gpu_ids = range(1) - warnings.warn('`--gpus` is deprecated because we only support ' - 'single GPU mode in non-distributed training. ' - 'Use `gpus=1` now.') - if args.gpu_ids is not None: - cfg.gpu_ids = args.gpu_ids[0:1] - warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. ' - 'Because we only support single GPU mode in ' - 'non-distributed training. Use the first GPU ' - 'in `gpu_ids` now.') - if args.gpus is None and args.gpu_ids is None: - cfg.gpu_ids = [args.gpu_id] - - # init distributed env first, since logger depends on the dist info. 
- if args.launcher == 'none': - distributed = False - else: - distributed = True - init_dist(args.launcher, **cfg.dist_params) - # re-set gpu_ids with distributed training mode - _, world_size = get_dist_info() - cfg.gpu_ids = range(world_size) - - # create work_dir - mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) - # dump config - cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) - # init the logger before other steps - timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) - log_file = osp.join(cfg.work_dir, f'{timestamp}.log') - logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) - - # init the meta dict to record some important information such as - # environment info and seed, which will be logged - meta = dict() - # log env info - env_info_dict = collect_env() - env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()]) - dash_line = '-' * 60 + '\n' - logger.info('Environment info:\n' + dash_line + env_info + '\n' + - dash_line) - meta['env_info'] = env_info - meta['config'] = cfg.pretty_text - # log some basic info - logger.info(f'Distributed training: {distributed}') - logger.info(f'Config:\n{cfg.pretty_text}') - - cfg.device = get_device() - # set random seeds - seed = init_random_seed(args.seed, device=cfg.device) - seed = seed + dist.get_rank() if args.diff_seed else seed - logger.info(f'Set random seed to {seed}, ' - f'deterministic: {args.deterministic}') - set_random_seed(seed, deterministic=args.deterministic) - cfg.seed = seed - meta['seed'] = seed - meta['exp_name'] = osp.basename(args.config) - - model = build_detector( - cfg.model, - train_cfg=cfg.get('train_cfg'), - test_cfg=cfg.get('test_cfg')) - model.init_weights() - - datasets = [build_dataset(cfg.data.train)] - if len(cfg.workflow) == 2: - assert 'val' in [mode for (mode, _) in cfg.workflow] - val_dataset = copy.deepcopy(cfg.data.val) - val_dataset.pipeline = cfg.data.train.get( - 'pipeline', cfg.data.train.dataset.get('pipeline')) - datasets.append(build_dataset(val_dataset)) - if cfg.checkpoint_config is not None: - # save mmdet version, config file content and class names in - # checkpoints as meta data - cfg.checkpoint_config.meta = dict( - mmdet_version=__version__ + get_git_hash()[:7], - CLASSES=datasets[0].CLASSES) - # add an attribute for visualization convenience - model.CLASSES = datasets[0].CLASSES - train_detector( - model, - datasets, - cfg, - distributed=distributed, - validate=(not args.no_validate), - timestamp=timestamp, - meta=meta) - - -if __name__ == '__main__': - main() diff --git a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/LICENSE b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/LICENSE deleted file mode 100644 index faaaa3ed1a78ff8b96b9d3e3c8dd1b257cb8b49e..0000000000000000000000000000000000000000 --- a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2022-present LG AI Research. - - Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file diff --git a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/README.md b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/README.md index 86c15f52bd3b21c0272f731899a4f609e20f9b6a..60689c62180e40cc961e5eb069d408156f8baa0f 100644 --- a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/README.md +++ b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/README.md @@ -32,10 +32,12 @@ imagenet ## Step 3: Training AugVAE(AugVAE-ML) -``` -$ cd /path/to/L-Verse/pytorch -$ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 -$ python3 train_vae.py --config ./configs/imagenet_augvae_ml.yaml --train_dir /path/to/imagenet/train --val_dir /path/to/imagenet/val --gpus 8 --batch_size 4 --epochs 2 +```bash +git clone https://github.com/tgisaturday/L-Verse.git +cd /path/to/L-Verse/pytorch +git checkout 504a6bf740812bdd2022f31f969968ec31794033 +export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +python3 train_vae.py --config ./configs/imagenet_augvae_ml.yaml --train_dir /path/to/imagenet/train --val_dir /path/to/imagenet/val --gpus 8 --batch_size 4 --epochs 2 ``` ## Reference diff --git a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/configs/imagenet_augvae_ml.yaml b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/configs/imagenet_augvae_ml.yaml deleted file mode 100644 index a5570a5b86338a8ff02e0b63e7a2d4b3f0a88bd6..0000000000000000000000000000000000000000 --- a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/configs/imagenet_augvae_ml.yaml +++ /dev/null @@ -1,13 +0,0 @@ -num_tokens: 8192 -ch_mult: [1,2,4] -attn_resolutions: [64, 32, 16, 8] -loss_type: lpips_l2 -p_loss_weight: 0.1 -epochs: 30 -batch_size: 8 -backup: True -log_images: True -num_workers: 16 -image_log_steps: 1000 -lr_decay: True - diff --git a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/configs/imagenet_augvae_sl.yaml b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/configs/imagenet_augvae_sl.yaml deleted file mode 100644 index a141c095fae4da9f500a87b9b4f0d62f5c66047c..0000000000000000000000000000000000000000 --- a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/configs/imagenet_augvae_sl.yaml +++ /dev/null @@ -1,15 +0,0 @@ -finetune: True -pretrained_path: 'mlvae.ckpt' -attn_resolutions: [32] -loss_type: lpips_l2 -learning_rate: 4.5e-6 -p_loss_weight: 0.1 -epochs: 30 -batch_size: 8 -backup: True -log_images: True -num_workers: 16 -image_log_steps: 1000 -lr_decay: True - - diff --git a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/eval_vae.py b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/eval_vae.py deleted file mode 100644 index 0aaedc5811e8a0813b614738afa4802e78a1bdfa..0000000000000000000000000000000000000000 --- a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/eval_vae.py +++ /dev/null @@ -1,217 +0,0 @@ -import argparse, datetime, yaml -import torch - -from latent_verse.models.vqvae import AugVAE -from latent_verse.loader import ImageDataModule -from latent_verse.callbacks import ReconstructedImageLogger - -import pytorch_lightning as pl -from pytorch_lightning import 
seed_everything -from pytorch_lightning import Trainer - -if __name__ == "__main__": - - now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") - - config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False) - parser.add_argument('-c', '--config', default='', type=str, metavar='FILE', - help='YAML config file specifying default arguments') - - - parser = argparse.ArgumentParser(description='AugVAE Evaluation') - - #path configuration - parser.add_argument('--train_dir', type=str, default='dataset/train/', - help='path to train dataset') - parser.add_argument('--val_dir', type=str, default='dataset/val/', - help='path to val dataset') - parser.add_argument('--test_dir', type=str, default='dataset/val/', - help='path to test dataset') - parser.add_argument('--log_dir', type=str, default='results/', - help='path to save logs') - parser.add_argument('--backup_dir', type=str, default='backups/', - help='path to save backups for sudden crash') - parser.add_argument('--ckpt_path', type=str, - help='path to previous checkpoint') - parser.add_argument('--pretrained_path', type=str, - help='path to pretrained codebook') - - - #training configuration - parser.add_argument('--finetune', action='store_true', default=False, - help='finetune pretrained model') - parser.add_argument('--backup', action='store_true', default=False, - help='save backup and load from backup if restart happens') - parser.add_argument('--backup_steps', type =int, default = 1000, - help='saves backup every n training steps') - parser.add_argument('--log_images', action='store_true', default=False, - help='log image outputs. not recommended for tpus') - parser.add_argument('--image_log_steps', type=int, default=1000, - help='log image outputs for every n step. 
not recommended for tpus') - parser.add_argument('--refresh_rate', type=int, default=1, - help='progress bar refresh rate') - parser.add_argument('--precision', type=int, default=32, - help='precision for training') - - parser.add_argument('--fake_data', action='store_true', default=False, - help='using fake_data for debugging') - - - parser.add_argument('--seed', type=int, default=42, - help='random seed') - parser.add_argument('--gpus', type=int, default=1, - help='number of gpus') - parser.add_argument('--gpu_dist', action='store_true', default=False, - help='distributed training with gpus') - - - parser.add_argument('--num_sanity_val_steps', type=int, default=0, - help='num_sanity_val_steps') - parser.add_argument('--val_percent_check', type=int, default=100, - help='num_val_percent') - parser.add_argument('--learning_rate', default=4.5e-6, type=float, - help='base learning rate') - parser.add_argument('--lr_decay', action='store_true', default=False, - help = 'use learning rate decay') - - parser.add_argument('--batch_size', type=int, default=8, - help='training settings') - parser.add_argument('--epochs', type=int, default=1, - help='training settings') - parser.add_argument('--num_workers', type=int, default=16, - help='training settings') - parser.add_argument('--img_size', type=int, default=256, - help='training settings') - parser.add_argument('--resize_ratio', type=float, default=0.75, - help='Random resized crop lower ratio') - - parser.add_argument('--debug', action='store_true', default=False, - help='debug run') - parser.add_argument('--web_dataset',action='store_true', default=False, - help='enable web_dataset') - parser.add_argument('--dataset_size', nargs='+', type=int, default=[1e9], - help='training settings') - - #model configuration - parser.add_argument('--model', type=str, default='vqvae') - parser.add_argument('--use_attn', type=bool, default=False, help='use attention') - parser.add_argument('--codebook_dim', type=int, default=256, - help='number of embedding dimension for codebook') - parser.add_argument('--num_tokens', type=int, default=1024, - help='codebook size') - parser.add_argument('--double_z', type=bool, default=False, - help='double z for encoder') - parser.add_argument('--z_channels', type=int, default=256, - help='image latent feature dimension') - parser.add_argument('--resolution', type=int, default=256, - help='image resolution') - parser.add_argument('--in_channels', type=int, default=3, - help='input image channel') - parser.add_argument('--out_channels', type=int, default=3, - help='output image channel') - parser.add_argument('--hidden_dim', type=int, default=128, - help='hidden dimension init size') - parser.add_argument('--ch_mult', nargs='+', type=int, default=[1,1,2,2,4], - help='resnet channel multiplier') - parser.add_argument('--num_res_blocks', type=int, default=2, - help='number of resnet blocks') - parser.add_argument('--attn_resolutions', nargs='+', type=int, default=[16], - help='model settings') - parser.add_argument('--dropout', type=float, default=0.0, - help='model settings') - parser.add_argument('--quant_beta', type=float, default=0.25, - help='quantizer beta') - parser.add_argument('--quant_ema_decay', type=float, default=0.99, - help='quantizer ema decay') - parser.add_argument('--quant_ema_eps', type=float, default=1e-5, - help='quantizer ema epsilon') - - #loss configuration - parser.add_argument('--loss_type', type=str, default='mse') - parser.add_argument('--p_loss_weight', type = float, default=0.1, - help = 'Perceptual 
loss weight') - parser.add_argument('--codebook_weight', type=float, default=1.0, - help='lossconfig') - - - args_config, remaining = config_parser.parse_known_args() - if args_config.config: - with open(args_config.config, 'r') as f: - cfg = yaml.safe_load(f) - parser.set_defaults(**cfg) - - # The main arg parser parses the rest of the args, the usual - # defaults will have been overridden if config file specified. - args = parser.parse_args(remaining) - - #Map dataset directory to test_dir - args.train_dir = args.test_dir - args.val_dir = args.test_dir - - #random seed fix - seed_everything(args.seed) - - tpus = None - gpus = args.gpus - if args.gpu_dist: - torch.distributed.init_process_group(backend='nccl') - args.world_size = torch.distributed.get_world_size() - else: - args.world_size = args.gpus - - args.base_lr = args.learning_rate - args.learning_rate = args.learning_rate * args.world_size * args.batch_size - - - datamodule = ImageDataModule(args.train_dir, args.val_dir, - args.batch_size, args.num_workers, - args.img_size, args.resize_ratio, - args.fake_data, args.web_dataset, - world_size = args.world_size, - dataset_size = args.dataset_size) - - - if args.finetune: - model = AugVAE.load_from_checkpoint(args.ckpt_path, finetuned=True, - ft_attn_resolutions=args.attn_resolutions, - ft_loss_type = args.loss_type, - ft_args = args) - else: - model = AugVAE.load_from_checkpoint(args.ckpt_path) - - - del model.loss - model.args.log_dir = args.log_dir - - default_root_dir = args.log_dir - - if args.debug: - limit_train_batches = 100 - limit_test_batches = 100 - args.backup_steps = 10 - args.image_log_steps = 10 - else: - limit_train_batches = 1.0 - limit_test_batches = 1.0 - - - logger = pl.loggers.tensorboard.TensorBoardLogger(args.log_dir, name='vqvae') - - - trainer = Trainer(tpu_cores=tpus, gpus= gpus, default_root_dir=default_root_dir, - max_epochs=args.epochs, progress_bar_refresh_rate=args.refresh_rate,precision=args.precision, - accelerator='ddp', benchmark=True, - num_sanity_val_steps=args.num_sanity_val_steps, - limit_val_batches = args.val_percent_check, - limit_train_batches=limit_train_batches,limit_test_batches=limit_test_batches, - logger = logger) - - if args.log_images: - trainer.callbacks.append(ReconstructedImageLogger(every_n_steps=args.image_log_steps, nrow=args.batch_size)) - - print("Setting batch size: {}".format(model.hparams.batch_size)) - - - trainer.test(model, datamodule=datamodule) - - diff --git a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/callbacks.py b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/callbacks.py deleted file mode 100644 index de475f3d94393c41988c284178ee238ddff38366..0000000000000000000000000000000000000000 --- a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/callbacks.py +++ /dev/null @@ -1,175 +0,0 @@ -from typing import Any, Optional, Tuple - -import pytorch_lightning as pl -from pytorch_lightning import Callback - -from pytorch_lightning.utilities.types import STEP_OUTPUT -from pytorch_lightning.utilities.distributed import rank_zero_only -import torch.nn.functional as F -import torchvision -import torchvision.transforms.functional as TF - - - - - -class ReconstructedImageLogger(Callback): - def __init__( - self, - every_n_steps: int = 1000, - nrow: int = 8, - padding: int = 2, - normalize: bool = True, - norm_range: Optional[Tuple[int, int]] = None, - scale_each: bool = False, - pad_value: int = 0, - use_wandb: bool = False, - multi_optim = False, - ) -> None: - """ 
- Args: - num_samples: Number of images displayed in the grid. Default: ``3``. - nrow: Number of images displayed in each row of the grid. - The final grid size is ``(B / nrow, nrow)``. Default: ``8``. - padding: Amount of padding. Default: ``2``. - normalize: If ``True``, shift the image to the range (0, 1), - by the min and max values specified by :attr:`range`. Default: ``True``. - norm_range: Tuple (min, max) where min and max are numbers, - then these numbers are used to normalize the image. By default, min and max - are computed from the tensor. - scale_each: If ``True``, scale each image in the batch of - images separately rather than the (min, max) over all images. Default: ``False``. - pad_value: Value for the padded pixels. Default: ``0``. - """ - super().__init__() - self.every_n_steps = every_n_steps - self.nrow = nrow - self.padding = padding - self.normalize = normalize - self.norm_range = norm_range - self.scale_each = scale_each - self.pad_value = pad_value - self.multi_optim = multi_optim - self.use_wandb = use_wandb - - @rank_zero_only - def on_train_batch_end( - self, - trainer: 'pl.Trainer', - pl_module: 'pl.LightningModule', - outputs: Optional[STEP_OUTPUT], - batch: Any, - batch_idx: int, - dataloader_idx: int, - ) -> None: - """Called when the train batch ends.""" - - if batch_idx % self.every_n_steps == 0: - if self.multi_optim: - x = outputs[0]['x'] - xrec = outputs[0]['xrec'] - else: - x = outputs['x'] - xrec = outputs['xrec'] - - x_grid = torchvision.utils.make_grid( - tensor=x, - nrow=self.nrow, - padding=self.padding, - normalize=self.normalize, - value_range=self.norm_range, - scale_each=self.scale_each, - pad_value=self.pad_value, - ) - xrec_grid = torchvision.utils.make_grid( - tensor=xrec, - nrow=self.nrow, - padding=self.padding, - normalize=self.normalize, - value_range=self.norm_range, - scale_each=self.scale_each, - pad_value=self.pad_value, - ) - - x_title = "train/input" - trainer.logger.experiment.add_image(x_title, x_grid, global_step=trainer.global_step) - xrec_title = "train/reconstruction" - trainer.logger.experiment.add_image(xrec_title, xrec_grid, global_step=trainer.global_step) - - @rank_zero_only - def on_validation_batch_end( - self, - trainer: 'pl.Trainer', - pl_module: 'pl.LightningModule', - outputs: Optional[STEP_OUTPUT], - batch: Any, - batch_idx: int, - dataloader_idx: int, - ) -> None: - """Called when the validation batch ends.""" - if batch_idx % self.every_n_steps == 0: - x = outputs['x'] - xrec = outputs['xrec'] - x_grid = torchvision.utils.make_grid( - tensor=x, - nrow=self.nrow, - padding=self.padding, - normalize=self.normalize, - value_range=self.norm_range, - scale_each=self.scale_each, - pad_value=self.pad_value, - ) - xrec_grid = torchvision.utils.make_grid( - tensor=xrec, - nrow=self.nrow, - padding=self.padding, - normalize=self.normalize, - value_range=self.norm_range, - scale_each=self.scale_each, - pad_value=self.pad_value, - ) - - x_title = "val/input" - trainer.logger.experiment.add_image(x_title, x_grid, global_step=batch_idx) - xrec_title = "val/reconstruction" - trainer.logger.experiment.add_image(xrec_title, xrec_grid, global_step=batch_idx) - - def on_test_batch_end( - self, - trainer: 'pl.Trainer', - pl_module: 'pl.LightningModule', - outputs: Optional[STEP_OUTPUT], - batch: Any, - batch_idx: int, - dataloader_idx: int, - ) -> None: - """Called when the text batch ends.""" - x = outputs['input_images'] - xrec = outputs['generated_images'] - - x_grid = torchvision.utils.make_grid( - tensor=x, - nrow=self.nrow, 
- padding=self.padding, - normalize=self.normalize, - value_range=self.norm_range, - scale_each=self.scale_each, - pad_value=self.pad_value, - ) - xrec_grid = torchvision.utils.make_grid( - tensor=xrec, - nrow=self.nrow, - padding=self.padding, - normalize=self.normalize, - value_range=self.norm_range, - scale_each=self.scale_each, - pad_value=self.pad_value, - ) - - x_title = "test/input" - trainer.logger.experiment.add_image(x_title, x_grid, global_step=batch_idx) - xrec_title = "test/reconstruction" - trainer.logger.experiment.add_image(xrec_title, xrec_grid, global_step=batch_idx) - - pl_module.gather_and_save(outputs, batch_idx) - return outputs \ No newline at end of file diff --git a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/loader.py b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/loader.py deleted file mode 100644 index c7c98f6dc3865bfd2257b3f709d6361ab298ff7c..0000000000000000000000000000000000000000 --- a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/loader.py +++ /dev/null @@ -1,245 +0,0 @@ -from pathlib import Path -from random import randint - -import PIL - -from torch.utils.data import Dataset, DataLoader -from torchvision import transforms as T -from torchvision.datasets import ImageFolder, FakeData -from pytorch_lightning import LightningDataModule -import torch -from typing import Any, Tuple - -import webdataset as wds - -from PIL import Image -from io import BytesIO - -#To prevent truncated error -from PIL import ImageFile -ImageFile.LOAD_TRUNCATED_IMAGES = True - -def web_dataset_helper(path): - if Path(path).is_dir(): - DATASET = [str(p) for p in Path(path).glob("**/*") if ".tar" in str(p).lower()] # .name - assert len(DATASET) > 0, 'The directory ({}) does not contain any WebDataset/.tar files.'.format(path) - print('Found {} WebDataset .tar(.gz) file(s) under given path {}!'.format(len(DATASET), path)) - elif ('http://' in path.lower()) | ('https://' in path.lower()): - DATASET = f"pipe:curl -L -s {path} || true" - print('Found {} http(s) link under given path!'.format(len(DATASET), path)) - elif 'gs://' in path.lower(): - DATASET = f"pipe:gsutil cat {path} || true" - print('Found {} GCS link under given path!'.format(len(DATASET), path)) - elif '.tar' in path: - DATASET = path - print('Found WebDataset .tar(.gz) file under given path {}!'.format(path)) - else: - raise Exception('No folder, no .tar(.gz) and no url pointing to tar files provided under {}.'.format(path)) - return DATASET - -def identity(x): - return x - -class Grayscale2RGB: - def __init__(self): - pass - def __call__(self, img): - if img.mode != 'RGB': - return img.convert('RGB') - else: - return img - def __repr__(self): - return self.__class__.__name__ + '()' - - - - -class ImageDataModule(LightningDataModule): - - def __init__(self, train_dir, val_dir, batch_size, num_workers, img_size, resize_ratio=0.75, - fake_data=False, web_dataset=False, world_size = 1, dataset_size = [int(1e9)]): - super().__init__() - self.train_dir = train_dir - self.val_dir = val_dir - self.batch_size = batch_size - self.num_workers = num_workers - self.fake_data = fake_data - self.img_size = img_size - self.web_dataset = web_dataset - if len(dataset_size) == 1: - self.train_dataset_size = dataset_size[0] - self.val_dataset_size = dataset_size[0] - else: - self.train_dataset_size = dataset_size[0] - self.val_dataset_size = dataset_size[1] - self.world_size = world_size - self.transform_train = T.Compose([ - Grayscale2RGB(), - 
T.RandomResizedCrop(img_size, - scale=(resize_ratio, 1.),ratio=(1., 1.)), - T.ToTensor(), - T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), - ]) - self.transform_val = T.Compose([ - Grayscale2RGB(), - T.Resize(img_size), - T.CenterCrop(img_size), - T.ToTensor(), - T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), - ]) - def imagetransform(self, b): - return Image.open(BytesIO(b)) - - def dummy(self, s): - return torch.zeros(1) - - def setup(self, stage=None): - if self.fake_data: - self.train_dataset = FakeData(12000000, (3, self.img_size, self.img_size), 1000, self.transform_train) - self.val_dataset = FakeData(50000, (3, self.img_size, self.img_size), 1000, self.transform_val) - self.transform_train = None - self.transform_val = None - else: - if self.web_dataset: - DATASET_TRAIN = web_dataset_helper(self.train_dir) - DATASET_VAL = web_dataset_helper(self.val_dir) - - - self.train_dataset = ( - wds.WebDataset(DATASET_TRAIN, handler=wds.warn_and_continue) - .shuffle(1000, handler=wds.warn_and_continue) - .decode("pil", handler=wds.warn_and_continue) - .to_tuple("jpg;png;jpeg", handler=wds.warn_and_continue) - .map_tuple(self.transform_train, handler=wds.warn_and_continue) - .batched(self.batch_size, partial=False) # It is good to avoid partial batches when using Distributed training - ) - - self.val_dataset = ( - wds.WebDataset(DATASET_VAL, handler=wds.warn_and_continue) - .decode("pil", handler=wds.warn_and_continue) - .to_tuple("jpg;png;jpeg", handler=wds.warn_and_continue) - .map_tuple(self.transform_val, handler=wds.warn_and_continue) - .batched(self.batch_size, partial=False) # It is good to avoid partial batches when using Distributed training - ) - - else: - self.train_dataset = ImageDataset(self.train_dir, self.transform_train) - self.val_dataset = ImageDataset(self.val_dir, self.transform_val) - - - def train_dataloader(self): - if self.web_dataset: - dl = wds.WebLoader(self.train_dataset, batch_size=None, num_workers=self.num_workers) - number_of_batches = self.train_dataset_size // (self.batch_size * self.world_size) - dl = dl.repeat(9999999999).slice(number_of_batches) - dl.length = number_of_batches - return dl - else: - return DataLoader(self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True) - - def val_dataloader(self): - if self.web_dataset: - dl = wds.WebLoader(self.val_dataset, batch_size=None, num_workers=self.num_workers) - number_of_batches = self.val_dataset_size // (self.batch_size * self.world_size) - dl = dl.repeat(9999999999).slice(number_of_batches) - dl.length = number_of_batches - return dl - else: - return DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=self.num_workers) - - def test_dataloader(self): - #simply reuse val_dataloader for test. 
- if self.web_dataset: - dl = wds.WebLoader(self.val_dataset, batch_size=None, num_workers=self.num_workers) - number_of_batches = self.val_dataset_size // (self.batch_size * self.world_size) - dl = dl.repeat(9999999999).slice(number_of_batches) - dl.length = number_of_batches - return dl - else: - return DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=self.num_workers) - -IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp') - -class ImageDataset(ImageFolder): - def random_sample(self): - return self.__getitem__(randint(0, self.__len__() - 1)) - - def sequential_sample(self, ind): - if ind >= self.__len__() - 1: - return self.__getitem__(0) - return self.__getitem__(ind + 1) - - def skip_sample(self, ind): - return self.random_sample() - - - def __getitem__(self, index: int) -> Tuple[Any, Any]: - """ - Args: - index (int): Index - - Returns: - tuple: (sample, target) where target is class_index of the target class. - """ - try: - path, target = self.samples[index] - sample = self.loader(path) - except (PIL.UnidentifiedImageError, OSError) as corrupt_image_exceptions: - print(corrupt_image_exceptions) - print(f"An exception occurred trying to load file {path}.") - print(f"Skipping index {index}") - return self.skip_sample(index) - - if self.transform is not None: - sample = self.transform(sample) - if self.target_transform is not None: - target = self.target_transform(target) - - return sample, target - - -class ImageDataset2(Dataset): - def __init__(self, - folder, - transform=None, - shuffle=False, - ): - """ - @param folder: Folder containing images and text files matched by their paths' respective "stem" - @param truncate_captions: Rather than throw an exception, captions which are too long will be truncated. 
- """ - super().__init__() - self.shuffle = shuffle - path = Path(folder) - - self.image_files = [ - *path.glob('**/*.png'), *path.glob('**/*.jpg'), - *path.glob('**/*.jpeg'), *path.glob('**/*.bmp') - ] - self.transform = transform - - def __len__(self): - return len(self.image_files) - - def random_sample(self): - return self.__getitem__(randint(0, self.__len__() - 1)) - - def sequential_sample(self, ind): - if ind >= self.__len__() - 1: - return self.__getitem__(0) - return self.__getitem__(ind + 1) - - def skip_sample(self, ind): - if self.shuffle: - return self.random_sample() - return self.sequential_sample(ind=ind) - - def __getitem__(self, ind): - try: - image_tensor = self.transform(PIL.Image.open(self.image_files[ind])) - except (PIL.UnidentifiedImageError, OSError) as corrupt_image_exceptions: - print(corrupt_image_exceptions) - print(f"An exception occurred trying to load file {self.image_files[ind]}.") - print(f"Skipping index {ind}") - return self.skip_sample(ind) - return image_tensor \ No newline at end of file diff --git a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/models/vqvae.py b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/models/vqvae.py deleted file mode 100644 index c08f8fccf9de1a1646e63ed2fd022461634d1af1..0000000000000000000000000000000000000000 --- a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/models/vqvae.py +++ /dev/null @@ -1,423 +0,0 @@ -import os -import torch -import torch.nn as nn -import torch.nn.functional as F -import pytorch_lightning as pl -import math -from latent_verse.modules.vqvae.vae import Encoder, Decoder -from latent_verse.modules.vqvae.quantize import VectorQuantizer, EMAVectorQuantizer -from torch.optim.lr_scheduler import ReduceLROnPlateau -from einops import rearrange -from latent_verse.modules.losses.lpips import RecPerceptualLoss -from latent_verse.utils.util import normalize - -from torchvision import transforms as T -from pytorch_lightning.utilities.distributed import rank_zero_only - -class VQVAE(pl.LightningModule): - def __init__(self, - args, batch_size, learning_rate, - ignore_keys=[], finetuned=False, - ft_attn_resolutions=None, - ft_loss_type=None, ft_args=None, - ): - super().__init__() - self.image_size = args.resolution - self.num_tokens = args.num_tokens - - self.encoders = nn.ModuleList() - self.pre_quants = nn.ModuleList() - self.quantizers = nn.ModuleList() - self.post_quants = nn.ModuleList() - self.decoders = nn.ModuleList() - - self.enc_attn_resolutions = args.attn_resolutions - self.dec_attn_resolutions = args.attn_resolutions - self.enc_resolution = args.resolution - self.dec_resolution = args.resolution - self.quant_idxs = [] - encoder = Encoder(hidden_dim=args.hidden_dim, in_channels=args.in_channels, ch_mult= args.ch_mult, - num_res_blocks=args.num_res_blocks, - dropout=args.dropout, attn_resolutions = [args.attn_resolutions[0],], - resolution=args.resolution, z_channels=args.z_channels, - double_z=args.double_z, use_attn=args.use_attn) - self.encoders.append(encoder) - - pre_quant = torch.nn.Conv2d(args.z_channels, args.codebook_dim, 1) - self.pre_quants.append(pre_quant) - - quantizer = VectorQuantizer(args.num_tokens, args.codebook_dim, beta=args.quant_beta) - self.quantizers.append(quantizer) - - post_quant = torch.nn.Conv2d(args.codebook_dim, args.z_channels, 1) - self.post_quants.append(post_quant) - - self.quant_idxs.append(0) - - dec_in_channels = args.z_channels - for i, res in enumerate(args.attn_resolutions[1:]): - encoder = 
Encoder(hidden_dim=args.hidden_dim, in_channels=args.z_channels, ch_mult= [2, 4], - num_res_blocks=args.num_res_blocks, - dropout=args.dropout, attn_resolutions = [res,], - resolution=args.attn_resolutions[i], z_channels=args.z_channels, - double_z=args.double_z, use_attn=args.use_attn) - self.encoders.append(encoder) - - pre_quant = torch.nn.Conv2d(args.z_channels, args.codebook_dim, 1) - self.pre_quants.append(pre_quant) - - quantizer = VectorQuantizer(args.num_tokens, args.codebook_dim, beta=args.quant_beta) - self.quantizers.append(quantizer) - - post_quant = torch.nn.Conv2d(args.codebook_dim, args.z_channels, 1) - self.post_quants.append(post_quant) - - self.quant_idxs.append(i+1) - - decoder = Decoder(hidden_dim=args.hidden_dim, out_channels = args.z_channels, ch_mult= [2, 4], - num_res_blocks=args.num_res_blocks, - dropout=args.dropout, in_channels = args.z_channels, attn_resolutions = [args.attn_resolutions[-(i+1)],], - resolution=args.attn_resolutions[-(i+2)], z_channels=dec_in_channels, use_attn=args.use_attn) - self.decoders.append(decoder) - - #double decoder in_channels after first decoder init - if i == 0: - dec_in_channels = args.z_channels * 2 - - - args.out_channels = 3 - - decoder = Decoder(hidden_dim=args.hidden_dim, out_channels=args.out_channels, ch_mult= args.ch_mult, - num_res_blocks=args.num_res_blocks, - dropout=args.dropout, in_channels=args.in_channels, attn_resolutions = [args.attn_resolutions[0],], - resolution=args.resolution, z_channels=dec_in_channels, use_attn=args.use_attn) - self.decoders.append(decoder) - - self.setup_loss(args) - - self.save_hyperparameters("args", "batch_size", "learning_rate") - self.args = args - self.image_seq_len = 0 - for i in self.quant_idxs: - self.image_seq_len =self.image_seq_len + self.enc_attn_resolutions[i] ** 2 - if finetuned: - self.setup_finetune(ft_attn_resolutions, ft_loss_type, ft_args) - - def setup_loss(self, args): - if args.loss_type == 'smooth_l1': - self.loss = nn.SmoothL1Loss() - - elif args.loss_type == 'l1': - self.loss = nn.L1Loss() - - elif args.loss_type == 'mse': - self.loss = nn.MSELoss() - - elif args.loss_type in ['lpips_l1', 'lpips_l2']: - self.loss = RecPerceptualLoss(loss_type = args.loss_type, perceptual_weight=args.p_loss_weight) - else: - print(f"Loss type {args.loss_type} is not currently supported. 
Using default MSELoss.") - self.loss = nn.MSELoss() - - def setup_eval(self): - self.freeze() - for quantizer in self.quantizers: - quantizer.embedding.update = False - del self.loss - - - def setup_finetune(self, attn_resolutions, loss_type, args): - self.args.finetune = True - self.hparams.learning_rate = args.learning_rate - self.hparams.batch_size = args.batch_size - - self.quant_idxs = [] - - for i, attn in enumerate(self.enc_attn_resolutions): - if attn in attn_resolutions: - self.quant_idxs.append(i) - - self.image_seq_len = 0 - for i in self.quant_idxs: - self.image_seq_len = self.image_seq_len + self.enc_attn_resolutions[i] ** 2 - - self.enc_attn_resolutions = attn_resolutions - self.dec_attn_resolutions = attn_resolutions - - del self.decoders[:len(self.decoders) - self.quant_idxs[-1] - 1] - del self.encoders[self.quant_idxs[-1]+1:] - - self.args.loss_type = loss_type - self.setup_loss(args) - - self.connectors = nn.ModuleList() - for i in range(len(self.decoders)): - conn = torch.nn.Conv2d(self.args.z_channels, self.args.z_channels * 2, 1) - self.connectors.append(conn) - - def encode(self, input): - quants = [] - encoding_indices = [] - emb_loss = None - enc_idxs = None - enc = input - - for i, encoder in enumerate(self.encoders): - enc = encoder(enc) - if self.args.finetune and i == len(self.encoders): - h = self.pre_quants[i](enc) - quant, loss, info = self.quantizers[i](h) - quants.append(quant) - encoding_indices.append(info[2]) - emb_loss = loss - enc_idxs = info[2] - else: - h = self.pre_quants[i](enc) - quant, loss, info = self.quantizers[i](h) - quants.append(quant) - encoding_indices.append(info[2]) - if emb_loss == None: - emb_loss = loss - enc_idxs = info[2] - else: - emb_loss = emb_loss + loss - enc_idxs = torch.cat((enc_idxs, info[2])) - - encodings = F.one_hot(enc_idxs, self.args.num_tokens).type(h.dtype) - avg_probs = torch.mean(encodings, dim=0) - perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10))) - info = (perplexity, encodings, encoding_indices) - - return quants, emb_loss, info - - def decode(self, input, feed_seq=False, return_var=False): - quants = [] - - if feed_seq: - image_seq = input - split_idxes = [] - for i, res in enumerate(self.enc_attn_resolutions): - split_idxes.append(res ** 2) - quant_seqs = torch.split(image_seq,split_idxes, dim=1) - - for i, seq in enumerate(quant_seqs): - z = self.quantizers[self.quant_idxs[i]].embedding(seq) - b, n, c = z.shape - h = w = int(math.sqrt(n)) - z = rearrange(z, 'b (h w) c -> b c h w', h = h, w = w) - quants.append(z) - - else: - quants = input - - dec = None - - for i, decoder in enumerate(self.decoders): - if self.args.finetune: - if i == 0: - dec = self.post_quants[i](quants[-(i+1)]) - quant = self.connectors[i](dec) - else: - quant = self.post_quants[i](quants[-(i+1)]) - if i != 0: - quant = torch.cat((quant, dec), dim=1) - dec = decoder(quant) - - return dec - - @torch.no_grad() - def get_codebook_indices(self, img): # Reshapes the output of self.encode() into a sequence for BiART training - b = img.shape[0] - concat_indices = None - _, _, [_, _, indices] = self.encode(img) - - for i in self.quant_idxs: - idxs = indices[i] - n = idxs.shape[0] // b - idxs = idxs.view(b,n) - if concat_indices == None: - concat_indices = idxs - else: - concat_indices = torch.cat((concat_indices,idxs), dim=1) - return concat_indices - - def forward(self, input, return_var=False): - quant, diff, info = self.encode(input) - dec = self.decode(quant, return_var=return_var) - return dec, diff, info[0] - - 
def get_trainable_params(self): - return [params for params in self.parameters() if params.requires_grad] - - - def get_last_layer(self): - return self.decoders[-1].conv_out.weight - - @torch.no_grad() - def gather_and_save(self, outputs, batch_idx): - # this out is now the full size of the batch - outputs = self.all_gather(outputs) - originals = [] - generated = [] - - x_i_refs = outputs["input_images"] - for images in x_i_refs: - for img in images: - originals.append(img) - x_i_gens = outputs["generated_images"] - for images in x_i_gens: - for img in images: - generated.append(img) - - self.save_results(self.args.log_dir, batch_idx, originals, generated) - - - @rank_zero_only - def save_results(self, log_dir, batch_idx, original_images, generated): - result_dir = os.path.join(log_dir,'original_images/') - os.makedirs(result_dir,exist_ok=True) - for idx, image in enumerate(original_images): - image = T.ToPILImage()(image) - image.save(os.path.join(result_dir,'{}_{}.png'.format(batch_idx, idx))) - - result_dir = os.path.join(log_dir,'generated_images/') - os.makedirs(result_dir,exist_ok=True) - for idx, image in enumerate(generated): - image = T.ToPILImage()(image) - image.save(os.path.join(result_dir,'{}_{}.png'.format(batch_idx, idx))) - - def training_step(self, batch, batch_idx, optimizer_idx=0): - x = batch[0] - - if self.args.loss_type in ['lpips_l1', 'lpips_l2']: - xrec, qloss, perplexity = self(x) - aeloss = self.loss(xrec, x) - loss = aeloss + qloss - if self.args.loss_type == 'lpips_l1': - recloss = F.l1_loss(xrec, x) - else: - recloss = F.mse_loss(xrec, x) - self.log("train/lpips_loss", aeloss - recloss, prog_bar=True, logger=True) - self.log("train/rec_loss", recloss, prog_bar=True, logger=True) - self.log("train/embed_loss", qloss, prog_bar=True, logger=True) - self.log("train/log_perplexity", perplexity, prog_bar=True, logger=True) - - else: - xrec, qloss, perplexity = self(x) - aeloss = self.loss(xrec, x) - loss = aeloss + qloss - self.log("train/rec_loss", aeloss, prog_bar=True, logger=True) - self.log("train/embed_loss", qloss, prog_bar=True, logger=True) - self.log("train/log_perplexity", perplexity, prog_bar=True, logger=True) - - self.log("train/total_loss", loss, prog_bar=False, logger=True) - if self.args.log_images: - return {'loss':loss, 'x':x.detach(), 'xrec':xrec.detach()} - - return loss - - def validation_step(self, batch, batch_idx): - x = batch[0] - - if self.args.loss_type in ['lpips_l1', 'lpips_l2']: - xrec, qloss, perplexity = self(x) - aeloss = self.loss(xrec, x) - loss = aeloss + qloss - if self.args.loss_type == 'lpips_l1': - recloss = F.l1_loss(xrec, x) - else: - recloss = F.mse_loss(xrec, x) - self.log("val/lpips_loss", aeloss - recloss, prog_bar=True, logger=True) - self.log("val/rec_loss", recloss, prog_bar=True, logger=True) - self.log("val/embed_loss", qloss, prog_bar=True, logger=True) - self.log("val/log_perplexity", perplexity, prog_bar=True, logger=True) - - else: - xrec, qloss, perplexity = self(x) - aeloss = self.loss(xrec, x) - loss = aeloss + qloss - self.log("val/rec_loss", aeloss, prog_bar=True, logger=True) - self.log("val/embed_loss", qloss, prog_bar=True, logger=True) - self.log("val/log_perplexity", perplexity, prog_bar=True, logger=True) - - self.log("val/total_loss", loss, prog_bar=False, logger=True) - - if self.args.log_images: - return {'loss':loss, 'x':x.detach(), 'xrec':xrec.detach()} - - return loss - - @torch.no_grad() - def test_step(self, batch, batch_idx): - x = batch[0] - - x_ref = normalize(x) - x_rec, _, _ = self(x) - 
x_rec = normalize(x_rec) - - input_images = x_ref - generated_images = x_rec - - return {'input_images':input_images, 'generated_images':generated_images} - - - def configure_optimizers(self): - lr = self.hparams.learning_rate - opt = torch.optim.AdamW(self.get_trainable_params(), lr=lr, betas=(0.9, 0.999),weight_decay=1e-5) - if self.args.lr_decay: - scheduler = ReduceLROnPlateau( - opt, - mode="min", - factor=0.5, - patience=10, - cooldown=10, - min_lr=1e-6, - verbose=True, - ) - sched = {'scheduler':scheduler, 'monitor':'train/total_loss'} - return [opt], [sched] - else: - return [opt], [] - - def get_last_layer(self): - return self.decoders[-1].conv_out.weight - -class AugVAE(VQVAE): - def __init__(self, - args, batch_size, learning_rate, - ignore_keys=[], finetuned=False, - ft_attn_resolutions=None, - ft_loss_type=None, ft_args=None, - ): - super().__init__(args, batch_size, learning_rate, - ignore_keys=ignore_keys, finetuned=finetuned, - ft_attn_resolutions=ft_attn_resolutions, - ft_loss_type=ft_loss_type, ft_args=ft_args - ) - for quantizer in self.quantizers: - del quantizer - del self.quantizers - - self.quantizers = nn.ModuleList() - - quantizer = EMAVectorQuantizer(num_tokens=args.num_tokens, - codebook_dim=args.codebook_dim, - beta=args.quant_beta, decay=args.quant_ema_decay, eps=args.quant_ema_eps) - self.quantizers.append(quantizer) - for i, res in enumerate(args.attn_resolutions[1:]): - quantizer = EMAVectorQuantizer(num_tokens=args.num_tokens, - codebook_dim=args.codebook_dim, - beta=args.quant_beta, decay=args.quant_ema_decay, eps=args.quant_ema_eps) - self.quantizers.append(quantizer) - - self.quant_weight_share() - - def quant_weight_share(self): - for i in range(len(self.pre_quants[1:])): - self.pre_quants[i+1].weight = self.pre_quants[0].weight - for i in range(len(self.quantizers[1:])): - self.quantizers[i+1].embedding.weight = self.quantizers[0].embedding.weight - self.quantizers[i+1].embedding.cluster_size = self.quantizers[0].embedding.cluster_size - self.quantizers[i+1].embedding.embed_avg = self.quantizers[0].embedding.embed_avg - for i in range(len(self.post_quants[1:])): - self.post_quants[i+1].weight = self.post_quants[0].weight diff --git a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/modules/losses/lpips.py b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/modules/losses/lpips.py deleted file mode 100644 index d3b15b0abb23b1a80d626558aa50a30e462edfd0..0000000000000000000000000000000000000000 --- a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/modules/losses/lpips.py +++ /dev/null @@ -1,139 +0,0 @@ -"""Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models""" - -import torch -import torch.nn as nn -from torchvision import models -from collections import namedtuple - -from latent_verse.utils.util import get_ckpt_path - -class LPIPS(nn.Module): - # Learned perceptual metric - def __init__(self, use_dropout=True): - super().__init__() - self.scaling_layer = ScalingLayer() - self.chns = [64, 128, 256, 512, 512] # vg16 features - self.net = vgg16(pretrained=True, requires_grad=False) - self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout) - self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout) - self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout) - self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout) - self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout) - self.load_from_pretrained() - for param in self.parameters(): - 
param.requires_grad = False - - def load_from_pretrained(self, name="vgg_lpips"): - ckpt = get_ckpt_path(name, "latent_verse/modules/autoencoder/lpips") - self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False) - print("loaded pretrained LPIPS loss from {}".format(ckpt)) - - @classmethod - def from_pretrained(cls, name="vgg_lpips"): - if name != "vgg_lpips": - raise NotImplementedError - model = cls() - ckpt = get_ckpt_path(name) - model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False) - return model - - def forward(self, input, target): - in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target)) - outs0, outs1 = self.net(in0_input), self.net(in1_input) - feats0, feats1, diffs = {}, {}, {} - lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4] - for kk in range(len(self.chns)): - feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk]) - diffs[kk] = (feats0[kk] - feats1[kk]) ** 2 - - res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))] - val = res[0] - for l in range(1, len(self.chns)): - val = val + res[l] - return val - -class RecPerceptualLoss(nn.Module): - def __init__(self, loss_type = 'lpips_l1', perceptual_weight=1.0): - super().__init__() - self.loss_type = loss_type - self.perceptual_weight = perceptual_weight - if loss_type == 'lpips_l1': - self.rec_loss = nn.L1Loss(reduction='none') - else: - self.rec_loss = nn.MSELoss(reduction='none') - self.perceptual_loss = LPIPS().eval() - - def forward(self, xrec, x): - p_loss = self.perceptual_loss(x, xrec) - rec_loss = self.rec_loss(xrec, x) - loss = rec_loss + self.perceptual_weight * p_loss - loss = torch.mean(loss) - return loss - -class ScalingLayer(nn.Module): - def __init__(self): - super(ScalingLayer, self).__init__() - self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None]) - self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None]) - - def forward(self, inp): - return (inp - self.shift) / self.scale - - -class NetLinLayer(nn.Module): - """ A single linear layer which does a 1x1 conv """ - def __init__(self, chn_in, chn_out=1, use_dropout=False): - super(NetLinLayer, self).__init__() - layers = [nn.Dropout(), ] if (use_dropout) else [] - layers = layers + [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ] - self.model = nn.Sequential(*layers) - - -class vgg16(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(vgg16, self).__init__() - vgg_pretrained_features = models.vgg16(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.N_slices = 5 - for x in range(4): - self.slice1.add_module(str(x), vgg_pretrained_features[x]) - for x in range(4, 9): - self.slice2.add_module(str(x), vgg_pretrained_features[x]) - for x in range(9, 16): - self.slice3.add_module(str(x), vgg_pretrained_features[x]) - for x in range(16, 23): - self.slice4.add_module(str(x), vgg_pretrained_features[x]) - for x in range(23, 30): - self.slice5.add_module(str(x), vgg_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1_2 = h - h = self.slice2(h) - h_relu2_2 = h - h = self.slice3(h) - h_relu3_3 = h - h = 
self.slice4(h) - h_relu4_3 = h - h = self.slice5(h) - h_relu5_3 = h - vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3']) - out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3) - return out - - -def normalize_tensor(x,eps=1e-10): - norm_factor = torch.sqrt(torch.sum(x**2,dim=1,keepdim=True)) - return x/(norm_factor+eps) - - -def spatial_average(x, keepdim=True): - return x.mean([2,3],keepdim=keepdim) - diff --git a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/modules/vqvae/quantize.py b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/modules/vqvae/quantize.py deleted file mode 100644 index 455897e5aceee151cb6151b2c1e45618303d4e49..0000000000000000000000000000000000000000 --- a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/modules/vqvae/quantize.py +++ /dev/null @@ -1,120 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from einops import rearrange - -class VectorQuantizer(nn.Module): - def __init__(self, num_tokens, codebook_dim, beta, normalized=False, contrast=False): - super().__init__() - self.codebook_dim = codebook_dim - self.num_tokens = num_tokens - self.beta = beta - - self.embedding = nn.Embedding(self.num_tokens, self.codebook_dim) - - def forward(self, z): - # reshape z -> (batch, height, width, channel) and flatten - z = rearrange(z, 'b c h w -> b h w c') - z_flattened = z.reshape(-1, self.codebook_dim) - - # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z - d = z_flattened.pow(2).sum(dim=1, keepdim=True) + \ - self.embedding.weight.pow(2).sum(dim=1) - 2 * \ - torch.einsum('bd,nd->bn', z_flattened, self.embedding.weight) # 'n d -> d n' - - - encoding_indices = torch.argmin(d, dim=1) - z_q = self.embedding(encoding_indices).view(z.shape) - encodings = F.one_hot(encoding_indices, self.num_tokens).type(z.dtype) - avg_probs = torch.mean(encodings, dim=0) - perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10))) - - # compute loss for embedding - loss = self.beta * F.mse_loss(z_q.detach(), z) + F.mse_loss(z_q, z.detach()) - - # preserve gradients - z_q = z + (z_q - z).detach() - - # reshape back to match original input shape - #z_q, 'b h w c -> b c h w' - z_q = rearrange(z_q, 'b h w c -> b c h w') - return z_q, loss, (perplexity, encodings, encoding_indices) - - -class EmbeddingEMA(nn.Module): - def __init__(self, num_tokens, codebook_dim, decay=0.99, eps=1e-5): - super().__init__() - self.decay = decay - self.eps = eps - weight = torch.randn(num_tokens, codebook_dim) - self.weight = nn.Parameter(weight, requires_grad = False) - self.cluster_size = nn.Parameter(torch.zeros(num_tokens), requires_grad = False) - self.embed_avg = nn.Parameter(weight.clone(), requires_grad = False) - self.update = True - - def forward(self, embed_id): - return F.embedding(embed_id, self.weight) - - def cluster_size_ema_update(self, new_cluster_size): - self.cluster_size.data.mul_(self.decay).add_(new_cluster_size, alpha=1 - self.decay) - - def embed_avg_ema_update(self, new_embed_avg): - self.embed_avg.data.mul_(self.decay).add_(new_embed_avg, alpha=1 - self.decay) - - def weight_update(self, num_tokens): - n = self.cluster_size.sum() - smoothed_cluster_size = ( - (self.cluster_size + self.eps) / (n + num_tokens * self.eps) * n - ) - #normalize embedding average with smoothed cluster size - embed_normalized = self.embed_avg / smoothed_cluster_size.unsqueeze(1) - self.weight.data.copy_(embed_normalized) - 
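For orientation, the exponential-moving-average codebook refresh implemented by `EmbeddingEMA` above (and driven from `EMAVectorQuantizer.forward` below) reduces to the following standalone sketch; the tensor sizes and the toy encoder output are illustrative only, not values taken from this repository:

```
import torch
import torch.nn.functional as F

num_tokens, codebook_dim, decay, eps = 8, 4, 0.99, 1e-5
weight = torch.randn(num_tokens, codebook_dim)   # codebook vectors
cluster_size = torch.zeros(num_tokens)           # EMA of assignment counts
embed_avg = weight.clone()                       # EMA of summed assigned encoder vectors

z_flat = torch.randn(16, codebook_dim)           # toy flattened encoder outputs
# squared distances (z - e)^2 = z^2 + e^2 - 2 z.e, as in the quantizer forward pass
d = (z_flat.pow(2).sum(1, keepdim=True)
     + weight.pow(2).sum(1)
     - 2 * z_flat @ weight.t())
indices = d.argmin(dim=1)
encodings = F.one_hot(indices, num_tokens).float()

# EMA updates mirroring cluster_size_ema_update / embed_avg_ema_update / weight_update
cluster_size.mul_(decay).add_(encodings.sum(0), alpha=1 - decay)
embed_avg.mul_(decay).add_(encodings.t() @ z_flat, alpha=1 - decay)
n = cluster_size.sum()
smoothed = (cluster_size + eps) / (n + num_tokens * eps) * n   # Laplace smoothing
weight.copy_(embed_avg / smoothed.unsqueeze(1))                # normalized codebook refresh
```

In the full quantizer this update runs only in training mode, after the nearest-code assignment, so the codebook tracks the encoder distribution without a gradient-based codebook loss term.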
-class EMAVectorQuantizer(nn.Module): - def __init__(self, num_tokens, codebook_dim, beta, decay=0.99, eps=1e-5): - super().__init__() - self.codebook_dim = codebook_dim - self.num_tokens = num_tokens - self.beta = beta - self.embedding = EmbeddingEMA(self.num_tokens, self.codebook_dim, decay, eps) - - def forward(self, z): - # reshape z -> (batch, height, width, channel) and flatten - #z, 'b c h w -> b h w c' - z = rearrange(z, 'b c h w -> b h w c') - z_flattened = z.reshape(-1, self.codebook_dim) - - # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z - d = z_flattened.pow(2).sum(dim=1, keepdim=True) + \ - self.embedding.weight.pow(2).sum(dim=1) - 2 * \ - torch.einsum('bd,nd->bn', z_flattened, self.embedding.weight) # 'n d -> d n' - - - encoding_indices = torch.argmin(d, dim=1) - - z_q = self.embedding(encoding_indices).view(z.shape) - encodings = F.one_hot(encoding_indices, self.num_tokens).type(z.dtype) - avg_probs = torch.mean(encodings, dim=0) - perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10))) - - if self.training and self.embedding.update: - #EMA cluster size - encodings_sum = encodings.sum(0) - self.embedding.cluster_size_ema_update(encodings_sum) - #EMA embedding average - embed_sum = encodings.transpose(0,1) @ z_flattened - self.embedding.embed_avg_ema_update(embed_sum) - #normalize embed_avg and update weight - self.embedding.weight_update(self.num_tokens) - - # compute loss for embedding - loss = self.beta * F.mse_loss(z_q.detach(), z) - - # preserve gradients - z_q = z + (z_q - z).detach() - - # reshape back to match original input shape - #z_q, 'b h w c -> b c h w' - z_q = rearrange(z_q, 'b h w c -> b c h w') - return z_q, loss, (perplexity, encodings, encoding_indices) diff --git a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/modules/vqvae/vae.py b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/modules/vqvae/vae.py deleted file mode 100644 index 57cdf54cb70cbcf4f3e26b927402862d885943e8..0000000000000000000000000000000000000000 --- a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/modules/vqvae/vae.py +++ /dev/null @@ -1,413 +0,0 @@ -#from https://github.com/CompVis/taming-transformers -# pytorch_diffusion + derived encoder decoder -import math -import torch -import torch.nn as nn -import numpy as np - - -def get_timestep_embedding(timesteps, embedding_dim): - """ - This matches the implementation in Denoising Diffusion Probabilistic Models: - From Fairseq. - Build sinusoidal embeddings. - This matches the implementation in tensor2tensor, but differs slightly - from the description in Section 3.5 of "Attention Is All You Need". 
- """ - assert len(timesteps.shape) == 1 - - half_dim = embedding_dim // 2 - emb = math.log(10000) / (half_dim - 1) - emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) - emb = emb.type_as(timesteps) - emb = timesteps.float()[:, None] * emb[None, :] - emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) - if embedding_dim % 2 == 1: # zero pad - emb = torch.nn.functional.pad(emb, (0,1,0,0)) - return emb - - -def nonlinearity(x): - # swish - return x*torch.sigmoid(x) - - -def Normalize(in_channels): - return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) - - -class Upsample(nn.Module): - def __init__(self, in_channels, with_conv, stride=2): - super().__init__() - self.with_conv = with_conv - self.stride = stride - if self.with_conv: - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - x = torch.nn.functional.interpolate(x, scale_factor=self.stride, mode="nearest") - if self.with_conv: - x = self.conv(x) - return x - - -class Downsample(nn.Module): - def __init__(self, in_channels, with_conv, stride=2): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=2, - padding=0) - if stride == 4: - self.conv_2 = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=2, - padding=0) - self.stride = stride - - def forward(self, x): - if self.with_conv: - pad = (0,1,0,1) - x = torch.nn.functional.pad(x, pad, mode="constant", value=0) - x = self.conv(x) - if self.stride == 4: - x = torch.nn.functional.pad(x, pad, mode="constant", value=0) - x = self.conv_2(x) - else: - x = torch.nn.functional.avg_pool2d(x, kernel_size=self.stride, stride=self.stride) - return x - - -class ResnetBlock(nn.Module): - def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, - dropout, temb_channels=512): - super().__init__() - self.in_channels = in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - self.use_conv_shortcut = conv_shortcut - - self.norm1 = Normalize(in_channels) - self.conv1 = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - if temb_channels > 0: - self.temb_proj = torch.nn.Linear(temb_channels, - out_channels) - self.norm2 = Normalize(out_channels) - self.dropout = torch.nn.Dropout(dropout) - self.conv2 = torch.nn.Conv2d(out_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - self.conv_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - else: - self.nin_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x, temb): - h = x - h = self.norm1(h) - h = nonlinearity(h) - h = self.conv1(h) - - if temb is not None: - h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] - - h = self.norm2(h) - h = nonlinearity(h) - h = self.dropout(h) - h = self.conv2(h) - - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - x = self.conv_shortcut(x) - else: - x = self.nin_shortcut(x) - - return x+h - -class AttnBlock(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = 
torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = q.reshape(b,c,h*w) - q = q.permute(0,2,1) # b,hw,c - k = k.reshape(b,c,h*w) # b,c,hw - w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = v.reshape(b,c,h*w) - w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) - h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] - h_ = h_.reshape(b,c,h,w) - - h_ = self.proj_out(h_) - - return x+h_ - -class Encoder(nn.Module): - def __init__(self, *, hidden_dim, in_channels, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, - resolution, z_channels, double_z=True, use_attn=False, **ignore_kwargs): - super().__init__() - self.hidden_dim = hidden_dim - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - self.use_attn = use_attn - # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.hidden_dim, - kernel_size=3, - stride=1, - padding=1) - - curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - if self.use_attn: - attn = nn.ModuleList() - block_in = hidden_dim*in_ch_mult[i_level] - block_out = hidden_dim*ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if self.use_attn: - if curr_res in attn_resolutions: - attn.append(AttnBlock(block_in)) - down = nn.Module() - down.block = block - if self.use_attn: - down.attn = attn - if i_level != self.num_resolutions-1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - if self.use_attn: - self.mid.attn_1 = AttnBlock(block_in) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - 2*z_channels if double_z else z_channels, - kernel_size=3, - stride=1, - padding=1) - - - def forward(self, x): - #assert x.shape[2] == x.shape[3] == self.resolution, "{}, {}, {}".format(x.shape[2], x.shape[3], self.resolution) - - # timestep embedding - temb = None - - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if self.use_attn: - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions-1: - 
hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = self.mid.block_1(h, temb) - if self.use_attn: - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class Decoder(nn.Module): - def __init__(self, *, hidden_dim, out_channels, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, give_pre_end=False, use_attn=False, **ignorekwargs): - super().__init__() - self.hidden_dim = hidden_dim - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - self.give_pre_end = give_pre_end - self.use_attn = use_attn - # compute in_ch_mult, block_in and curr_res at lowest res - in_ch_mult = (1,)+tuple(ch_mult) - block_in = hidden_dim*ch_mult[self.num_resolutions-1] - curr_res = resolution // 2**(self.num_resolutions-1) - self.z_shape = (1,z_channels,curr_res,curr_res) - print("Working with z of shape {} = {} dimensions.".format( - self.z_shape, np.prod(self.z_shape))) - - # z to block_in - self.conv_in = torch.nn.Conv2d(z_channels, - block_in, - kernel_size=3, - stride=1, - padding=1) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - if self.use_attn: - self.mid.attn_1 = AttnBlock(block_in) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - if self.use_attn: - attn = nn.ModuleList() - block_out = hidden_dim*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if self.use_attn: - if curr_res in attn_resolutions: - attn.append(AttnBlock(block_in)) - up = nn.Module() - up.block = block - if self.use_attn: - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, z): - #assert z.shape[1:] == self.z_shape[1:] - self.last_z_shape = z.shape - - # timestep embedding - temb = None - - # z to block_in - h = self.conv_in(z) - - # middle - h = self.mid.block_1(h, temb) - if self.use_attn: - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block](h, temb) - if self.use_attn: - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - if self.give_pre_end: - return h - - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - diff --git a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/utils/util.py b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/utils/util.py deleted file mode 100644 index 
6ea5323a850a47a631c6781f750ed23fb17db5bb..0000000000000000000000000000000000000000 --- a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/latent_verse/utils/util.py +++ /dev/null @@ -1,275 +0,0 @@ -#from https://github.com/CompVis/taming-transformers -import os, hashlib -import requests -from tqdm import tqdm -from pytorch_lightning.utilities.distributed import rank_zero_only -import numpy as np - -def normalize(images): - normed=[] - for img in images: - img = (img - img.min())/(img.max()-img.min()) - normed.append(img) - return torch.stack(normed) - -URL_MAP = { - "vgg_lpips": "https://heibox.uni-heidelberg.de/f/607503859c864bc1b30b/?dl=1" -} - -CKPT_MAP = { - "vgg_lpips": "vgg.pth" -} - -MD5_MAP = { - "vgg_lpips": "d507d7349b931f0638a25a48a722f98a" -} - -class SampleGenerator(object): - """Iterator which returns multiple samples of a given input data. - - Can be used in place of a PyTorch `DataLoader` to generate synthetic data. - - Args: - data: The data which should be returned at each iterator step. - sample_count: The maximum number of `data` samples to be returned. - """ - - def __init__(self, data, sample_count): - self._data = data - self._sample_count = sample_count - self._count = 0 - - def __iter__(self): - return SampleGenerator(self._data, self._sample_count) - - def __len__(self): - return self._sample_count - - def __next__(self): - return self.next() - - def next(self): - if self._count >= self._sample_count: - raise StopIteration - self._count += 1 - return self._data - -def download(url, local_path, chunk_size=1024): - os.makedirs(os.path.split(local_path)[0], exist_ok=True) - with requests.get(url, stream=True) as r: - total_size = int(r.headers.get("content-length", 0)) - with tqdm(total=total_size, unit="B", unit_scale=True) as pbar: - with open(local_path, "wb") as f: - for data in r.iter_content(chunk_size=chunk_size): - if data: - f.write(data) - pbar.update(chunk_size) - - -def md5_hash(path): - with open(path, "rb") as f: - content = f.read() - return hashlib.md5(content).hexdigest() - - -def get_ckpt_path(name, root, check=False): - assert name in URL_MAP - path = os.path.join(root, CKPT_MAP[name]) - if not os.path.exists(path) or (check and not md5_hash(path) == MD5_MAP[name]): - print("Downloading {} model from {} to {}".format(name, URL_MAP[name], path)) - download(URL_MAP[name], path) - md5 = md5_hash(path) - assert md5 == MD5_MAP[name], md5 - return path - - -class KeyNotFoundError(Exception): - def __init__(self, cause, keys=None, visited=None): - self.cause = cause - self.keys = keys - self.visited = visited - messages = list() - if keys is not None: - messages.append("Key not found: {}".format(keys)) - if visited is not None: - messages.append("Visited: {}".format(visited)) - messages.append("Cause:\n{}".format(cause)) - message = "\n".join(messages) - super().__init__(message) - - -def retrieve( - list_or_dict, key, splitval="/", default=None, expand=True, pass_success=False -): - """Given a nested list or dict return the desired value at key expanding - callable nodes if necessary and :attr:`expand` is ``True``. The expansion - is done in-place. - - Parameters - ---------- - list_or_dict : list or dict - Possibly nested list or dictionary. - key : str - key/to/value, path like string describing all keys necessary to - consider to get to the desired value. List indices can also be - passed here. - splitval : str - String that defines the delimiter between keys of the - different depth levels in `key`. 
- default : obj - Value returned if :attr:`key` is not found. - expand : bool - Whether to expand callable nodes on the path or not. - - Returns - ------- - The desired value or if :attr:`default` is not ``None`` and the - :attr:`key` is not found returns ``default``. - - Raises - ------ - Exception if ``key`` not in ``list_or_dict`` and :attr:`default` is - ``None``. - """ - - keys = key.split(splitval) - - success = True - try: - visited = [] - parent = None - last_key = None - for key in keys: - if callable(list_or_dict): - if not expand: - raise KeyNotFoundError( - ValueError( - "Trying to get past callable node with expand=False." - ), - keys=keys, - visited=visited, - ) - list_or_dict = list_or_dict() - parent[last_key] = list_or_dict - - last_key = key - parent = list_or_dict - - try: - if isinstance(list_or_dict, dict): - list_or_dict = list_or_dict[key] - else: - list_or_dict = list_or_dict[int(key)] - except (KeyError, IndexError, ValueError) as e: - raise KeyNotFoundError(e, keys=keys, visited=visited) - - visited += [key] - # final expansion of retrieved value - if expand and callable(list_or_dict): - list_or_dict = list_or_dict() - parent[last_key] = list_or_dict - except KeyNotFoundError as e: - if default is None: - raise e - else: - list_or_dict = default - success = False - - if not pass_success: - return list_or_dict - else: - return list_or_dict, success - - -import torch -import torch.nn as nn - - -def count_params(model): - total_params = sum(p.numel() for p in model.parameters()) - return total_params - - -class ActNorm(nn.Module): - def __init__(self, num_features, logdet=False, affine=True, - allow_reverse_init=False): - assert affine - super().__init__() - self.logdet = logdet - self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1)) - self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1)) - self.allow_reverse_init = allow_reverse_init - - self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8)) - - def initialize(self, input): - with torch.no_grad(): - flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1) - mean = ( - flatten.mean(1) - .unsqueeze(1) - .unsqueeze(2) - .unsqueeze(3) - .permute(1, 0, 2, 3) - ) - std = ( - flatten.std(1) - .unsqueeze(1) - .unsqueeze(2) - .unsqueeze(3) - .permute(1, 0, 2, 3) - ) - - self.loc.data.copy_(-mean) - self.scale.data.copy_(1 / (std + 1e-6)) - - def forward(self, input, reverse=False): - if reverse: - return self.reverse(input) - if len(input.shape) == 2: - input = input[:,:,None,None] - squeeze = True - else: - squeeze = False - - _, _, height, width = input.shape - - if self.training and self.initialized.item() == 0: - self.initialize(input) - self.initialized.fill_(1) - - h = self.scale * (input + self.loc) - - if squeeze: - h = h.squeeze(-1).squeeze(-1) - - if self.logdet: - log_abs = torch.log(torch.abs(self.scale)) - logdet = height*width*torch.sum(log_abs) - logdet = logdet * torch.ones(input.shape[0]).to(input) - return h, logdet - - return h - - def reverse(self, output): - if self.training and self.initialized.item() == 0: - if not self.allow_reverse_init: - raise RuntimeError( - "Initializing ActNorm in reverse direction is " - "disabled by default. Use allow_reverse_init=True to enable." 
- ) - else: - self.initialize(output) - self.initialized.fill_(1) - - if len(output.shape) == 2: - output = output[:,:,None,None] - squeeze = True - else: - squeeze = False - - h = output / self.scale - self.loc - - if squeeze: - h = h.squeeze(-1).squeeze(-1) - return h diff --git a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/requirements.txt b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/requirements.txt deleted file mode 100644 index 00225f721a7ca759c979745fe1b32ee1b96be6a8..0000000000000000000000000000000000000000 --- a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -torch -torchvision -pudb -pytorch-lightning -einops -regex -ftfy -cython -WebDataset -pillow -wandb -sklearn -tensorboard -mkl-service \ No newline at end of file diff --git a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/train_vae.py b/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/train_vae.py deleted file mode 100644 index 201a872fce347627e5de19d52279464f3690dcae..0000000000000000000000000000000000000000 --- a/multimodal/Language-Image_Pre-Training/L-Verse/pytorch/train_vae.py +++ /dev/null @@ -1,239 +0,0 @@ -import argparse, os, datetime, glob, yaml -import torch - -from latent_verse.models.vqvae import AugVAE -from latent_verse.loader import ImageDataModule -from latent_verse.callbacks import ReconstructedImageLogger - -import pytorch_lightning as pl -from pytorch_lightning import seed_everything -from pytorch_lightning import Trainer -from pytorch_lightning.callbacks import LearningRateMonitor -from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint - -if __name__ == "__main__": - - now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") - - config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False) - parser.add_argument('-c', '--config', default='', type=str, metavar='FILE', - help='YAML config file specifying default arguments') - - - parser = argparse.ArgumentParser(description='AugVAE Training') - - #path configuration - parser.add_argument('--train_dir', type=str, default='dataset/train/', - help='path to train dataset') - parser.add_argument('--val_dir', type=str, default='dataset/val/', - help='path to val dataset') - parser.add_argument('--log_dir', type=str, default='results/', - help='path to save logs') - parser.add_argument('--backup_dir', type=str, default='backups/', - help='path to save backups for sudden crash') - parser.add_argument('--ckpt_path', type=str, - help='path to previous checkpoint') - parser.add_argument('--pretrained_path', type=str, - help='path to pretrained codebook') - - #training configuration - parser.add_argument('--resume', action='store_true', default=False, - help='whether to resume from checkpoint') - parser.add_argument('--finetune', action='store_true', default=False, - help='finetune pretrained model') - parser.add_argument('--backup', action='store_true', default=False, - help='save backup and load from backup if restart happens') - parser.add_argument('--backup_steps', type =int, default = 1000, - help='saves backup every n training steps') - parser.add_argument('--log_images', action='store_true', default=False, - help='log image outputs. not recommended for tpus') - parser.add_argument('--image_log_steps', type=int, default=1000, - help='log image outputs for every n step. 
not recommended for tpus') - parser.add_argument('--refresh_rate', type=int, default=1, - help='progress bar refresh rate') - parser.add_argument('--precision', type=int, default=32, - help='precision for training') - - parser.add_argument('--fake_data', action='store_true', default=False, - help='using fake_data for debugging') - - - parser.add_argument('--seed', type=int, default=42, - help='random seed') - parser.add_argument('--gpus', type=int, default=1, - help='number of gpus') - parser.add_argument('--gpu_dist', action='store_true', default=False, - help='distributed training with gpus') - - parser.add_argument('--num_sanity_val_steps', type=int, default=0, - help='num_sanity_val_steps') - parser.add_argument('--val_percent_check', type=int, default=100, - help='num_val_percent') - parser.add_argument('--learning_rate', default=4.5e-6, type=float, - help='base learning rate') - parser.add_argument('--lr_decay', action='store_true', default=False, - help = 'use learning rate decay') - - parser.add_argument('--batch_size', type=int, default=8, - help='training settings') - parser.add_argument('--epochs', type=int, default=100, - help='training settings') - parser.add_argument('--num_workers', type=int, default=16, - help='training settings') - parser.add_argument('--img_size', type=int, default=256, - help='training settings') - parser.add_argument('--resize_ratio', type=float, default=0.75, - help='Random resized crop lower ratio') - - parser.add_argument('--debug', action='store_true', default=False, - help='debug run') - parser.add_argument('--web_dataset',action='store_true', default=False, - help='enable web_dataset') - parser.add_argument('--dataset_size', nargs='+', type=int, default=[1e9], - help='training settings') - - #model configuration - parser.add_argument('--use_attn', type=bool, default=False, help='use attention') - parser.add_argument('--codebook_dim', type=int, default=256, - help='number of embedding dimension for codebook') - parser.add_argument('--num_tokens', type=int, default=1024, - help='codebook size') - parser.add_argument('--double_z', type=bool, default=False, - help='double z for encoder') - parser.add_argument('--z_channels', type=int, default=256, - help='image latent feature dimension') - parser.add_argument('--resolution', type=int, default=256, - help='image resolution') - parser.add_argument('--in_channels', type=int, default=3, - help='input image channel') - parser.add_argument('--out_channels', type=int, default=3, - help='output image channel') - parser.add_argument('--hidden_dim', type=int, default=128, - help='hidden dimension init size') - parser.add_argument('--ch_mult', nargs='+', type=int, default=[1,1,2,2,4], - help='resnet channel multiplier') - parser.add_argument('--num_res_blocks', type=int, default=2, - help='number of resnet blocks') - parser.add_argument('--attn_resolutions', nargs='+', type=int, default=[16], - help='model settings') - parser.add_argument('--dropout', type=float, default=0.0, - help='model settings') - parser.add_argument('--quant_beta', type=float, default=0.25, - help='quantizer beta') - parser.add_argument('--quant_ema_decay', type=float, default=0.99, - help='quantizer ema decay') - parser.add_argument('--quant_ema_eps', type=float, default=1e-5, - help='quantizer ema epsilon') - - #loss configuration - parser.add_argument('--loss_type', type=str, default='mse') - parser.add_argument('--p_loss_weight', type = float, default=0.1, - help = 'Perceptual loss weight') - parser.add_argument('--codebook_weight', 
type=float, default=1.0, - help='lossconfig') - - #misc configuration - - args_config, remaining = config_parser.parse_known_args() - if args_config.config: - with open(args_config.config, 'r') as f: - cfg = yaml.safe_load(f) - parser.set_defaults(**cfg) - - # The main arg parser parses the rest of the args, the usual - # defaults will have been overridden if config file specified. - args = parser.parse_args(remaining) - - #random seed fix - seed_everything(args.seed) - - tpus = None - gpus = args.gpus - if args.gpu_dist: - torch.distributed.init_process_group(backend='nccl') - args.world_size = torch.distributed.get_world_size() - else: - args.world_size = args.gpus - - args.base_lr = args.learning_rate - args.learning_rate = args.learning_rate * args.world_size * args.batch_size - - datamodule = ImageDataModule(args.train_dir, args.val_dir, - args.batch_size, args.num_workers, - args.img_size, args.resize_ratio, - args.fake_data, args.web_dataset, - world_size = args.world_size, - dataset_size = args.dataset_size) - - - - - if args.finetune: - model = AugVAE.load_from_checkpoint(args.pretrained_path) - model.setup_finetune(args.attn_resolutions, args.loss_type, args) - else: - model = AugVAE(args, args.batch_size, args.learning_rate) - - default_root_dir = args.log_dir - - if args.debug: - limit_train_batches = 100 - limit_test_batches = 100 - args.backup_steps = 10 - args.image_log_steps = 10 - else: - limit_train_batches = 1.0 - limit_test_batches = 1.0 - - if args.resume: - ckpt_path = args.ckpt_path - else: - ckpt_path = None - if args.val_percent_check ==0: - checkpoint_callback = ModelCheckpoint(monitor="train/total_loss") - else: - checkpoint_callback = ModelCheckpoint(monitor="val/total_loss") - - if args.backup: - args.backup_dir = os.path.join(args.backup_dir, f'augvae/{args.finetune}') - backup_callback = ModelCheckpoint( - dirpath=args.backup_dir, - every_n_train_steps = args.backup_steps, - filename='{epoch}_{step}' - ) - - if len(glob.glob(os.path.join(args.backup_dir,'*.ckpt'))) != 0 : - ckpt_path = sorted(glob.glob(os.path.join(args.backup_dir,'*.ckpt')))[-1] - if args.resume: - print("Setting default ckpt to {}. If this is unexpected behavior, remove {}".format(ckpt_path, ckpt_path)) - - - logger = pl.loggers.tensorboard.TensorBoardLogger(args.log_dir, name='augvae') - - - - trainer = Trainer(tpu_cores=tpus, gpus= gpus, default_root_dir=default_root_dir, - max_epochs=args.epochs, progress_bar_refresh_rate=args.refresh_rate,precision=args.precision, - accelerator='ddp', benchmark=True, - num_sanity_val_steps=args.num_sanity_val_steps, - limit_val_batches = args.val_percent_check, - limit_train_batches=limit_train_batches,limit_test_batches=limit_test_batches, - resume_from_checkpoint = ckpt_path, callbacks=[checkpoint_callback], - logger = logger) - - - trainer.callbacks.append(LearningRateMonitor()) - if args.backup: - trainer.callbacks.append(backup_callback) - if args.resume: - trainer.callbacks.append(ModelCheckpoint()) - if args.log_images: - trainer.callbacks.append(ReconstructedImageLogger(every_n_steps=args.image_log_steps, nrow=args.batch_size)) - - print("Setting batch size: {} learning rate: {:.2e} * {} * {} = {:.2e}".format(model.hparams.batch_size,args.base_lr,args.world_size,args.batch_size, model.hparams.learning_rate)) - - - trainer.fit(model, datamodule=datamodule) - - -
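For reference, the two-pass argument handling in the deleted `train_vae.py` (a pre-parser that reads only `-c/--config`, whose YAML contents become the main parser's defaults so explicit command-line flags still take precedence) can be reproduced with a minimal sketch; the option names and values below are a reduced, illustrative subset rather than the script's full interface:

```
import argparse, yaml

# Stand-in for a YAML file that would normally be passed via `-c/--config`
# (contents here are illustrative, not this repository's defaults).
cfg = yaml.safe_load("learning_rate: 1.0e-4\nbatch_size: 16\n")

config_parser = argparse.ArgumentParser(add_help=False)
config_parser.add_argument('-c', '--config', default='', type=str)

parser = argparse.ArgumentParser(description='AugVAE training (reduced sketch)')
parser.add_argument('--learning_rate', type=float, default=4.5e-6)
parser.add_argument('--batch_size', type=int, default=8)

# Pass 1: peel off --config; pass 2: YAML values become the parser's defaults,
# so flags given explicitly on the command line still override them.
_, remaining = config_parser.parse_known_args(['--batch_size', '32'])
parser.set_defaults(**cfg)
args = parser.parse_args(remaining)
print(args.learning_rate, args.batch_size)  # -> 0.0001 32
```

Values set in the YAML override the hard-coded defaults, while anything passed on the command line overrides both, which is why the script parses the config file before the remaining arguments.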