From 701647e95abcaffad01a3875d0901c5ef296a436 Mon Sep 17 00:00:00 2001 From: tjl1 <1242564795@qq.com> Date: Wed, 18 May 2022 20:42:49 +0800 Subject: [PATCH] =?UTF-8?q?Convmixer=E5=88=9D=E6=AC=A1=E6=8F=90=E4=BA=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- PyTorch/__init__.py | 0 PyTorch/contrib/__init__.py | 0 PyTorch/contrib/cv/__init__.py | 0 PyTorch/contrib/cv/classification/__init__.py | 0 .../cv/classification/convmixer/MANIFEST.in | 2 + .../cv/classification/convmixer/README.md | 47 + .../convmixer/avg_checkpoints.py | 121 + .../cv/classification/convmixer/benchmark.py | 481 + .../convmixer/clean_checkpoint.py | 74 + .../convmixer/convert/convert_from_mxnet.py | 107 + .../convmixer/convert/convert_nest_flax.py | 109 + .../cv/classification/convmixer/convmixer.py | 63 + ...cend.20220428-cp37-cp37m-linux_aarch64.whl | Bin 0 -> 395882 bytes .../convmixer/docs/archived_changes.md | 205 + .../classification/convmixer/docs/changes.md | 130 + .../convmixer/docs/feature_extraction.md | 173 + .../cv/classification/convmixer/docs/index.md | 80 + .../convmixer/docs/javascripts/tables.js | 6 + .../classification/convmixer/docs/models.md | 171 + .../convmixer/docs/models/.pages | 1 + .../docs/models/.templates/code_snippets.md | 62 + .../models/.templates/generate_readmes.py | 64 + .../models/adversarial-inception-v3.md | 98 + .../docs/models/.templates/models/advprop.md | 457 + .../models/.templates/models/big-transfer.md | 295 + .../models/.templates/models/csp-darknet.md | 81 + .../models/.templates/models/csp-resnet.md | 76 + .../models/.templates/models/csp-resnext.md | 77 + .../docs/models/.templates/models/densenet.md | 305 + .../docs/models/.templates/models/dla.md | 545 + .../docs/models/.templates/models/dpn.md | 256 + .../models/.templates/models/ecaresnet.md | 236 + .../.templates/models/efficientnet-pruned.md | 145 + .../models/.templates/models/efficientnet.md | 325 + .../.templates/models/ensemble-adversarial.md | 98 + .../models/.templates/models/ese-vovnet.md | 92 + .../docs/models/.templates/models/fbnet.md | 76 + .../.templates/models/gloun-inception-v3.md | 78 + .../models/.templates/models/gloun-resnet.md | 504 + .../models/.templates/models/gloun-resnext.md | 142 + .../models/.templates/models/gloun-senet.md | 63 + .../.templates/models/gloun-seresnext.md | 136 + .../.templates/models/gloun-xception.md | 66 + .../docs/models/.templates/models/hrnet.md | 358 + .../models/.templates/models/ig-resnext.md | 209 + .../.templates/models/inception-resnet-v2.md | 72 + .../models/.templates/models/inception-v3.md | 85 + .../models/.templates/models/inception-v4.md | 71 + .../.templates/models/legacy-se-resnet.md | 257 + .../.templates/models/legacy-se-resnext.md | 167 + .../models/.templates/models/legacy-senet.md | 74 + .../docs/models/.templates/models/mixnet.md | 164 + .../docs/models/.templates/models/mnasnet.md | 109 + .../models/.templates/models/mobilenet-v2.md | 210 + .../models/.templates/models/mobilenet-v3.md | 138 + .../docs/models/.templates/models/nasnet.md | 70 + .../models/.templates/models/noisy-student.md | 510 + .../docs/models/.templates/models/pnasnet.md | 71 + .../docs/models/.templates/models/regnetx.md | 492 + .../docs/models/.templates/models/regnety.md | 506 + .../docs/models/.templates/models/res2net.md | 260 + .../docs/models/.templates/models/res2next.md | 75 + .../docs/models/.templates/models/resnest.md | 408 + .../docs/models/.templates/models/resnet-d.md | 263 + 
.../docs/models/.templates/models/resnet.md | 378 + .../docs/models/.templates/models/resnext.md | 183 + .../docs/models/.templates/models/rexnet.md | 197 + .../models/.templates/models/se-resnet.md | 122 + .../docs/models/.templates/models/selecsls.md | 136 + .../models/.templates/models/seresnext.md | 167 + .../docs/models/.templates/models/skresnet.md | 112 + .../models/.templates/models/skresnext.md | 70 + .../docs/models/.templates/models/spnasnet.md | 62 + .../models/.templates/models/ssl-resnet.md | 131 + .../models/.templates/models/ssl-resnext.md | 217 + .../models/.templates/models/swsl-resnet.md | 131 + .../models/.templates/models/swsl-resnext.md | 217 + .../models/tf-efficientnet-condconv.md | 189 + .../.templates/models/tf-efficientnet-lite.md | 195 + .../.templates/models/tf-efficientnet.md | 602 + .../.templates/models/tf-inception-v3.md | 87 + .../models/.templates/models/tf-mixnet.md | 133 + .../.templates/models/tf-mobilenet-v3.md | 320 + .../docs/models/.templates/models/tresnet.md | 291 + .../.templates/models/vision-transformer.md | 319 + .../models/.templates/models/wide-resnet.md | 102 + .../docs/models/.templates/models/xception.md | 163 + .../docs/models/adversarial-inception-v3.md | 159 + .../convmixer/docs/models/advprop.md | 518 + .../convmixer/docs/models/big-transfer.md | 356 + .../convmixer/docs/models/csp-darknet.md | 142 + .../convmixer/docs/models/csp-resnet.md | 137 + .../convmixer/docs/models/csp-resnext.md | 138 + .../convmixer/docs/models/densenet.md | 366 + .../convmixer/docs/models/dla.md | 606 + .../convmixer/docs/models/dpn.md | 317 + .../convmixer/docs/models/ecaresnet.md | 297 + .../docs/models/efficientnet-pruned.md | 206 + .../convmixer/docs/models/efficientnet.md | 386 + .../docs/models/ensemble-adversarial.md | 159 + .../convmixer/docs/models/ese-vovnet.md | 153 + .../convmixer/docs/models/fbnet.md | 137 + .../docs/models/gloun-inception-v3.md | 139 + .../convmixer/docs/models/gloun-resnet.md | 565 + .../convmixer/docs/models/gloun-resnext.md | 203 + .../convmixer/docs/models/gloun-senet.md | 124 + .../convmixer/docs/models/gloun-seresnext.md | 197 + .../convmixer/docs/models/gloun-xception.md | 127 + .../convmixer/docs/models/hrnet.md | 419 + .../convmixer/docs/models/ig-resnext.md | 270 + .../docs/models/inception-resnet-v2.md | 133 + .../convmixer/docs/models/inception-v3.md | 146 + .../convmixer/docs/models/inception-v4.md | 132 + .../convmixer/docs/models/legacy-se-resnet.md | 318 + .../docs/models/legacy-se-resnext.md | 228 + .../convmixer/docs/models/legacy-senet.md | 135 + .../convmixer/docs/models/mixnet.md | 225 + .../convmixer/docs/models/mnasnet.md | 170 + .../convmixer/docs/models/mobilenet-v2.md | 271 + .../convmixer/docs/models/mobilenet-v3.md | 199 + .../convmixer/docs/models/nasnet.md | 131 + .../convmixer/docs/models/noisy-student.md | 571 + .../convmixer/docs/models/pnasnet.md | 132 + .../convmixer/docs/models/regnetx.md | 553 + .../convmixer/docs/models/regnety.md | 567 + .../convmixer/docs/models/res2net.md | 321 + .../convmixer/docs/models/res2next.md | 136 + .../convmixer/docs/models/resnest.md | 469 + .../convmixer/docs/models/resnet-d.md | 324 + .../convmixer/docs/models/resnet.md | 439 + .../convmixer/docs/models/resnext.md | 244 + .../convmixer/docs/models/rexnet.md | 258 + .../convmixer/docs/models/se-resnet.md | 183 + .../convmixer/docs/models/selecsls.md | 197 + .../convmixer/docs/models/seresnext.md | 228 + .../convmixer/docs/models/skresnet.md | 173 + .../convmixer/docs/models/skresnext.md | 131 + 
.../convmixer/docs/models/spnasnet.md | 123 + .../convmixer/docs/models/ssl-resnet.md | 192 + .../convmixer/docs/models/ssl-resnext.md | 278 + .../convmixer/docs/models/swsl-resnet.md | 192 + .../convmixer/docs/models/swsl-resnext.md | 278 + .../docs/models/tf-efficientnet-condconv.md | 250 + .../docs/models/tf-efficientnet-lite.md | 256 + .../convmixer/docs/models/tf-efficientnet.md | 663 + .../convmixer/docs/models/tf-inception-v3.md | 148 + .../convmixer/docs/models/tf-mixnet.md | 194 + .../convmixer/docs/models/tf-mobilenet-v3.md | 381 + .../convmixer/docs/models/tresnet.md | 352 + .../docs/models/vision-transformer.md | 380 + .../convmixer/docs/models/wide-resnet.md | 163 + .../convmixer/docs/models/xception.md | 224 + .../classification/convmixer/docs/results.md | 67 + .../classification/convmixer/docs/scripts.md | 27 + .../docs/training_hparam_examples.md | 47 + .../cv/classification/convmixer/hubconf.py | 4 + .../cv/classification/convmixer/inference.py | 127 + .../cv/classification/convmixer/mkdocs.yml | 46 + .../classification/convmixer/model-index.yml | 14 + .../cv/classification/convmixer/read.md | 83 + .../convmixer/requirements-docs.txt | 4 + .../convmixer/requirements-modelindex.txt | 2 + .../classification/convmixer/requirements.txt | 7 + .../convmixer/results/README.md | 59 + .../convmixer/results/generate_csv_results.py | 75 + .../results/imagenet21k_goog_synsets.txt | 21843 ++++++++++++++++ .../convmixer/results/imagenet_a_indices.txt | 200 + .../convmixer/results/imagenet_a_synsets.txt | 200 + .../convmixer/results/imagenet_r_indices.txt | 200 + .../convmixer/results/imagenet_r_synsets.txt | 200 + .../results/imagenet_real_labels.json | 1 + .../convmixer/results/imagenet_synsets.txt | 1000 + .../results/results-imagenet-a-clean.csv | 421 + .../convmixer/results/results-imagenet-a.csv | 421 + .../results/results-imagenet-r-clean.csv | 421 + .../convmixer/results/results-imagenet-r.csv | 421 + .../results/results-imagenet-real.csv | 421 + .../convmixer/results/results-imagenet.csv | 421 + .../results-imagenetv2-matched-frequency.csv | 421 + .../convmixer/results/results-sketch.csv | 421 + .../cv/classification/convmixer/setup.cfg | 5 + .../cv/classification/convmixer/setup.py | 48 + .../classification/convmixer/test/env_npu.sh | 76 + .../convmixer/test/train_eval_1p.sh | 112 + .../convmixer/test/train_full_8p.sh | 154 + .../convmixer/test/train_performance_1p.sh | 155 + .../convmixer/test/train_performance_8p.sh | 154 + .../convmixer/tests/__init__.py | 0 .../convmixer/tests/test_layers.py | 71 + .../convmixer/tests/test_models.py | 299 + .../convmixer/tests/test_optim.py | 733 + .../classification/convmixer/timm/__init__.py | 4 + .../convmixer/timm/data/__init__.py | 12 + .../convmixer/timm/data/auto_augment.py | 833 + .../convmixer/timm/data/config.py | 78 + .../convmixer/timm/data/constants.py | 7 + .../convmixer/timm/data/dataset.py | 146 + .../convmixer/timm/data/dataset_factory.py | 30 + .../timm/data/distributed_sampler.py | 128 + .../convmixer/timm/data/loader.py | 298 + .../convmixer/timm/data/mixup.py | 316 + .../convmixer/timm/data/parsers/__init__.py | 1 + .../convmixer/timm/data/parsers/class_map.py | 16 + .../convmixer/timm/data/parsers/constants.py | 1 + .../convmixer/timm/data/parsers/parser.py | 17 + .../timm/data/parsers/parser_factory.py | 29 + .../timm/data/parsers/parser_image_folder.py | 69 + .../timm/data/parsers/parser_image_in_tar.py | 222 + .../timm/data/parsers/parser_image_tar.py | 72 + .../timm/data/parsers/parser_tfds.py | 223 + 
.../convmixer/timm/data/random_erasing.py | 97 + .../convmixer/timm/data/real_labels.py | 42 + .../convmixer/timm/data/tf_preprocessing.py | 232 + .../convmixer/timm/data/transforms.py | 158 + .../convmixer/timm/data/transforms_factory.py | 236 + .../convmixer/timm/loss/__init__.py | 4 + .../convmixer/timm/loss/asymmetric_loss.py | 97 + .../timm/loss/binary_cross_entropy.py | 47 + .../convmixer/timm/loss/cross_entropy.py | 36 + .../classification/convmixer/timm/loss/jsd.py | 39 + .../convmixer/timm/models/__init__.py | 58 + .../convmixer/timm/models/beit.py | 420 + .../convmixer/timm/models/byoanet.py | 329 + .../convmixer/timm/models/byobnet.py | 1422 + .../convmixer/timm/models/cait.py | 394 + .../convmixer/timm/models/coat.py | 660 + .../convmixer/timm/models/convit.py | 349 + .../convmixer/timm/models/convmixer.py | 27 + .../convmixer/timm/models/crossvit.py | 497 + .../convmixer/timm/models/cspnet.py | 457 + .../convmixer/timm/models/densenet.py | 387 + .../convmixer/timm/models/dla.py | 443 + .../convmixer/timm/models/dpn.py | 317 + .../convmixer/timm/models/efficientnet.py | 2211 ++ .../timm/models/efficientnet_blocks.py | 323 + .../timm/models/efficientnet_builder.py | 463 + .../convmixer/timm/models/factory.py | 86 + .../convmixer/timm/models/features.py | 284 + .../convmixer/timm/models/ghostnet.py | 276 + .../convmixer/timm/models/gluon_resnet.py | 248 + .../convmixer/timm/models/gluon_xception.py | 246 + .../convmixer/timm/models/hardcorenas.py | 152 + .../convmixer/timm/models/helpers.py | 515 + .../convmixer/timm/models/hrnet.py | 836 + .../convmixer/timm/models/hub.py | 96 + .../timm/models/inception_resnet_v2.py | 358 + .../convmixer/timm/models/inception_v3.py | 470 + .../convmixer/timm/models/inception_v4.py | 316 + .../convmixer/timm/models/layers/__init__.py | 39 + .../timm/models/layers/activations.py | 145 + .../timm/models/layers/activations_jit.py | 90 + .../timm/models/layers/activations_me.py | 218 + .../models/layers/adaptive_avgmax_pool.py | 118 + .../timm/models/layers/attention_pool2d.py | 182 + .../convmixer/timm/models/layers/blur_pool.py | 42 + .../timm/models/layers/bottleneck_attn.py | 129 + .../convmixer/timm/models/layers/cbam.py | 112 + .../timm/models/layers/classifier.py | 56 + .../timm/models/layers/cond_conv2d.py | 122 + .../convmixer/timm/models/layers/config.py | 115 + .../timm/models/layers/conv2d_same.py | 42 + .../timm/models/layers/conv_bn_act.py | 40 + .../timm/models/layers/create_act.py | 153 + .../timm/models/layers/create_attn.py | 89 + .../timm/models/layers/create_conv2d.py | 31 + .../timm/models/layers/create_norm_act.py | 83 + .../convmixer/timm/models/layers/drop.py | 168 + .../convmixer/timm/models/layers/eca.py | 145 + .../convmixer/timm/models/layers/evo_norm.py | 83 + .../timm/models/layers/gather_excite.py | 90 + .../timm/models/layers/global_context.py | 67 + .../convmixer/timm/models/layers/halo_attn.py | 185 + .../convmixer/timm/models/layers/helpers.py | 31 + .../timm/models/layers/inplace_abn.py | 87 + .../timm/models/layers/lambda_layer.py | 115 + .../convmixer/timm/models/layers/linear.py | 19 + .../timm/models/layers/median_pool.py | 49 + .../timm/models/layers/mixed_conv2d.py | 51 + .../convmixer/timm/models/layers/mlp.py | 108 + .../timm/models/layers/non_local_attn.py | 143 + .../convmixer/timm/models/layers/norm.py | 24 + .../convmixer/timm/models/layers/norm_act.py | 85 + .../convmixer/timm/models/layers/padding.py | 56 + .../timm/models/layers/patch_embed.py | 39 + .../timm/models/layers/pool2d_same.py | 73 + 
.../timm/models/layers/selective_kernel.py | 119 + .../timm/models/layers/separable_conv.py | 73 + .../timm/models/layers/space_to_depth.py | 53 + .../timm/models/layers/split_attn.py | 85 + .../timm/models/layers/split_batchnorm.py | 75 + .../timm/models/layers/squeeze_excite.py | 74 + .../convmixer/timm/models/layers/std_conv.py | 133 + .../timm/models/layers/test_time_pool.py | 52 + .../timm/models/layers/weight_init.py | 89 + .../convmixer/timm/models/levit.py | 563 + .../convmixer/timm/models/mlp_mixer.py | 625 + .../convmixer/timm/models/mobilenetv3.py | 562 + .../convmixer/timm/models/nasnet.py | 567 + .../convmixer/timm/models/nest.py | 462 + .../convmixer/timm/models/nfnet.py | 966 + .../convmixer/timm/models/pit.py | 384 + .../convmixer/timm/models/pnasnet.py | 350 + .../models/pruned/ecaresnet101d_pruned.txt | 1 + .../models/pruned/ecaresnet50d_pruned.txt | 1 + .../models/pruned/efficientnet_b1_pruned.txt | 1 + .../models/pruned/efficientnet_b2_pruned.txt | 1 + .../models/pruned/efficientnet_b3_pruned.txt | 1 + .../convmixer/timm/models/registry.py | 149 + .../convmixer/timm/models/regnet.py | 494 + .../convmixer/timm/models/res2net.py | 216 + .../convmixer/timm/models/resnest.py | 237 + .../convmixer/timm/models/resnet.py | 1455 + .../convmixer/timm/models/resnetv2.py | 656 + .../convmixer/timm/models/rexnet.py | 238 + .../convmixer/timm/models/selecsls.py | 362 + .../convmixer/timm/models/senet.py | 467 + .../convmixer/timm/models/sknet.py | 215 + .../convmixer/timm/models/swin_transformer.py | 652 + .../convmixer/timm/models/tnt.py | 268 + .../convmixer/timm/models/tresnet.py | 297 + .../convmixer/timm/models/twins.py | 422 + .../convmixer/timm/models/vgg.py | 261 + .../convmixer/timm/models/visformer.py | 409 + .../timm/models/vision_transformer.py | 896 + .../timm/models/vision_transformer_hybrid.py | 363 + .../convmixer/timm/models/vovnet.py | 406 + .../convmixer/timm/models/xception.py | 232 + .../convmixer/timm/models/xception_aligned.py | 238 + .../convmixer/timm/models/xcit.py | 810 + .../convmixer/timm/optim/__init__.py | 15 + .../convmixer/timm/optim/adabelief.py | 201 + .../convmixer/timm/optim/adafactor.py | 167 + .../convmixer/timm/optim/adahessian.py | 156 + .../convmixer/timm/optim/adamp.py | 105 + .../convmixer/timm/optim/adamw.py | 122 + .../convmixer/timm/optim/lamb.py | 192 + .../convmixer/timm/optim/lars.py | 135 + .../convmixer/timm/optim/lookahead.py | 61 + .../convmixer/timm/optim/madgrad.py | 184 + .../convmixer/timm/optim/nadam.py | 92 + .../convmixer/timm/optim/nvnovograd.py | 120 + .../convmixer/timm/optim/optim_factory.py | 217 + .../convmixer/timm/optim/radam.py | 89 + .../convmixer/timm/optim/rmsprop_tf.py | 139 + .../convmixer/timm/optim/sgdp.py | 70 + .../convmixer/timm/scheduler/__init__.py | 9 + .../convmixer/timm/scheduler/cosine_lr.py | 119 + .../convmixer/timm/scheduler/multistep_lr.py | 65 + .../convmixer/timm/scheduler/onecycle_lr.py | 42 + .../convmixer/timm/scheduler/plateau_lr.py | 113 + .../convmixer/timm/scheduler/poly_lr.py | 116 + .../convmixer/timm/scheduler/scheduler.py | 115 + .../timm/scheduler/scheduler_factory.py | 124 + .../convmixer/timm/scheduler/step_lr.py | 63 + .../convmixer/timm/scheduler/tanh_lr.py | 117 + .../convmixer/timm/utils/__init__.py | 13 + .../convmixer/timm/utils/agc.py | 42 + .../convmixer/timm/utils/checkpoint_saver.py | 150 + .../convmixer/timm/utils/clip_grad.py | 23 + .../convmixer/timm/utils/cuda.py | 55 + .../convmixer/timm/utils/distributed.py | 28 + .../convmixer/timm/utils/jit.py | 18 + 
.../convmixer/timm/utils/log.py | 28 + .../convmixer/timm/utils/metrics.py | 32 + .../convmixer/timm/utils/misc.py | 18 + .../convmixer/timm/utils/model.py | 92 + .../convmixer/timm/utils/model_ema.py | 126 + .../convmixer/timm/utils/random.py | 9 + .../convmixer/timm/utils/summary.py | 39 + .../classification/convmixer/timm/version.py | 1 + .../cv/classification/convmixer/train_npu.py | 959 + .../classification/convmixer/validate_npu.py | 352 + 372 files changed, 101197 insertions(+) create mode 100644 PyTorch/__init__.py create mode 100644 PyTorch/contrib/__init__.py create mode 100644 PyTorch/contrib/cv/__init__.py create mode 100644 PyTorch/contrib/cv/classification/__init__.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/MANIFEST.in create mode 100644 PyTorch/contrib/cv/classification/convmixer/README.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/avg_checkpoints.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/benchmark.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/clean_checkpoint.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/convert/convert_from_mxnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/convert/convert_nest_flax.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/convmixer.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/20220428-1.5/apex-0.1+ascend.20220428-cp37-cp37m-linux_aarch64.whl create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/archived_changes.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/changes.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/feature_extraction.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/index.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/javascripts/tables.js create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.pages create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/code_snippets.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/generate_readmes.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/adversarial-inception-v3.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/advprop.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/big-transfer.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/csp-darknet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/csp-resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/csp-resnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/densenet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/dla.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/dpn.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ecaresnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/efficientnet-pruned.md create mode 100644 
PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/efficientnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ensemble-adversarial.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ese-vovnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/fbnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-inception-v3.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-resnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-senet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-seresnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-xception.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/hrnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ig-resnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/inception-resnet-v2.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/inception-v3.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/inception-v4.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/legacy-se-resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/legacy-se-resnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/legacy-senet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mixnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mnasnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mobilenet-v2.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mobilenet-v3.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/nasnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/noisy-student.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/pnasnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/regnetx.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/regnety.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/res2net.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/res2next.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnest.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnet-d.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnext.md create mode 100644 
PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/rexnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/se-resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/selecsls.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/seresnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/skresnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/skresnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/spnasnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ssl-resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ssl-resnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/swsl-resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/swsl-resnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-efficientnet-condconv.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-efficientnet-lite.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-efficientnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-inception-v3.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-mixnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-mobilenet-v3.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tresnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/vision-transformer.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/wide-resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/xception.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/adversarial-inception-v3.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/advprop.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/big-transfer.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/csp-darknet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/csp-resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/csp-resnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/densenet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/dla.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/dpn.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/ecaresnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/efficientnet-pruned.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/efficientnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/ensemble-adversarial.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/ese-vovnet.md create mode 100644 
PyTorch/contrib/cv/classification/convmixer/docs/models/fbnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-inception-v3.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-resnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-senet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-seresnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-xception.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/hrnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/ig-resnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/inception-resnet-v2.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/inception-v3.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/inception-v4.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/legacy-se-resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/legacy-se-resnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/legacy-senet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/mixnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/mnasnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/mobilenet-v2.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/mobilenet-v3.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/nasnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/noisy-student.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/pnasnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/regnetx.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/regnety.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/res2net.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/res2next.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/resnest.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/resnet-d.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/resnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/rexnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/se-resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/selecsls.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/seresnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/skresnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/skresnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/spnasnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/ssl-resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/ssl-resnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/swsl-resnet.md create mode 
100644 PyTorch/contrib/cv/classification/convmixer/docs/models/swsl-resnext.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/tf-efficientnet-condconv.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/tf-efficientnet-lite.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/tf-efficientnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/tf-inception-v3.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/tf-mixnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/tf-mobilenet-v3.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/tresnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/vision-transformer.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/wide-resnet.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/models/xception.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/results.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/scripts.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/docs/training_hparam_examples.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/hubconf.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/inference.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/mkdocs.yml create mode 100644 PyTorch/contrib/cv/classification/convmixer/model-index.yml create mode 100644 PyTorch/contrib/cv/classification/convmixer/read.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/requirements-docs.txt create mode 100644 PyTorch/contrib/cv/classification/convmixer/requirements-modelindex.txt create mode 100644 PyTorch/contrib/cv/classification/convmixer/requirements.txt create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/README.md create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/generate_csv_results.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/imagenet21k_goog_synsets.txt create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/imagenet_a_indices.txt create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/imagenet_a_synsets.txt create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/imagenet_r_indices.txt create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/imagenet_r_synsets.txt create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/imagenet_real_labels.json create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/imagenet_synsets.txt create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-a-clean.csv create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-a.csv create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-r-clean.csv create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-r.csv create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-real.csv create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/results-imagenet.csv create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/results-imagenetv2-matched-frequency.csv create mode 100644 PyTorch/contrib/cv/classification/convmixer/results/results-sketch.csv create mode 100644 
PyTorch/contrib/cv/classification/convmixer/setup.cfg create mode 100644 PyTorch/contrib/cv/classification/convmixer/setup.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/test/env_npu.sh create mode 100644 PyTorch/contrib/cv/classification/convmixer/test/train_eval_1p.sh create mode 100644 PyTorch/contrib/cv/classification/convmixer/test/train_full_8p.sh create mode 100644 PyTorch/contrib/cv/classification/convmixer/test/train_performance_1p.sh create mode 100644 PyTorch/contrib/cv/classification/convmixer/test/train_performance_8p.sh create mode 100644 PyTorch/contrib/cv/classification/convmixer/tests/__init__.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/tests/test_layers.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/tests/test_models.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/tests/test_optim.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/__init__.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/__init__.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/auto_augment.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/config.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/constants.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/dataset.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/dataset_factory.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/distributed_sampler.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/loader.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/mixup.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/__init__.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/class_map.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/constants.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_factory.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_image_folder.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_image_in_tar.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_image_tar.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_tfds.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/random_erasing.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/real_labels.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/tf_preprocessing.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/transforms.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/data/transforms_factory.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/loss/__init__.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/loss/asymmetric_loss.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/loss/binary_cross_entropy.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/loss/cross_entropy.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/loss/jsd.py create mode 100644 
PyTorch/contrib/cv/classification/convmixer/timm/models/__init__.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/beit.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/byoanet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/byobnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/cait.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/coat.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/convit.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/convmixer.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/crossvit.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/cspnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/densenet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/dla.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/dpn.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/efficientnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/efficientnet_blocks.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/efficientnet_builder.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/factory.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/features.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/ghostnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/gluon_resnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/gluon_xception.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/hardcorenas.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/helpers.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/hrnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/hub.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/inception_resnet_v2.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/inception_v3.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/inception_v4.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/__init__.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/activations.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/activations_jit.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/activations_me.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/adaptive_avgmax_pool.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/attention_pool2d.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/blur_pool.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/bottleneck_attn.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/cbam.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/classifier.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/cond_conv2d.py create mode 100644 
PyTorch/contrib/cv/classification/convmixer/timm/models/layers/config.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/conv2d_same.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/conv_bn_act.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_act.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_attn.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_conv2d.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_norm_act.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/drop.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/eca.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/evo_norm.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/gather_excite.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/global_context.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/halo_attn.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/helpers.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/inplace_abn.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/lambda_layer.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/linear.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/median_pool.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/mixed_conv2d.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/mlp.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/non_local_attn.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/norm.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/norm_act.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/padding.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/patch_embed.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/pool2d_same.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/selective_kernel.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/separable_conv.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/space_to_depth.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/split_attn.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/split_batchnorm.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/squeeze_excite.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/std_conv.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/test_time_pool.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/layers/weight_init.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/levit.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/mlp_mixer.py create mode 100644 
PyTorch/contrib/cv/classification/convmixer/timm/models/mobilenetv3.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/nasnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/nest.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/nfnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/pit.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/pnasnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/ecaresnet101d_pruned.txt create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/ecaresnet50d_pruned.txt create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/efficientnet_b1_pruned.txt create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/efficientnet_b2_pruned.txt create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/efficientnet_b3_pruned.txt create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/registry.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/regnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/res2net.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/resnest.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/resnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/resnetv2.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/rexnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/selecsls.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/senet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/sknet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/swin_transformer.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/tnt.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/tresnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/twins.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/vgg.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/visformer.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/vision_transformer.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/vision_transformer_hybrid.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/vovnet.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/xception.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/xception_aligned.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/models/xcit.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/__init__.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/adabelief.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/adafactor.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/adahessian.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/adamp.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/adamw.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/lamb.py 
create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/lars.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/lookahead.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/madgrad.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/nadam.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/nvnovograd.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/optim_factory.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/radam.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/rmsprop_tf.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/optim/sgdp.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/scheduler/__init__.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/scheduler/cosine_lr.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/scheduler/multistep_lr.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/scheduler/onecycle_lr.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/scheduler/plateau_lr.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/scheduler/poly_lr.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/scheduler/scheduler.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/scheduler/scheduler_factory.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/scheduler/step_lr.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/scheduler/tanh_lr.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/utils/__init__.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/utils/agc.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/utils/checkpoint_saver.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/utils/clip_grad.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/utils/cuda.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/utils/distributed.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/utils/jit.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/utils/log.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/utils/metrics.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/utils/misc.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/utils/model.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/utils/model_ema.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/utils/random.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/utils/summary.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/timm/version.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/train_npu.py create mode 100644 PyTorch/contrib/cv/classification/convmixer/validate_npu.py diff --git a/PyTorch/__init__.py b/PyTorch/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/PyTorch/contrib/__init__.py b/PyTorch/contrib/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/PyTorch/contrib/cv/__init__.py b/PyTorch/contrib/cv/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/PyTorch/contrib/cv/classification/__init__.py 
b/PyTorch/contrib/cv/classification/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/PyTorch/contrib/cv/classification/convmixer/MANIFEST.in b/PyTorch/contrib/cv/classification/convmixer/MANIFEST.in
new file mode 100644
index 0000000000..4f2d158465
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/MANIFEST.in
@@ -0,0 +1,2 @@
+include timm/models/pruned/*.txt
+
diff --git a/PyTorch/contrib/cv/classification/convmixer/README.md b/PyTorch/contrib/cv/classification/convmixer/README.md
new file mode 100644
index 0000000000..c5eb7259e0
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/README.md
@@ -0,0 +1,47 @@
+# ConvMixer: Patches Are All You Need?
+
+
+## Requirements
+
+- Install PyTorch ([pytorch.org](http://pytorch.org)), torch >= 1.4.0, torchvision >= 0.5.0
+- `pip install -r requirements.txt`
+- Download the ImageNet dataset from http://www.image-net.org/
+
+## Training ConvMixer
+
+To train a model, run `train_npu.py` with the desired model architecture and the path to the ImageNet dataset:
+
+Training on 8 NPUs:
+```
+bash ./test/train_full_8p.sh --data_path=xxx
+
+```
+
+Get model performance:
+```
+1. Test 1p performance
+bash ./test/train_performance_1p.sh --data_path=xxx
+
+2. Test 8p performance
+bash ./test/train_performance_8p.sh --data_path=xxx
+```
+
+## Validation
+```
+bash ./test/train_eval_1p.sh --data_path=xxx --checkpoint=XXX
+```
+
+## Training log
+```
+test/output/${device_id}/train_${device_id}.log    # detailed training log
+
+test/output/${device_id}/${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'.log    # training accuracy result
+
+```
+
+## ConvMixer training results
+| Name | Acc@1   | FPS    | NPU count | Epochs | AMP type | s/step |
+| :--: | :-----: | :----: | :-------: | :----: | :------: | :----: |
+| NPU  | -       | 50.24  | 1         | 150    | O2       | 1.284  |
+| NPU  | 80.298% | 407.18 | 8         | 150    | O2       | 1.257  |
+
diff --git a/PyTorch/contrib/cv/classification/convmixer/avg_checkpoints.py b/PyTorch/contrib/cv/classification/convmixer/avg_checkpoints.py
new file mode 100644
index 0000000000..ea8bbe8476
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/avg_checkpoints.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+""" Checkpoint Averaging Script
+
+This script averages all model weights for checkpoints in the specified path that match
+the specified filter wildcard. All checkpoints must be from the exact same model.
+
+For any hope of decent results, the checkpoints should be from the same or child
+(via resumes) training session. This can be viewed as similar to maintaining a running
+EMA (exponential moving average) of the model weights or performing SWA (stochastic
+weight averaging), but post-training.
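+
+Example invocation (a sketch only; the checkpoint folder below is hypothetical, the flags
+correspond to the argparse options defined in this script):
+
+    python avg_checkpoints.py --input ./output/train --filter '*.pth.tar' -n 5 --output ./averaged.pth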
+
+Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
+"""
+import torch
+import argparse
+import os
+import glob
+import hashlib
+from timm.models.helpers import load_state_dict
+
+parser = argparse.ArgumentParser(description='PyTorch Checkpoint Averager')
+parser.add_argument('--input', default='', type=str, metavar='PATH',
+                    help='path to base input folder containing checkpoints')
+parser.add_argument('--filter', default='*.pth.tar', type=str, metavar='WILDCARD',
+                    help='checkpoint filter (path wildcard)')
+parser.add_argument('--output', default='./averaged.pth', type=str, metavar='PATH',
+                    help='output filename')
+parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true',
+                    help='Force not using ema version of weights (if present)')
+parser.add_argument('--no-sort', dest='no_sort', action='store_true',
+                    help='Do not sort and select by checkpoint metric, also makes "n" argument irrelevant')
+parser.add_argument('-n', type=int, default=10, metavar='N',
+                    help='Number of checkpoints to average')
+
+
+def checkpoint_metric(checkpoint_path):
+    if not checkpoint_path or not os.path.isfile(checkpoint_path):
+        return {}
+    print("=> Extracting metric from checkpoint '{}'".format(checkpoint_path))
+    checkpoint = torch.load(checkpoint_path, map_location='cpu')
+    metric = None
+    if 'metric' in checkpoint:
+        metric = checkpoint['metric']
+    elif 'metrics' in checkpoint and 'metric_name' in checkpoint:
+        metrics = checkpoint['metrics']
+        print(metrics)
+        metric = metrics[checkpoint['metric_name']]
+    return metric
+
+
+def main():
+    args = parser.parse_args()
+    # by default use the EMA weights (if present)
+    args.use_ema = not args.no_use_ema
+    # by default sort by checkpoint metric (if present) and avg top n checkpoints
+    args.sort = not args.no_sort
+
+    if os.path.exists(args.output):
+        print("Error: Output filename ({}) already exists.".format(args.output))
+        exit(1)
+
+    pattern = args.input
+    if not args.input.endswith(os.path.sep) and not args.filter.startswith(os.path.sep):
+        pattern += os.path.sep
+    pattern += args.filter
+    checkpoints = glob.glob(pattern, recursive=True)
+
+    if args.sort:
+        checkpoint_metrics = []
+        for c in checkpoints:
+            metric = checkpoint_metric(c)
+            if metric is not None:
+                checkpoint_metrics.append((metric, c))
+        checkpoint_metrics = list(sorted(checkpoint_metrics))
+        checkpoint_metrics = checkpoint_metrics[-args.n:]
+        print("Selected checkpoints:")
+        [print(m, c) for m, c in checkpoint_metrics]
+        avg_checkpoints = [c for m, c in checkpoint_metrics]
+    else:
+        avg_checkpoints = checkpoints
+        print("Selected checkpoints:")
+        [print(c) for c in checkpoints]
+
+    avg_state_dict = {}
+    avg_counts = {}
+    for c in avg_checkpoints:
+        new_state_dict = load_state_dict(c, args.use_ema)
+        if not new_state_dict:
+            print("Error: Checkpoint ({}) doesn't exist".format(c))
+            continue
+
+        for k, v in new_state_dict.items():
+            if k not in avg_state_dict:
+                avg_state_dict[k] = v.clone().to(dtype=torch.float64)
+                avg_counts[k] = 1
+            else:
+                avg_state_dict[k] += v.to(dtype=torch.float64)
+                avg_counts[k] += 1
+
+    for k, v in avg_state_dict.items():
+        v.div_(avg_counts[k])
+
+    # float32 overflow seems unlikely based on weights seen to date, but who knows
+    float32_info = torch.finfo(torch.float32)
+    final_state_dict = {}
+    for k, v in avg_state_dict.items():
+        v = v.clamp(float32_info.min, float32_info.max)
+        final_state_dict[k] = v.to(dtype=torch.float32)
+
+    try:
+        torch.save(final_state_dict, args.output, _use_new_zipfile_serialization=False)
+    except:
+        torch.save(final_state_dict, args.output)
+
+    with open(args.output, 'rb') as f:
+        sha_hash = hashlib.sha256(f.read()).hexdigest()
+    print("=> Saved state_dict to '{}', SHA256: {}".format(args.output, sha_hash))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/PyTorch/contrib/cv/classification/convmixer/benchmark.py b/PyTorch/contrib/cv/classification/convmixer/benchmark.py
new file mode 100644
index 0000000000..903bb817be
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/benchmark.py
@@ -0,0 +1,481 @@
+#!/usr/bin/env python3
+""" Model Benchmark Script
+
+An inference and train step benchmark script for timm models.
+
+Hacked together by Ross Wightman (https://github.com/rwightman)
+"""
+import argparse
+import os
+import csv
+import json
+import time
+import logging
+import torch
+import torch.nn as nn
+import torch.nn.parallel
+from collections import OrderedDict
+from contextlib import suppress
+from functools import partial
+
+from timm.models import create_model, is_model, list_models
+from timm.optim import create_optimizer_v2
+from timm.data import resolve_data_config
+from timm.utils import AverageMeter, setup_default_logging
+
+
+has_apex = False
+try:
+    from apex import amp
+    has_apex = True
+except ImportError:
+    pass
+
+has_native_amp = False
+try:
+    if getattr(torch.cuda.amp, 'autocast') is not None:
+        has_native_amp = True
+except AttributeError:
+    pass
+
+torch.backends.cudnn.benchmark = True
+_logger = logging.getLogger('validate')
+
+
+parser = argparse.ArgumentParser(description='PyTorch Benchmark')
+
+# benchmark specific args
+parser.add_argument('--model-list', metavar='NAME', default='',
+                    help='txt file based list of model names to benchmark')
+parser.add_argument('--bench', default='both', type=str,
+                    help="Benchmark mode. One of 'inference', 'train', 'both'. Defaults to 'both'")
+parser.add_argument('--detail', action='store_true', default=False,
+                    help='Provide train fwd/bwd/opt breakdown detail if True. Defaults to False')
+parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
+                    help='Output csv file for validation results (summary)')
+parser.add_argument('--num-warm-iter', default=10, type=int,
+                    metavar='N', help='Number of warmup iterations (default: 10)')
+parser.add_argument('--num-bench-iter', default=40, type=int,
+                    metavar='N', help='Number of benchmark iterations (default: 40)')
+
+# common inference / train args
+parser.add_argument('--model', '-m', metavar='NAME', default='resnet50',
+                    help='model architecture (default: resnet50)')
+parser.add_argument('-b', '--batch-size', default=256, type=int,
+                    metavar='N', help='mini-batch size (default: 256)')
+parser.add_argument('--img-size', default=None, type=int,
+                    metavar='N', help='Input image dimension, uses model default if empty')
+parser.add_argument('--input-size', default=None, nargs=3, type=int,
+                    metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
+parser.add_argument('--num-classes', type=int, default=None,
+                    help='Number classes in dataset')
+parser.add_argument('--gp', default=None, type=str, metavar='POOL',
+                    help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
+parser.add_argument('--channels-last', action='store_true', default=False,
+                    help='Use channels_last memory layout')
+parser.add_argument('--amp', action='store_true', default=False,
+                    help='use PyTorch Native AMP for mixed precision training. Overrides --precision arg.')
+parser.add_argument('--precision', default='float32', type=str,
+                    help='Numeric precision. One of (amp, float32, float16, bfloat16, tf32)')
+parser.add_argument('--torchscript', dest='torchscript', action='store_true',
+                    help='convert model torchscript for inference')
+
+
+# train optimizer parameters
+parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
+                    help='Optimizer (default: "sgd")')
+parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
+                    help='Optimizer Epsilon (default: None, use opt default)')
+parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
+                    help='Optimizer Betas (default: None, use opt default)')
+parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
+                    help='Optimizer momentum (default: 0.9)')
+parser.add_argument('--weight-decay', type=float, default=0.0001,
+                    help='weight decay (default: 0.0001)')
+parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
+                    help='Clip gradient norm (default: None, no clipping)')
+parser.add_argument('--clip-mode', type=str, default='norm',
+                    help='Gradient clipping mode. One of ("norm", "value", "agc")')
+
+
+# model regularization / loss params that impact model or loss fn
+parser.add_argument('--smoothing', type=float, default=0.1,
+                    help='Label smoothing (default: 0.1)')
+parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
+                    help='Dropout rate (default: 0.)')
+parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
+                    help='Drop path rate (default: None)')
+parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
+                    help='Drop block rate (default: None)')
+
+
+def timestamp(sync=False):
+    return time.perf_counter()
+
+
+def cuda_timestamp(sync=False, device=None):
+    if sync:
+        torch.cuda.synchronize(device=device)
+    return time.perf_counter()
+
+
+def count_params(model: nn.Module):
+    return sum([m.numel() for m in model.parameters()])
+
+
+def resolve_precision(precision: str):
+    assert precision in ('amp', 'float16', 'bfloat16', 'float32')
+    use_amp = False
+    model_dtype = torch.float32
+    data_dtype = torch.float32
+    if precision == 'amp':
+        use_amp = True
+    elif precision == 'float16':
+        model_dtype = torch.float16
+        data_dtype = torch.float16
+    elif precision == 'bfloat16':
+        model_dtype = torch.bfloat16
+        data_dtype = torch.bfloat16
+    return use_amp, model_dtype, data_dtype
+
+
+class BenchmarkRunner:
+    def __init__(
+            self, model_name, detail=False, device='cuda', torchscript=False, precision='float32',
+            num_warm_iter=10, num_bench_iter=50, **kwargs):
+        self.model_name = model_name
+        self.detail = detail
+        self.device = device
+        self.use_amp, self.model_dtype, self.data_dtype = resolve_precision(precision)
+        self.channels_last = kwargs.pop('channels_last', False)
+        self.amp_autocast = torch.cuda.amp.autocast if self.use_amp else suppress
+
+        self.model = create_model(
+            model_name,
+            num_classes=kwargs.pop('num_classes', None),
+            in_chans=3,
+            global_pool=kwargs.pop('gp', 'fast'),
+            scriptable=torchscript)
+        self.model.to(
+            device=self.device,
+            dtype=self.model_dtype,
+            memory_format=torch.channels_last if self.channels_last else None)
+        self.num_classes =
self.model.num_classes + self.param_count = count_params(self.model) + _logger.info('Model %s created, param count: %d' % (model_name, self.param_count)) + if torchscript: + self.model = torch.jit.script(self.model) + + data_config = resolve_data_config(kwargs, model=self.model, use_test_size=True) + self.input_size = data_config['input_size'] + self.batch_size = kwargs.pop('batch_size', 256) + + self.example_inputs = None + self.num_warm_iter = num_warm_iter + self.num_bench_iter = num_bench_iter + self.log_freq = num_bench_iter // 5 + if 'cuda' in self.device: + self.time_fn = partial(cuda_timestamp, device=self.device) + else: + self.time_fn = timestamp + + def _init_input(self): + self.example_inputs = torch.randn( + (self.batch_size,) + self.input_size, device=self.device, dtype=self.data_dtype) + if self.channels_last: + self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last) + + +class InferenceBenchmarkRunner(BenchmarkRunner): + + def __init__(self, model_name, device='cuda', torchscript=False, **kwargs): + super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) + self.model.eval() + + def run(self): + def _step(): + t_step_start = self.time_fn() + with self.amp_autocast(): + output = self.model(self.example_inputs) + t_step_end = self.time_fn(True) + return t_step_end - t_step_start + + _logger.info( + f'Running inference benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' + f'input size {self.input_size} and batch size {self.batch_size}.') + + with torch.no_grad(): + self._init_input() + + for _ in range(self.num_warm_iter): + _step() + + total_step = 0. + num_samples = 0 + t_run_start = self.time_fn() + for i in range(self.num_bench_iter): + delta_fwd = _step() + total_step += delta_fwd + num_samples += self.batch_size + num_steps = i + 1 + if num_steps % self.log_freq == 0: + _logger.info( + f"Infer [{num_steps}/{self.num_bench_iter}]." + f" {num_samples / total_step:0.2f} samples/sec." + f" {1000 * total_step / num_steps:0.3f} ms/step.") + t_run_end = self.time_fn(True) + t_run_elapsed = t_run_end - t_run_start + + results = dict( + samples_per_sec=round(num_samples / t_run_elapsed, 2), + step_time=round(1000 * total_step / self.num_bench_iter, 3), + batch_size=self.batch_size, + img_size=self.input_size[-1], + param_count=round(self.param_count / 1e6, 2), + ) + + _logger.info( + f"Inference benchmark of {self.model_name} done. " + f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step") + + return results + + +class TrainBenchmarkRunner(BenchmarkRunner): + + def __init__(self, model_name, device='cuda', torchscript=False, **kwargs): + super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) + self.model.train() + + if kwargs.pop('smoothing', 0) > 0: + self.loss = nn.CrossEntropyLoss().to(self.device) + else: + self.loss = nn.CrossEntropyLoss().to(self.device) + self.target_shape = tuple() + + self.optimizer = create_optimizer_v2( + self.model, + opt=kwargs.pop('opt', 'sgd'), + lr=kwargs.pop('lr', 1e-4)) + + def _gen_target(self, batch_size): + return torch.empty( + (batch_size,) + self.target_shape, device=self.device, dtype=torch.long).random_(self.num_classes) + + def run(self): + def _step(detail=False): + self.optimizer.zero_grad() # can this be ignored? 
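+            # Note: PyTorch accumulates gradients into .grad by default, so skipping
+            # zero_grad() would let gradients pile up across benchmark iterations and the
+            # timed step would drift away from what a real training step does.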
+ t_start = self.time_fn() + t_fwd_end = t_start + t_bwd_end = t_start + with self.amp_autocast(): + output = self.model(self.example_inputs) + if isinstance(output, tuple): + output = output[0] + if detail: + t_fwd_end = self.time_fn(True) + target = self._gen_target(output.shape[0]) + self.loss(output, target).backward() + if detail: + t_bwd_end = self.time_fn(True) + self.optimizer.step() + t_end = self.time_fn(True) + if detail: + delta_fwd = t_fwd_end - t_start + delta_bwd = t_bwd_end - t_fwd_end + delta_opt = t_end - t_bwd_end + return delta_fwd, delta_bwd, delta_opt + else: + delta_step = t_end - t_start + return delta_step + + _logger.info( + f'Running train benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' + f'input size {self.input_size} and batch size {self.batch_size}.') + + self._init_input() + + for _ in range(self.num_warm_iter): + _step() + + t_run_start = self.time_fn() + if self.detail: + total_fwd = 0. + total_bwd = 0. + total_opt = 0. + num_samples = 0 + for i in range(self.num_bench_iter): + delta_fwd, delta_bwd, delta_opt = _step(True) + num_samples += self.batch_size + total_fwd += delta_fwd + total_bwd += delta_bwd + total_opt += delta_opt + num_steps = (i + 1) + if num_steps % self.log_freq == 0: + total_step = total_fwd + total_bwd + total_opt + _logger.info( + f"Train [{num_steps}/{self.num_bench_iter}]." + f" {num_samples / total_step:0.2f} samples/sec." + f" {1000 * total_fwd / num_steps:0.3f} ms/step fwd," + f" {1000 * total_bwd / num_steps:0.3f} ms/step bwd," + f" {1000 * total_opt / num_steps:0.3f} ms/step opt." + ) + total_step = total_fwd + total_bwd + total_opt + t_run_elapsed = self.time_fn() - t_run_start + results = dict( + samples_per_sec=round(num_samples / t_run_elapsed, 2), + step_time=round(1000 * total_step / self.num_bench_iter, 3), + fwd_time=round(1000 * total_fwd / self.num_bench_iter, 3), + bwd_time=round(1000 * total_bwd / self.num_bench_iter, 3), + opt_time=round(1000 * total_opt / self.num_bench_iter, 3), + batch_size=self.batch_size, + img_size=self.input_size[-1], + param_count=round(self.param_count / 1e6, 2), + ) + else: + total_step = 0. + num_samples = 0 + for i in range(self.num_bench_iter): + delta_step = _step(False) + num_samples += self.batch_size + total_step += delta_step + num_steps = (i + 1) + if num_steps % self.log_freq == 0: + _logger.info( + f"Train [{num_steps}/{self.num_bench_iter}]." + f" {num_samples / total_step:0.2f} samples/sec." + f" {1000 * total_step / num_steps:0.3f} ms/step.") + t_run_elapsed = self.time_fn() - t_run_start + results = dict( + samples_per_sec=round(num_samples / t_run_elapsed, 2), + step_time=round(1000 * total_step / self.num_bench_iter, 3), + batch_size=self.batch_size, + img_size=self.input_size[-1], + param_count=round(self.param_count / 1e6, 2), + ) + + _logger.info( + f"Train benchmark of {self.model_name} done. 
" + f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/sample") + + return results + + +def decay_batch_exp(batch_size, factor=0.5, divisor=16): + out_batch_size = batch_size * factor + if out_batch_size > divisor: + out_batch_size = (out_batch_size + 1) // divisor * divisor + else: + out_batch_size = batch_size - 1 + return max(0, int(out_batch_size)) + + +def _try_run(model_name, bench_fn, initial_batch_size, bench_kwargs): + batch_size = initial_batch_size + results = dict() + while batch_size >= 1: + torch.cuda.empty_cache() + try: + bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs) + results = bench.run() + return results + except RuntimeError as e: + print(f'Error: {str(e)} while running benchmark. Reducing batch size to {batch_size} for retry.') + batch_size = decay_batch_exp(batch_size) + return results + + +def benchmark(args): + if args.amp: + _logger.warning("Overriding precision to 'amp' since --amp flag set.") + args.precision = 'amp' + _logger.info(f'Benchmarking in {args.precision} precision. ' + f'{"NHWC" if args.channels_last else "NCHW"} layout. ' + f'torchscript {"enabled" if args.torchscript else "disabled"}') + + bench_kwargs = vars(args).copy() + bench_kwargs.pop('amp') + model = bench_kwargs.pop('model') + batch_size = bench_kwargs.pop('batch_size') + + bench_fns = (InferenceBenchmarkRunner,) + prefixes = ('infer',) + if args.bench == 'both': + bench_fns = ( + InferenceBenchmarkRunner, + TrainBenchmarkRunner + ) + prefixes = ('infer', 'train') + elif args.bench == 'train': + bench_fns = TrainBenchmarkRunner, + prefixes = 'train', + + model_results = OrderedDict(model=model) + for prefix, bench_fn in zip(prefixes, bench_fns): + run_results = _try_run(model, bench_fn, initial_batch_size=batch_size, bench_kwargs=bench_kwargs) + if prefix: + run_results = {'_'.join([prefix, k]): v for k, v in run_results.items()} + model_results.update(run_results) + param_count = model_results.pop('infer_param_count', model_results.pop('train_param_count', 0)) + model_results.setdefault('param_count', param_count) + model_results.pop('train_param_count', 0) + return model_results + + +def main(): + setup_default_logging() + args = parser.parse_args() + model_cfgs = [] + model_names = [] + + if args.model_list: + args.model = '' + with open(args.model_list) as f: + model_names = [line.rstrip() for line in f] + model_cfgs = [(n, None) for n in model_names] + elif args.model == 'all': + # validate all models in a list of names with pretrained checkpoints + args.pretrained = True + model_names = list_models(pretrained=True, exclude_filters=['*in21k']) + model_cfgs = [(n, None) for n in model_names] + elif not is_model(args.model): + # model name doesn't exist, try as wildcard filter + model_names = list_models(args.model) + model_cfgs = [(n, None) for n in model_names] + + if len(model_cfgs): + results_file = args.results_file or './benchmark.csv' + _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) + results = [] + try: + for m, _ in model_cfgs: + if not m: + continue + args.model = m + r = benchmark(args) + results.append(r) + except KeyboardInterrupt as e: + pass + sort_key = 'train_samples_per_sec' if 'train' in args.bench else 'infer_samples_per_sec' + results = sorted(results, key=lambda x: x[sort_key], reverse=True) + if len(results): + write_results(results_file, results) + + import json + json_str = json.dumps(results, indent=4) + print(json_str) + else: + benchmark(args) + + 
+def write_results(results_file, results): + with open(results_file, mode='w') as cf: + dw = csv.DictWriter(cf, fieldnames=results[0].keys()) + dw.writeheader() + for r in results: + dw.writerow(r) + cf.flush() + + +if __name__ == '__main__': + main() diff --git a/PyTorch/contrib/cv/classification/convmixer/clean_checkpoint.py b/PyTorch/contrib/cv/classification/convmixer/clean_checkpoint.py new file mode 100644 index 0000000000..34e8604a35 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/clean_checkpoint.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 +""" Checkpoint Cleaning Script + +Takes training checkpoints with GPU tensors, optimizer state, extra dict keys, etc. +and outputs a CPU tensor checkpoint with only the `state_dict` along with SHA256 +calculation for model zoo compatibility. + +Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) +""" +import torch +import argparse +import os +import hashlib +import shutil +from collections import OrderedDict +from timm.models.helpers import load_state_dict + +parser = argparse.ArgumentParser(description='PyTorch Checkpoint Cleaner') +parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', + help='path to latest checkpoint (default: none)') +parser.add_argument('--output', default='', type=str, metavar='PATH', + help='output path') +parser.add_argument('--use-ema', dest='use_ema', action='store_true', + help='use ema version of weights if present') +parser.add_argument('--clean-aux-bn', dest='clean_aux_bn', action='store_true', + help='remove auxiliary batch norm layers (from SplitBN training) from checkpoint') + +_TEMP_NAME = './_checkpoint.pth' + + +def main(): + args = parser.parse_args() + + if os.path.exists(args.output): + print("Error: Output filename ({}) already exists.".format(args.output)) + exit(1) + + # Load an existing checkpoint to CPU, strip everything but the state_dict and re-save + if args.checkpoint and os.path.isfile(args.checkpoint): + print("=> Loading checkpoint '{}'".format(args.checkpoint)) + state_dict = load_state_dict(args.checkpoint, use_ema=args.use_ema) + new_state_dict = {} + for k, v in state_dict.items(): + if args.clean_aux_bn and 'aux_bn' in k: + # If all aux_bn keys are removed, the SplitBN layers will end up as normal and + # load with the unmodified model using BatchNorm2d. 
+ continue + name = k[7:] if k.startswith('module') else k + new_state_dict[name] = v + print("=> Loaded state_dict from '{}'".format(args.checkpoint)) + + try: + torch.save(new_state_dict, _TEMP_NAME, _use_new_zipfile_serialization=False) + except: + torch.save(new_state_dict, _TEMP_NAME) + + with open(_TEMP_NAME, 'rb') as f: + sha_hash = hashlib.sha256(f.read()).hexdigest() + + if args.output: + checkpoint_root, checkpoint_base = os.path.split(args.output) + checkpoint_base = os.path.splitext(checkpoint_base)[0] + else: + checkpoint_root = '' + checkpoint_base = os.path.splitext(args.checkpoint)[0] + final_filename = '-'.join([checkpoint_base, sha_hash[:8]]) + '.pth' + shutil.move(_TEMP_NAME, os.path.join(checkpoint_root, final_filename)) + print("=> Saved state_dict to '{}, SHA256: {}'".format(final_filename, sha_hash)) + else: + print("Error: Checkpoint ({}) doesn't exist".format(args.checkpoint)) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/contrib/cv/classification/convmixer/convert/convert_from_mxnet.py b/PyTorch/contrib/cv/classification/convmixer/convert/convert_from_mxnet.py new file mode 100644 index 0000000000..f2c64c2560 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/convert/convert_from_mxnet.py @@ -0,0 +1,107 @@ +import argparse +import hashlib +import os + +import mxnet as mx +import gluoncv +import torch +from timm import create_model + +parser = argparse.ArgumentParser(description='Convert from MXNet') +parser.add_argument('--model', default='all', type=str, metavar='MODEL', + help='Name of model to train (default: "all"') + + +def convert(mxnet_name, torch_name): + # download and load the pre-trained model + net = gluoncv.model_zoo.get_model(mxnet_name, pretrained=True) + + # create corresponding torch model + torch_net = create_model(torch_name) + + mxp = [(k, v) for k, v in net.collect_params().items() if 'running' not in k] + torchp = list(torch_net.named_parameters()) + torch_params = {} + + # convert parameters + # NOTE: we are relying on the fact that the order of parameters + # are usually exactly the same between these models, thus no key name mapping + # is necessary. Asserts will trip if this is not the case. 
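+    # (For example, an MXNet BatchNorm 'gamma'/'beta' pair is expected to line up with the
+    # torch 'weight'/'bias' pair at the same position; the asserts below check exactly that.)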
+ for (tn, tv), (mn, mv) in zip(torchp, mxp): + m_split = mn.split('_') + t_split = tn.split('.') + print(t_split, m_split) + print(tv.shape, mv.shape) + + # ensure ordering of BN params match since their sizes are not specific + if m_split[-1] == 'gamma': + assert t_split[-1] == 'weight' + if m_split[-1] == 'beta': + assert t_split[-1] == 'bias' + + # ensure shapes match + assert all(t == m for t, m in zip(tv.shape, mv.shape)) + + torch_tensor = torch.from_numpy(mv.data().asnumpy()) + torch_params[tn] = torch_tensor + + # convert buffers (batch norm running stats) + mxb = [(k, v) for k, v in net.collect_params().items() if any(x in k for x in ['running_mean', 'running_var'])] + torchb = [(k, v) for k, v in torch_net.named_buffers() if 'num_batches' not in k] + for (tn, tv), (mn, mv) in zip(torchb, mxb): + print(tn, mn) + print(tv.shape, mv.shape) + + # ensure ordering of BN params match since their sizes are not specific + if 'running_var' in tn: + assert 'running_var' in mn + if 'running_mean' in tn: + assert 'running_mean' in mn + + torch_tensor = torch.from_numpy(mv.data().asnumpy()) + torch_params[tn] = torch_tensor + + torch_net.load_state_dict(torch_params) + torch_filename = './%s.pth' % torch_name + torch.save(torch_net.state_dict(), torch_filename) + with open(torch_filename, 'rb') as f: + sha_hash = hashlib.sha256(f.read()).hexdigest() + final_filename = os.path.splitext(torch_filename)[0] + '-' + sha_hash[:8] + '.pth' + os.rename(torch_filename, final_filename) + print("=> Saved converted model to '{}, SHA256: {}'".format(final_filename, sha_hash)) + + +def map_mx_to_torch_model(mx_name): + torch_name = mx_name.lower() + if torch_name.startswith('se_'): + torch_name = torch_name.replace('se_', 'se') + elif torch_name.startswith('senet_'): + torch_name = torch_name.replace('senet_', 'senet') + elif torch_name.startswith('inceptionv3'): + torch_name = torch_name.replace('inceptionv3', 'inception_v3') + torch_name = 'gluon_' + torch_name + return torch_name + + +ALL = ['resnet18_v1b', 'resnet34_v1b', 'resnet50_v1b', 'resnet101_v1b', 'resnet152_v1b', + 'resnet50_v1c', 'resnet101_v1c', 'resnet152_v1c', 'resnet50_v1d', 'resnet101_v1d', 'resnet152_v1d', + #'resnet50_v1e', 'resnet101_v1e', 'resnet152_v1e', + 'resnet50_v1s', 'resnet101_v1s', 'resnet152_v1s', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d', + 'se_resnext50_32x4d', 'se_resnext101_32x4d', 'se_resnext101_64x4d', 'senet_154', 'inceptionv3'] + + +def main(): + args = parser.parse_args() + + if not args.model or args.model == 'all': + for mx_model in ALL: + torch_model = map_mx_to_torch_model(mx_model) + convert(mx_model, torch_model) + else: + mx_model = args.model + torch_model = map_mx_to_torch_model(mx_model) + convert(mx_model, torch_model) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/contrib/cv/classification/convmixer/convert/convert_nest_flax.py b/PyTorch/contrib/cv/classification/convmixer/convert/convert_nest_flax.py new file mode 100644 index 0000000000..cda4d34f9b --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/convert/convert_nest_flax.py @@ -0,0 +1,109 @@ +""" +Convert weights from https://github.com/google-research/nested-transformer +NOTE: You'll need https://github.com/google/CommonLoopUtils, not included in requirements.txt +""" + +import sys + +import numpy as np +import torch + +from clu import checkpoint + + +arch_depths = { + 'nest_base': [2, 2, 20], + 'nest_small': [2, 2, 20], + 'nest_tiny': [2, 2, 8], +} + + +def convert_nest(checkpoint_path, arch): + """ + 
Expects path to checkpoint which is a dir containing 4 files like in each of these folders + - https://console.cloud.google.com/storage/browser/gresearch/nest-checkpoints + `arch` is needed to + Returns a state dict that can be used with `torch.nn.Module.load_state_dict` + Hint: Follow timm.models.nest.Nest.__init__ and + https://github.com/google-research/nested-transformer/blob/main/models/nest_net.py + """ + assert arch in ['nest_base', 'nest_small', 'nest_tiny'], "Your `arch` is not supported" + + flax_dict = checkpoint.load_state_dict(checkpoint_path)['optimizer']['target'] + state_dict = {} + + # Patch embedding + state_dict['patch_embed.proj.weight'] = torch.tensor( + flax_dict['PatchEmbedding_0']['Conv_0']['kernel']).permute(3, 2, 0, 1) + state_dict['patch_embed.proj.bias'] = torch.tensor(flax_dict['PatchEmbedding_0']['Conv_0']['bias']) + + # Positional embeddings + posemb_keys = [k for k in flax_dict.keys() if k.startswith('PositionEmbedding')] + for i, k in enumerate(posemb_keys): + state_dict[f'levels.{i}.pos_embed'] = torch.tensor(flax_dict[k]['pos_embedding']) + + # Transformer encoders + depths = arch_depths[arch] + for level in range(len(depths)): + for layer in range(depths[level]): + global_layer_ix = sum(depths[:level]) + layer + # Norms + for i in range(2): + state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.weight'] = torch.tensor( + flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['scale']) + state_dict[f'levels.{level}.transformer_encoder.{layer}.norm{i+1}.bias'] = torch.tensor( + flax_dict[f'EncoderNDBlock_{global_layer_ix}'][f'LayerNorm_{i}']['bias']) + # Attention qkv + w_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['kernel'] + w_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['kernel'] + # Pay attention to dims here (maybe get pen and paper) + w_kv = np.concatenate(np.split(w_kv, 2, -1), 1) + w_qkv = np.concatenate([w_q, w_kv], 1) + state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.weight'] = torch.tensor(w_qkv).flatten(1).permute(1,0) + b_q = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_0']['bias'] + b_kv = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['DenseGeneral_1']['bias'] + # Pay attention to dims here (maybe get pen and paper) + b_kv = np.concatenate(np.split(b_kv, 2, -1), 0) + b_qkv = np.concatenate([b_q, b_kv], 0) + state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.qkv.bias'] = torch.tensor(b_qkv).reshape(-1) + # Attention proj + w_proj = flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['proj_kernel'] + w_proj = torch.tensor(w_proj).permute(2, 1, 0).flatten(1) + state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.weight'] = w_proj + state_dict[f'levels.{level}.transformer_encoder.{layer}.attn.proj.bias'] = torch.tensor( + flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MultiHeadAttention_0']['bias']) + # MLP + for i in range(2): + state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.weight'] = torch.tensor( + flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['kernel']).permute(1, 0) + state_dict[f'levels.{level}.transformer_encoder.{layer}.mlp.fc{i+1}.bias'] = torch.tensor( + flax_dict[f'EncoderNDBlock_{global_layer_ix}']['MlpBlock_0'][f'Dense_{i}']['bias']) + + # Block aggregations (ConvPool) + for level in range(1, len(depths)): + # Convs + 
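+        # (Flax stores conv kernels as (H, W, C_in, C_out); permute(3, 2, 0, 1) reorders them
+        # to PyTorch's (C_out, C_in, H, W), the same reordering already used for the patch embedding above.)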
state_dict[f'levels.{level}.pool.conv.weight'] = torch.tensor( + flax_dict[f'ConvPool_{level-1}']['Conv_0']['kernel']).permute(3, 2, 0, 1) + state_dict[f'levels.{level}.pool.conv.bias'] = torch.tensor( + flax_dict[f'ConvPool_{level-1}']['Conv_0']['bias']) + # Norms + state_dict[f'levels.{level}.pool.norm.weight'] = torch.tensor( + flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['scale']) + state_dict[f'levels.{level}.pool.norm.bias'] = torch.tensor( + flax_dict[f'ConvPool_{level-1}']['LayerNorm_0']['bias']) + + # Final norm + state_dict[f'norm.weight'] = torch.tensor(flax_dict['LayerNorm_0']['scale']) + state_dict[f'norm.bias'] = torch.tensor(flax_dict['LayerNorm_0']['bias']) + + # Classifier + state_dict['head.weight'] = torch.tensor(flax_dict['Dense_0']['kernel']).permute(1, 0) + state_dict['head.bias'] = torch.tensor(flax_dict['Dense_0']['bias']) + + return state_dict + + +if __name__ == '__main__': + variant = sys.argv[1] # base, small, or tiny + state_dict = convert_nest(f'./nest-{variant[0]}_imagenet', f'nest_{variant}') + torch.save(state_dict, f'./jx_nest_{variant}.pth') \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/convmixer.py b/PyTorch/contrib/cv/classification/convmixer/convmixer.py new file mode 100644 index 0000000000..e3ea62a8bb --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/convmixer.py @@ -0,0 +1,63 @@ +import torch.nn as nn + + +class Residual(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + + def forward(self, x): + return self.fn(x) + x + + +def ConvMixer(dim, depth, kernel_size=9, patch_size=7, n_classes=1000): + # stride=1 + # kernel_size=9 ==> padding=4 + # kernel_size=7 ==> padding=3 + if kernel_size == 9: + padding = 4 + elif kernel_size == 7: + padding = 3 + return nn.Sequential( + nn.Conv2d(3, dim, kernel_size=patch_size, stride=patch_size), + nn.GELU(), + nn.BatchNorm2d(dim), + *[nn.Sequential( + Residual(nn.Sequential( + nn.Conv2d(dim, dim, kernel_size, groups=dim, padding=padding), + # nn.Conv2d(dim, dim, kernel_size, groups=dim, padding="same"), + nn.GELU(), + nn.BatchNorm2d(dim) + )), + nn.Conv2d(dim, dim, kernel_size=1), + nn.GELU(), + nn.BatchNorm2d(dim) + ) for i in range(depth)], + nn.AdaptiveAvgPool2d((1,1)), + nn.Flatten(), + nn.Linear(dim, n_classes) + ) + + +""" +源代码 +def ConvMixer(dim, depth, kernel_size=9, patch_size=7, n_classes=1000): + return nn.Sequential( + nn.Conv2d(3, dim, kernel_size=patch_size, stride=patch_size), + nn.GELU(), + nn.BatchNorm2d(dim), + *[nn.Sequential( + Residual(nn.Sequential( + nn.Conv2d(dim, dim, kernel_size, groups=dim, padding="same"), + nn.GELU(), + nn.BatchNorm2d(dim) + )), + nn.Conv2d(dim, dim, kernel_size=1), + nn.GELU(), + nn.BatchNorm2d(dim) + ) for i in range(depth)], + nn.AdaptiveAvgPool2d((1,1)), + nn.Flatten(), + nn.Linear(dim, n_classes) + ) +""" \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/20220428-1.5/apex-0.1+ascend.20220428-cp37-cp37m-linux_aarch64.whl b/PyTorch/contrib/cv/classification/convmixer/docs/20220428-1.5/apex-0.1+ascend.20220428-cp37-cp37m-linux_aarch64.whl new file mode 100644 index 0000000000000000000000000000000000000000..afc742f7214685cd3bc80fc672695262e0505b61 GIT binary patch literal 395882 zcmaf4^;^^L`}J+1f*_)FNrQBEh=52*Ni*pN=^D(UySuxkn~5|?cg`_j^k~LdeCOvM z`2O(Xx^_SJeV+52bDmve7kp5=cmKtmJ9i%4IsBTX*XKG=ZVtV3NB+^hJM8$X#b+Bo zfFiHeXMZnyH&-4ZF&7>S3lA%MQDGiu2Ul-D9y?cWUQf5VZo*G`1DA5LoiuO!9=*%` zl6YV58KCP`^C;V^`#^;e@|vsFaW*3s>cn({Z*dJ@NRop^2=zFcJ02S^A4R2p$^ZC$ 
zC3Up*Wp80K$-V#H7Ex{(-M_*~2zmNWx?VA@1mE6bi~5dl*4jO7C$c5re_$JChM#9& zR1H{?p(fePUyuV^q>Jr4-FvAg?m8E~>j&R^8~SHeBOND0+weB%iN#}FR=DtGx&Dl9 z+)ANrt69EB-7$0FapV4N|6}hwaf>Ka^}^57jABG?J-C+pzdX!_&USzBmF6?;Jd`4k z@R~KZD5xHUxqz#WPgVzI1xA%?DN3%N+EC6s2QvJa)H-qKgRAH4@ylW(FGz~Hg0x=G zbRS}6l`W8gQGWXfH)ah5$*;m%Rul)5y=`_JI^;7xQ>_i6F_sWeh!>#_jgHoqi{+v% zqs=~o+|+3fQZdjOFZd-Wa?%TEUV4C(?gaByUB7&Dxvui=<6jC!%pHg?BZhd-tk^mS zN4FV;U_WiA-_s}#^pfzrgcG37AI)eMGceo0=)(rZ_9j95ljdzZ{czf1vA2t0Fe59I zRz_(Yvx`E9;o@FEaX@k0vdko-_v~U)LeE#Pd!4zFPTx=4pYAQbyG<}x=2}?f>%aVt zRG+mW`c+1e#~eSgFHMtRuFyg5&@#yTrsIYGsz?y^o>0M{Hlyb1v3v)a1LN^tOq19S zimKQagJCOX*^&sEeEKepQ6{>HXpdb|3uIb1Xh%MyTb-SPItNkuJe_gG$UNO6Eq++6 zl5oZNblL?>w@0bNJ`)$;Bj2F|X*C34srHnLuZ?cP;>iV2Ovz#&Kp-7ZqD<%8bEC@o zan!jE=7y$tG5&b=!UJ<7o=88lSr$F1hH8|Xal5=OddJ7uJ#jX!g2IncbB4r3;0!=i zfLCsLe3Kp%;3{lLpswZakGA$7l#@Mnd3MWU2a|j6y5n7Ey=t*t&ng15Z+pt*!d_{_ zwttN9%D`?IfXpXq4f?nErt*#{yIwe05Q)YFu^X6|gW#1aAob8$6t0HqG%yJ)Qbq0aF>Bus>W!B*-3S+#KrylwVeFu88 z5_WNC#_V0y<_R_g`)4sv>77<~QSC(o}M%8k;{RveTivb+f+tBvK$1DTc{;JP*sb_-5G<}bmyX{Im0Xz+f3CZ?pY)j{G#`W7Hq zVw_^8|JE21?Qb$=O)J7aR5v-x79dy{h(=e1RRnBYIjx)5OuDdLZ4FWMr<+}IcaWLTAO9K&CAXmfa*HHo_p$p(|PLqjqh$lyz4kRlwbxK z$)3;$r#Bty7a(PT3j~{QZ-{zdstaW>Gi^ZjZe9*=p~nt=~V@N6LoL$t=YDwTpC}LQ0cKsDpJ<8ePf9UARsN}h+nup#@5Wpj*WeI3wm z$mF~)Q$}6A_Qglb`}RmHvsXncJwhGODl51iuHQxDxdwn~X0!qW>&OC}{Rz_*=O zK=G}o;?#_Ja}17H;eEQ8v=K!l=3Lksf`PSOH-4?`K4{MQGlN89PscI@-fUm@55rwt zw03q?Zy+M((t3-YHI;w$T=LG<2dzRPKw3$s`damhB;gwYiAz#uX%Mp%x9YFbPBY-V zaO5^Z;IwQqeXh*q0}yR@4|Yyx_06qaJXk`=rP568^8Vyh7)8zL*s_CQEImKf;@FJd z4S^O%KuN5YtkATUQ!5fFZ8qs5aEWeKMV}<9qJbF^8?pQqlZp|iV+3~y0K1m+@R@4IRs$!?ohv(}!=cX|p2b9yGsi6wZ9juyah+``er?451( z#|Xhs#aE=USu!VkI_9RKePyxgZlZVdm0LzLt?J5WcRY4TeXFal7=l1p3C7V>Z?~(b z;56t!6STBo+O(@N&nAHA0yEf#37+TBx%kZLS~(+G>z3$rXEB8h1?$ioI9j#byz1yj z-qZ<9YpW!rOQNdujc-kuXA}Lti=e>a z6IMGwbT&oiT++fAZ0{V>B-@4iQQ0N{4i@4GmbuNm2>c#_0cuTB*h*83U$e8_;3~Jl z63W>AMo{);_A$o$2Bh6YYVb!mgHMkncCz(Xe@~6zDf5uVP8JVafcI$sakmxIm*_Qb(2~5QXp>gQD1UawEWdqF13>UuwacHm|MnD(zW{L&DHDV_2H@0;tlhct;K_J zt3&DiC5`H6v+jsBto3s~vuc=50c^%e_*jUFuUmZ!*EE8}{I>39QX3-ayZpUNdjyWn zYO2bLpbh@?d}zmXR`Bra^*AEX*K|I#$kZImAUtoIdJu?CJGQZTp) zX82CY21-ltO{1iBt`V%%t(nM#ZZ=GCTJ8`lT~sXyQjEO0&e*M4Ee6$z8Bw5eRi!<{ z%+pp$t#AyYMy=`#2M(*jRCEmV^GC*Xv$cd5(!OmiYJ>hiEZB~M^y?bE(W>`4uv+Q6 z&@F{XL~OeMX3~mmU7+c7-{mWkm9cPxiK778ry!H*gJytF=AXu3+f54cEMyKOb9ePo zk{{=yb+>O`E_*)djFocT&u4)+10vdxRb4cM)LK5|L9OP)}kcsc9cEokZSU z>%A(fM039Oip^(_h7E;|3a-qp5rCo0liX$Itk?J0$lCR@F2*n`ZY}%nE{7>S4x5PT z?#rIRtjHdX1Mv7;ttaX`g!E)`ZvqPiW#_`2qWurvC84ApW_Y1q8xpORGe#!x$_h`( zkjActE=_Sen4)-@gsV?#yVII)>f%DX6RjBBKu^Det~T*WYX>d9yQWC+38 zm5D2@uB78u+qcZj%FO!f3HSMze-zpHxw}`5B;l^`oXgtG1`g|%s;+OY#kb0F+;!De z6?Hs$aG#Hh^Wxf*QtRGDY;$d|*J_Q$_CJ{D19bOmOLRo)w3+FSM^`E{9XzXE59%*W zdK>)RjD8^GSXsF);!T(x`(VDoQpvl>BU@>HVx|%`D(OqLVhmOGEFF)}R*A`pJ*@4h z%%oWtN#VG+Je1mQ*SY_C-)q4^sVMt_Qmw=J6W0z_g2d`-6&O5@`lzeoiAF_jB?q6` z2mX%yPiOY}ojBS)ZJb4D(V(Q8PoWhN@dF_@?#i)>@A(DVja45gRJ5~aU{ZfHLLh+ zz@)X^?WvuTRW-W*dTnB@p<$2JS3#)1Yh~1tza}dt>ZW$zxWjn2cC7B(8hladyHI8P zwAndM9yIDyRpq#@qw-SnIbIf%$I(+AoB3|%czZIfS2+px{4pbFc}K>#fs?FSN2R8iL4rZ5ADcl zgfDy@3+nJ3lwigvxsQ|!Xcr&W6eeiOWIC-?{`#DI&QbUmEiO>Pqaglq@5f5&#I%)j zYE|I|rL`d8;3vJ%taZ;SNAk~FcMk3r?JO*o>8G~CY0?s?xL9iBwavUk+TV=__Cmd) zy(B+T5SCJCc#&_5<9KYw+v@GJlb)bk=hagwW7&talD=hsfOniL+na2N8_6rEqumL| z-X9L58;22V%N?nrEBa#QA@ycYl88CrtGhHD;7;!6hn9x(sk7zPRh8W0ac9cj0->L^ zo`s)pPzx@J%({LVrHFCg#AS%3j)B0jkADNSb3HvS|IpA)q$^|=MV9N>`~|c+)%vSa zezc~{nXPopIRU45TKt-OMuSMU`Rm%B)f|2Xd#S^pbR7Gc#k>JR7&)k$7Uw?dprWm> z&{^@o>{t%>q&#rM=ZD*Wd`TumGOxl6prUK3FhGrj 
zC6Tv*zy6WLl4ifneTnAdVfmZH+rbpeO#{UMe&hZBbm*in!Nms76a2YDK)regIBECZ z2nS7$RiSOotShI-VBYX=CW*@_8X?g{1v4+||7>i6ltPl&bRE~oqlc;~(k4gZw7x~C znSLkLoogQ-xIbzvL=u~LrfKKnY-yG~C`Rn;Xy^=?4Fxi0Ki3va~vEVb8XsU=q zP}-Lha$zW%MOw1whZWqjXwtnVN`y~=?0r7BRX+Eqtc2O+d5XZ;>pIWW#fn=s`$3^r z$6P7Y6%|>wzK5Wd*FgF7al4@3x5(5DnWKj#@bc0R73& zi{%(=+?cSkawetvG-^;%q;apLQ=VeMsu}+b4@04m`C7UyR&Nzc2uTW4Z;b@+FJ15ob>N#k8? z{LR{FOXecx2}{@Y1a%@+0J@C&q0u5vI(l9J#@nc!mztSJ>fsJnYZ-Su8%Pf%mNgiJ zwaRS7)8pTZJv5LBWl4z54A9hc*Po*n<{py{r&W}y>**>aej}XkU>{;dELj`Pcv%yE zmvfX>IorFr*-?OnT1U-mI^Fn_F!`)~$rqX*4&KM@+AYfWl!IL~myFkVegday^M^dB z67{cEQ%XG7N_;!V^lEHoJ=72te0ALYWn+c)&t>;YX7!J95q}7j%tXh`bk)qC zb8|W9GIsT6@uhqg<`TB>38|``F!OiSWrYn<=@#;axN%VL6$6=bxTgM8)XmuhJNTF# zw);Ej+HqI?sj2-YU^T|8J1nZhzP9hPZU)QMm`UHutuZh=n}0?$S#^>5*{`SAp=D=S zP)S;1k0&MWo09{h(nHwR*-QtO;M)SX!4smYFZa(a&Y}??u4}2RcS{eGqTAO!*SFmI zO5fS)bXR{Qd)jzXSB+NHm>D=t8U1aTBs6Pgb}k|jQ*~oNK^2q!K{*@H8lSx%@T88E zk~~?tUjnSjCz#3wBQxR=HpQe>Gsf0*FY24|Cw+XK;zQ0;9M6maAns;u9)t_)F2_PiCST$r_CUm*XGT39jRaBgkh zz9E`2>Z|#urox@)mtR?qc87R-W+U%s&DWWmUH?6$HQ3?x=1Eunx@DMoyT=q+DiKAi zyi9D?f~H|N(Kj+M(e*0QB~AR=5jdFaFSOM!$5vV*@kFt#gKLZ{K5KcNbEa1F)kmeN zwL)#OoTREK+u-iw>hf}N95(AD1?9%{SODII;zNwxk{CrkQ62PG#=RWe153|?NEec( z6(5vh!kDJqm6Vj1mDn)8ify?QVfJjNg&<;IP2yL$;~{f#Lb`1aU*KqKk3%ILAh zxEDtlSEFY7M9=#zo@ik`UMiCt5|%GpD~2dod?3{@rh$q+@GGcTm@yOMsLAo+ zt?A~hZcf5^*YG48n^0;w5!$`74ZoO1zNestbw98+RyNQNOS_|@$y--bnj_+?p=nJL z+H?VQO+yI;0L>ntA{U_B{&%>>{aXY78?Uxj70{=Ws-yj{y-Us*pw|mvbxRYC&Xj+p zMcOmrtx@Mb^(S*PuW)*ztzBilt&g;Z{F)NhAWhDe_Em*yD(3jXgK6Eu=WFZpZ%^t^ zZ5DnTB4QgT1;{snW4DVjO6u>AoaIG+!12hHghI-MT44iwI*uzpQ>5Nz^ zclDw2gm`_ko19Di%VtN8!NE-ThsQS?{ns;tGc1yFU5`m)Xp;JkTBE5s*%@mS&-{zt zv8r(f`-V;)jzqX5m@b0bdx*)-&X=AFX?6hzOd==TbdJ+~a&HP(mDK_oV??6UcXHrO zW@oa9YP03V)U5Z{Rj%UHOjyb5mjj2nBPn--kKe)hm#di5#uQIV#cmS?6XhI%9MKCQ1oHOIx6-1E;kK|7{WP%i1UM^WKi_ zDZh!khnkZE5jFc_y=_ViO#j>5KYcs#6og-!Gg6!5DD)zXpy#~V z1KJqqCmSqGf=9RCjKGE}(VD&)RTB@|lc(;Alor2QW-KC=xZXtPy-Qm)twaq$Y-aE|Qwx zPSw0iUTd_~-Q4ZfIY*}|X=JF$%-WD@Vi=M7*{oy4RQYl0&*Gca(nm+H&RKrkP$m|O zjh&*5n`rf90>(1gA_=_^MqUgT7QSzfh4-H&xLUS#vD~b;O?a{mG-ae!tt`Q)FV$7a zXSfi$5f=+X2vf559{B$7B~=>FuF`W}t3^eFp^d!ZW5lbLy#v`|iJXgm#Dl^&=RjxZ z_Q%9NOlB^c`#=g@I{wynxi z&?;=kgJ_Ldug2y9==@8S=ONU!;L~70R&ZB2RQpnb`5_2rap`YDB&!eSi|v&`-8A3S zZ|`2GyTp#y<0cGh|(aOA_t>zj9Y0^p+4!~QU@=$uc46XZ=K3xHo|B!eX$id_`5rq&I%4vY>>nbi1Rm^%f?l?z~H z*!;>K_as9R5yOCceF)o|AU339D1+U0wH4Aiw9XFs@#BOjS=ao1v0Sy-fkyR{2sR`U z88G(ou|Tgu)?X<@41o~*R<;ij@oST@4`Sg*(8n%dx(iq7{97&iaj=!4@1}EJh;jmQ zAe9KcrILl*2hcTyR1LT@i~&YZyonH>kO85yQeXPcgnPw=&=UCFLeH;1fhSLxLK&=) z%Avl3z&lBBg$482C|CkIjo)u#`CV`ZJrgO_sl23jjeSB6(VCcDTS|DYxNMD*Dkh_lt zf3TshOeo}2TO9NO5n$*fg9tErg0n>0hDuRw{66srb+ba65kY!SxI*z~@+We$5eJ1# zDTD@6ZuFiE5FvzZD^n@6w*jM{dj}UusIvP{=7|tPCuLN*y(fA6(os-!D^I8pkgiWf zFdIkS_cjz44fP4dB|>$nuuh?qpO9>!kiioJD)T=lVwAFBP_P}IC~o-Vh6pn8nWWE` zAIRBv&ENGE>OyoafPU7uD1i3-XRAP{n=O(vw3Q#t-PbB0ov^AJdTr* zW8~!38Xvz5L;sB+(C|Ka4G}m2NpxTyFsWo2}-BnF>?p; zm}Np;Nl*$Q)BDWGPY>Azq?4c+%G`kyqEI`be;Ray`lw`+q0CgloPF$qK=HmKBDnjo zUTEAWBvB|}^8N5|!^M4G1{}|pD z%3~XRkkW*JvV8{pYkemvMEHnevuvN@`QREep-z-|6@~Cdt8*$LAe`)6-nzTf! 
z5iv}hr2R7zY=eA@kLTLNXRMSPKJ09L_+yX|fn%Wm6SDQ6YUu0BN-8XS=%gKTmFSuu z$o}tEQs8eTmbcG92+fDr5h#X_d^Ro^5)g_@!Ec29ug`>XVzIMD0FC%}EkXR~pio>A zwC11N3V4^b4nHAf@$Wil=Fli1QZ(cDBueb#zFUE=94HkLE*WY>guj7(R1@$;d>YfC zWK_XF@E4NdpVN8&qL62ebRi-U>`I3M_<_WIll*umU*Q>wL#nU&*9K4iWieyn*NN-D zEV6{+J^YS$dLs${-n9{hC%BOX=Nr0fQ$(*Weu${ccv#`m;Y6lf~wx z>^0{IVpE6(=~Idw4dSat;!|5AAzwS&WvnHOzsr-_r`n4wLHBm0l_7UKeoPA~@Y!B) zPiaK=&|MrvW9fIhqj?KDh+?K1Bjkz@OxWmJ7X0%H8=b3yrV3&>wY#{+FHn%-)R-=q z`}3yb$Uw?+3TUeCY@`s1b8~W61Yewd22vIy#i=o0XyKdh;VWP~3ic^}nhoEQ7A{u= zV8uoU=O6*(1t2Xn6^0zYl!x#2eGXDau;F{Z;NkM{gMksapO1q6y&}U;D&i|D_-QTt z26=dLvTA557oIyfD5)gr#WUsW`#D3qviWM1>kJo`%4J_&EidG!>VyLL@u#AHgXW8y zN+Sg+$1!4~doqzi0mArA{qU%6cr*Z>MUDzuC6k12`^W7FUxYAY zqj_Ip4St^DdxI!W?Yvm=&%Z-S<3Vomw7~dKPE4`H<%=O(LeA@&+z!9 z_+>KuOc|ceB)-k0>+%##x6Z{@e@s(Em!6xyh<|^T}kir+Q z@pEe7xb#Od|5)trhoyTr{I5qd5mA)#xHB(m+Sr& zLy^0tn=%X;L%X7=2l|%xBxutYjCywLA{|=KET<@l--J+=szh zt0OVU#v=aWD=P17wu@U7)beb#Q43x-ESu9w3sDvsGt5=zTu*Hcv~WP4lpp zH=k`lqssnANBMoNhRVP@-hNQ-Q%nuu;`~q;6^+!P60W75o%d^_x@=6{-m%VYxye&p zRdaWV_}QIeAe|4gt!EuFwjCJ($|Hpseli+iGN>qFWn?EZq!pc@$9uJht764}=#zke z%ewiiN}{Vs)Rsjtd>Xv|%Tr)Qgz7@^@oy=k@K2m)(ge$CPX;<)*d*3+ht@8RodrWD zGx@qUX9<1_O53VEsSafBJ!ob1TdX zct@W3LHlutYVN_iogURDZWSvjk!Nj#I{-26-n>W@&+l}=53FN=jMFl`X_0K!!*#bz z>$HZ65SNYbqTRO4s|nBZgA1{eo;Y64Mz=yDk)DAABd_LBEvpTy#-y*(7!G3WqhQL^RQpj_P2y8}Lcp$wR1+n{AJGhfU_+KeNzBxw{ zN_bLiPAw?&&N^+RV0j!37?F{2bLY}c4E!|6N+#xjqF_>`FsN-YOi=S1J5qSou(?n( zOFOsZ;yEG_LGkeXx&5CU(seTF-Rq z6A#mzd}?X0{XL&RX$azYslElPJwvqFnt0fo797UJL|idmTKPPTDN*f|DepYX+N4{G z`OAcw7e#)61&|$HM%|F}&DYJR(sQtM5~&*wvu8q71Yg}axBsSZYBFOAB1ju2X?6y7 zGiC`erPFGOzBCqfQ5U`HHBaz%NOmT-Usg|Tpdwnf=33^i_X`^swS|DSMXjP+N(i`F z(gXm!X-^HF#clmc+g^RTp4Q`a_ppOrZJ1^VfVIehmc6rOr6h@o*QFWE+I?$c(%3x` zPqFJO?13WpmfL)gPY|HCYm)kypy4v?BHYgPkF-0TZ?28XSoj_TAFWhU&R1pB&%EIW!2!a6HufO?hG33NgXdn8F8Httj313x7Bk3r-GrfHd}b@N`7b37!D@cVh}4#AM#s!i9YJsAmc z^Wq1hOc3%5iG3N*;iNeTQ#^O#Jzl3P*fP^m68X~bVK5KFQlhkw06~X42Qc@b@YYA$ zUynU7^(Abh;jm}*9K&GBm^q7z+}XtL<_S-C@%mSSpGv9TGS%FK55O@_nHYE8v@^|D(zVWN;aIPY#ou;yYRNsurBmvnFXlKlh(XWm+fy+Cuaq z^_OK+$cns6gzJ#}tj`A{?W6P@1h~7*9;?kaAFJVJ*u7|Das;EGOF>p~^!bPhT-NcOHZYNXfbar_bEw5j#v#l&4@ zpTF&l&JR#|hrc@%bTPNnLyrYrX&&8}zE}zi4!jSu{}nMcXkBDFd?~y1{lSoaJ4B(q zY(~EwBvU#Xba%Ta7R#6+cWqseH_D!Y2>VlUXvDD?-+5s8$n&o-RTe01uqG9E{@3Ix zqyh6Yj&9kgugl9UwrD=20rc*p@Pgxq4ZkgL44jXCC&yw7*FLHx>Cb)^taZy{pf!9A z`_NcLVky*w$mda9QE{j`JBn9R{#&wl^$C>RNEOsUEvk9e^$7D_fGKpe$1Jj^`&m$D zGAb37yORh0P82zN*n9iFI=Ha)+f_j|cXGkedF-KNN0&t{CYiJ6c{M zl=yP-k{O~RO71D6Y7(&A+VsXphpoFmm0=%vR~$GPa+lH!^5?khie8pR1AHGPMWC2& zvZ9O@+xr`Qi8fPOZ$mIr!o; zttBo>(i|W_?CZC+)IIYW*w@9U4VKQ_ZEaNo_NlJH+YO^%%as7{-K;)GrluYcevkS~ z<2v8->Oqf1$^3KVX7Z+DPr=^fZTO5*z^Awi63!hgG+>*7txwMHFN01`7Q*)zF1C4Y`w~5Sf6gjte1DzrR$bJ){9=R(DwzZ@x?m0Fq6q%!J|gS@iV}d2({W@{ zBtb?Eg5xY(N`Xh$rBQ$18<|Hs_>=cPYCf?^avnKa4pWTPP?|;eB;M!SHnY)K4L)A!A)RxUX+pSv)d2-IQ zs2v%0#i|93L`-wiLiN>eiU7vhr>&ajd`+b-tt^iSM$)XOHNsy|Yt8K}J|k#Z`tf~1 z=AH6Dxs^1PNbXY?FQd*Cu@1i6pfOppm3NEATCZ=FyNGC1q$zTUB}6yuNfySM>*jv( zNp7RcqUnJ4Dzi1*K5aAHK1;k-VO~o=%b+-E(HhU6)7MNAe<5xmAYaqSC3-AIW@*jA z{`Pv>H)gC92`6XHglqHO%J5EIl@c#z)y5!Uk)N96N>7{b-_%Xr7+gm0qFyw( zg}9>?EP(zzC`#;|#X|;_+k1+W*R;Z$(F$Ls&pf9tX^3zs3}ITqKv|PluRC@mD949l z8y+!ypESu=s{_<+ay7gTudr=NFzfBQAhAabt;DIzR=`+jZaiKxAN95MypUXC3krOn zPjPlVcPV|PYH#%rrMi@S98A!3kCOl+Opdu&{4ZU%C@=F>)XQD=jj|yu>54eiuUKoS zv_iI|Bi>BZS#VkOV){Pc{zqq0E0_3r%ZtaC7W9XHqK8P%IfsMG1X%=$$%p z5=yUWFD#;v3e!^LwF3X)B2P3&+=k3Ny)C!4}1hJ>|<(G$VQoEj@T}fyhLQ; z?F??EO%m*ic=MdEb&)+f&2}=)aINx#8M60~ZQ#A_BgM|-T9U;28KSwCDE#Q@nhmcJyxS?o`H<#C+g5Spz(Tg(Vh{h} z+Nmm^pD^Cu2ViG2S)Hc}Mlylh; 
znx+j3Gq=O2NN%M3uDLFNv{)iY7ZP6gc~9oS8Q}nj){$KI{`KDNz^!9UX?>kB!0oVV zlYYgH)Al;JO83a^?J19xT#tV=!Z=9oC@A?A!m}-HryR&o{PbFmDyAg!LDSBzTbNwabMkOUb9*>Tl2B z;l)JuZZBjhx*eJMUtDrw^ZC|4M3v{|A^u+BHLnP?myI~`Neb=kfQe{vq{P#7OX^|+d+pYlPu`G7i zT>(bWQLU~|jw;T$GEVxsUOja9Fgj?3{qmyewtztB9RHO@^Pc#TTiJbku*~aM;PYjK z3Hn8zVoz+TdUDn6p9eSefpMtAac_zE2S*mvfsk~Skh+BE2n70b5QxULu0U&hfbM@8;zpqOiJfKL?$Zf_Df5qA zvOza(kzsx^jowtI*A=;X6pC;M%r!ioW`25UD1>kQ-DRVpj8u#y_Q;SU2&fLG+OG6A zCoNZ8Zqn*mqTcHi;>eo#*nj+zVzqyeO=4%Zn0AYTIi6{5t_MGIh0WMn2t%4gp(H$2# z_2@rkpR_(EL=Vw^cUzF0v5Zz~%0o?4zD^}9(2Bq8w;JWu@sBCf_xDnUN0221V||-I zUfEZGmt~kfa52WGNp7|nb1&A{rB$YMeQxKfsKCn~N97G7SdkK1Gxtv2-(n;MCR|Vl zD7&~|@ z@QhXNK*augt(;4<+XzoX)Fg6SuTO^3vcSm$jy|{rp3wB)YhvwfJdY_tZm85m^bnf; zywJ5mwEmj?LiasqsRX77TVXTJdLZ|cw1ElmIdMyz$WtB0{dCzQIwR@3eRSoysq(E0 zI}^P5o!HcuOFj5gm){4kXdVN!TJkxq&*K!$N@y=LY8FF{J<1Wm5k0Gf5>-OvBVUCZ ziWE8N_Y8S|*aX_vi%XBQ(KgA7-S6Quu@2w_AYdL(Eo3g+{fr0h?Lzcrr<(nX4Mue8 zQWc54lpmeG1+CmOwLTQ1v71k1hBs_s)7^ev`>)NGkG-Nx|IzSFNZft{gRyd~7J;3| zz~+L39%|*C3tSA94UN%nVjL_!_kD5!hwk|)B3v%On|5`g`EhEj~?CKh~|1$J3~5&KQYeYw?<^En^8%d{}+ zC@WPaNd5%}tb9gs7fVjKvcJ#SVqKUMD5*%?e3eXF3vg%1DT`a0QLD2X*l~{ut@*N6 zJuLQiEN0X*=hOCHHyB_^6pxNH#N{BT_T|1}qZWNTH!sZ8)FI20)4nykon{>f>0K#F z1i5)y)GX-IT5$>_n}l_0+V8fv!VDGTqJgI<#JcQ0(9}7v+et~PBO-=fcB_R9wowfB z_zpj=BlMOsx$rCUcq(^|%VNJM`vcZybqU_%ZopkX`ZI zKlIeX{p0shyT|>xmnN|dGE+)t4AR&O!y>pIy}!??B%<_#{XElwfz)Ed9dF|R?iDS^ zK#2VL}EX;>K!&GVY{DH?Rk=qrTuStn^yOfWkjSyi^tsx-ltM%Wmh=K z?lUn5`wP5a+gmc2&Qjfg0u7cZTj9{Fc0qxt<7tJNCYz*VqH<@<_ zFkTGJA;%?G@zI4V)Q!g{SK5gX@=QWR@_FsM!&Ui9^V`sYTQ)!4_3h=w$Br7IK$2JY3uW*u)D~exK#g93Xco=B{nL~r9Y_cGk0yicQAWTt`{Rrknbr! z*r6R2bF@knnY~`*=ciN{Z`Tw7&$DQ(s1Z(G+3nJ=8c@4@=2LZjPTX&niO{CO&gIY(I2!UDZx$gD77z)D- zKxIB&Mk}eQAP-FxeNrWFMMd`yP2PXH>jxy+*2$hno7dv_>}lr1_T3{~udA8glVf5z zaHPBO?(X+S#HE~m8mQ~lpR#fs2>BoLvQ7>AufonJ|dMzyXDuWEtO#2 z4lMkh3wk!YYA?;cg;GOV&~K2m$o6`!7qz*C?c96_3l3Gfb+zI_3Yz)69>p?n1@c=z zqT7h|+wFU``I&G8A?PkHRgRoL>ClP>T*oEA#WE3x=ke!q@$j-?oM+Ia?*uHDZup!z z2WOiL^2Gm>9MdF-8TX`k8`Yx<@3kX4<9>?`OQ4@QfJtMAV~mEz$sA*&F{oVA7ZuR@EC0Ml~hfU9l6{B!x;)tw`{_dXuM>Id+>1jFzJ-=~namqBX5 zvsRm8JRMkq4{*%xK^!;gCi$~T;+ChJ8_Sl@Sqn?PriRBlZ+#Hwrs$c_dVy9(>hV#` z@M0emt=xcV)M(ZZX)nx!pqz#K0SPg8C%e5ug|r6JecH{fU9LwwiJ8(v&;v53s<$sA z-VEB!ZK(Pr2yOibjdN^lK9K!EO4zj0@xb-y13ElyYXq4f)5XHN2uQU(MTEI_d9)n_ zaE`ng7}=NLNF*Ep19PyoeO%@r4n`djiRiI@Vym3DUJg$6m(r`5BMWleW{Z9m-O4of zIM-#1m#u>f5Wm z#b@Rn7J_Lu;{%AE<0-?nNIysRX>9{F^TSKVDJnKpf06WW8dYr2p6%7Q$;?YV zL=x2dxmR9u{Y4CJ=ISB%|AJx%f+{wg|E@CmeXj&n`c9EHhtY;a3A=Hf7+AUoX14MT ze8i7rwi29N*0-^bJKduX9|&^T5Sl81Tb?cm^9`s?;z6t7wL6=i|JR%H!>h87!h3+>DuqTjS~n#)Pf9>ZmkxS4=C zfhyy;l^RTGeQe%c@Y_Jj`U7o>pxP}snb~4WxeVke+`LX4;LnInJ43!YbB&geyLO(t zX`jOl-?ZCZ-n7?o<@u82n8xo~WRvW> z6GDhfl1)f9$>z>gR(1#phY*#$_t~4XH)o%DXPi4XeZN0E|H1Qmp4aR1yg#1@7E4}{ zQehJun>!irX7jA2KS}8K@td9oRdbMp#g_rwF6&DI^|dAfSq=U#l=CrSwE{4`+{L{- zM-%Ut@+m@PE%D^kwRw48Afg>3bp$E%KH#`1-sJjRIE8S?Un49yg914onbavds+Q#h z5VI4}7j_z+eMeWCr}iJEIG~awy0)<{4I?VN^G>OS&jI72Bvfdcu0pxICkKQWk)2 zmFW@kthFYH+jXS=;D8!(tL({CRK>h?-1$j;feSQ^w)NxQy;o+`(l7rKF{b3Z?7xa8 zg;O)pGweHlqh@ZBz3Qnot`N$yW!rQbWD@`b>dc{-TYhCvKyS)_ zwCfFRs%j89VJmNBGpk#Nx-uTyN6!vm5yNeMLoZu)tYuj%pqSq8G-!`)abmj zO<$vb$7VxrUY-%cC}>`Ld-l{ElPd&VR`N!CJ(kg)nk$w#&brHCliw6KW18<-(N-3O zGkYhem3;AHrKzI%hIm-1_`XQ(sJ9whOwK9%J?iB@LXU=vMCH!U#d7B1mX|w|T7QiE zRCib9rtfuy!?UXR$@+T5)-_uXnuXW{-penE)N;=_Hb&y!(7yPT?esrT1saVp6WmPg zPVRv)#~L3cC*BrN&r<0=s%~|wjdJ$oRm?E|57nP7yHy4Dqee5)zrpE9;!)aJW_BWS zM-XZ-QRoF3X%RTXIigp+U22eOfeeNtxA@T>%mw(*E6JL!80N{kxf1#o_3Ov-z_}&Z zH+Tu`$rysqH^if)yU8!SdjWUlK*g*Xlra^|2FZYD?t1QKtQ0zKI34_4&K`9#a>avJ 
z8q4HK*E@hK2Vyl0Cc5I9c6Y<>Y9gr^%9HVjfvK+I^WMeMXB?-TYbNt1fU!~v%M@@y zp^qG(f)EyJt~+ZhOhh-AjB(}ENjJR4W z)v5Laz_29F1F#)bl*o|efLEhZypw|9-+CMYT>D>`Q%^20ny|tQ2&5~WY_Pw+DbsQ< z3$Njl%bo@F)`=;NnSCXI+M9XTzi)N<9n^syZ-uv^rpOCvu;0_}K;=?J)Jaq522@J7 zA*+Ksa{s^!Sm(SywFJ{ZEsl&NxX}#yGDl)0>!0Hk@Kpsbm{MMN<~uTz+ocvgQI#{( zu3-}b^A;;&`>C-94Ocll@iGhDJCwT`24FM8O#akOtG8CESpp=-YhGvXyJYz0g0A3K zK|zuiWR^|)uojr&5?9_1LgZl!U~gECq22S^_yfIh;U~RkS=(%nZX5&IA{}1X=ivkW z9y%VF%l1FSdYuoYKV z7?B92*w1=W4J=9Z?Fnw@aHCwWK(n`+qAL&ZttCShEFzpn2D-o zfN4~8$w${DIzl>=q0mF8n ze^4rn`?xskDY|DO4Qi*yYY0FukHA|rWr3(FR;ZdIR4P)uNs#y1e!yzj122Bk>LG82 zZ`Zf|nVC_uCJbBfhv)cdxu}k4FZm6x!fjgAjfCQMh^XRDb-ged*S$P9Gkjm$Hwzc$wVDtJ8t{K3m+Ph% z;lSCVy34*jIOvo;{{to*&7|S@2Nk~lpw)3gMfNZYZ);JxY$3})I#Tw@Q^EBJSH46& zm?VtJm1En(>l1dp&tIxxnbLJO_mysc1cWvI4*gwO`-LVsEXLni(;r0(^NW^vc4&qZ zJH^g#h=i#0d}asE8CN1pO5cnMA?>%|1tP>=P=3W#s5P2pvo{6BZI zdvi+zMSKS47_TfardO7i=0Si#wY68DkYqQ*3DMkA0Dg^bM#hU`SJNkBcv;W zBMo0)mvv1E-*M&rQv+N`457OC4-~7`%9#x+-vf_lH{TI*JeIw~CH2Pfvg7#AwC_YY z-7OqM<-mC<<2l*p&jQ%gs1CtmrW7pYQm0{3cl~aA(j}6}^S_Z!HfcF<<`%s=N4kCv zkPB%3?k~qDdGlHKe*8U2No72TRF63YU!6B_FYqrlgdNMqHl)r138=^evl|Y#RDUwP zTrkNyuy3 zbi`PbT>G{!k?Wc}AeuOLL1LmV5#1)-)BGmRCMg@z8*hyeR7wIkqo$4(c(|q-H~7T*{-nQRsC9!N=rQE=g`vMmbs!STp=aN&ji)1Y{nNgD4E^TB`j1q?>G< zJ^Z#fzPVVy5Ky~)@p9UYFxizQskHPyk z6^Fqf3Eg=4Al4fjiaZ~i4YwQkHcX#?eBsJ2tP#)7s)e73B^R+4P~uL{w3#T~QEcZu zKOnEQMbGWm8^&zMP8?8ba)pb~mn|k0#;tjBzz<*6uHiQA8=K!yB1RPfZ}6 z3F-DBw>gGuYIkMh%o?!=g&I~Q_;k3sUeaDkE)cBIY=pP*YQz*)w`MWZSMPX4%F< z^otp2#a{z~dFE0TgsqNF8FpXVY}7UXy6m}vGg>4u9Q)=vH4H!4$4H@K_^n@S`TaWt z`Mqqhh$KOjcEQaMgPRXRx>S9{J7jM}q7^$bZAOycz3mgEBRpBMWyK7K!IyOXM(_1e zp4<8ulwTtF*-1WaE#x@TW`bzck94e+VBJ}lKR>2@tZ?#h(>o;T-fo_;qb zz9VyFCUN-n7Ys?o-~3RRAX9k1Ex(Cfqo$WwpM6 z2kz!HsMRj5wVdWC-#$97QYX=^1D@?^cF5e>Bz-D_s$J4y-61a2xN3gTb5@YbQd4Bi zR&x}Xmqz8C-27(3kfojDeGNGQ`JkDYJu>!0l3WYR<(R|-xAQ`tnWjaBB$N_wfcEWi z3k9R9sF;n3#u~1G1hpdJo|(STzr#|sz9$$>gj@@ovqDRnW-Zt3yy~_e;^qWKVZ$1Q zA9~<*cKl*B$8SQ#pejb@sW72F;00zC7p_3R$Dk`}-pyW1UFui)W{ zavDL}!*|={@f8fPS+@D->k#)=;MtQ?wYxpxS;tnBEM}K2`jN|e*tg}EZx~&+()s2s z(Z6=(Jn7O+%Jt1r<#Vw^6W*XJ2hh`yoZqbtt5wm#q(9Vaf>#P^yp+*;eOJ?F4$Oy*Sj3oU_&j{ez`Lw7d;4Z<+Ne)T+@Y%Xl6CoP( zcUkGJ)f6=47H~!>*g~ZjV}jssL#6Y9{Zfe%cARF9u7Lb0dskqxt_Az=(3SCJ)MaJ5r_9l_#0ebBPQ&H1ez z!7mMt8ipUABK#8RyT?L5WN&ZHnYG&NoObdndrA_br_scSz$z&F!0g^3tgx^M5W;17on+X*oGp1@;~n_?GLI4e8D$V?qP!hm7Y0>EKJ_se7{4r3VOe1i3Eou zzjkF>6!`0nw#vfFU@}<;xEIH8Ah-Ex0{!15lYJAU;oTm0f0wtR9{xvZ0I`SJ=S+i- z=y(lyh6RY}?bQ9}rF(F}1SO^_aD?;AnjJ6oOA zZ(h#4yK3jLU*K3>v>YV?&q~W_u6 zGtQz?2b)ikrWBk(@jb5j08;av5eD~#oOH{Q9u>p=VVb* zAm41^JK>(0X-Vlg^I``9$bg=zC>8xV7AYA#U{dolY?AbSAL zJAhRD7kLXZb!YrIZ$*>%>Y@FQp607MZLF7%NMF?YM-!x#%hQ;-ZqcCwDp>E(?^uXIRgFJ_J@L2cUhe$WQz7&e`Dr2(K0*#hDdo@H&d&3ecvAsQqz&IS@gD=cF0+}S zSq1thnQ7%T>+e`Z^qHn6g`d?TmdG-E|FsV^z_qS4WZi*ES*i!}0;D7t*#wA;cn_%ww}r?Ko6G*| z=s_9X|3fT6){_P@=2hAV@0=gg8xdWBj>j*3OZ}8LR&=TuvVRsVr$g*BDkXYUlKU}7 z{tp$*CsmSREeh2xhOpBRh9JxbmF^_q&XDt82&h$Ph;jJg48Z2^!!0aBPtK&A5-tz^ocp=M20Xy zQg%~F(f57P@GQ%>P*#=y(52>TZ$JOLf6l;56~F=hg+$-^2K}C_{{!XFVsPKO?JA4} zsaWMmc0}Tq&3y0kh-R09kp~PB)qeuj&82GU-VNQo^5!Z6h*!|C!>2lBj?u;gX8#o0 z;nRNe8j|%w#Rs&;h^j&I-boS#K!W0yj%|iqj#VaOzt`#v$En_tVR*@<)F!EqBU*b^ z5Cc&OlO3Le(05zW+~WtYJ0rKN#38^T5X{W2i?o$0&mjhYr*h8a4Z_~zb`rR{_Fbv|?CubEAAEhY!BS;7F7m1uoo z{@lqqnasyreXi!Uw3mCM+nG6$%6it=(nHgB$#IiHoj+&Zr*H8p(Zs;cVyOvNT@DB> zP#3;GiTypx!{79C`P^fYn0tOMY6KZz`Q2mZHFVvjB9IxZA@9y(gEv|kHi%3m{#P&) zWd2Cp&x=RGtq)Ktgue|lV7#b`j&Decn>=Zyca_R~7wq9-MCQGN`Ua(lHzN`SVC+I_ z3y#rQv4qzzvjf+wn%Jhm?@o)RPJ1TmA-bcDHt4K8S zmDp5oeHtF8jqQktRT}LN!V)H_UoeA`8I(Q-_gc*8TvwahScCJw@_>cPE)~9oByGmQ 
z`#UClw}7?58x4wHXBT5z#Kz)Fpi@fik(G6&b1~Q-y&?Y!HCvc`&G37!Z012B>R1fS z;K8MVS*7;mY2HEgH(mi)v(;e>go}QLL<^ABS2_K?jdiqg3a&=dHKTGP^wng+_Ytl< zNduXp7KiISl$Wd2S@}b)UA`cO&s?qeV}=8nx+ z8#(@&8<<=IrX6n!X^xoP3shd92OjhDE(lx^&+{cPa|k{OvFs*f~b54L5~gm`kjbmrE0b_v#P_=|b8!dBz~V=h)IaB-Wg z8L7?^d*t(-C#RMI+iwXB_-GB3x1Hw)C=^kvXr~#b1jKj$Ice3XZ#z$3*QZGYnEzdpV8xe?n$ zpuN)g@)qSyCQ`iu;V+F*<&iZr=D{U3Gb{8G{SB3DpX45!jNGKS0<&|0fKJgX7tag8 z?^II|ew9|Sikpu`Q0jD)WKe7vKe^+l;T3tf3S*|AHR77(FW95ir>IQ{lcJNaXu4$d zh2$V+6B2i&l%NORs2^;M3Kz(o6WaG)a#U0Z?)z?qc+3P1b-WDO&ak%|W?fez@8Gl7 ze{4rOpZQntk@$H40RI+dpX(&25S;Gn^$dyONS5>WP@|Ex6m?TkkqP`I33jQP>;dKk zmmwfNqi4|}=FPF2-O1;}Xt__Tg>5r)YHa*w z?%I0$Nw$N|_b00N+~d~*8A6h`v_rSF8zwzF0-iX9e78WMJx2jYmnp}q4_6g65sp-{ofViF;UOFX{C?WKmyUQ{gm+tLa+^KFK2%{WWh?L}U4S&(!I> z^G%m?k#QP=9m)YJJNKLj*-vBJJSP{K5zh@SP@8^HVBtb*=XxPG4$heX-$0Evd%z#w zGHb)Cor_^`t`Dx2<*~|}pu2t^c{~fh-!jk1kKWKs>+xRfT?mf%4v~wOG%@dbN>aw9 z;J_*?vkjPpjuFt}gdCs0JxG3ub@eU4@w}Xho@Jep7W$hFRx2cAG8aOBk!550!}8jX7-O^cH}s#s6IJ3EOjGy2O(`5kb(6hErhTpJRPkf}T$tLy#%H*_{KeWc zDh#5nq&z>pGL64HFnuC%20~R1z;oT0h4lxf4^1N~5zAz+fXfT{-1=N@;Ie)r&k@LH zz$dha;H&*yig*)@T;}P_nX+{#EG|bpDXX2?0zU5W+!1!szFspE-$nb5THt*wRY5w4 zS*lYk2MYluC|7p`-th>yNXXw5y`Dh{H;I{7?~4o9IBIAcK=Dt=ePiI@3Z~zVt_XG> zK&76tL{B`sL4H+q$-WZFPP@#e=e-I^Rz>ProY718C5}~IfN6R_db2yT#@0G)5(91! zc-_3dT1!Y$?luzBq7Zug><%81chWrk8jfN!2(X5^cvne+)RD?<1S}lYd@e?5t#+V; zn|&fPeek3!mHOsx8G)wpKrJI&({imSe(MS#_QR$}T`l64orNm@3g~bubpsnmworCE zs8wog7PF~dvEh0LfiX%f z+AUx@%6iCaS4K7Ci$vTwM3npkKDic@){+hM>6P{b4 zEvQDNH)%Em8KUqP6u7Nv{m4xO&gED`vI>}9<=v6`DE$2F)>im8Unf;BW9=RmIG?QF zJUq6-NM6pCA~5$lohdz~7B)hLskEL;@%=jsnLnmcWRPzjUkv}mPpD;(o(6BEM>O_O zBP$9csD-=sn-{83|RLAnE*Gh^?CC3~72 z(eEu+SZ&M*sbTm|{UO(X-CD-zgi$aXFC#efX+*QVD*fTgtpnrUcJ?B6*k@GLJMNd} zZN$XNt30oiE@MM@5a;$8htgm+`!_#_1p4N0BSJB0UxD8PJLIC!f-mNv zDDw_@S%f0*O?H&4ZCrVCgie#iJ%x6KKW_40`>>XGe zu!YxzjYz8b8|I(xuDQrCcRK^7({M5A z>C;b!P;E^UKFG6r9OHgx;hz2Fe@$}5$}sN!Jk^^Y)_B$H&G6mR4|6ShAV# zp>G=Oi}ydWzx5EJU`FNv+@wQno1k91pvgKmk<`JD@MibmO+a2fHPfKc!wLdC;S8Rr1UmPXLT3$P4LR52nt;ICPfmJ#I$|^EAF;XeS2RMBgEa_x-ANq|&x{&E? 
[GIT binary patch payload (base85-encoded binary file data) omitted]
zuQ*N{FCsSON4Vtk$0uuJk79#YOeZtAv=OFn1VP@>O`SiJ7PGxR;NS2yq}iP-I)a>b z2BR(OHP-kj2Oq?zWm2j+8e)VAp9h+-Pcu1FQ*p0CG(LIBLg@F(5wn(p!S z%~087yOyw<^PNcM+4VBew=G~8{OV`}TJL-KS--M(czYb6@IZS$a=A4y?1=_hGTDDu zYivXg9sV6o*Q`Fq7-hb8W0J$s?#dY?s_8QENfn!ojc_p7mu-?O3mLGgGUTWx4Q;?W zNptiwV(lI|WY7eMW5;FtvV8KVn%oex)6lkrwr*A)<1~?x4>$CYyRhzWu*{;p+0cXH z(6K%!lNtfEP8ZJU!qciSD%%K4fEUX3hPTVkU_Ug43H-EDXGe7ONdi1R3cNBo^8u#X zm(fAh)vkm`0Y7?QunM4$ydKsE5_?hwDiAgjkloU|bJxQm2H4xYZ2;;1;0fx?7V5QH zRd_Rvm31*Lk2?AWJh*&pqIP(-jcMenmY&YVIW~&8B(XvExJpuR0dNH)YJl18J$NJp z@#tMJW_jS*L7mjjzA702WC#s0GUYWDGVx0Yl@Gz&y4{fbNgQ+G+)XJ_k?#^yt zMkH%n3~Rnmd*DL6XClqnP;RR)VC<9EcV#0kwxdt)PqknSyDdQW^{!J5wQnonV{la- zJvWSG{Ls$jeed6Tx^UqNiAQmuTU~aAZMvxFVEd?u9;zL`>Oa?L-{tqgL3By>)OyXK zI=+20Sq|~yi`Njx=YU&fHBu*eiIO&-^a{#Mgv76E&Y6E@<^5!UuC14G92V537!+DPK-L`5Ayf{7K%v zpVCBkE}+s1+;PeDZq_^T5nmgTO_5{ZVOnEEv(uk6Huk~RT9x6KJsc(budc(%O!q(@ z4y^rZ$Nes$)LwMjdvFrQ7xD-dl%OJ^!sudeyXg`;ET!p#iwhQF?W0su)e$>n+yd)g zd7|}#*G0QU`ESA3KAqU#Dq{pW067t58H2}?I=o#1ub8k0J7mVX7I^3%1EAFF3TwB* zxntL{01t^a5F0rS;$(oljBASAssRd#bC6c7FU&8|&q0_qR#$eehSbMGtKSW~6S0{z z)O1UfMMM#;T{|61#EsbsR2*;Q56yNs10?6_-ZBsFA&j1FMq6L zI*dQrrCg3#@|4%Xp2ce=BE;}3&P)3~!P|4*SNfD#d?K%TcASY<(s{HWXntNQ74CN3 zl>I*JwbzXZ`t6uJ6ybh&*j|GLiBbt%(LD!~%^gj&% zu@As-7LCBslkw=p-Ej%zKAJMOF64YBa}wHU z&FZ0LMTeag^71=MovAPKP1w6*=4l92=vPZjpxA~5UR6juQi`+}>Q$?t(=>@=JcvPM zyRk4+*aOJrP-!%Rm5_WOBjafOkJ7V*~9utc7XiPV(>F$!;{;zV`rS{BW zhhI2r#@$e+6UoN<9%_5rL-i;vGj8<`6k@l5BTMYs>liJS`Y6)+{L{5gTL6FD&bM>1 z+a$H zP#@7B*d5qV${o%6-i|}L3j3PC>V@J*6c0xJINW}Fp!^!?_Tf3i=lBBNdBl&g8y zomeIM)39ITvGuu8s2Gwj8ym{uT}rX5`-#_26|OF%v`Z8)jY>uxIepQP@>BESLp(_(P`L=|#0{7wqR z?}-l~@yW!j)C_2na&qSh`eZRjO@P+z-9o~T%)V$`d?>%u2=WJTD7R6-Nh)(BDA!&` zHQKxNQ~x-p(MHxfM$6o${Y*876mr+E!3C=XL+#rw7b0!DLbmD*Iw14ZFukxV#W zYNXoGtLqhDMTTEpYR)9oA4GM^6W`Znd%F;z1cWFKI6hT+?Z`WTjPGY3p_0 z-~5p5obkI$B|69~<43BE({pzb4|<2%lw*TX%)PO#`nR;e%V0tIq8N~@0KcZc~mW{o0Xj;2W0&{dNfu^@~PWNCxz7iDX8O5E!9~IEy>Ka+iKVu>pNf=EayVQ z8D-$yF|Ad}A1*G(2Jb21wUi%IzGO}gYxJ{`LXFA6LK;))XxPVW83c#x_dyrV^%6L8 z-93R`v&E)WSz!efwW8YbxPSgCh%PVmT06}xx~angsF(F(a%kQ3dotXik8j8NQ{v>? 
zmtJv4fv{JVQ&KE}sz|={kSaQ7=EFd?s9JwY5Gu7Z(8a?;QJjbN{ONzV8y=m+32lD`RqYa;3|(Fs}m{iN>sTW3z!w+ZFRLCFSj{SlLA~b zkhM2Y1m_rPTWPh$Gta*V#e67|y^Ki>RrZE=o@jiB#JdR%AH+XyGHORsA;BhWc+zYg zC>xlhXmD7^I`v5J1C5@; zAuCidayMRe*Ji6><$L6)p-ORVU1<>Uk%=*n1Dn8qo=a1FMYRv%|GbYH_)*+l^DHxS z@csZe_tYNo#2z);cqL#pP9-B1+Z(8O_dz^9(0Am#XI>R2@tgYy`t^W3bUmA3)hSlI(g5C~`5hC<; z6ssAk_r-ad>Fra%V?cF8Zuugo=Fd{VUZn%r!3xkm4N1cSZcI_lexPO_jt4BEFLRub zH*dMZ*HpBHPLgXT^+&r7EfM+ZZh}2l|rb?zKf`=X1nibn`t`!#KwYp`y21iD#|WXb&z&+ zha(kw7bj3Vr+3C8TIAa)M~`qj?0!OoHTp#|UBgp>c}7qLYbfjs6x&nB)JyeJ7hiIS zIRv!7fn##Lflb$v)wl;)QQ8*o0m8daUou2Ut#LutZZHI$9kZSViyx#J@b29q&GfW4a#G#`p zO+rO%7-3PH7*1hiOGG~^k^C2t3J^bR+jr~_$K9|-G-a(ve4t-{47EA;EbasiuK&5f zyG)QyD)3BM`d*ybO6T`J0xQaRSQv?~3o=Gem=_?8L-5MpGl@MXyA3$atS@_wG1MAn zLq^@)K7rlSrNmb!0&tBXeWGo*x7tVW=H+${VyfV>`X3x5cs|MX>imPoba-(N4!5dc zgCd@N!zDi*YkCz1yu7DAi2^SNMDeEUi?x;lhXR0giRj;dwd-5pcKTm+A2y!V+$z?X1;)X6M^f8`?@tPL8}Kzl7xIwEzT`44o7O~C9fHJQKck>lc- zYFWITxrtpRp?%J>16W(Nv|^aD*!8{D+>-I`IxrOCmSSFZ7dCNaBM`GWL*WENKHwWf zna}MlSuS}m2!BUoaT?-0SK6*|fF^J-I~>ts~tR zExwT{^*Erc#WKEj-`|}RnK|H7c`x}~Akn1yYhkBzL-pakhL`znLqr(Szw(r7yWi0e z8^&r3dF8*Of%`+S00TCzlFo+G_m^?%@l^8G_pzjMDbeF9>ACsp8M;TMh zeBi@!wn>T0Cc;MoJ}lILkFScCVM;>E7~GrDr#;i69=eyc5R3r`QS4QsZ0o9$K4a*Qn=NTMBD$;X@U(cR6RsOz06%sU`@2 zvCbTS%Wnhe%Z`KJ#5V`u8=rkja57D@HT!Y5<(<7Zwf;`k<|HQIZms62WYMJDqMx1$ zxO|W9w0|vufjG*ApO;qwEEBTdGP|DlJR4}L&WE|FQIh}NK6l}T!N#lk(QCY8xK}3L zOtuO(<6NqnVf+thD{~a5&zXFgH`XlCy}))~!&EdS4EORYzS{zCn*_n0Mef3a!@g}a zDMug(23%aTfBzdZ`0r(Ry|rtSe(+s7=cM!K-MTr5!Ii|~Xrs9A_%jUkvl z(%*rsXoH+9G2md)wT?6-FT#s`NBpz{cTM**B>X_~cFW$3RvnAIjwPx7x$nu)8-atT z{H%4ywg}PM4G75JeDp>m@To9&TWIQ+*}wZ^F@#_)PX+mEMkA6C!e%pVX)76KA(D5X zIXF{udHSq9hOkYj9`li%{;^U4yZ;8->r>kO!y)`h2O^vGmoQp!ltrMi_4wl2kGU4I(_*=c zYnmseUNiaxWw5{~b-gGM6j?@m-5OK#3*hvR;$!upUgO}~8#NM-2tsK2IN>$)y^fGh zz7%H8#n&)}?mlFH?^YohINb{T43KqtQgGtK9zS%rQ(curaM@fvA;J`FPggKIF?zo zQXI14|h&VQdb@ObF&(*(2a3-s@AZH5LnOEtS4-%LkE`Nm7F-wB;WbJns^E`=l& z;R?!AYf+utU*j1B`g+8uKkV=ZFM1!M7L*5|DlQLHLEg>_lbQhwX9e4^ivA%1n_5YE`JanDks0+! 
zP>cS)q${R{3DZm527ZzH3yQnGIg(fGro93gDbk{*$mJhmE=F>Gp{|GP2H)7Lp62mw zyhgk>`l6}Cu6^Z2SjI)YEm{El@R9IBb$7peHXUh`3w#BnI7oa2`zvxlnT2tngn@sE zFg0PBlS(c+>xY1B-l>|HxEjwCxyfH)OWR})T?fx1ae(#H97EWZps-qPzEw4QeBwHw zPB@teI&D?2MTAdj_(WQ*X8*lY6~JY7WF z#z!CU9qx?ZR-b$o|1IDBqxSgyQ#$!4;JobfES-x7FSxOM3nD2xcOotwDefhqtD1Yy zmJqwI{vr~1kL)y7!Y$1XqK7|8tpxvxMjMUEc8Yka3}@%ZhJFXi%dn9ow!J?Rnj*g5 zunL#joU=$i#GG9vjoPqXKKLxD!dPFeG-p8wIt#zGUfI3cr z{5tH)%K8iKmBD1C&QzOe-#M=#6SK7w4D&B0%rH9pFs_?@^wYbA2t*3>JBmJhg}&j6 zJI2FhbdHBg_Bdl(cpNFG5vmI5(VAW*)f}Cg^>r&2vFyDhJEC86QFK%hO!XU5l4*WS z{>PWqa4JMaDR+nY)9yL-oW#0u&rx}>+BK?>ltXCS1PjEg(p|>RlTT~a2YWQEce;2J zU+*Y>G_3mfN%voBcy!&X`v^@`3EcOqMgvdUu;rgVqijYEA~curEW;8~S=f^9^?84= z?zTr92jyRQzxN#jd!1m=XUhVt@xVp_Te{1BXV6acaSLnpt zOV|r{i95EY@Yjd$Tbc!T#=k-Xp}W!uJa4b+91c4k0;uRf$WH4E+L1ikL@f-9%B(<> zp8}iL>pBXAG5)t*fbw|5?7sBovR6rni0Izym}A0B?{oOW~&kP(gX5c+z&3z@o>I9AG>* zKq8dlfQr$$_tKULdjy_)9Wh5=i!@`a7U6ppI^kN0U^blboX6BW#=*F~D-qWyjC+dH zsy|^%-t(9v(pe^K5GQiILCHK3rsVo)Sjx%sg8Bs7Df>9s&0s&ce`o*U6ZH?&fy#N@ zQ+3U0f=tfdJd!dOK`V(9IYvbMqS7>fNk~ov4agxp&5pz}-_5S2Q`am!2aSw_!`|i& z>N=wvZ_?~qYz8=@R0NhG!J%Uc#?nOTfKxz&*nc<*W&j%>Fz-j+30exQ z5!8DYNsy_ve>t6b&+N!m>CHuR5gTd7vk4(%HCkE4K+x0!ZSbk3p?{njJr~=Jm}?(q z{~{hd$znCzTdb^(e^mx<^$w9hWH#H@6ul=WqP5~Rbf8DR-x>b_>FrsH52dtdjr-wM z&c~}nL%lx6J~3(W{@P9W2je*_cSZCk+Q?{t0*h=eU$f!63TqjGwt&o;gD=@~k5%HH zlDsRn{;j4*EUYeDwZps}g)n3Z_;J%^3^%UFd%p;W?lul zJn-*FaPF-gtLO9#L-bmtVSd*0M$Vz!*LnxIk8lZg2IZpVeUMib$zqij(@`m$M zy^|NqVPE#cUvC<&Qe}ao<)5$01dUlQbKR3#YWr2*m6}r1Ia_V#XU+LF zSF>MfyxrCK-5H_cpWMDqKW@5x*_nw5W2?aSDDDA>OW`*YVZz7ip*Ub9pA=&X`aAYI z`o9a(15JO6QWTUdM%fQRcc29nCB(fabktvLG@o=C3afrHAg)I5w3DeFdf0^K`zy1{ z>#uYS4!r>WfXTd|pA4)bh3D!;{0BO?G<~}anXYxPJ2XnDa+omt7a03=d;bcknRp5P zMpxkSN)Ks^|3Q)(^h)1{Ce~UgJz#PTEWokG;1T)ySQ|d8iJi1)e&5%gni4N_eZ#bC zcV?ctJXqhk)AKk>aSF!FAZ-i*3QD9Hc}QuUIfC%fs!0Q&qfq5$H{NLn+h~wW_XC&RyKQA;dXN6!CLs3V2bOdO}C-IygvOIwEB*4?I1M1ZQP#GM+;?;rV>f#gJWH8 zbvk~VeI=~&LL2V#-eljK1JxfZkC)N3xL{;_46@ELk)@{-&{*;|Xuz)aHvI4u&Pm z$Gx%7TC1VAk{QIA&~mI{Pm}IVk<3cYl}{ncA#*|L_4z6?hBqn?stQ(L64k8NueILlvyEKPC zVt|1CLp`xVRq{KB8lAYG4$@^SyNyb>Dan|jhze{z1nS}dUOQb4v5Qk>hqvn)daJjG ze_$>}cE;(4j_jh(phstUn%w78)Z6mKzBwx73=+L2yOe}Yu$kDtR~zprm{`Rs93{40 zOo^RY0yfrjBN;$Vb1B+)X@{YCkwV#+-|grSWiCd0;;46nIgOa3J%DGN^g#Xo8@1>=5p!PNd$l_DXvwhq zu#-=z^SQ|Fj^Y>R58H-!%{`xu*tBlgN5rjyl}>&XfbMtCf;8+k&W+d5vTC1^HOf^g zTMrGo3c%4gX^6eNSm2u#s^mg$ET-l)KYF<-Qd=0kJg4f$O@p1J;;hc3?)%6HBloc* zI9s-}e0$9PhE#=0d z#;L=m7fnLiRYEb^{uEv9S)VeZW$AWk*Fwj>JNpvVNU@yJ;T@za!-`$IH`Ut>Cb# zi=Njhe}pSuU7X+FEf4Zaoa8-RwrU7`Ts4s^ip&Jr z)7i&|L>Sw7?0zR*bt(STEus3R-_kU&)X+pc4E_o?Aa@(qkdTg3A zAzzB(S3p9ubH|+yY*x@#AMr55d6Avxlf*RxHC|7Fu#Cjig5iLuh=>xopIzay)?VoO z)ew%hu4Q!=4Tm3d7@%k+4QB%wm?E(J3zr_HS^7$}39Y^J&Fiv~YRk1Ja@n ztwdov^GJCUd$M!NZ-EI|!^#gH|8S1GSy=Hs#p359h@S8irgz?b6)UeEkl$n%7eX?d zb@d7(4)4mR*lEtYLgswm1Z2mkht>|2H8MggL7B5ngi@^hd|+Oa-Cgt|Ex0AQ7AD>@ zoOu472Y(ToQ^|mK?{L5M2`XwGZ3lGBv|>@=b@|=eW2H`0+?6j4q~K;;FSbxOcI73g z=~($TK?jtyLIiMFTEQvvuh_S}ompv)9^TI$nZd8o+gSPWdFGWCJL6I22TEB;-*`R3`s6}r^h&=XzU{%S9=`@ha- zQqzly*d?POlh)0Uc73JklE2Wzdp}_bL3XpoI;$Y#mk?*ctHH|XU#)efOE;1hTRj`>GRU!DcJOoqsHb~4| zc{iYp!&SK=m(+zQp z8sB-tB@$b13FdDrjwAF@8K5@!g%@GJ5jR=Vn@^43jG{?zuj(19ow}y@#^1_7Dr?bH z$gTkC{=TAu1MofdG(4;dJjSsl7BheQjUz09JOLdM6c=$y6sVQkxtzNwJS=?1`MYt1^N)&oGtmIUp2j5-eo6Yt_wT+qG~h>86DSS7vVe zsJ>bD&fe=?lM9Cj)`xXANZjA~)0T)74V2MFp9W&oqFn$efM^eV2@$tnqcH|tubl2B zyH+?FwCW)vRh^NM&c`HoB!&prKvyA$5;Le{Zf&aXHS8z9s% zs``lRaimr(h#ZL%cr5l;s9U1y<|hWLCat~O;S;D3o7u1N%kxs*bQ9Yb@hm7H?y1e+ zWXiFG&0jUWW{&}@f^=^QjR-hKVCWjR>z?k#E$a-O#1T6>1} z?mK@|Hc5VtdPJp=L|W 
zg}5kbP_W%W=I2|%oAy3jqo%ukJJ^YO0HPj??8e47^ilem?FjXWq5QwzA&N6V(i$+K z$9=HmkBFlevtnA64$KeSi-v}$H@6s*`^sr9$vv7tURS6dgD&r&2zqQWt~llvDutB>d2 zf#0DMTVxr9z2X%*_0UF4%bkXw1G&5#_AQtI{jAT`20og8wyE?rb!ihAIRXb&+&%%oQ``!lgcD^s_{{QbXL?C+!z3pzVkf$yUBnyP35#ilV0Si=KaE4Tyx%q~aHRgo=nFdZ7M+2#(-{-27mj_vYNTPkOZu0f6({ba^ zLU*JAD+#F@Eug@((M{i8y$klnXT?8Wn~8PO!rIyvrCY- z411vS7`fhp-XY-JVGi~SC}Z+yShGcEUCa%NSuWqhsQ<;5B0ChAF|xnOYU``P3X&j! zn5D2uqFC*#Pl_vNv=1aRi<@VeCx4or7!9hzqEkoM{b-F0tivnTWshv0pm|0k=an{~ zoBsELs?bz^TjJ{^)5i$AttR#L1u%Sif!~(~S!^TPE*v6!}h0sH6#G9Ro>#Cjph^fYI6yVNiOgwX>q$rRJoq40#6FP_(3F^>4A};d1*Xz`L+d~6lLp9kj-D_o3U(~S-v;laP-m!udl<4~WxkoR!aL#3 zqE+l2Hh(&o34(6D%t>i~NajK8` zle4_u1E+4oAc|SQCc9$lk4CkxM&p*T^~dr>0zr=#N^@Rtl{8!bgC-+dHph>4N`?f^ zQlh;#@1Gf#epA#lO(hqs%P|gxA0Mdsk8}4NzA3Y@%zqy4W&U*5O+rysrf?KJ`5{f0aXQ6cBGhH{liM2Ka5i}mCf#nC;mnpsI0#K!#qNbk z$W`F>*3S__aSgZbgYvjtvx*dUdcU9mYWj z{x9z0J(Ws>_|y}S2^9CA)^x}j#XP-`H2C|^y*|sX$-yGZzAaoK$m=?U|J^BihBo;v zce0leByx*_R7#h~!9fQssl|Sduj~I<3w{82HylNlltP9JJQr86FUdwQ_p^w&aUrJ} zy7S;T5WIXm5AK=tPxuz4$pURr|>a;lcLA{i%pzqbdS=x8a*<4rPk1X=j(pUL-z(PiqY*wke+A`JCFyFs;FI&^id zF;KeVJd0Omjlw1bmNspB%aPi&FC8wJ&HXRmOV>-k4y;oYFjgr=?`dEJ9F@`UTR2}r zs{{#IvBYa`@qgEr2y1?fMFGVzp}%vBQ~0n3-75AWGflU#QS)lVTz8SNGlNt2>1)SM z0hcV*>98$h7EfkY=IPs#RDWs&AreS9E~63`{a{B+;6rhKLSH-GPMO_(zya4zX^Ggo zmC$23G|;h~GRD z@JnS!PwYIK=nv9T011^X`1p6@55uv)8UwV5|(c~GF z`2wxofW;IwHcaU_omBE1Lo5x14%54RxQ4eMumumiaC*m?^&<~a(Pzuk)oSz8zhB}0 zt8c)Dt4&_(B~q;}TEl?`O*m5by42gKPwYnD=`LQT7?M5 zc~1ZX=g$D3A1KKnRpD8`H2H;9xQ?BwV8d_Nk?@XR+ zaLTWZFV7?^m8FHSp62bG1U4i0(bQ@g=F@&)Ji(CE-s(;_RJbbE5=o zxBnb-B=Ogu>UEMO1nHN2QA6H8-&;5-Wt|uMS;-;&9KP{WxFod>(u9D*e~o6$^N^pw z#l`~WymICRd#G-2tCkxF;~VKH&&aJRvJO>o&c!Mxi=1OOM3iubE^Y19gQljiiZ`ug zO>+Ej{zPVBsHr~#V?WS->Bs83h#4-sL!Z)3WZI5;WV3FEBwEI$z$f*cl9uuSRqB46 zm{#gJT_{}-!%=<<^HSsJ&E}S$$176n-M|e)d9|{&`;7n0TZ>nNG8o0SLTUp%hx=UZ zY^t5J6OQ&Bc5jV}H+z5J0Y7T)wT)S~xj;+8w&ot%lu6w9G9mScdfV5dehQ@a7_~BE zru#FZ*k;ytLRS?mR^c9aI$h7-Ci1H@@UoVB&ZxuR*m_NNCz$1&k!5@q`Udf=Q!0`1 z7i;=KM(uchvXakmX(=;wa8`9%ES#h0@n~zz14Wrb?&;80^M7x@3z2hObV>0!4g$y3 zg9Vu-WPe$i(9frf262W7b9SNP+v}eo5SH@*^o{~4;u#X-pqC`b8!%L3<(*R-2!fiq@ zD=Un|4#uk_a7bh&6}S6m9~R2iKW*}gt&%2`E07sxwTSNT%hEEqudTs0GR=@#Vn^*D zWp+da=eoUSlkP@E4PkkMW2v{K)&DqR!&9}UwR9N9ooXE3@OvQ?AB>*}!e&f1)ZP4d z-#+VpSYRZ(pf2j><{tR$Vfn9i>(j~`pN3bIT@>bAJW#J!)OE62nGb^sj~+Uv2fYsf zDer$#m1IiE9I(`;HGNkfzA;fmZ5f@U6{io7&f9EkJUN>q?-uIXzP7=Z$|GlgI?KvV z)4y83WovOdO!uRN>)pR?&W^A3<85I>;{)KEk-yyz9DbZFP1K9?ZTz{;vTVo7c_Ooq zy<=~{UU*hmJ&NLFar~x)So*x(a`il+>$1$T)`;2*ruNdi>Fs#zC|$fMIs0~Bw@pGM<*#dG`;@qMWO$kwU7Y{ftSDy^6{7* z&k1UDsJ8ZHPttpoNC8G^em{#{zF{jY*SK%lTMd_aw{snqn{J@+1 zDHGg9_sM1=19|RAIr4V$=rt$fc5lYb#u$zH0AQ#(^B8i!Pr;hWi};S=#W(muq_;UC zqgm285tn-6P&A(q8Au=0V%{bp|F$3Sg818wd#NTdTrn>Y@Qb5`^W1(J83US+k`Q1! 
z`p}&_hiDW2*3^TukgI2Vf4u7()R0J5V+Ag936?R9pkOmoJ)8MuFKP_JVL={Wgp38< zoL}vAo3owd@?QWfNy;UXtre$o97NrXAq&V)akDQ(6T?}T8 z4LoTRAPw?6XK6=>4Yk-`dgk9_bo9v8fB%viZL0v+sBgidpVa=|OL!8_E(K_O_D}5_ zCIOFA>h4e)BZd`|ffxlHRb&a(@A9o!wc_wco4Y;yHa99|-v%b%HA=zAv`SdFad^T* z<#QC6319p-?(DIA0opO`{d1=9fX)gkCim0mcfvQdGF-Iy_p7ZpG*~qfj5Xa}=6_7q zEFt6sY?#6x%osH3bm7jaLXUSk8nrSLuJry_2+WDNv74NsKH(a+I2}!AZg$iA#4Q_f zU}!_kV-JZ)`@z1X{|K!2qkQ~%YR?%gbMNEbiKM`0a!f4N_c@=7uF##-F=IU2y739L z7LZAft}70^?qjcUm!uICls4GH2UV}*Lte2NekA0U@-O1RA?QBsAkf+Gy?t?}(mf5~ z-$Xc*!(8uaj&G%jgZETZz$RGWFq}X0H}O~qQ&GIg*&rOb^mCUPf7n3d>fxRU?40BB z{DzhtW#}#coYDu~rVB4h-TIint**62G5~f z0}SLm(eHeAW;Fs(c=qTj7M3y;)(wp1KvQ zpgYLd55hzIMn1XlQ*rUyZ^C#(XkBBZSFc?vW~b@f%vTQ$1j_$kMHi9 z-+1ox&C9~MlMnguwS!5Nrfco}}GYq1^UmqZ%TP z7KOr!V0z66qI+l?^&RSc;0;4GrMO-;^Uq{o_?gL^iu~iw=7r2y-v{sE0YjJ>b>G;j zk~1pHFBDI*k{3EebFqf{1J6=tUcXeh>{xr&5Hj4i4;zjX%+rPLCQ6@vX(Mv8*o>Tp zexl^juA%HTln0J^`}j!n9LV3_HGMxlIZHGcuVa@Pt zbWEng8fo}@%LA2!6q*1+;4kr);mPAHVJIMvITH5=t!Yb6`!nK&aN~jDKk4weE+tK; zy=6&pkGyvS`pYlgB$MD0KVtwh8uc6Y547qYofP+vcYPlCG~5(i@~3VUL9}mr`fhgo zSLM_BKJ$I1{>J2yzugjr@C&rMdIx*M*zA8RhJ}dMK#Aaoy|RZBAyCYC?*nb5d>3h4 z9OLBbU;FFfXrJ1oUP5&f!nsZ1Q-Rcy+xA6cb7!r7i<(zj{qP1IZn@*P8LEf(_7Jt| z6FP;ZnZe(AGdjKtj zJ_v8vP~U!GJZEzIW!S{-%R1;(u5> zH+W>kuiHegy`HyGF-n;glIRmssdqm_%OBhRV;82fyI^#01%DM87U>88WY1bBdaq6Z zd1ykn%q_NGp4$Q+RFLW_B_eu=&8xr}hH$}Y{)D>hxsWVnDCH%QoG>LOL0&^2re0zx zKt%uKojYzkN!k+aBCXxTa_2GOt&j0fb%A%V7oI&({;UHQgER1YNw`XD_qL;LR+lf& zbPX!|Ysz;Ba6Zw$x|eXsl4OO8^Sv~n`ZiXC&%KM(8YyX>PR=+J@GK1LHT$|#iu5hd zwNKjAIE|ZjNPMJ49MvHQHdQDZoo-} z>A%%3e6Jn?F5}Fj%=0VopZRU#8Ln0CP?_6$MS(Zh=g1Y2Qoun8_!R0isE4TnUs>Wk z*XJ*F_-Z5ITQ5|8T~ptX_Kb!z@x?AE719SV^Q4L+P0)s84cs;9M)W zEXB__obFD-4S68yeP;nan6umFTPne~(Eo>HIja2Sq%3PWMI-ekZMJV%O3V-S$nFwZ z@BG+j!I$pEZ{~0$pBaiO z-h^XX-x)36W$TaQ#vru+R(${L+s<>rHF_WWJ3H=t|Ef5tn%5HJIm_eXIlJ&$$HU&M z<2U_>6+cV-?(=GN(O%+rc?`b;#P4kyztc{E->F}L-{fC4ejlNJ7x;4aYmB*c2SIb? 
z`M($EUj=X*{9`%K6!Oqw+Bt(f8$@xqpF7Ap@Zf369g>#?Js6<$T#bgKhI?-j=V*U8 zUMlnQ36WJZ{x$lf30c=Pt8Z4#y*QWHw)CMC;u5ya7+d>{a;5bgSE}d-_7wDE)IrdX zB2~lZ<2PdTBkN4*hw1S3>jb_QDPNXW?5&IRqv-VXW4)vwSt0N7=%QyN{lGg|2gx@b ziOO}8{&yn(5^>K^xpptuxGu7M25|lQ7S;FHp)W^X)$|GU;^5nH?_}I$G%bKsmoFRO?Ldl zc(IZZ zAJunXRdPYutN)N6)rvFqqe?z=D|_&tqMy|f?f>?_;YU^bn!SHdCDaGE^m#>H2<<)f zw9S&4U*M-9ioXJ)6W z^WDD$vN>dn{Jyh0=0IM!5ARSe;@G|Palrfbc}H0LCi8b>E1Mqw|K}^px2x(^S-V-% zJ-3d_VZ4d;RXT3W$Mr+k81A>U>pdB>>sfy|m0jj@$`apA&ZY()*SLr}o~s_=DHYC5{KCcFs2ByJ%invP3QxzA zCIC;PIQQr$OE{q3hVx=>fW4R_^Yl%K^OK#6d&6-3*SPis-hT;rLOS3vCP+WorL3C@ zT5@hW?qmHQ$8)Lw(`?Oyy^GYx;AZ35$7whcsUnt7=Gq+6~P1>z#k`p-nYv~i~ z2%XFQ?Z>*=CB;&g23oYfqyHG{XY#-=E=C)ciaGIFrii$!t>BGSN|)jWuXrQmc)U~N zpCx3Da`KDhnOcu4J)ukUiliH1%`3i;s(3}Ac}07DZGO>8;uon?X1x}+&v<`5o-LM+ zUvtYCn7mkxHoyd7FT6i(4Drkbyz%r)A1%^LT3!32qrPF=hn-3`;5+1{nP=J!24@cZH9{d)+s z@ox0beB%$PeyFpIk7a5*5Q}BvUU%u!W~towm4AjLcQ`t+XlyN>kk`&yY6)9>2* z${aD=Q|@j29zKD7!eYX)kHKeve=9%LUR#dXUUMzu_qrK-TF@7G+ynlfe>(iX^3?c0 zEDrydSBuCnoUO~Nu@`6Q7`d~6S2Gl?b}UzQr|EO5c}O4WeO(34h`CnK01SCNfF;lZ z5B5dEQkB|c%6eGY#*gFZHFxEhohNo)P*Pi3i z@<4_3*X!bQtxuwjGS5}KeW?dn415?Z|0(&PWcvJhGy$Kwt;x}7BkMPMK?9l7aDrPj zJc)Prl85?}TJP|-sHC?B_KUH1)RA~DQSk#y0kdC4+f5CRJ?*cz zbq_e<-WM&uyi3oT5%NMVzVq*pi+?Xb9pEbtGm@sNvue|ow2bFEinEJ71x>+?@2dj7M|qKn=~KWPvDFu1JuhQ)KY8B6w0 zo6?y=eLmXD(C{YnNu-Ge{;$My1+N=R$aAfidW#GRZ?w5G4kzmeZYd)Or}s)5j|dBu zLis%rkD9Sx9B=pnQNG>dlrr3TX#2(V4U20ZGo;+dwP~ML*jb7B?0iwlY|Tbyhb^~J zmKW28NEx1cPahKM%50m(~cdLxWc+j=Y z=2!eTYP`iaZK5{b4*o0-Z%h9Pcw2hrc-yS)oIT-fv*?bu-kc=7y(JEBFYf_wFYk`G z3FpzcxbtYL!rcpxG4?>6N8;fLLHeQcJYsH1`k%_ZBWz*+ud?x0(qOa0n=WLYuL}D- znu>9a)3`S^?mW6Y32*mw$6J$msxZzEK$p)g=;8d>5S8b3Vchv4z~_y_`O%-}hmJq% zdxG(`-Q(xVP7^=p$6B8%c5Xsk>|7A^C=ak11dVcOKEVY(;VI@@AY7JexTJr!wsTv6 z%ey~~o#6P%S-m}hdztHDjU%-A%P28_!Tw1?B+ITU> z^kU!cu-EoncittOu6UOZG5*RvqG34RA?-}*1)AwsG}FI5Drx3k(C`=iP9N{{?53_P zI&E0zUrxdIoPP#<4}HbYh_Ag=M1Fz!FTXUAucdu2;85|N@v)v^@PJ&uQax9m%gk|H zfw_*;v7N`XzKe61i8&PX5)s}XgX_QUqwug6>ym=~}YpsC&ZYCF`uFY46T*1Tw} z-T&0E5|H1f^x5M7ljwVb(S5&Mo!w~#+vQl+Mq}ezt>?2xxA#Rf=Ig{(0|)`DSo7Dm+jsdUBsB)yIxf7ZPWK(jca$$mx=9qzU#QA2jmMxzT$Jw)N z-$lN|caNH^d@FMfZhb+<6v&)z?5{Q&o##%->6RV(!$zfJJ`*4AQ4Tf7uhP-WeCTMi{RP&0 zXTBAu@4vRFJd4uz^>`A@9PbV@@5T9?a^BRfd*Xb~ZNFZQzZd%1qWuk{)qS>{m?7kz z**4%3onL@?1t`OmysL6kHJ1rxJGs*#Z6{+e<{7;ePq_s2mH8H^vzBpoQ{2VP8)aZ97u}6^A)7RKZgUskv&wqF$ehqR z`#aWrf1YdfSmuTLs~9!Q%*n=LDO-#5lfm zG&P%!NOK>fsX4j;1qLKZuy$2RNwm3XAeYAg*Ixoeoq9M>?MHxpJkChQkr##+Te_YMYd2qj^OOBO`tgj`oW7Yg{_0d8d04sDkY_7( z{MgcLZ%6y*(Q@i_X`l7CvfqDGInM`(a>z0b{#F%>V2`Uvc&}p)@4CrWag`8*I8Hyt zxlE1|a&LwxUy5;h`iXKc#_7j63CD$Kzr^*zVw_=jnWFG_SpSe1P`#LKGyaUayzO&T zjc>=7(ekx3MC9fXkUdOA&zBmx9i$6x>Lt&nz9=@QKVY7m7wZFgt5wa$v49`yVa^Af za9-ds8~FcihZyyX^^i;Pn>tI|Mq%d;+t8DJVus37e-m(M4Eo@jw2j3+_u*PW_8Y18 zFMgdiVtwpAtP)l$_i@TlF^JELOVnP9ti-)!e7?sOUc3}*RH|w*C9F}ST%X*I61+#e zYi?y+aaPTA#);g1Z|CgJXe09;i;>(nA$_n&|MuY7Kr8XmDrPPL`jW|U2yq=_nz3&` zThDqLdhc1U&EMy{!D+jri{|po#GHr+e&7Rnjgoa@Byo!H|L=H@xOe@&Xe0f1SKoL; z5cHrSYGid3TqDW}pZ88sHlj?>-KDAGVB2_{OVcn$O*35M#OT0ArGIamr1bCJ?}$;g zEh_g8akC^><;P>LpGKTR^cACT@23utxon7|iTfU?0qw?_J0E*~2gaF+cduZ4<9C=3 zJTvr4Sl4F#O!;0OARK%NoHT$V*r(br-A%-cO3)AbH#WX~c2Mk_-r8_fNS{Zp&3D_P zk~AtSN$#&nKjEaeOrb7Y{b4c^l?O92d`MD_qi6=KT7(~d|CcJjwX{a zz16@wtat50R)psP_joU3f502o0x#d$8(p-S|A99i$P2ZYv#YP7&7S<>qs2hi7~Tty z0S;-H^Au}(Gb2WCxF<1MsOXCe&sC7V3>EYdk@TfV+6ui}g=@oNkEVnE47`iB0-)W^ zz;npri7_bI65+S`DbW30Jcsbzj%(l@UjGk9?n>nlXLjGY6Wa1RmW}%Mtw4W=Tt}CYNMgTXm%ggX2x&Uy-GhsGn=Rx)j0FJol%ecRUAHdUcz|-T{ z6F*?-3G7iFc?`f)0PwU7@I;yvsKHr-xhw}vJ+Vvqv_B5GdJcvmP0mZ 
zpbp`Q4l6QouIL=+RJFTmKch|wYw;Q25xiMpI(`EtYQdKkLf&Ww-5&?pV%%`iG%gq4 zWAW|6eT=DafRN`!A@0>;e1*7A7%IdwJo7<2Bi;B&jAz_1JO|h*#CJ1b2W>}cL96O= za1Fo7Yt{~NM~WU!2^RPrzQAh0T3fU+1?vhrd)+Zdj3=`Bq3<=bmWpURoh%G_XGNWk zM=ubUNFDoj-%h?i5XT9vWky#oa7ZoLdN-A z#i^4gz&x&7zR~cB&zURk>YzVuaIm)jBz$s%ZCB%L0Z$RYH~9*0I{1pdrLESI=t%ZW zS%h>u-baf5EjCZmHXwI2pR@T5@kp7hll!@h(XbMHtKLw1vz0tgn^3j?j@CMSuWe>6 zURCR=u^w{iYVc=(_j9neZU2GsX&!XX%Sp1{%iU!?n~v>yDOz5;(e#n;9l)6JB>GkG zGVV`O=d|RB>HuF)T9NOAUSH0fuaHarc3k+UJJkIf!xppjpYQ@i8e%-pmR8^sqV$E59Ymw_`1ujqDEA_2fE{M{GoY_nVn5 zJU5xg(f20i0{o{PpE%t-OV-ax;LZ2;l-GCG8N>>PiCxha%czpwS<2iyk zGh$~7?M}C1kHS9?qsVuYuZkL((*E*utRdyvhN@mtzH4mn<)iN6%8kq&#F%`@_@0a8 zz2R6N(sYb{MM?OZtoN~EbyaU)f%lufd|@5pI=RK{#oxlatJ-_Zb@))$$;b-*?YZcp z`u22RfgR@`zDVjJGKlLH?#Ox>`>6cQzQb$Rx+8nyt`+mzIGj;&*GBT%MefK#c`Z|3 zy9n1Bq)uLJz5;z+j_=&?5Z|9RUhJFEp6?qBI3gYtr-^}_Te~@U1ao=Fda{Oh>$Mx1 zTOg;)NBWNE`Jeo*%6IVr{s%B#(v{jOcVtlyGh~PU1A7-hU(TVdDedSm&w{*|)aSwHV>w_-n$_M(+UFVV<9afOOF85z zT%V_C;9u3sF3us0cl;fvhktKe{CBtqKlAwe zJ^7(??2NthhGlK>jtwo*a+RYsBXszA75fm+12mkVzBnsH8uh`h=%Oc2P;VSZla_!c zEwv)U(cWKik1|SW*SW;?20dxJ*!TH!^l6iNx_OV^sPuo;>bdy^;@iI4ai4NO{SW+D zAIdlRBBkS_A=q!MEA>mw!r5^F5H0p|>yqL3(`fm>VGVxCvjaR#|K~Mde%kz^LqjFt$w&+T12l#B-!*qJ+IIn9 zy0W*Tt&5+R@HsE>?9LAT`8joVGftH7-jy%!1@quX#qvC#S8@)Xcd6&sJTK4m&4elY zOuzFvHJ|NrKH1WaIO2I#Q!QT3&cqlw*3VywHnsv@Ch#l~j7QV>;s3VpmwKEEkG}2u zIsTKcBM(xC_G>{$1CxbsE$RDi5$Vgd8X!jccUzIw;Ca_nnZ5{px&SAjL-S?;Z@vv$ z-N2kfM^C?|#2fOukG`IWH`AZ*PIC@&-glqD*>UHYoiFk8+m5!aG&L{gZQ~xf>cJ0} z_VNkuTUSuui8e+l8krF)>{_>6(D`rsERJ9L-O#u5+rH&EBLmfE2Z?j!Nl6=uXgdUb zsoSCJ%HSMyKBsegod-H{2IrzTu6E}h>H&fCWrT06ciTCMxo*H?b)ySQRLs+S&e4zk zTo6^bEMZjQQg*0+v%+EGCh>77X@!G&cQG$h!t%F#HY~pWY;@6d&;ztLpJ#jkZBAEu zoa;((eQdhe^7~`aoN;}{!8WIgT|I*52xF9)cy4}yKEL(cnd4yPn<{_8gPWDVeKU9# z`tWBxtK!V58wP(AoIJpa%mUBvaeA-(8TKpk9CMm@wqGj6#pU|o_^*6c z*}%s2IR5O=j%Ssw^mM!d^8u1JxBWd)&aeD?qMScf=f5F5dQK$e=sceIY`cDz<&&`m^2>(mv0ZNqrnk?4z~IHkK7C(S7j$8_Gt!f;n90J*#ZGq%qWo zQckz`su25QX?Q=C4V*fn)8gL7G@rr!(mLr$qzBmBUFeUoo97>k!ACX5X6y1B-OQJywdU`|Ln(yb@#y$T<(inO@`_JKR3BOxTDtOzc z;q1P33OAD$})NA_IIwj-8?jGN*-^S$|NSi7!BRCJmmzjGWu z=Y}iB7I`byIPskl&L2ArACq-Rb$!utymvX)X*%Y3b2aOL59v5&rNdquP@fuxvs}b)S3oxkGaupm?Dh*;#n2XsPa1fxyv_Zq(iJwbyje$vgV4JF5|cx zO;hH5ir6`xmMUKv#x~M7k z7R&<9+y{K|0FTIT)lU_=UP_*<`7BJFJ84%3!T+?3v&2DnFXrOT3H`iD#qhn?r0QM} zCMl~%iDUjM*0m5Vw7-kv;}?j2qG71qlc62Kexm%igM9ta&}~gB=BpNX z>oJX|h*Oo6lK_Vf;0ot(-yMaWY;TXO89O9&E8xPwegvi}olPbF6SwMbxmxB=qV0^b zANyt&d)kRjKYy+EwZx|EzCPbz=i3)Vs2d z)Ijgn&zjWPK>9$R&DvDp`%=+VkMkAh(}4mp+-E%E4%YVX7ZjOFzbo^_g8y7*rugL9 z?8)~vR5^Wyd|u4+_Ra1rX&de<@PAX?xAJ|p6|6DVJ4hK}Ki;pq+wj%a8NL8(m$w_f za{*(u{jWhkX+g?5#25PH1U5S48TH3?DtBMJu5aX8m4`E)2fql{3Ry4ILH$>DsN~|Z zNWtf2kssswP{91cXsqtiO-+g~xNWVfM@hU|YC1ZuIv6cyE~GyJuZ9Az9>P21&jY&u zqw*}w>^chv^)QaJ){@uCk!R_9_`Of_{70g3xzd17(bAajl8saGHJW&afe1&MOzsD8 zX{w{ERygsAwXOYQyjL=pDs5oR=u7(lfG@30`+kr1R4{ccuuMZALJADBAGOzQV2R*}L_5=dik; zfvc*%AM|6-cN@7KJh#sgB2(&Q%xoFw7zRvxK`VCw7jKD18$3nA=TPl#aU3^ZsYM&J znO9NThK7U+(zQ*owEJ`J6LHTu)pPr?zH9EVBGZJb+Z=4t>r!Dwa&IaPHdpt-hcf|Q(Oe}WookhBcx0`tb<~va5_h$MP7%+)z#Pc;kzbj79dP_9(X_hM zDf!gGzNJB>6Sg9xo3;J>2k8^>$QtU>Q<%5>I`Uz;>sVt{94IusTMODoNV%{EeFggb zP}Zh=7{AwSbo#u7!pB+&@2Wmm5FaT6R?l(x3T`Tklw8GJ^JYhphq^I`uY`24T}0~o zWCf-Eze&C4HSqkiWfBepPm9Psak%Z>fcNI&-Ye>U={zeq+52yubx(=N+@BS8vc1R9 zj>++Y232`3^p%M7rB2c%<>Y)H=~i8zK9VLec5grPwlNPc^?RUAd$G2%jtOYgiM6Cp z*MdH!2N`oD>C+_I+YKo*+(jDjR3d%qi~bp}nJaT8Djjzkuhk^xcr}{hX;xMj&H6@@ zqD$dI2|Eu1b_N1=9-iD09eG^)Mmln(?9eoBn7C{GfoQp-2YyuA%A1?T-`6H zhITw_peUaSShDj5JPJJ64Vc;m*m@T*MSLBdrt%Nc78vdiSsb*Pbw7Vxg?rT&b6a)L 
zH^=HPbIi5S*O6zX7kn#u4AzDCzpFw2=#P9kYgRg*@-mMC&c-FT7jzEA`B;K?i@<~I z2Jc9JyEduEHbUNhJ@a$qea;0xm!kQ(^TEfR3w|yY-)Z3G&ci1qj-R{5;<;&dykZP# zNuMoz4~z5~){$-lzry_$r{GZ;^YV52372%`iRj`R(;cFw4g3hlP^j1W2GH^QOwpkC zHG!4|4AE%YAeZ$|4KBxe+O&`DCqKWe|HPoofh}V1*)-6;M}A|LX?A#1L^<=}l2;|4 zYWv$t{z8b6wEOvaCf|rXU#(=XrEB<|8``6JsK8WrET5gOWksHe{<_S!#(MNhPO)T; znn;lq!5JrQ-UV_W939@nivRp=0>5f@h$HIyf9va|fQvf%eAT4}nKN@N#e+GM-Xvk`a1(m8$#9IdKle`wFbt zTGr#~E6Tz1_lTQIzoz_ny+vQsypBI&%*Bu&cNl|fn6p9I{UDpAi3TU|g6*4Vr$EN7 zR&?g0oqV*9?VgMOMflI$w_b_!dE{xjaK0F_BF<;oCvmNz6g;ey7c)Y$Fa}#* zygNZIvN$f{Vq z-D8bv&(pCMH?Fa-XNMLNW=KDEKT6KZ>Y|@(dg!lym6Ed#X!)wsma8^)ldI+>%T?6> zt*O>>)%{AYBAp}eO1XgY!MIggZm6a_b#0J(5&r@uORc`;nxOq$ZQ8)#l*TSuYM#F= zPL^7bBul;fHD#%aYlF<|u^T)a^9-)TzBsi!6*c~O^3=Kut;lNdfAg zV%=8ZoTM-PTHqA@y#Bgc$w_}+t!(DqWvW~yQ$bFyTdm};!0MPx71lD9zfs94Ql`2A zc-sP*YNIVvO;j?~SX-tV7^H9O7`)T4F(y-OTrFj)&2uPI`7e+(V)SatRM$(H>Qek} zf()i)s==X$RG;&RWB&Hj$W(_ZQ{_mR>LdMLf=ty6era_KUr#AhtdOf@*fJ=}-&t}8NqD(`t-h&;>k)ucH;1kLFUoYFEC z>%>$mnQAEaXI-L9wYpKsS@AUuRyHa*D_*AhOJh78ipf-O<9f0@HK!+e$|>&J^Of?{ zU@1>c;u!l4uA#1qzC<2nE11XH;X_?zk!Q<9qzUb=13Zp1hf00BtEfBNPKP|jJmup| zN5@T&tM;TV%?m6QO)?Kyx+w2-iU$APCB@_$#m4-xgU$R-7wrqx`D<=UQ}Jqzg^D)M zJ{~P!2cDcTet4DU+wYI@?cO1xkve9cRkSU{^X#CTl2^Co0?wmLHo*P`7+>9>&cVkT zm3&HG!r~Z~T20|Eqa&LbAKC-`q0O?uRJn?5`s3<>{sba%&uRMe3ipTkT)=0MZztbr z4bU$;iXAq~iz%~;nev^9aboFEr@`ZA`4 zHYYR0e2kJGV(wI&`v%(AIS(Qgc%OJxL0%B|o!}40lQ+aC4Sc5)eB(rXmw`u2#b-R9 zBcBNVbFyqRrNe0&EpxrXH%vHdxUcltlDZ$eUy<_F{o;Nq`kkozO-FyJ=(h~tSD{bJ zr4!@&6;S;2YHk{W-WND zKt@{7Ojo)p5B7v@$DVh*62SAdITP_*S+E`RF-`Ss6=U}CeAhk6*NNH%Ki4@9AUpr*k8U|N!f1_a1?VG|Bwh)*|Ogxm204mx&0>5ms-huU9uqV-_LmO zNh$_w!>3vX#Pz%AE3E5V7tpo`9E2R+g+qnFp(|~fFNyXnXy{IR{;NUR9X!Af=0_q9 zoxMu=c{Jf1dapjZh;7T-IqQ0s6(4L+vf@77uPv+F^+4KnyS6;Iqn>i0(^rS@eyb8? z#jX0@pX!y&NV^Vg>yOndKJ1BxM0v3U<8HC#MesX^qqhAtL0+8qzbP*ctnXP~ys-ZN zmlxyZ#Yy$2mKQ&%|JRfkZ`OSBFIUFo#q}8L&l`G{7YEhH zhsqvF+rG3%mMVK>LatT74eck4(XY$s6+C99NIObxsp37z`{g#$Ci#77lO*3=uWgdF zM^2BoN8V}MBL_=b-X`V8@5kiFG|G?Jp{rIZy0CVoZI8?jF_yu$N6tQ_JyPWcmGyw* z?2*=4qH(CS^A8P8Sea;p^e5RMr_=s;N6h{>pS4`kVHmYwMYq1NfG+8g2c3T1o1*1QxSNxG}Gv--DoAJUpJ3j4=)JxLdc*P-QZ?x_FUG`h> zn?FRGTzmS@aP8^;_HaW?c773k(SIkfsAn7HM=QI}W#7skWM`rBPBRacjaQitap17V zDc0|)%~v#?vh=-Z?_T19tZ8a?w1JKlrb{@b-%ErwTv-F#vlo2GuZ-ZVUztJHHZ6jT zmw#LYpQ=~5;29wj@_N{L=i_r@a9e2+=0(1-gc+XK|`5k}jaXrM}*w^jvQC`cGc*cCy>unv9BQc-JjFs5a zxcnvb`AW`rlG>o)jOR1>#k=;K#t6?V)-kz%%nOb=a<6zk-zMpfW5jw9s~^MsU_9qn z%@IE4+o8`#yXhRU$4nVfzf$=RZJT2Sml`ThYB}by2>ZJb^2RpoEoBM%uypCfE{f^I z*0Zikm+z7~S4@YL7dKB%p}(|uYt;*75z%(BuL@(jAN_r*;h$&3TCCfe56s|VBd^7Z zmM&;>ipE?AX7__r1^snP;dK zsS+tI`7@26n{hES&EVwYr3+Yl#&C+@6wrnfGd9S*anvW$xvV%k=O>-J*_}k^vb$*6 z=z8rdL^^l#_+b3K-Ot3|D+}(9qk(NJP&Nw`-K-{#H<3HM%$Yty^)Drr*&|10Am zE@b|aW?~C}mQ2qV}e`l-T7w~s)OZr!`rZ{PCz=rK4ntRSO zx(-Ty%n!3Cy1RU(O?RIUI`lplysLcgrR>vRos+fwt*&_B%D6px>{RbN&+Pq&yWUrI z(##)N5t~jBhgsX9Hb=EF8E{m^oF9FwJ4$fo)a6*RwvC5&TtnfuHLbFfH7K4~spjuQ zJIw2l0vxA)#0&fm_=ov8zmKq<{v1Ug92ggKcG8#maGzKWG|$%=-y@}GJicGWkI$Jj zzP4}6@vT3D@r~8vi&h(3PRuZZC#s1nPK_(TY4C>h4JJ=XSVS9xKY00l=8zHXtQ#-=PjW{@P08wcEd2;ZfE@9~g5CSYC@ zF|RVfdPxxLBXxpPuT%55|7TY40nCrJaykcCkuO1)J{S)EAM<ZWc1UH@liey*wccxOMIglGbL0`s_#9c zVraU~A7||R;d8=!sXT*7|6ioe2l!v$a?5vj;a$-g9Yxp*AB{FJ<|tMBW$+r~qj{Em zbc8XXDqd;r)9M_VTuAvzgl@fz^cv%tc2B1tpULRQbyAJpAz?8yRNxxv6ZUw}@7d^= zHHMcwL%O`WOJ`n|Q~;rHWi zSb^FKG5Zh2?{2O-_uG)2-j_`8rC(z*e3^7Yxy6bjS#zI5A9Y5{JKseeG{clL*zcJv z=gzLA_nBm#+XvpWD@(t05<02D+_;Iq8K?e!D-*w65qFN>qeOd`>gng-Qr_Q0d$^Ou zz4m!klT`z*_dfD%8f_pR+G%>#<{z^`P4B`{F&LrgK@CKdpg6zx>?qVvMn$ zN7~rZb6I1Gjgfu*4(P@AP|)|qwl^ggbqKuPRZ3)Md*>{ 
zsnL@j=f0xsGvaxJt$NcX^k_bp4_)=r{9f;u=Vd*m^cd|IZ4l(AuF>gwqBT|yg#4LbNZoI z@mX>4+Z{{aPq2n<`nGD*Kaa8Ld0N{z&8E{;n@&GhkM_cN^4AZD_nn;_QGO$sp8eHZ ziamVqKd83wPh<-##M*y{3<+uLOZ`Ri1U>ZVOxVwhHUGnklRgX1Ch_$ zo(etxA)Y&9pVQ|M)#vZMO`qXv+x@cg`P$w4V`-(l5bb4^c2_J7dnQEl*D6h^to&AN z=}6?~6aFkR=bcc#1CpJqx_#pE7wIz!@Uisyw>*~dCY_^dzcoj{OV&1D)LCO}d$!O- zpV7K2^8ak{`I3xtM`4?&Gw7W5gV*W1r%lV=Zxg;FL#$;B){o-q>#!^O%{DD>l#H?O z3HfGdzfn#?XBp3-Z^~1C9&wg&;w|;_yTB>}P&Tx{u)1vt4PRk;nGm48mZ&5yDM72$~QK{RG8JE2?M*1v znN?B!Nq%$vSCkr)&Y5N7GT|#|?3qtIHP?%Nju}_H^RduxWiM0DOP8Pfr(rr1Dg59Y zknIOX)0@i{{8sid;U8`fEB@gaTlVI0p{Et+o*+MO%zU%wW%i|jRjFaAj-%HLslKH1 zW)s$ua-VV9Q%tGf=yNkyBkH%WzOB@4ea@S-$S7^$3=Y}yu{HCyEQGQSS1GS!{@3JJhuVF64Px@NHR=jx`^s;fK&>4M4 z@XRN+&3)IX)xOGa2avAb24AiCJ@94k?=dZVhx8TJQ@XIl?|eo3P4V(R^HV*{-XH%!uCmMWSAPy)xkA+` z5=inz?wZ3Yf^Kgkqx5u}6bM=lE zpR2X{pwsKB*uL|b>gvOdE!DNHt(&)0Z}-$5sc!N#xNDD>Y$*X(sr#7VE#0}r>3gcm z)#&v!m+bL9bqpPQnk)9?R_}4r|B{lDd&rTiO5tpBSM1zV{ZzfPv97_T(U%@n`iERw z>s*JNEe*cvI#+|s=c?HGMD6C>&0BXhxtg6A>jhVHqpLw>Bl@SR_cT@4dRiKNuDXvl z8Iq4WeX^vv8Pf>eZ^?Bx`kGt3?qja%CSNlY)_mCI3qMz1jvDQ@`NfEoAo1YdBs+|qZ zE@$2GYOjziLeHu~Lwb28QZI@wn=2|8*XAN>-?O>81Z#S%FvFwpjZ?kEcQF_aUXDot{SIl1z?)c@_IS z$|xiS@g4NE+8gVNoDB^QPphl0w573@{VMG~Per3o>r~uF&&Cc}K`$K<>mYgSJuMA& zlBcoZxYX3*t;dy1YIgZrnj0mbNAlIXB(Kj|dxVMqIbQdtT$u5*&q|x6ik(#@yLZ~R zOZGi`N_JOCrS^*LCB;ynMB|jSHemrUPVx}qcGP*qRqbnOYQQ}5rItqL!3G!2wARyj%+>6Zumf9;x*C06YOFl&tM@cYiX)M- zYduGs+#q*1N_DP-Er+F}o;sIw$kQw-`_#LEG4$ibmL}U1avj3L(AL!SUtR5Pbo;8S zv%Ri{LmQ=>oSY3(EmjdTd)@7;N8iVs4emOrhMw1i#9>8ZofwQ>wHUKJ z?r9fs7O zO41%ALlENDSzoVrHA;$kkZLilW=R{JtfZyEm80lm?*fa{q}H|_*1ts%I5qpD1HMEH5SZ&1~o>dkayAK`(Iuk)ddcB@DN|O^>g+6Ek5wd4* z-oSD~<9xEk+0f`bs*dwZ^|NIID{?l{rtmZ2z8G3 zqQ@Rn4C5pD=S9XB(I94cjC-Wf^E!KR~_s&Y_kNK=d)VvKJuQDdY`R6 z-=Gp`UO_lP0P;t7ZMC9;{Cwm{Uc~?(f-kK`+T)e7u?K4FX|*5vRLchGIasUP`}lKe zD2{$g`$-$2S`*z{ee8*lpkaLqhgKV7+lUicG4>zQlSUY&DY)+NK>6q-v~5~yYh7ON zp_T@u7UWyph-;s6iM>{}XAcsP<5>~|mU~ z1dBaAh@Pkawy0`{#_23pRLqc?$o{Uda4{5i%4>eZ2 zcrPB@Oit?c!|8MA-h{Llopk+BEV^=` zKPxP?c-nrlPoc3=@B<2&`VfXvG2c+|gs&rJO=2t}vMe9XuAqHm?iZpWq+l4toE06` zzm(>R5o{X7{DlZX#R=o7u;?%8KG<|#u(|-kjC`j#b|Ft)K=ng|aV~UkXJ&hgQ}oQF zR#>^Fja{L<_+ikBg(am)F;uAggZxB$OUGiNinoJ`%J;$=;w^>yMmsD~ztyQN*`ruG zr$XWGocv(-%=4Lt6(VROKN0fIL&VV8y^XdK5<69TbX(K}N61A4Lhe+pt|;w|qB#?E zUih&jYJnl(1JASYkEr@V9+!CjJshiKVnItYxA3{@Q8E-tGG!j}d-KpzCr;YTfx_6y z$ER`f!h-i$@dl=B*PD*7*8`#p67RJlo3wkksMTXOt*tJ9^IDnG~R zqexq(Qxi+y?_9M-lzX(szF88F27wjM;dHPDr};_)x*dD|Yd&J61Wh@rpdV|YSL}+j zbn580Q%s6bD3O@r8MRBwHHbSUPLW)ZH!aa%;pZ(PRSQkB!|#YhnZv?!FZp^`r9|;C zJYS-dvn+BpmNtY6xVD``=MSw|wtOpBJQsJiw=rnea@0vOu1g>l@p2g1GvU@x~Us z7{Zi5)5`b-?P}DHE04%Z2yrmzAcrT^?4E%wz31G`tAFKLQD~dm#GINcCAh^pg^iE% z#lJi<%DN%3OF2>kPc6^-LEx?N-5DbmLV55zAk545*m`6kc&Zt!QICRVbdeY+@I!6w z@onyco=TeV9XoYm3mh$C98Hh5&3J-044;s!XPvJLCC~CX;fn#Ahl(Q6}D)F)r>6& zzfq2Of(3SW=;>NGaS3~cY~;HIX(6C4+3UYXK7<_k+2JRf$KPF#I{LnG3#PvuYHIZi zr=wxkZi9AyLq>%pmsk&GG4j=He4FXm$mmra^UY2MLyqG5a?`g0tHDoCrpGd+ycD~; z^WEay&K*P@!Ou?K-9;JufIar%_Q~|!*-lRa?|$_e_66I{P7;srVyCBSKwH%DrKeK= zz)c0}7+CH5PEEGr>(jheySw8w7{wgmwIQ=FOsNC4A-=NO@z{OoPEX?Y1@;y8UDlo) zY3y?AWd_$X?h5KX!=L)Y_;dN$g31Aze#5;-4|@4e`jh)*`Z@OU&?A7%g3JP;185KZ zK85;_9}{@|?;yYFn?Ox{Om${Lu$bv*Ct3RXC2m0xbow24Un?=lksf~0{fa>_z}X$J zoWs9Gw2Qm06^fO>KySlwGxE@}fw1|eI|Rdn$N`35qmoPL=@sC)FD6Nq)^^V8~c z>}&N8WD`h@0Qmmm+@05`0=2sxor{Hg_<~wSV*j`ygs{7o%jetG*q!<__YJxX!Ty5J z$ZNNOJJ$Q_)projPs~5m?}E>7^X&8{m%(>8-FM_2IQ1LmF&Wr1EfnByJGdbY#GFX8 zks=r)tZ@xABb#z$zsvFSOet&3XoG(hk2~<#*@e>}0J9m7-COImD5n%3)m0idZ2sZ~ z=Z4Jc2ATCIUmm*`CL7U?v{%jZ?xwkc!1L5fSp$tTY@|^GS+R{jQr%_T73lC4P1#Tt 
zym@Q*Y1jz&SZSwAFuB%uH|r0dhC1PZ$leO+P!-an)5nfr;N3FmKzMr!uVDi*vqd>s z_#-OEzP04E6|9lJ{+520E{1?U#blaUpqrggKpld`c|QdAqMv;{KO9~#o?mDN1Kn4c z5c7!8-pVm3pWWU?8vQLIgJ!5ZPU?}vz_&sQXy3mZe+0Jz(68#yoE?yFp}N#>=r7<_ zU(^S%nD1OS0K@Yd#v>5-1G*W+OlJ_Fj()T^`VCZS$KNZdL^q_zKd{d|>>*G|oqYGb zG7biykLHHbc8_)F0ebjBcT)yLjveN%w{nz;KyxFdRc2ayMw0)YPl zS@}jhuvY%m3OSF3%Uz##y9f+PmarV+DG`$z+3#hfN0$%e5#gMkO~V9{xRz^!*W;`Ahc z$$U;MN-X$@+y2V4?QY}0LCwI)e+#TR;B|VuBs&e^XRDe+&57ar4Z*Yabxfx-Z%)~+ z1r;++N$6<)`fuA@VcDe{spl{=tyJJvEvF?%f&hSbeLI@$oY_h9Jma>LO!;x-V5gg{ z+_vPh)FX}ncS;VR-`hZ&AGDD{c&@_(=g)t3FGE-!f5?k)%yf4Eyn<@c*)37E~FP+J~0$CnnFa721c z_eCj)ox1>(dE3D_J>^wKwWMm-zfbKx690g)$OPLAxxMc(BR~b$ywq-NHMfun0u|7M zdviQ$w1)8#-o%KzOVIPqwfgs1&wt_8&*JEhs|J56AGz$6@4qMhj7TLFP*Kk$svvwKDcKWP{Ib6_H)mPA5t&2<1b)9=$E}Fm72m8Dx_Kb{dLY)8$lSDrlj{ndq}a0)jMG}`v2g^bmx=Wu zo@QOCL~LbUE*BjoApHAnz3eTFv~>7Iq!u{Z5nbt&zvgf6D{o|jW&vNg{m8dya^=(D zQGqk3t+K-|VZryS;QPwJV=pVmyoW+uZVF5wQ4&{Jk)hfJ+@y_^i^>BzRV;tO{5h6no{(&2IN z{D(?3vj|~tJ+9Ni4%FKV*j?kFf%!NuTR)#`u3jB2LZ0dXzI2gk22l@y!!iiZig#F! zIUf#??+5#OoZ+_d&5Xb|*fJ)#!&^=6O>dQq<{7s>`I~y^`)&xP5 z|74lBj`meu03ci0Z0r@n&*#fH0jtPY$efAyZmA9bhg4n~hT{C^A73&(BnAn3Y@ybV zp>kHK{>W^|scmPHIj7e1jVY$4^Rh7fr77u|1I@kG*?a?aPF|AZq;;S}Z_}Yr6Rd@3|{CXCdhY#^ZT5#iQ=U<)V zdRMXP_5u!=G03*u7Rsp|!#o0|=1kP#hZi|V9#{vp=edjFb*vrUp1lGLVOhx>g3A?U$vl-K!;-N0j`PY;CU+ZY`<6o;=Df{SlUV7a>z@0azj3-#_&1TL-#gKo z$W^ua6T)1$nk!v2OW(`i@ykED9hU;S%8RIbxN4e46_+QeL5{Om0%k+CTZqk_;pPz~ zVvAFM=Mw9+7?$bCw!LOxgdXZ9N4(oSvr+TCf#Sa>`$Tn+mFPB@iphBq)C7s1Hb16I zy}$W|Go@lLAZ4bh#y!RX(|rAMMmXC;P!&DPA4T@N877%DZ~6F;?%s6&T7N(?w{0Q= zIo8=WMc!PgMMZL7#oOs)PrsR+WNrAiHpoI6JTo>;fzD5LD&^p0{yanFY2mN93r?HL zc)kIG;R?bM$h({MNW6$Y6h-jpSaQ8T3YjLHA@Tmdfj%@4SKhlMj6a= zx8JTkGhP%)@D8)#dwCLy`FwXV&ntW!JUVxHZ=>oB9M-Q}52#Rbp?CS9zhklZ*hDYN zska3r*)c9$$9ms>uwBqAdL^~oces;27dPn_+wK-1&=%K?w@UV9Cr7JOag}dng3pJ{ zZ>OiGCzEvVEfIuA1)yEwp+iP0#`5?t68yh1KV~29Rk3b55opFdag$zDA8$*-9vPSb z+cH8q-so>t2B9$4Vr3;^u!G{os|NPMi@s8bVgO^EYjqL;aqKpJxn_i6DuFBF*d&ohg1Kpul=7)T4UW@4G5fZNgfWPiU zl7QSh;5nj(DO&Li{#6gJq}$9ihw0Oz~=R81=1i8&OFRk=XE#^*uDERQxCn< zUg-Qq6Q_TnznN-!F~T`(fuGz9TrQBaEh=f_J*sN9?1obp+lRo<|`e2U~5NAn;`nH3n znq9S`J{9k2Ac1T>KB&$c$Zgi__B8n~Y764UJ#XZce#`+zm$F1sM#Xjc#-n0TQ3w!e zI}&kuB>aVoQNrbs)-<%iFNp;J&6k%nfROpwHqBlsJV(F2eMM_Ac zrZBWNdnj^pwIev~+P!uhXQc8n!L~xPrQ*A7Mtf$bl&_kt+?%?=|KS=nZ~<^PE=2Dk zDvjH-J||OCgFhpQ?{UhiLO;+_rUQC`KN}Gy?nU>%KO4Y&n$@2wXGC7jaVzrv?P6&R z!O?yXtgndVO#b`ZBapkAWV8p->`gcWfs{DOS1F=JUTeuSQIBl00jE+}(;wdKcx^$xJA}!5iX%|Nf?R>Y zvnb}XfVnB*$iChp4C;4e&%{ems4rDRei|R7eZgCcc)sdMG;tqb z6w+=9@9?i>{~&{gjDO?hl>(lr)43)HBrip)LYFQc=XD|1xZ$0GE4=-fKB`IT#m;}( zNGF2K#n4%O9fVy^Kd5+CHxdj1XBE|4Zp?N+8c2bBfPr?;A z(Etr$s@}d$rNcz2e{0u%yJ}INt6VgidraFT;yBY)4G+iCJJ9Kb7Mp?abiV{*r_k-L zq1Hg5XBAqhd@T~Z$|Y!fGaLF}a&_$UF{0XI2$2!aES)wFX=!k!>Ud3sbzO3wT4x%+ znFw!HEdJYXT2jZzm%kqcTOByiG)mW@-^*k!nCYjWrxsIOwqEx%o;`xL?j^0JE2=5u zB%i35RIB^zL?!CM84S2@nQ+Jn3q6~EV<;YgmyC$?w4$e#?h*!o0yayH(g$a6sn7Nv z2>0?>UHDW*LxoX!V*ep2>=1t;AaEN|vC!e+kLjrP$4)sH!1HSz?u>?Izr+6IB-*p=CFE{Y7;57~|g-VJJ=dKAI7NJ8e$92aqDlX~QKFJ_9qo_T9{1nkI|%zPU(8W?u95>Y;}ftfDF^@qG; zl8S&PQn(}@d9lu6 z0c8>vYr4vk6OdQ`HFW)Nk{9Xs05?^#00RWJ6k1rbrk!QJFpo~jWu2XHk1ZRyDFBHl z&WKx}%DK;l)OXyMstFtzyVaJBl3VUPZ+oB*)5|zhAmzQ9FyHTyh1i@mg3I^4>rhF~ z#;@4NDugQPEA5TcC3?6jMjw+q>w>JxIZqf~|F++Ip@lP)FJga1lENX~$K+7);*az* z)O6krE_8of9@pXg_S`ybB>FGsOr2iEf}E-aOUuMl+d6?1iC%7nM>fNjTwc3lRC~zA zQeRhvlSx4A&4o|7WiL-%tEujMUnFU_3Rxv^3}6BUB-eXA$Lu{^pk0*PtN;DlbH%od zsduxv_oFz++rPV8W%oA?JjuYAHFgPwS{hwA2}5?&`O@nLw2V86Od!jj1fIKQihBeU z2TyTTPupbgT@m{kWpYCN4{Z%P`I&U087>^7pNuStP>Sp>)?yK_OL0!tvy-t%p5^fZ 
z?3F90F=Zb2qw^>1=Q(CXs*~PI3~Vk^>J!Ir%IweOv^%Hgbg`iU?n~dX`9{__l^)Tl`4#}_@S>y_i{WGZ;%^1+;L#ok?9$du8M9>#*mojoQ@bbjg~^@$8iE8d0yyK>Sjl705S;=+c#3 z4e}w~i&+OJi7m?rPdwNvUgQr!if*xd;Lmi>GUA_frC;CHQ{TVd^+Xy zpkGQgau3miTxgdV+io9;YUX3ymc3 z4X}Xzv#lNF`Z4*oi{Nvds;PDdAitG?ovgoU zbtAoh0er}?D*$+>)!(-se)QO0!p$vhhRQCY=2?Pm z@e2b8XQ17UPpjrv8o-~PSSaK5!LvJpZfe2 z-bHEYu92xX-~>Q3m28`PU#q(ktY?ktSH{=!Hf9m(l&A(LuDZ6y*5Qx=k+QN_<@!To z=%wh~!EH4s=+cWG+e0>3;IP(YtX7ITWAPtcup!zkT{dSTpkjO6HvA?#`iBRitR{y7 z#F<4iav850zp)t6Q{5$QLDyR2f=3`>)&z-6$*_;L?47`6;+SYZs;1Nc16%fADkCj%|p1m;#TiH3h<~}19u6gSz0dhs3VBcWzlE0 z?+i^xb4;u%-bxiGUO4@Cd`^y3$njGUxf69XG;!4praa_|j)_Y-7r5K%ClpRNe1bs-pXLd1uk@iyGEEIq{E})sq5or??VR*@tU0 zVWHw>Fl!+&D+5q>F#f`;Ce<)+hmiq!zUOcEm`-q)@Gu*!vn%H-GK-O(9F}_f-$(%$ zhG1RO0nVLzCIX<-&va?DR&(MjjRqfsW8kz_K?}~)}KUJCDa^g{3CWE%v2X@-hhv+igHL0+h;?gy|ge0tJVq(b}`MI&^h9C6xi%;(O zfZ+y=fhVwc|C{LNkw|5hz~Mn4dCzkjI# z3Ic&KFKq+u23gjmC_ zNFq?fYHW&px$eQl=UIRiR}Cm5-wjV|aAg1OD!C}epT>neM^5~*D!FhDqxoK`a!rJ~ zw=H8@qjHWZ&S?Rtg&(@NGX_lQ^_A0wHyOM$&Vs8mW(#TMiWS*lKvX+RvT^Iz19Hc6 z>B2((BZ$_H#0h%iYB{VAgn{wuNaJcTEN|+t33I}yrN)LLp4Ox+?UCzpL%p$=)epG* zjHzmU3+fP8XTZ)t7=pa*0z#ajGM(D|cV8e5Xp0o97J}5LGFm6aUE-MmpB+rd)B>AIOU66Pk39J7(1bdn5PECek{76g0VM3Wvk z^fVJ2X8g3K^idkGITM@Mg9Gzp{!RaIBS*v;+i>@osfuaI=|I(Uy(vlQM%5SraO68X z_V`$OMqTM5#*oBUJ*Lj2AX=-C!J$=0N-t^kInqNr>0eyS;EwcZXsQh!|N6-H#|nSA z7M0MhW69Z~E)wtd`tI$PfCA>S$_dO&(AoJcO=hem7X=9+L=zv>lvEuRTLUD%)CS*)>k z1Cz8Og*)d3vO(Q6Xkipg##flB)~R%2xlA!yobP(RIVMReJ~J> z4M`##-E^sg>WQ=sG<2AENuCGi&xvTI*pfU!8a{ zpH9(OY46&}S82m_Jp#6pU!CwW$1M0g?W|cv?`sdxAJ*nLuV+mQFJ8+{S5G|>c9%V% zwm$H;-LRQ>0t7_y%FSm3pIZdS(&4;Zes^~Jf3P*Ci$%^m+%e+0MQD3Wv3`+Yua1Sb zH1r+lVNLG2I;;-kD7wSTv|0bA<6^z*r?qnW&DXoeK#0Y9u(NsmWX97QjB|^&eHZk0 zjFz!iUj2EQczLPH+nOBBGro~)&`-`Gis=y{l(SYk%qhRlnlXA~VB?PS58>!R>dlHK z_t?b~ySH=N$|wmTgMv7zhSGn+MaC-78Da$Xz$AiZtXeP85X~i3i9irJ@Hx*S_nFFP zLv+t-NK`|xPgK(g1Av`Rj=CqD5#kzqfXnJa8PoNl$IX`x>$}G3C!WM@p}2qi4}6`; zZe-IB2TkEKi33NCP%3$cAy69AXItmV%)qo8jW>}SwGaA8%_MpGSU>~wou7J=6vRi^a=wVivT!xgX;~i$;S=Hc# z>B5?I<*y2U1J*D)662U6Vz?q#Bc~!G9cfc^MR3@wGb>gWN2cuPjfQyS3>G5BPaO^TjYku zSX^UWDBIllf-O9cz6;;JEQPA@t|1z+zQDkeb;EpkUrpy<(kr%%c`HxtZ`*ZexBtv} zybT|pdJd+XejKKKEHS>cGrr(Qw%3NW`%)eQ?54iRmp(|Ict0E~aL&IxoO`R}Zvwx| zPECtF=*wS$QD@6%)yiM)sE(Mz%k?qNpEvk&n{VIDvr7Ww7`D1fOjY&x)URRhPFmKrzPkN# z6<5rRkJ%Om><$R;&lvA~3TL1%d2D4+xvHUNPavIQ-S3lv8VU+MN!l%rH{fSKIU^1Z zn3H$Qp5D{ECju!-H;>DWSse$_+XrH(mzE|Rq1c)ZG3^=MHgblGGAubPomg4IWjZV@ zuj(eDYSaMy0k|CZ^DJKSN&6)=d>J{op=VBb9GeSN*51!V!YI?N!&Q zDfD+xyoi96?{_w(W$m5i+t?6paJ|IQN{;1chPD^6iPsf(;q4C%qWY9y{eA4{1pdFe zick!9!=vw(b+muE6MUsDQKRft>VV1f*LKD$EZ@J!>W9({6x9nayDPijAo5b6 zzfgdHfFOY^38EDHv3oRTfPsLNK!Jb|f8Ofb8N1Qz>YCe_JL&4u*|}>c)W>Wvz=pW; zf%(IBgIYjnb8apd_rZa^gM(Yov)VTlNje~(oPA8nIzE^Bhso){ z(awzJ$-)*pUw4s)&yNZ#%@B6u#_CV=J}Vs@xLpb{^vH>wG!`B7eP>Y4KHu^$ausYN zJt2N+?Yvvg>{De2#-_JS@vu8OosTH1a9$>lm`x~jl17^UY4$Q`-qdh+_pXHO1t|%! 
z@#~APM%7f&ec%o)okH~av`Vc)8-#z{5W6S5s8Z`6xP;}vH*N7as@hh_A%Qj*%mCE5 z3tIFQ#eBqqfAw~d)S%ykK1J$NxdZ=#lm0E%sHp*Xmo@EG7gFUb(z_wEy?3#%^9vyj zH($Hmv=N5fR7z0`w^nFH<)6yo%qxf?77mzFYrb$L@f3yWpFA|ZlO-UH??{T22${(s zudvK&NS4}e5&dNb1uWYGsU<-t;E`o>RnSg%i#zF5JqQ=01`*{kt&EZy+tvZq3-fjdX0ekq}2w zhi|%ALvi1}QEo;_s1wVqu?^~u%MptH=~bzGTsLyaF7?OUfNh-MX>y@T2t1})+13{KtxaU>XDhMgl(S2SAa<=U4&R1FNo`$5JdlOrm8)GENj?)JF>Dd~kmbd; zNmHv9neuaLFUe0n{(9eIL`(cpa!S37{1^9(`O|X42cgw+-Fm`n5^Su#e3w$=ApoYN z+wo;j2;|y>UKjon5!oZqY?}Lo<>sE-T%$Srw+{Y0{ONJg^^3bmZn{|`_MG&e(2^T* z(HdOp<}d$nk}N~nIJFK8_0nfB40Uy#_X+%MJQ$1s@zqOp{|*u3hSVCncd9hI?ls)u zUkf7BF}umQZU>QXksUVrcDMapQB%bVCfCsQ4Yd=IlQK#wdR&V41$r#z@MfY8hbqQI z(F4U;Up|AoSV^Y@6o&~W&JZQ!BlQmMfA+;i_TC+3A8Fe66T6F5CUN*O{Nkv^dUHj^ zG@sLii~?H{8Y2!Yr8;f+-VE6|he$6BOFup96UOEj3)|JzQpt2En9V;|TTr{b(S-s--JA+#Q8zK&dl+H4nM#!V!8$=r9oW zTi%D`Bd?g{7l6jG0~PYv-SFC8%tv#h@vvjH|fAJZOds^;#AQ z7Et*RN#RFq^~|8V8lwOwr_k_r(W);@eT)!9tvLJ!#dg6`Q}S8%@U@3xg{6cZCKl6! zAuIP>@p1a;>19vNR^>)T^TDE?#LZvre}<@IfALJu&ju_b(A2L3Bfd{@hroES*L8#$ z-S34v*u;d7_1me+lp~_}zk<3lmAThvFgsC>Gwm-!pfdq6VEA3;Q;+wAUa$~f)`#x5 zTjHEYwmCBmZpJql3v?;Lv+QdlaQMywsDz2hw$ytQ6ZJtB83;|&iDJU%Dt<)>23wmuOt86d_}ov&wLE(6cN8H_@sricC>$T0GtiFW8yE<&KV_Ae#t8 z3Q}s0GfE(lJ*2^z?nbTa_ch=N4ceja`ru9%i9#cI-)1b1EuuU2Hbkd3kXFTerKRn- zunFN$3%31?eR;r1K5_ZiwDwAxFhlmKBsn}*DM&+>N(#vsX%@=(eYA#x@2x7=u;zv| z7UngPG3UKBOo$Grr&kQ{mbMvjdcbz~Dl5CK*u|m@h_6_mOV&VP<12Ex-uh?+Kfe~^ zYl0ef=WaW&-TFYo&>*vUt}ba3=rt^afUj%EZU)yw!47L&Od;f%RrO^3hz_8XU`A7< z#+C%+2S0yFsRimA?mtOHEjONW0jP>{;AGEhkYiS)HNr9da-23(xmzy z-U~T4N$o!f=puE#tKu2?Y(K`i)-E`QlqA7M`Zk2>( zr%@X2X2Yc zrd-oAzPrwSx!KY41-Za(FV|weC(d^k0JywlVll@0s*kPs+|T+SE`x}?-d=QjQWS;x zss!U-FY`03Yd0esNSngo|J=HjOtJm8dBDnf3AV%W~R1&p2R5j&zqQr7B(9Gk|?{bkJ6)&W`i5; z@DNUxom@cTm-@RYq^Rtj+ePG}y#4Q1t9W@9^V%sVwb+V>KUOzFs)<$mbHBU`oXCh#J{ZFKx0_d z(nn!NMkjp6#DT-_wFE8??)A7M4~!cOQ!5iq?|(DEKnJ5U2_K1ws%IahGe*?2j-vd6 zodaX7!;Cvz(KG`c9{yNm!79IhAoGzdsOYtek24c&%?+xGhqkg}H3Fq>cA3T29~jTG z?xO0B0n4D(gtmcAlOxSSkh-3$Z~LE>Cv4|BCD25TEd(LI$tnJ_;gpF|@X8p_K4jL^ z(glE@J-faKow^Je%f>`mnaLy&A0Qa7l*D=2ec{))$F{fEa@eSf2RN3LQaeorcg@)7 zsFk;B_vh3#*0quCOwM|uT@}ue1JQxIsiYZkE$UyB_(+fcSjk)+a}`J9*c?Y%)0eFS z-_=&6BE3ACo_g%%;UWQR=#(Iq6!R0e-U$i4ajS2Ow{IvZ5HLq?T_anj#oMZiA9{ay zFFA}kjNAHbc1#MLLahIkS8$Hb1@KjHL&7I~iyc_@pVFBTOiRQ(Fqes>10i%Ma3Bxh zYTYLTa}a&NT!%GOyhFpdF2so^my0s(Y3QfOJbrz3&%mNDK|*dN4uB>M6Qf&V$I zAYLvD!*S?&{rul8=9m>jjbpz83HggW+eNv|b`k$8W&$*ur!H59eM3cxGpJq99nM3m5am*8WA2hDt)3p+MFrtA5e^ofe(9Gqg)I?#6*A18i;s z%+|??IB)SN-Xz#xz$1NeLCPeRruM6g5!o7vwzkHCc$q|`AGgK5R%r+@v1AA*8&cRL z`)@C9zHD=;UUv35LAB7)(VsIjXrMdK#1WjQe=Rs?QxAL~r=^)_Q$Gtz=2cyU-G=fA z@pHAAw+$??U+9GnY$zb+E>;N@O7Ki+HHI@2&F@;wm%9@&$q^>&F`+Mk7##rM$*@O; z5uNAEoY=&B>C>i7VK_KYHH1UOD-3xUBFNOMIz?GQ=fhT)ZgL=5B&Af6h3(YJh-M1d zVI;n(ji4PPm5@c8c!J%i=nYgp6!5UuibZ6gQ8*1Za8>N6by&WpJP-@PmB|0~Q|Q;~ z_z{iLS`Pt>WH5n=jc_z2=;3;`_n{iY?Wk^?Il|O2wN6Eb+QyE{EL0HrFE5PG!6U{c zXQ}u?r1IA(DTTs$${UZ8D_T5x4IZ>l_#%8pCP{aPa=x(dcY`=RX?JqKq6tG7QyTnz! zixfJvLluW7Q4icSV^92+iA9}{A<(MShzXpBVdDmGx1$ZnzlB3Tls7Km$yN*xDa+8w zb~hefSj@?~zpe^UHO^WquOar``&a|=yPkylRxF#dF{w)W9V|jc~j?j4YByZQnp{mPI2D4_sFHAVyxaY8vVuzgz5^0_3{Or zW;Y&$CJ_~%+j?8607LL559M}RUiMgyQcc=#a+V^-eDvm=={1F)IkMRCq z)vIr9_n)d)TV4uUh!Jx01O<=aQ8-xd_X7FhL)Z_;wc2~nQ1I?)RH6GL^iLX)o%V

`#tx3=wl@DAxq@+ogMNN|QL0m!W0+H5SX5w`JB)~> zm!hSbnVOA^g@~xUD8Dd1GCejwBttVpNkuuM2oiWSvv2_ZUz4UC2RLjp>vi|iFtH%h31>s^x|>zY`nu+#SQ$($CN9WlNo&= zHCNz`{|Xi;uEl{|`6pGV>k_^JH8a0AgMR1V_3MOiOHZa)f_QB;mOS=s?`&6a%gv3R zMr?FUAMLb~=D1*9KgLKtucLzhHg~c8XP#~c%$3ebvJUq&R9s7>y>!Y=ke$oj(G*pX z`Z62a8@2=A+v6JoF}G~XDMB-p=}iy{O934-%0?xYT(FPmLj%0N@s&xh$k$Nq~e``3gN@ zw0VEC^^B4}A_!_(hp8ttz8Aogb&GDVPv@r_yOYQFNx<OzWyy#P%hMnjJ7}tMI~Tg>4^;AD`ZunER_$Ve zh?+9Wkm8WDD1Eip>!_WJoz%s$o_~lyZwN&z5&Eo%j0B53`q;w^!M@6jF#B1?Ajokv ztyW zYzas9wrv~2rrYY-e&Xg(l3rlQi4yzw;7k_D0`*4L3`O0!7SM$G$#U#A?@~oVi$P?E)v}~%X2H#fW($8|r>z8hXI{`7YD zG0rrYfEF`~^yC&*2!FE+ow3ez{$f|RfZZzDZEh8YK1{jO2DG}>FnZts406rACDJR} z<+j%TeX~(q^9B0lYhH4iDqqBzUJ)ZC9N(A-npRO2y}T?<1LpZ_8qD+Q zQf9`I_LJLCDJ!EbH*qaj??!gXYS6VxK4kB2V@ifG6S}mQtgv|+jd}sAF`aunWJ|J% z3?vR>(S<35AT3pMt z$sOn_z@z%bWvq&p<Fs0JTpbB&VM85*gBO>7>!87z~x z2jX;7mqZMbCYtdXDkPU}sC}XhX!$IcQ^4P$Mfc0&`Hy>QE_Jb@BeiG_V z3ht?Wl>hF`g^}k~DT6RYs+ju?ep{r%n_i;nM9eCv`n-g;u`=1J!yfwIN^3zjsw`!L z@-^IEp+2z>$oTF~d$ij%C5jFv6#?F9K@N!hA*1%;KaVIXt~%5N@`(T!t`QlF*r-?z`NXAgbgs|<=@pnw*(9+%RyLc4n*vI< z>PNOaD>v9p1m%d);{zUj*D2l&-c$C!A=b2{rK57~Gg2gb9b7|374;X+E+=w;%-VGp z#(JY$5C@q7EP9MQ`M|x) z>b3#XhhV@M+~kpr(i8g^7M(NG!wd8ULIdM{ipbYa>X~&0d5m^t^?$fJ$1cIbWLcMO z+qPX@wr#7+wr$(CZQHhO+qY-kFLUNM?7h~T8IkdX@;yBY#bXNkh^a-~gYlWTatfn8 zJ=79jHJc!60I6lMUuIhgNY4U?m0*urIgML$Aj*L(NFOH`W8gTH#&s^Hr)@StIU7Ax z^Wb&SCXzMuL54;sTvgTF7&(5mqt`S6W4UmFDkz4Gq}k|WyJH|IEz=Z#WlrINF0D8r zzpn?J1rXhtZpl#zCGl|;t3gZtQi$~`lF9^_K^@=QrT0AZgUD0{GGkbasF3h!=EBE6 zmn4BZ0b%~~47iASYQ*oy(^>2ZfPuyHL8fTAuLN-IJ7|whAHHo((h#mGwnn>>6DqT8 zeUzO5T+|#gCz{xFc?f(hTPru!~R5l?6d& zgf}4idZ;T6=pSC>*UJVFrWp`&X<(*l<3XU}WJb=%RHy^OSQjxtRS49G`M95c?~m4#e8f9}W{lxX?cvy;rR{FId;a}r zL%vk`PwiFEA?15uQ@zFU0zX4!?=Koy?!hrdF-*V5?o>Cmrgjhg+nsx zZB>1hG|J8`IaE$}w#x`bTaO?Yr~@uv&(A?tXvab@Q?u=i-N0<!8qmNNe zYd6@F#owig@jWW?;am?rUe4D*O9AwHr|K|~LkNu$sRXP(kxS}g$kB|Q1|OrWb!{tU zSUM{+<9q@ZV(TrIz$9r;0+eZHd%5{JjglXF5D4BYLIR^ z8Lk>Y<|91Q^P-=PAFbzch7O0o6pi@I&F0dC5oXQYpf=%<`T0jKw>SjxsE-H+d}b3n zX;ZS+)PX&8q(vWlHnt0t*CNke^TnRun;wr!SN2k!QvkW;JgEQ&>xEmgoVNnCc&(qS zGqqxDo;$hzh|~W1OP-^{d%S{d2SX2WGzFD&lb;!9K~~o~3eu z(!7Fd2X)|^7$AqUO1x*+Lb=bIZ3^kqydj@CW=dK;a8_RSLhD}G_$bm#a*8HP15ID{ zvpOQ}Xv%M>*J;b-fbkp+mGvwMbPfT*oBLa~k*JO2AB8F|{XMx@kuK;|GG2z4W30I6 z@;TY)XZ*XPNbFG}_z~)GmeVH8UiOb+b-A?1hPsq4;E1s}P{_5Ho;XGthwqgmd7!!6 z-um+?sQEWE?2)ecUAM35h6!6v0OYcOh{Um};>TwlL+FTN2@MX^w`V(4`m)y){MWCr z(QL8#w`Y6v9%BHkJ=etncOyNJ zPoX>m%yy^CYYjUg>MLY4u)RY<=GsdDq90SC!$NI{*+}=B4F4(&YtAwMgt?;39C7YURz391&vnz| zg7OOK4Da*I=kcxZrVG^+rFgAD-X4L3HigraA-Xa5R0s86d~P z(O<=cz#C1sh6rIuZ+tF?8TtNs5g1&@s>+uY(d!|Wfq{O7)Iv#8Hf}Db?rI)(s&Hiw zM`x0n;_lmDH->;U4P+;ayM*|h0Yu{rlLBNhfX^@>6z;C&tQ-=xN^!fdo;yEt_U)Z4 zowHvXX`c1R0&ChKgW7`j7?{~Mth#A8{VI z70~4~P>47+(<$Tslnl2tSS9 z)+nWtJG9!A3;5m;w6b^kK%o~oy*gLA`cI>j!|VjKln*pYOFK1lC-2h2`j=LE&Nq*> zzWNd`#h?=y-;LtnZIjb<=LgHyxl229uG2|1rw1tKC1XAFra7L1>;yx*;U0J*Hwu%7 zygYSR6QC{CAF0cmKAPP1t?8c9dO2i|?yMF%jM`ywnCWKrGHtBZcb!ad;eP$rO%ZU% zQCM`1foss(W#de;i$j@d+A_!amx1lzNp(ES(tTn=vIHX#}!CSL0u}*Mb!wImM zXQ$qUN!;0z-?nI%*6&p;6eii3PBMbLBZR%Pp*m}UP-`$g2^y;qxq%IDgTGD~fy&U6 z8Ebz;pch<7C`GxbbgPD%s(pd5UI4=H0xHEzuLD~c7Fwxeof5O*m)n~@Y5H<*pnBpZ z{$&=y*sP<}k~gpr(7}<*1WAY&V@Df~#%M zbuz~WO!M6aZl|YDk1Uv4njOI#IT*YVuzEEd=oueSt5{6T5!1BfjcPz@#x~iy!}1zb zD|Zi|N?-Sw05dW^7B_7vRHhiW10aZGZiNIgAiW$XmI!kgNCf4@Kx z)m7$N5#ZV3@>w$oNTW#HP5R3VURn)awuK2=Kg~>Td#?h%oe$v;JjVeTyW&H!Kr~>R z4-|0D$!s008g*h(~q~wniq7j(WCsPUhDC zwnc~ktc$#svDoeLyQgXlOabvO%?+cT5O%UBb}{TdtaKXmR;Os3yZmSI9EE>cJDj&^ zW64K;UXAX*DH{G+0^Rh4V;&jC_3AV$ks=R(?kiU|n-3m~iRTugczhmTe`R~N(b=0w 
zY|ku|*i{^L7HsSv2+BNOY8@Z8eY@XIhLs+!jw^5VdOSW(h9__HPW~z5U8Z818}l!` zI*qecd$q?AE4Vs|-rXg%=j=&e@6YT5Fod2p@6@1p8?Bs>fgAZmlHK& z5jlk&=bt*SNBe`Oqdjb&E+XhMp5Jvy%^jgIx3wrcUH!Y45iSgSzniJF*aye7lrAut zD)!@cRT0<=O+{@sl+>5leJ>@npA9`Zi8bvPwh<})gqIDMn-31V+`@`&^@y9OSZpB~ z(m6e3^ty6mxw$<)_Z~C)Uv~%7t@OV?Azyb-%ax_O-R}3Pt4GJwY+}3Kk3gnj-_Kb* z-#7S2e0pB5(^>MO6f3abb(Vm>A;yrxM@pi)N*4rn%Md{wxH`#d#}Uz$Jv->DN!skU z=30&Wak$p4CacLZ+l)^uFp?@ZlRanesrvGfiO4Gki!VjJ+w-!&^G%i0<0_k0!+hwI zHDCP31{!+d1CCY$?TMiPboh?-= z*~7~>SMZbGdaBvwOV3R@HxVssE|WbPHkqu|N4pj4E_%<$4dA90seKr&9qoFG3#!c) zF1&@?eCCzho-=RO-yeKE3aqL!u!RP{+qx;N$#zc8&+R+cr$-qtek#35daDgDS&S*a z*6%%K6VK!08M^l_tJd{!7XWaES&I?z_Uqc?3bQ#`2rtyN;v$y4QD4~ZAXx%ix3>5? zG;-f^K#|cRDPJTcycXD@A`Foq2EZh0BGdzQ zp)~t4#!TLd6I*M2T6#{wVyZs1F5aCgieg4K33YXa1G(QS% z)nUt9-{usgEu^6iQyP_lPGfPhu)Tt@Yj_q#cmSM{} zx031O-vt%=Q&f;X2aOezUTs-!O&yKLZlmksD0MO!{7%9`pH_pXAi0g13$_TqG}tDJ z9dr}WmBw=|3yK{V1?A-gEu)kIhw>o*Y%nL3xvY-uin22vMaS-48{1UtG-pHCM*KO{VF};m*N<2XW zGR|VEA9w0ENF3&2_Ni$Qdd5_R0DBCPI1IT8q|a16NC>T&U_^s4@yqX&mO5DuL)CJb zTJV-ZhHReS9K!rm%=PB!h<5cYv4fUazn}Y2^2FdKE=j2n4Xb((Gc}^a4F!gPEUrJl z!O zQeEq5CE=3VVkrDBw8(I0|1HRJ0m8T@+-TTlu4yl#;tEk3_?00#3d+MIi$H6)(<^^B zbUZ-Pt^+4-14E&Z5YhK4p9u&FgJLCExcmbuU8xC<0W_evoa1~%WnP^bh{$(8ch~Rb zPKOn$LpX?ihLVrA!wbd*jwn-PCCpMBiolqsBaDY2!-&|t$A;6I%W8W0Vjjk)7u9`_opA1<8a(T;$apjSl6i`P)HKOCNeT}ZPqw)Q6(N~7LK z1c3}W?S>B&rbiQa7tO?4LGdHr#iUeM{PPPfqC|+6=U*)$D^mWJDW0lqqc+%mXi$vA z-x>uFyiH+7fR!V_cWM(Gs;gPMOe07a)J8HlmR3Y!Mm<``^5PAG$TV3X(mGQw{*j@0 zzhw9}%MKrRjjEk(lxxkl`0a+j>_F zAlwOw5%WElL+lU~xwLrqzes7dmc-=6cd{ zcu{cR({YUlet8MEOV-sa?3$W$fVYsYwuSCx~(j zj`B%eWgEgQ821Rgao4jXtSV)U_52}kI{}=FlHdqOrZUG|Q+JrOpexT5ISknQY^$#I z6r|7f*qokiG?c9%t8Axpx}tm-1W4ktW|~-u zr_KzU3sB*E9UJvJL0Hbi(v%ev&6HScCh8{OFL z*&Xr*X-2I9*L^3=bz>Xf>I$r%0P-c;JVS3A_Ovp8G~H#^8#esCs*?goSuy1P8dqR7 z+@k4X+cXM(9FhhX%dH$Qqb37ok|(0FBA;~@=viOo!A(V&A`|e$z5}%OD<%`46;auJ zMhUZmaJiH47m&^PXnD>y^5E68Urwi9PaSy&C)!45Uv!d8J1CadDtZjw902l0Nrtk7_(YQHX#M4D}W)K zPHrqME(Wf{llmN_$uyO2*ZnHvz-%W%5y%lZ_v)*4iwbQb$QaK48a~51`pz;tH-#0 zA^tYFaAb%5xBfV&;h10e74YI}SF42sQTW7#ym4(Vb_{qNv&+nCirM*vAf5=5=N{Jo zv$((te(n@QN1Gy`=_zn3_LhB0Zj%L-WZu8c>x=#f+Tlg~ms~TGcE`~nL>Vk8O$W9V z55wZKLyZ{&XTquXvBaP-=obdkGxqt~n`z(Q29I50e%rME=mwAySZS zcX#*L-&)dwo9Dw=%;|#xj#|rZ4doiXH>*8~jk@+q&F>LhL*vkU1o6rzfyE7Dn#tHm z8>OqWO)>O;LwuYXOVb!hk4fR-Ii(5jFn89d+ovy($iV??nh9Y*0pgin+-RfKQ6&7L zGO8DZi8a?9Ydbs2?nj#Y7NM)x;8Kkr_T4bQp-0Q%+vSP}WgXZ}gWYAnv?S!+nDthZ zo$}aE6>z)MKVyg_XCGh=BWrQDHnmqT_s7Qq3plF}%V`!-SO#-gqN2aIxHQL8+M=qh z-+xghmmw}%XcL%}P*Bn|Cp$Y2RwEosjGuFIKuKx5|2=4~Zz1SyYOn^SHs(Gy1e`@I*NPz*idJ>?kM zy<8AzpYjPhk6U!CeF(CMy+j??S8`C}OfZYSH@i4CJ27mc*YYDINVjBaH(fh4(2XX> z1RqDjf`kcPrm{j-8oIE_jF1oqsx9UXF;inh<6OvIidPmc5ow9}#*|DF7DBiP5;apa zBOlS5X$ALA_&YFUgT_kQ@qp@7yUIWP!3>`z4hAbR;%qz+BUxMMkM6t<^PiOQbhk`H zz!`RFMIDlTXy9eyJDT+_#GqiMINDaeL>F9t_)5`u7MSPB=kYwu4UVfAHcho(GJ zv~FBn?lr*>eqgc?UcyXo1NIQ&zZV=ZBh2D&F-ho2cR+`K_XZma6*zv*gBdEdY-;wY z?L$nmhYMtxI;*~$GQH_obeMzAg8y6XMLC%YZTAEIo?nH@l$Ci+ioU+r4 zz0^tI@y?%>R8zA(IKJ+cnmG|jRF|-qe8#w)-WoC#XjrNKSmRW20Z?7bBfGoAdVd&P z66*Ek{#He+ztwZH1sjwk_tV>32Yr}_F8qus&Bf7W@vszOcR}|SKv6DU*Yuh|zu`8^ zmId{PzEy>sS`$fF(Jcmz8X9W2fyWWZo^K_^apS!D8=)V@;$BT?niR?pp&OjA2(0V} z)?XUv)2@Ah2m;;{;>QtUfTKymxIPeZ{?&Ehswv;n=8H$-Gt6>LYf4>~;DE}KfU8B9 zYLP1T;nZM-X^Cj8iojZs=^u=D)p^GDnB>t&4~0TxZZoA_3(Bc0$xe>qaHg7MJU2Wsq-OMdGBEy> zM=6vt`wyy1_#ekvZ_}J1o;_hwO~!-|?>P0IL-jbQv#$iCUgWG;6Ftv4WX{!k9YL>QP z8g2Tp(^PF90Zme%1sQTtx&8>S8?}MuW=vgu4O9H(<*Y={0-db1#$R7y4B4NCQBJZU za$hk|d5vD7EgChdgQ@hXDd#q%ebE4^LJ<6o*6TB2(BCh+gw>dCNCTYnTVDtG^fiOk z_kQm$S-dmqCP*dtuZ#!Ji}9NZFddN8Dz)Q^AY`>E-+XHg<){rFI^uuxu73hPDcDHH 
zo$Z$u+THz7J5aO%>?j*|+a>gIX=ivju zjXz+pQ6OstpFkEUt7!xk61*hxR5aNjoAX z`SQRO55rtCE+Ue(O#7JW>)gYw<5PdS`I{@|Dyk@se!w)jZnt}M7mq1vho>O z=07zM$>|B={IH>vtt&;0FV99ljD&t*^ypn_o8fty#qi>6e8Y}*vG_CGL%Xeh#0d9> z^^LrC@rXZn!R1VH&wPist4XV$rn~Nx{t8GWE}RL%u6h>$p@qoicq{7Gt0c~dW=%BP zJNtgd?WV@R9iM4Tj;+0;N2cww5l2tK)nSc%>eK@Zm*<`;*{lf0%fY$Smf87z@z2;-IVF|Ej@ccx9NGtL zQXYCplblw;N&BAa>Rrrhg8l z%(4wAmQ#E5Tu|($0pbYcnC@f3IOIXEK*KI;c**RK0xT0^8p7dcz&{|>hY(fpNpy@c zI>w^cdf^_~$Q2+6EpsS6lL{V75#KEu0Z-m>4sO4=*7cKY9a*sRVx+fr`mdc3C7ncoH&a{^X!;4i(mE+6#)Z||1+)YP63pSLJ z!*MXzAZ9JAdXqy}&FZNF|0tboFnasw2GXz&6gXx9A%`cIU4&ZVk;DI$F9B)ZjjMs znl%qMaKkqgBE8Sq@U4AsB4wAwrs0qCV2E_x(iq=+EfWvcq>pWQi^V5jA$taU5`a%~ zyFg``ejRwVLEb4cPP;xm)r0dj|=#VU4;WBo?p0e)O7@~giI*#o+ zj=zAK*7o8{m$(+iB>9bt9Ql?^(HpOMq`!UtIFAu_^&?~Gs~Mi1lG-aV&O@mYG2CXX z{5IuM8r1m6`LI8&}P3}G|2eu({cQKp;kO-6%oEJwk2f{ zT4zCtYKJeym&5wr@loiD#RX)aO@9Ugr^Hb-58Dg^|Dd}foiIq0g>k`EnA{~YUI??N z3*FMq4K4X~68*(>`LNG0BRXr<{%P^uBiGNT2CPf$gHwt#(kEnGtT7iM&y^+3N@*|V zLkfCXdxbm9sbRa6AlnFg8~@zO{0}zi%hdD%@PK|4jX0pQ>R1>kli%Mm|3=ysj10=; zFpY$o!6TiCzg@(W=z{!Iq&bYvV_4%Mh$llGYp3~yh7sUGaV$9o$_wZhqqHsY*Izhl zZ^!N<{qlF$&w!zrgb-c47l0BS^RmKfd^aG`*?C!Zb!m@JLp^umoys!;!Bsj=mae`W za1LIY2eouiNRetVB9ugk!Cf2y=tyBJj^3Ygdu#A+=QWqn%XVGoxn|%saqxhBjh5wyq_oszAje#O;W+ge* zL`v0~ickME$P%LSlVZ%&CLBAKG!z(npBf*>9Bd_CBZ*W&x^m#a|9y(xAbgfB93`*TM*u~NGkr+S(2)Q{wCpchlJqyGEI;gigY;@cIOhv zkXMyN{KDb=ECF0x!l^-=bH;)X6vE&i07QbFlWVbepxFcNUDPa`8uyxT{JoO&V~3z2;3{2CN*&YN8-(?1|_NIBNqe2V!hs;axYys19_{Oe8lS?E|{g z@{t`6U5uq=V<7=Ls^vZAVti3^^Y^(p)>;RZZV_e!ReWJubH!OslV~=OUT0n6I8BIQVx|6>7iMy{gyGI33yB` z!eDf{v)R|4rFZ#G(Tjx5@=ubhY|FM0zZAo3|COPp4)G!fpGh zmMFrfHbDd8fNkK%Uy;n13nX9DZf{j1gB4V+QjdN24K^Fq5a8o;>;F`Kz8OgR0Ih^s z{Md2La9Wjn*^Nr?Y2gG9D%u!WU$^@5N-chT=oea4$HyEDZ>U~-afB)-VOuKcOP3ZX@^QV zTNE<(5?r79ev4kAggNPHHZf+_FikV!j?%5uodJ}NcTnm?yAP7qB+0DlvS2kexdfDY z$SPuR-L^#~D87d>OT>VFZ5v-u31My*MZakPJ8;#zZ zOe)&jGmOd&R<>)W&AmK-a2eEzh8$2LxgE3~CvOt!a$pqiv=PQi^GY>+U2mrKqN4bc zzDE%JyS#YF@hHq+7`Yz0?PbNx)DzV7-2&k`ymE? 
[GIT binary patch data (base85-encoded literal payload for the bundled wheel file) omitted — not human-readable]
z+5=b{0}k|MjB;&S`?4a%3S(Wd5$Sz*1MZx1t0g;CD9Y0l{&}VNYsxb##&a*Z}d;!f5U?>)plKNEsXr)_g|U z!tv>WT$(D=_7t~3vzoa`l$_GFJ{ltpG~dnGQeUyl8Q_K(dzp^QB-0BT8q4OT%-&eF zLmKOzne`4m&;FiYMEB(rpzObIJd=SOO#xv2sK=q40xN%bPq^ZBT6TR}`&VO4rlw&Q z>Fn6<5!ZZZpnag~IDV9)AkLxrys|X`J&h*wk}IGbNor++G><0Pr>REz!!#?}%&~NfP?qMxeOfsEQ@2XwqGTP&%t*n4QB8A%ygPpZ1gW z9g#|l8OC8a!W;0Fw6c7oe5GO5os8}Kd6zQVSLsC!HE+6ZqUx~&DVo}Wejm_KuBX>$ z@Z(cLBUbBsB8c8ezT_p~fW`uiU5(5fRZ=9dyE@O5k(ILT zJ;j2m*%(V?|BHK36zAwv%&9jvi-1W76Kx?6LlI7p{_6e?K`m3k9MHs#;^M829WXg@ z@yTAJ`h3{x9nWzgrsRecH&rzBz974gFyczch%_07a`)-D4BhGVP;wKRUw2{|k9Ss=owHdYJ1a%v%$g?ufF$;>HboXs3bLmmEtGTX}EwYbPy={UsF> zVa^%BALU1*O}!`!n}Z1OSmqZ56)Hu6vNIHc*aEP1PA%pU1^7ab+T&M0AoL-G=2kk7 z@DLGC&Or|n)O|Sbcc7LlE~JQXf^@AntSe=|yaVv|erfH(W|xxix?AUltq#7F*nJB*c|l3QfxT=+C$=>AdL(r>>t z4__NbJpqzV%X_7PJ*`x!YK@u=Zrv{SsOeZk8cgnZPw1}HHF0Dihg%0580jtpv9pdH z@m`P}NCtO(pf1BE&WVN9lHo3kjn>~9J?Ls6SGaL5!Vf&=bAhJ-qe0l!H$}c@c}HY- zD|#^b6lr=$Gs?m80A_=|tV^uJZa~x}!=N#d4Rk2q0aQRb-j74*Zr9oankc%=aDe;* zJHLI{qC+kprVYYzg7wv`aV+temOSXOE1teEDA#sm*aE%-hqBjxCBBmSl+3Kh0^xC3?+Z>{eb1n^ z6`-&7Kryze;WvGqw84eZ>VZYd=v~8xF^+YcCg32czf#`J6a5GLS3s8>rTTHnGI8&{DDWFs))FXNi ztIIbXW56~UO$UQEksRx}B+Ispv4hE~aHvG~RgGFNvHWyR4x@AvoSeD1+@s$IXzMLj zzb)FpMqLb!8YH!!6wC*38aT`XmS}VKO71UaS}qSuthTQ%IXb*)&#@m!K+MJJsu`Tl zdRa5PpP#Tpb16R1Vic3X0gBTfBW1_c3}&Gb8taE-J%^umCs@W^ZI;P9^dy;05?ORv z()P7{jzF*%wnmdAtamkSAV3LC8%9+8Xvywx1ghNR8EUUNosG7N16x#h6{}+{nJg_} zBaKJ;P}?t}dXQ5{QaG15VM@$%$XU`KU-}-Wu1MzKM}VZn0J9wxqQ%HK`yK~beAjU| z&Yg+YEmUK2wH#_%nZ$wltjVcfH;ozERIwD7tEuTIxw5jX%tQ=y81E6ZougHsA@TY3 z&Rfn3{=OL@?#B)VYCc|jIG8t z)tf|47b;|1DMr#Lsq6K5GQD|LJOj>AQ6p=~(`9L3(~}^xpoa-0UQucY>m|1EJ3hSJ zUvQr^xKSp?Fr^=mG3&P$wzjlm{S2Z;MsTB9AIA#CG9%U&(Io@qZM5<^EY3oA= zrOg6MC+eD+>n5E@lYbkQn*Ap0NuLIXkhQb>7zg9i2$VYTX2@Jz$M}(gc?Fz!z1CYb z47UIZ_9RV@OW9NWd zZ9spV!$`oqNR=z7iafu2oJhK&M^?K1f=NPET=6GNE|FjHNDEB`lyYe!``WRb#xAJ!D72qi55Q1w(LiZ}Kmw7P+5SHsM&S)MAUsJg!o=%+p zLI++LrMh`NXc6$1Itk9vPEDiZt7-+>CqBZBDSX)`vzDY{K7`SlgFgJ6~c$X z3v-KR;6lJVpK`Y8e886cJjb`#hW!Pk$^mJ_%C>-CN)jX>r_@&ajh27P4)xB9On*ia z`@LrTVD&B2RF^Bdt|GYV%-k2srn*lJ)%KM$cFs?W@6zYu)eWmJozCMUVJM?o=Yl~c98qt-8+Hf0pA_tS12-`hT zs4RhSAN1yB0jwE^os;(y(~ro{o}wB!mt&pUUAe;cZ$@8+nf!qB*5GlM#TqxGm{$KGXw5(y-le|=u zh*Pg@<*tORFM~`o-Aa*!Y@hQWV_k>efPQb(IG>(t{t#&b6*oWh{CwiH(C)@GAC8BG zELNSgIbi4f@ez-ifY*W9gstm36h;0-nJVs+GuZ4-m(F1~dGR;XAg8M}#t9q5nPRz& za%-0imS$0d-~AJ&x+)x;nI8Jnf88{momihTZBc7cK^~lY2Wd4YOx&q)Z?QTQvm7Yc zlM(*BB*x^NTeV9QxN?KYjFOjjv7y34Z_gN)?rMCuB5UW^!()y%wgUWGTFsEE4bn=i zeTJp8aw&;VST%o6sx#6!-n4t`Vupctm40#^m1z=on^nTOEfN0bJ^?nyeQ6%T8TpqsrR=mLt!_-^}S@(luDX&Ytv) zzfQ!F&>Mg#dXa`qS1sko=p5}f)>Bz<8BrTX7fJ8pHK=jS4Y4BO=|p$vY~!0Dv&?a4 z(aX(|CaFcw^X6m!fN9J3F}v2wB6=R+*1hGfQdXo3{^(aOJ|INcuzNgx8>p+}!-(;Z z%@Tz#g2Kq3Q)JQaG`e=7K;mkH7t!DQeXVp6Qlr84u;E`%K~k^PDh>AGlYXTb^f84r z4UFXHGN*_w;MYu9+W5p^F>27QF91g8$ZGRlqbT9E*X*`w$DlS3)L0D~fOI7S$wz4@Vo2 z{7RsZc}9gcu3x&5a79vAmp4oufg)uZ-^oaps1aWZUnPqeB&;c2(yO$x@evCY+aArY zTCr%yx1&yxe1D~+>YEH(cp{(|wXhovUzEb=+@z^Q(E+6tCtpNX7S2~!u7*UHw(RJ< zQkZCvX!+@XQ8BJRTh??ksgH!Vsh?PbggI%_r?AJX3G+&Qs;}VxuE%zyp+YNtKI0iL zax)qO$c#@f>;e&A4d1XMw__(vb&o%jG%-4oDxS&$H~EcTSu23bIrAZuuEMOqm?dK3 zF5Yq^fKB0Wq^k3bBfbfnbyYSRBT&vUsk6K6`O-$HgyJ2cppjgQze4y+c|5rG(-1us z7}skorUNKD?5g#%Dpd?Mw1GR?knZCeF}fy8T*(*}L_#i^Dg4kkU;vgd z@nP4y#(Z=HnOhr}%H0wHfR4lx-vZD;uX{1FY;iuEK{%dE^2(vTBWY-U)71EPUbCu*gu zgVG6rYlVizNK?ahg^PJDVo-esOQ8bj{kwfQk-OTCf`QGsQ~=cip8BTU0#d4s@e23c z)q5<1^>Kkp_9bj|uXVxp`s?)Q(ARc*f?*pJAf5*BkGg~b#@CEMF!}%-K$8{X23wB% za@&)g&v$rVG7`jNI)YLz^7}Hn=CVhCRb+XwXr1klmovwDjF0Q=MdSqQDsz}6lTP*e z6U`*kh7J;5;TFKn69fG0X@M2pHk*QyX79btfk+@M}VUv}A0aKf8V!%ZU 
z?66NW%F7agRaO#MiLD36QMe4X1TFz#gGpUT9a-|8^eJeOSd&KViN{Y*iPj@ty{v5a zE^%#V_T$_=x^}5Zj4Z0vfb}%W8a%eIYD9(}M6e^Qo3XV|-S99JBZ=xy(4GF;TAf=N z4T@G9ory@4@`}QR9-T)-FDAWf0-21BP;)mm1aGbgzfd<_5b?u}X)W+82|t!`lY!9I zPGHNqnj#LRMKOnJlp^qJ?vL)YL5-{^=BTRcm~NNtU1rRXZ{GRLULD4=L0W&MLg@rU zSrr3EpkjNHm~c%D;j^RMAEb7*cRpqDu(bSe2|;D{ye=JCpt&?0{~G_mp<~CIw_?R6 z@1n(!>lTBTdsg!f9MdYML$-w@-R^Mne&0WeLzlAge!YEqy^N4na_IB&k9s{Y8N99h z5rwa81hdQZ;!L8>CwJzcZz7TbnAtsJzG9*$bJJ~} zV>*YgUokRxt~^cxF!sQ~g{U;Gg(HB z1x+;ewmls{saXzR8izC>yA!#6RfUa{x#<6vG8lT5P<+ z9642Vz$4A3eh)(9ly;1#uZd`Ou~RYLiRon9Gk=NVX1+p0=8@>O{8n=b6TBtbXOI)1 z*HkBz>e*kNPCW5t78t#5FE-|dWk-1bSUQ6RFpXWFfR$7zE_V*Aw@MyAQ`LHythEm{ zU7sak=#f-|Rp&O>#kkMjGN z4bg90ZSxv5!w0{CP*blPxhB*$l(R}>_*5W2T75oP)+iuOgNcnLDD+@ zf)OWW20Cqvc5z|$>IdF{8?~|i;1}MRu;8gen;xmSat4-qhQ+aa{^EsVV=znM^W}zV z5GDVhyttx8u9KNW$f_( z=M2`i>m!I?)w*@ZxwQvlk%vvMI|06EqlZZNQ3356n<4nQO9v8b!3Xv2<0BTc-&>|8 zuM`qvpvzB+umi~>S#l~G|%3a;ci)&rzPGltcWkOmUT%o z9=0XkG&JG|#-HKdm3K+hfieCxA6mvTQL=wpUfI%!)0|{F=%+qYN2FHmF822Pzj=-jePEv^hFdE-#?F05*NT*DwCTO~ zxpS>=!PP~r5kgcO%W^8zpxZtEFs1$op7jJLZI!eKy|>yB5eX%kRkJ=E9|wt4=kb=m zCc{)-Y~?PU&77*0GTMc-x!p*AM22aJoZ2Ip>He3%HsZ8JR4B{Ih6?rcq34+hfH1%>s! z5Opx3KmlY1$`X{0%}}SQ=EsAIb@HHnfRm&8na#7)a~5}Ul<23Z8Pj8ZzS`8BwOA#q zl3@!Lpgc?UK-p2%6fJhKBnBX!FB=H$w9%6hXK!RL^Hi7YnEni}>!3=h79_ zyKUK>5KZ3d2J*l;XFr&?)Y2>tgs8b#&6CPnbBAjSzw{`43O~y0lQEq+LMiJIEnB<; z%FS9PIPzez7WP8zWMGPD#F8A^jld`27^n}0L>^A37yTv-MGI7BeBHmtEBSu{qQDWy zE0nJouyD#++X?`Ij^`L`;@{DGG#t5p>#Qi^DxeKiScr+TJ8k9URXi&Bp~gZTT-xpR z@bFda>Ia=Xg|-|@OS6};3zRuaI26%1%U~BwfIR;`v{FFdwe-ytrD9^CJwi8liZ~-8 zaz$02C~!VRb%X&DO?9iCsIfSCwh<~I8CVz)zE^;Pd1!E9LS-4-a0IY{13*OL zyx#c^p9HWjg-XkkK1=MJ}0C}Y==p9ed8 z=%Nn5GAm{;z`eqXitH0Tg5(I{ohxUP$+EI*IK^7^#WOEb?7h^?DV~MNM9=y4S3X}? zA#qzmO*XfNX7LTZ=iKv0rv5OPrF$D$cRr=yfz5Fup;-0P!~oc}F;j#SjW|ue5^GE+Vo$dzjsqgA5#_VE4eE%;&9#c$di$&h?j`Gr4h>a z7-%+6EDPI>z_Hs+t;A|ZPqf!^zzUbmMYWmL@tf%FwJ97tC{8^WrM#5N6IP|oAQO;; z5A75P0yf{#N`p14s`|7Pt?n?(Sf)LKE`tREo)qTIT7C}4$g z>I-g#eAWttPDq+v`uo@+&0)N2NRpOwlzBHh zJ_6m07*73X=g*hPkpvu=RStTh_|fSH**u*J+gtT%yR66e0;fb#NR(X^=6fQ}RHXNQ z-F~Eq45*mWPqoEribg-ciZWz!A#%xSdU$|b5=T$R{<1qTc>zniSa<)+?%0Wehedqv zko!;<;M|f7y!lNQKo967uKl~C6}ttwT@w=#E#7R+ek)yoUMdFq)lLU45K+S~b2G|c zX*!GPTk1NdUS3ASQU_G zy(7}3PR$_Tx2xw8QVF5ztci5et~+$lel;p7DqheE1RRc%9RqmFwItgTePXriFe0_Kxv;w8t+lm?vQzmZcJEd}iMvZg+`-Qte zDH&g|){0IBhQA1D*ZyOUqS_j8mmxFUK*g|7lFkQ!+K}E;RwJF#oP~^`RX!ZiD>PjL zo=ZZN1W%lRQ?et;Eh?-g@C?+4zNAx+IX?ZaH4g!JP1UHrQu(sw17xUz(_Spr&Gm%V zh1tfex(cOmR$H7X#auoa>qy{9Z3m5;5yA43DnyTdDa=6kj=!Kj3r(n5{+HeTtr)%d z<-(46cZUW%yp6KOtF=~ok5+y@G`Kn}RvRbnmcUetbXxZ#JjT+r9bxe2jRHwqB7p zE29L+uhd|gW#TZ{le3o!^FSPCe>om^HUsPtJ>fH`Ur{`yWEe!-D=aAsfx?g|$pv2>JN;gnE)$tFXbT)eQrC|zh zc0A%luQ__0EpmGj%>@tE_>%6%O3J>k^fU2wm=eZFbo1K4dSF@_zr8>+YPIw06x2X0 zKSJ3YVx@pHGeiWFJqHMV0^T(5AS#y4ozQ^ zitT9U7JffVJc&*%(X0GOhI;O|@munKr#}fyHaboIeZ&OXR(5Xs_^IxN>>t99!yD%P z@io}6557}74j6-9m*YHc-zRMub0Z8jI5PLS{vaFhqh3mJ>TE=bk* zv?5LoObO9Q*+3J8?nJh_X_A?ieMXg^v65nY<@0*v5DUT!O8a6(-Zs+Xt!X3(#>KXn zs=uQ|t;$P26Hp3@KI-xXCTasPsPaZKRquh7C)?z-`?Z?Z#8?|uw%gmgv6LYqi49sDX_czMGno#~9} zoPynm=W{5%7h`Tf;>M7>tmT|N@V1gDYnWcgJCPASdWBB16rEzSn4%84D$lNxRhIPH z)_Rd+r_`H(E4A{-k&drp-2Ta}hHA5)@Nyz|C$$}gTx3!|v|3EBs&=z?Eb$I~Eia~} zo-;zQgS3875&!Ho_PM}Y#8t=KlUqvGIM_F3eFm&l* zGvQQ{_shJZUy1Qh)JWsSe=pr~^H2+chOubxtf;nw>gQKwvo^hHsIsh9Fu^9Wz=Q|F z^2OEf?H$l9vEi3h^Fy;(n-u$ytThr)oQG|e-wWSeta6SW_1~;E$&2#^Qs{+NAXttV za>s^CiZbS&&iJj)#;14--jZESK&AeydI+NqHEbN;bT5z^krKlHfpc8w83oMIe_&M! 
zG4DietgXjYSSkf?k*8a~KXzv<_nuj5rxpa*$UjCog z)#&FS`mfj4*~-aW*U8w%(bhp%-_FkJKSbjH4hGbdtmsiJFfdcq%rLRd9qa-BN8spc24?sA5$0_F1dkuu=fC66 z|N9TD|DUgZ!b+4vuS`E3Oy@OaWfmC4yRSG7DF_mHV4zFP+{7k~Inp2;_8)F@mZc=P zKzLml!6@C zFw8z@ctIf|1*9Tw;xeU!ciF4C+lx-*{?@S_bhmm9jYPO+@=e~_{8>gDnbG87z3!*; z^87awN~TUxA|+N9n07e9A?>N-65_>2caI<$n($JK(T)-@s4be69a6Sh^$dkC=ao z6$_XE0HptCu>9wb{L6H~O5fesLHD0C|Lj5h!(U=S_~9?vkKhp+hsVQ6f@7G~Sy}o5 zX|v>RKE;CkyhkkZVRvF<;#dbh+qmLh>JB{NPu4(4qX)NLKEp)9bu>wS{%MOsEw+l~ z4X*?h22)AdR7&bCX=-C`lPHueyHH#|>@mKa+H5q1)Y~*+5=A13Q3pM&Da*R6q=;9e z`iXJ&DH@8&zr?1lS|s&K!tpAY8IjR_mMy27 zh-xnLg3+k^%s;LZNm+@BGkEqI5=E#g>gW9SuRa7pt}Mm)vkTlk;7!NJ)OgV|#U0*E z`caT#uu$FsNG8BI%svhiBSoaE1XEiuJSdv+=KursiZ>fGvX8CEGRH9>X&v z#BGPF2P|IJCi1PMHh^+CrV&GA%A6;W`ip3iB4UkA+3~|ZcUKm=Lseljc)S)!Vmuc1 zziKWqKynQgXa~HI^N&F*^E{aBwr*uLk=5r5M9+qnt1!NoLu>qQkNw|4^}dF7yx7%5 z@n4A_x|ol$`_#}S(E!)i-sWnNVLo9|g*$n6Luxu`J9~zk9#gB9U$t-iFWt3fH?ep` zOYE&(Mt@Oq@Xy_r~RHGHOk*LA(@Gy9H3~br8_>5*ElQB)wx%q0;$j0Ru@q;iA(`s|(U_?9L(Z(Nmr)A7zKWSyHWm%R5E& zUsx{_Xb^k_=9NyqeV1GUYG@&hBVX>57KtqSfc5%7cX5{A6VwDLQC400?uRIYu&K*4 z?23|`Z$7n$1_boJc;oUko!U_^z9_p35zc`s@E~B_gAYt-R}De@$GucbEp~I*I0DVy zG>HCS{CT&7wv%2J>;1}m!IdW=Q-9}9j_)|QTDz{+?`-^n#mv_fJ6F$!KU_EMGSJ@}s1O3Dt79dllxx2^E@$`vw0envP)KKf{=uIUv90pG1; z{0^~>Zi2+0DXkesoQGr2L%w}VMm^6wKW&h1WVPMm+st6+GN_v}y6LwEDM8G{nb%z* z`iA?;3%~EO*K`cY_jCfS7ouMCo95ERF1H$yYL@#wb#o?;;kWzz|(*3a8MGwUZzK6-^gq<_+BTKaD4K&jtioWOs z?l+m17QwC|uZ-+vuyYz%s9G5)(!i^0<>7F5{!sk$aW<@2!sZh2B#mnokI)rQJj12S zHP@0IEfSfF(sVM5D!_h%jR$BCw=W{MlD{*WK*_Cp`D6I0QQJD$cKH^oXI9_cAGt06 zsY(T3vX9!qJr9@G&`WwhJk@*2!ll;P*y2NH69RsVU*4;?e1UuZ_N1PF)K=ox$*cLN zLEF=gZiXF);r5wFx5wX*1Y=g>mgK4)W7)(9;wh{6{py1)e_#bm!K$`Tai4T4{)|H# zk}`ljV|5;#Gu0Pip65vJ*b5iV{6gd?Ozbe4<}oTbpsE6nTe_C=S(Qvc^QU#=23(FU zhO=au-|#Rdw}2eS8JWZa7>`*TyK9DVnzvNerJ_|b$L~#$x;fzrG~{8J>md5vd8hVW ziAw%YRnz=%#CrGk(fv;O4fj|7v{%bnS~ssk%5{h#{JRr$u(YI|o!+8Eyi}oDZ2zdw z|DE0cLA5NOwDeT|Ot=O=sFwfg68LXcNB<4|?mWW}7l4ldI@4LQ1JVgxJR6l>%yJs9 z2QV)b2s5M8yYqw)@FR)a`gAdvo9@yD6NP?)D{xdoT(IKd)+&1nyqfHa9HT zJ_%okAIypE&pH_Y|6~{c?I!<~TId_;n>y(K;a_DFP}VqX z)-0#^^c=wQJIi#GNeOZN8y3i6`Es7INe8Q8LW~$NED~zaza%1Vt*M$s2K<^a&-zBG zI*w&u(bI1Y03KjM`r9kzQuVK>7O?OaRIpFysBqlMT#M+5`BJ_gZkx-d7I{Ek0)3L_ z@Xg~24%jLYjcl^ypbS^Uca>Eu1kggR_1gkYnU?3HRtnjoit?7L<}CAyFLI_*`cIt{ zQ4qbI`?vlwpe^dVHEJwgd5ISV;bKV&dA*eP(#N`Q6(Cw_sEHFHpUR5r83N8@>qNGgfuDO?M-=BONTG9OHi z7Y;Ub5VHxqgFEnS!~d~(^L%M}Ztxo#GmsaB{lKN#)l3253BAD(pv(4UC`6rOw4s`n zT(>a8MeBV*?d2nVlH$s_HO;3I9O!mF-C>~&Epdgsa2D5HODrHB9dcSuR3*y_>&|vw zc_Cd)SHj3#4BrSvxst0=`G&&Nv2vgjyBgQxAV*P$i8lQzxHDw$=*DxH=zmv#wB-4q z>G<$S=|^)UWJ6w<};Iw2HKsuHghPs|8^_lEct81JgdL&Hqh)h1+-R zFu5yXfcoK|7_=fFV6%BFf#l^QqVJGq+sPa^e6xBAn??*Ju$lJ9uPT4+TJtZp*CAfr z6M_cBPhnvRY8aaBBO#(inMsSmndPU~!lo2>Aa!IYAX-|*ziQT4KSOX#F7`Jy#lR?i z-U*MgDBuDt0uTdvDI}F%@cb*{yJ_P3f-t>Js1UbGxnt?HPqv_+)6UcW58JnZoysZ) z)ghf!7)O;la2;H^VX8E^?9VIZnYYMwEwZglE|%`&LejE=c=L5Iy`?r41(@hGA~J%1 z;}Qtp*6rTXGk19JPLopKg84(^3e?~I0d&RuV=!FA6E4QKSO{ZguERHW@Lf^GCr|7Q zc&BLR3JAeao$+4eFXpaB_ZHuURs(zIN6})b(#fatTng4F3)T$X!9&udziAMPDP{e1 zkKLo9p@cM>zoDqgjTGjSExkvX((HIr@J_|SlNisaYWgA7oZ@5!WCZm_N2Fzb#O*t3 zFj$t%a>-bbioCfQNo7|>IpWXpPvi8DO@(>^F24*_l^EbMPT=fuZ7j(X^xOopNhbJ6 zOA-a{aH`wGvqHsd?!95+BOfpM^Hw0sTF9t4PL?W=nU_wqk9T1Ic;_=4Ym-7+%U>d2 zOQWQyH^r1j!zArE3OO7oHmcr=AZ;C9MLgrXwRq#?V4rZP(_)T~#clN)ZX&a7;z#a) z?kS1~aP~hB%@h^bPU2CFdB_{8`X!UKg*B(jYjaDf&duTJO)uZ(N8Pv!w87cizYdN5 z?Sb}G5mIK<&b-Yy?5sq0!%)V-;?9=5Qw(HQ>vlMz?RQA49A41?e3w^SJQ6Y2#L~J0 ztr{sy=T+!x%(ExzQ5fGroIiux5^*CooNm}FVSxu^)|wu*Ik%en>@q`e6-e@CwMy<~ z)M|0YbSfI9B(K-&Z`vP*6d@~Fk(YpXolBi=ydTysvFy~i0@$$p`w7MZ<}OBIuDuvE 
z{d^cb#FE}wWOK@^_(uE-50C6|I_zLFljN1LXuA3UlV4Ya&A<2vo4_$zB0)iVFeSxJ zowx8&CvZ+AnTE6IZ1=R{K<01=s{#bE)$6#tvRA*0nPkJ;thl}0*kScb@rfv)=)7`7^J>?^zeRkJ8j|U@a@Bc7gU49(HX!J#6AaWZkmh&e>?( z>S3ipl|~1f{56n&mml{z0I%K4??SKWh_z^S-P<(Hh`>I^Wj!~k+Hw2?1$TGc!CbN$ zceItMJOXcfg4X>HqfgMV^{%b^pTzg<;qTPGj~q}~uLOUJ^bA#nnZ=TWB=H`t{fo$5 zQ4R0unhWfw-a*LqzZ#F+DIy=xi@#(%m-eAi&ut1PPLi-1x;xd1Okq7YjV~8(60xO+ zY>vL`BArc#zc~$~92yy3R=-&PbH1|vV~;@kQ;EO)sl@+${^OrwUd*RuI_PRT4f8&1*Tk^=7bYgc%F%@NsR2n&Ai=T&Na%d zuxKBIJSqzi^?X8QQ>%gEMv^(kg!-K0SeF(`;|1@zQ!ds#DxNrxZqz4XEbnb&&zU9; zi?WqRN8Y$7LL|M(sf8xWgCF{C%^J%W+}6d$9C|ci=`w1Pp&pXPmh04B@Q|7gj4v7i z<)?q$C>)i92PNt1q})eM&4Cw!7_yw}z*kmrf47b1y44tUE8L+Cx{a-L2gR?5-Aq29 zsh@TwAvgj<#~pLRVbsvK@BgJ?A;u51smlGi)G2c}$PALlFRO%&W(GgJ8yp`BvGKO< z5kRO?pLjmT1BM*T)UpoiWT&2i^9`W0V#vTF< zPH9QO5^?NWzg#*Vs?}0X$*d!}qqwR?MOhXX4Dyno( zWBQ}kSLaY1q*XpNF$j+&2r07MN82})dHD_sRw9!{bd_YG1O0;U^MTj`DgI6)0jd}t zqtGfqRwW}f)Td_oayn!JHXR0YTC|xsaAZuYOCNx-MoUcEF)NY7-i%=Y}gIb%T3=XABRsimHX! zLbFOJ zWlWuAq8)+23KmsJ8Oy}Nmqm910ou!N95{|SfMno`6Cbb7qk6BZQnUZlo5{ef+mqS) z88H~E0gmGs38qI^p-L)vp-ma9D56&|kKKdJ^DhH8IF2z8bla4gLH#Dy-)&9ji{{uw zQv{OGjJMRE*Yx>a*H(?dPX4wOJA9>za~MFcKcH#VF8X3(gJ?U*EHd$uo5%Mf{LE!b z3~pOXBC*wUHlLb-4I5s={~|EZt9HADU9%5t?0$VX`{u~LKWA8{(irKvsM+FooPnzJ zI`tR$>;3BM?F|oi_a~(BD1M~RVrH=HoTIRj*>=-v4ID5IF^e*Yf!9mh;TC^=>3BeS za0ES;8Ps&K2sS_0G%x-#@)M2X`Hl8+co)b{1QYQwp|JMC8D|97nO1TFvJZg*4n+ub zEtV(!Re9RShd&^C#~T^$)=tvoSUxXLMQIbNk!EU;wzjtYgV3#jHH)gZRgJB$uLD-E z+tb4b!8f%i%#fjvoO11uL%Fa!P*UZGEQ5OH$8H60@gvNSIVqnFxU_-1kQGHF09Avt zN($$tjs!IsQ%lOWNZp7&ddFUreA+X;<;8Sk<^}VF9nyp2Ds+hgaHm^aH>`=YDV5gJ z%*T(5U`aMA)k3K6e2rSRXgy4F0Q0bhVG|@tsJ%(JD3U^lQ47@3)A|G%99rr+H$rK5IwH z^=SUen*@Np>$d56$!5b%Yz@{ZOLE*z+jN=qYquf|ds=WIYj*(t`iQI=`HE7e;mlbs zn^8gW839R8($MGCR@X2BeL11swlO@=k$t|)^8jGUX%E7OI2SJ z`K*p+zQN@XoGH1}9U{JSaK*Bra{)t*Qfdy6ftIi+DX-^BXM?{c5U4{RBJS)Ct9kJH zL$zbpp1}$>z9h?bcq8Mal0e`Md0+v?7)P4#E@$*CGi006{d7dZow*X3hd#d??0D0U ztzj0Y95mD%2in9t@&q-d(2#@KW^w6!1S_?$fPhc@WYJYtn7fDbEyttuO>!+?Zt0$4 zRNKt@?TwyDi116=xZFHVYS9EL@hzQnWQl4`)sfPTFZ%kZxZlC%d+NJN1eZ>0H{_NJ>{$ z{(kB*@OcAnR^#xsG*@bTOGuZDTMJqhY%k)#(Ro)B2SLMHa{=^i$@gN5k}IB9D|9RS z%(^nBOgQMz3Cg6_XFiX2J&sQu0uy8;ZVr?(1BuETXJff*H-K6(#%d`c(nAhLT2`Qf zv*xDRgrPiNPn@jtQ&3UTx9J_HFfw93!zs((Sod4oh_KuauiJZ^M;Oa5j~1-`l@hE5 zuIQ~(R4d|y%3+Hi1dVYa3!0E(@_;)Df>`N&2`sc1ZhOU@o)w-`GPu{#oQVFU3J#$+-*T|6e2 zJ2!0Un@a%i6%^KPwM68a8%EtVln(dIL|s_1jITTwdaKuO#rKPA)!c!4H&Jn<;9Rw z1BCZD)ln$Q{ygKMf5y>CmC(|(8YVM4Vn4h~9tbNjsQSPLppMZZZ3)EjS%7TZ#f^Hr zJV7oH8SV$rE~>IkcFXJj z5EqCIXTm)K%171?{1%Q+$Rq+X^I4B8pxl>i76kBXOtl=F>*`}%#UucdgmDr0E^~@+Weu|UmpVI_pCASEhfo=E;?lGh$ z#6?dgmu+!)#3vtdJ-+_^3+(9UC4-cU2JovQf9pup79uZI5+ zqTJF5>{J8;@JVz?Kba(=OM{#-t{O?G6jOyyze|d?%#KA}z+jD$CW18WhwW1~fU{m|XbF!?`Fpz>1CQs1wd?zo4~$Up+3zk|I*cRw}=M zuY;P|ox&_=>Q5pJXk08v+N!#{?8iiU|9Co6z7T6tIm3Dn#GFdbI06n0p9~BbP!Rw1 zj;bs}@j9(Q6+$kd!0SnJnKh#3^}&E7~X zi|ybK_6+c_D#)*|sJ0p#0NE(%%zSn_HphqmH&;@2z^b#?>118$jlJ-Ty3r(lA+g$4 z>F-63H~~;ROM&Q)j4<-ePun&5qB$!2BJU1w{2brj&dx4h_b*(b(|eT3#2d5fbR!rs zXcz!f_f8@9j#NKYqD>TrH8~T^RrU$HSOB;NJuIa+6!*63w z%ymrY`E~z3d_sAO^-ITv=dN|2hds+6#yS6h=jFfH0OUZjw%IZhz#Er~A~k%oE`{rm zZHoWwIY6hkpyF}}e*jwM4mO9UfW&$P#Fbw8S?cXyHB&3!a#$Myi2T0~n(? zOucB%@l$J?lUO^0KOi6%yrmPL9QFXA`&B$nS17jJFJ-f=vXv|<_b@q`6K@jpef>!j zPN=;VXIzWczjv0iETVxY#1QpsU;$<7-I!^Az_Xr#uD;T%IaCD5^{-)H&Lh+@ zxO`XQXn&!*-eVii3*)aElt9rD(lnxi8=GmIQAS;D&Nr82WQ9&l#n8)u4?y#6tqW$% z=`Fjb*isyw+V~W*4ey{a@R472B?)>Acs}FB>91uu6husC8P8tU@mTHY49fL^25)Z! 
zKny)@f0O2DMmZ1{>PS)U-H;%^gxd0v*}Cw$Hnj1$@~Sme`HLPb%M9KRXU6LUGzJB zUr;XvN@=6EUeRm|yNXrM)YT()PC+KGxa%` z2e5f_l@60WI>l(6$9zd?|3nzMy=`jxd^4QzM2AokYvnsnS)W6=`O5>aWsgP#c8>yw z?mWte?twI%R0qmY7?~E zW&8oQ7rg(p)b0h21l161q{FkyO!Keci|@Zg64QU~7;tF@F?P|cclFgt951X?^_=$d zS)r|9TnVFu0XLhwz7=CGD`EmebDPZ5;Un{N-BJ3nXjGnig2lFbx-_i;wj_i$-FTTo z(bn<*Trq`4Sm%`#K5xT98ugvlKAE|k)LVUdc$#8We!Z#4$_{Uf9>il^OF-%RNh}SfA2HtDS;_A9(RU12uGrGOrWLN_N zO4jM80qYG7?8VzW^r*kWF-5C(+F5%UxjTb<9&e;+!o}00y};qUHE5Imw8YYZ%Q-1P z4NXINO-ks+3ZG>3YvlCi&WUY?z<4TPKc52MuTyt;$K&lZce7*O?%5tOtx8HrqXgZ_ zBaft`$3Vc6v@RUoSH?lGh4ORQ&mz)gFVS)#1iDMhWNJr4T12sH!c^I&9KIqN-iNN7 zJb)-6U>p6)gYjD*m^Agppzi~1C;APAQMj+a%{bF3Xx2*NN$d+`!%2KqJ!B-9i*Ptu zo}Y$1K>gP?UX5ddtQ~WS7rme;y@dV7{x(79a=P)+eS^(*#2&?3yCTddCt$(?}5E zi4MlJ`1RrZ>Kvh5FITPTH-3W$&235tT##MYhUEa_KkbTDV4Q! zb9gRebN+>;C8}s?xsKEGXoxL0<7`sH*11=2Zy<#9Dwk;Rm%WO53)*`Ru!MTG0q6CE z{)pdivntJEka0x|wM^qxrQ+_7*_P~U$tiU6wOhpncNv?8&l@LZi&rB@(Ww-(HEqA> zO@=Zq&@X<*GVAMihJ+5~-wcVfxw}LOmaLBK<+gD2|31R8=EXWh0}TKW{BJOf;D0^R z+1k0<{gsti#qrRX5e>GcsVt5h6Ag9S3)A z6f@HFnKcn*%^g+$1SIdF&)d3Tup6Ii!14Io2CVsd5qr_xV3DcT<`oT zHV`X-1z>0~;!@_PiK!;hKE&kD07`1uE9!*mzQ(t#=7#kkB@Km)`o@pZr$o&pqAL~b zYiLc^(4x002|tst2MFXg(#j2}4=rc2*noy4Y#eT$7+1zrcDO(4)*}mt&fFTd-Pk?b3#vvwqJrL#{ zqP$+ITt*wB+sNdR*dciB$@Cb@X%q*}1dr272!xku%$*RvY@b668u<>k6?^ps2ot&4 z@0w(CMK0SfxpTcPc6?vNk>~ZVHg_{es8_C1E;uw9WaYC5E@&`hgY-%YZjzqakLgjJP!Qn&gK1y$ zN*FS^CYA313^U>@;Ktr_d{0_T=XJFlfzwFMTv<_ZOfERy#l23nv--Nm>UzG=o z=M`)K_9Fg@)D=NaSl6xMW@#B6=$)li+}?~z20#}Lv?3nFPlAZ!;7=9;(`X1u*I6sw ziiBal**=Ny=0#UNzv&gBkvD8h3MhUSb*Kk+9k8V+?-{C?Mea-&stw*}BWJ9HP8b*( zNaQC{;Iwr-neI_j0;8+b?O<^uWD#l%K+AUyDUdNspTzGdS0}|$gz^RLP_HU7Z%FY^ zVJ5*RN7BNaxL#J6n)*BhCwL2+h8F=C;OGzFF&p`alLF%%D!M*qC7-o&+=nd(Jhe^% z8X~sQ1P3HMXSB;_XXEoJytF^obezXhfwjwy!Ih)iC_4uUT~$= z0Tu#<8hC~X@d%y5e42j|k${(G8wDP2FGUxJxlREKeFs7tVcpg z)hAj=6Syvj501o~j*=3eJ%)`AcXO<@CPIdrHl3<(#xR7S*>29;$dFA&LrbYZkC7K1 zL#jmXFoSG4Jtte?pPcB7^MGFvnkX3%@JLyO1$YCGa+@@-8&yD(6SzdUnJmpfw;UA# zUT@>PE6{@uFhgLLNGX$Rs71c1NjlnZl=;m@9fjf}o_h(YLl})3IC+%KflxH>x9Tcy zlS3;#T0hyI&f=zsjz<#%Xxs`y09o)_8hZx_{`yFkJrq7o1};%AhLr}sU(Mt%vr9zI z2`WT_(7E*LC4L2tYDfW7UUkC-6ySyXWCLj2bf(td$(a%ocO~ZWoV=^|P}8B=o%y?v zrAMdNTTRPF75980_?BtzkbF2GCF}~CIeXMMZeJtkH}~6|g3Oi1qFP0U6bNUS`&+~ zm`9&X9JT+al5V%yb$sv%hB*J^^$ zub0p(g@v9XwxlNjCmC?0e_#b@>3%kWR&} z$^!pf9V~Uy0K@;Z;_-4+@Y|z0@ikQNE1rZ>+Z&aMYOi#r6((Cx~aqmg>BkvEEg6`B!#iZwsya zUxvYVOS?cGwW-3qQC~(eTt?X`)Ln}kwX*0UP$XG3h{{={!$)3&izCD%z#dKW7Y;)% zhZf(ACWNYqZTHhh$qD3gtmo?^S-J4At#H^BHY14Po;15uu1fRd=z|xCq+SgR0)~uA zN9_oF@Z~xO0Yq!Dr^YgJv&{Q_VRP$rbJMhEEhjq-5S396D3BW~UEXh1yp?5FCG}b&2%s_T zvBM_>l(PpVZBKZ??{6hT`}b;~45d_wqHsN?pbaOoVC<4_<{GwjmW{tJo(wG#kkEg- zhQAUG94pI%_!19aLZqDs0w+G6jVF6Xud>N{kGjhzDOtnN4LCjfvUpev=+-49?j60~ z482B;eS!icuPg^%gkksi0D+RUON?^xhA-8-l{ms~w-$GL|Bm!sOspzZAK$Scrau9e zQZHor5=LNV+Yi;fuh%%93cCbPv8e8NKXCHg3kqxv->(cIDQO_6VE91BOsiQF;@C^{ zdP)yeL?#XuDPzcPdsR(e2*xZn@SsUz=g7Gks(;7gOVkTaN_+(`CVdKK7Oa!v)?}mU z#XiN@SpcR(l{jaH*`nioFQ(adXm1BxLoJIVC{(DeOEc2eqstp`L4K#GwQ6^2|9O$! 
z+xD&3fsz22TB8m5>Z<()GR3xs+-5SuqCpFBJu^8kOCem+hw6_@2CP*q$4~SnfKu)r zkuhDX(d*|9Z36HlviZJ_v%yeZ`jqX2@FE2_dd`zd!we})?|TAa@-=EwrJlEU11V+h z0ogg1isad1Q*>!c7`P7mu$0uO!kch0s4}f@5Or_nvYO_yTlgCp5%$SzZ?X15PIv2+ zf<5-)${6Ww#(OPOB(s^6rcF238;ldLdFZ&Ztf4Klp!&*X-q1e)7ay@73|a3L59_F7 z)qv%t@KOW`pJ${nQmxP6NA25lqIGoL@L>(w3=K3XJsfNGMfwPoKi_69v$xtS@ENpR zn>Msez63MHmT3O@D8Qz?omUw~@KFk5W8Ql?{b7^nYWoSc$N z)LxjAefK2?iedS_hb&S)z*=JPQBZ)AKTg@W04i$A^j$#axXtTFgJpk%5hD^LN=C+t z;m@&n^Tg5oa%GCPEf&AC0Ev$zR*+G41PAK;ISR00mJO{qZyA-2>BP(SU z!+}=n>{d9;){<5rZ@|O~@)O3)(t0I#%!~-OYiJ-K;bjjM=ceJN4P@-_cYtAZ@mJe^ zxn=0K!<@k7;lFj+kjexUp8vboa{SZU|0a6>7en01>_6eH9*uu~_~`!y(^V93HZI-i zz!a6sVK6L^FwAf8WuZ~f12hw;BG@CRt`iee3V(a%t`n!;o^&k5{O1VWTuoh1Gul)f zr%Nc+2~?cZZ<4t;>jI{uj?I5b`yfV;q~3ac8}uc=wMiw1nYv=m4vp-omk1%D6D%qO zbI&eHcG=Y>f-`3dldWcYXGc zAaxtn&%=M-#8omLPzdKF9Q4H`vL=i0reW&pq)P6AruKY(=d3bH()=6nt}&UtfNvZe zTT!VeoB>%0P0NC_YO-WyqhZw&35Hk2>_x{EV&L6!&%AIIK|lfsA+0I!0zm#yK?uaJ)Zh>upaqJSH)CVNan zJCtP9f6YdqNuLTjEb@2d*i|dl)+nXBs7+2$bNZvxt*4^17qUigbyow-RAU=8Vq=Dm z$1vD;tRU$KAE> z7tAoshm7<5u4RwxqgpQgHd3!=8Npfi?leSEKPcjj59T{tW3!r4oui9VFCZVCw1?@_tqCmI9nfj~O%ZMu z3ow+)WaFBf z6MbP2)wprCqcYhv00z~!;R%ciU25ZADIDBp!{ad78x5<|>ob*6&~DJp&49>Sv9WKn zZ@hm-_scf&_hT$RJawT^tZ-?cqV}Vkcx(hX3#{rP|JI)>8e>xXD+9abiW5S>g(A}1 z-NBJJCr}KWEkGy+PDpF_bAM(Bmt5b3-MiA!Tj2ut=x){q4mf2@kQyW7+q_^19UKGS zpFx>0A^TgdzRg~Ar%P$dAc z+9&n0p(3jMEnmv80D=i+juTy>QRoW6!RD}L)yg2%MrO!R)Z(K$5y%8l#dLL2X1r#k zz~%nb2?)hk5j>*O)6mb*>vc(vNSUMD(N(4avNQAvGYo-Rul_I4)R<~$^ceVtyr_hOhVv8hCw6v^c)hm;=D0IXM1u*)FBP+Dvb*S?dE%4Yfda2U zSAl-MN1(~gSQRe8jh7^}Ys@)yGxLE>_c9G|cQ|F2yI^Dokgn}1MPOqH4EG!;pc{TZ zi#DQ7M2O^-q4_1k9h%B#>%kUVn=OVebH{(96T3EMLWQ+W^OMI(4X_U5ZupQn(G-#x zuQsr!uB z$`Bn?p5NV%{w6zwswl2JA=&LFkW*^GzNre8NlpLJ-BF>%2+722AFnd1Z8e%|7RH1z z1PR6MzOv1LLc7k^ykv;)gjkRabSo!{J+&UW4cwwl(14(rZ%wPG?ZamtSS7L%BNFGF zYm!Zlo*oC}dmt{UExD?(tPY(Rs^fz0GsDAWsIirztj$3!lEFwDJ0eLIPdjg%B{^WZ3cRcGXIL#tmQHe$^`=0fFljkV{|d@8LXoAXfCU6zZFNlT zs^JIxgag09{(?*Jr5SiEX2aHN$6#Tz6JsrhaEG&<$p1Xd( zt=VXSh$XaiFK|}5dhYO&m_L%akVKknhD9b$BPop{K)4mJZc{U6I=*PV4YetaL4d8} zdR=9PtU2R&(p-LPX44#mNy)U1k9DWnof`(EMu(SB`J7$`HzW<*oT`Iw|7;s?zdik% zhyFqlmSA12Ijf0`gRjP!NI9ZVS)<8s0m7=@280a*s%?=r`Z8 zN_uZnZ8bx+sjNv;aJGW?2-XR!3ZDfw0(CD72*fcPd zi{q!qki$D|K1Y=}Zi~2z)YZ6GdcEtgJaamYr?k{oVN0Rpqdvai4_Q?k?fJEl&Tfqy zjPj89X&UZUmp|e_;nQ2rwZabaO4XOh*`5ET%1~9oXWP8aSGzl#$OMc9DR%9bsW}gd9LAv1!rcB* z0;6;Y*pW+zl{_EzsWcQ2x*fcSlGIFS{vYCZ)s`@bNR|pE7Koa7y*R1zu9q;&v9tun z)ZCY$olaKawh!|^J9t>7w!NjNQ4g^dA)CDlNuXCH@e|i7*drz+Mo@d_%=Kz%=L8yj zLz=UOHn?-I@@j`pJj{eo8SP)zkE0I`Unsnvld!J}ylYuC2cFf%9&wE-kD_Z^0B9+QwO(ByAH92ltgj$hd zCBvjLhBbXP9NN;f`^)zjr9K`0CkM2 z5$?7aVw(hQbMih^ZQHrgY``OBb$PYsC^N8qMEPR{(p?od4oN(Ep@s zh^oYc)&6w8cF)f3g)`v&j~1!b$f?>xr zp25YrHl5R0(Y|P2v_&e$eeg-<^I_h=tJ-3j*6I6TymPBXq2>tTpu1tOy)IQZR1)ie zVih;jO5yT8O|fC?U4H$JNglpB?G0x1`va9T-Bdh3(^OTBlP!bDF7=r5ZW_kzm=N^p zIyP9DPk-H#5*`832zoi@fn>arD@DL^Q-_a)?pejtth@?2osfpQCEAjyB5LWFzsGci zRjZe~DI7TKXuGd4QhFSSIu5#yIA^p+WxeopVk_!}WKk-%wPJPJsBEe}brv+mb*bjq zLs$}Rv`28#N&OeWC#`e=a}V!;ssMJ^0~I}56G(T5qGMP9c7}FS48>^fY)nZZ1gKFz zWVbHla>)z}Yg$$WVN2z}4~_t^yWKlsn#yDbTIg)BcGA2I#SFgxJ=KH{+3gLv$)^T) z2gT&6DLv;OUp$zwvO2l!f-Cn=P-HNJ@N!ldo}rUMJy$Wj5GfV!6*}Yfc(uGrxvILa znDsYFX53eu)G@mZ=Iy-(6llxnJVZP9)nr2C^tZi?>5r6ENv|D=&*9?mEFMQNzuzTT zdNIhR+~r<9G1uU`FnCa27TcFo^y(SySihk#EdU*f*_vR-_^xq@p?z!(~Ck z=Rgpai(g4bTLh$)H(@JS@OH%^TI?BpwR!nrr09}e$q^rPtbtz|Bnu_UzXoSv#F*{8 zf4B&4&>XoxxH&aHXUE|wG3`LRPtMU5e_Pn+6j2w=pS~r3M<1(~eTh>hSn&4Bn`>3Q zr4kmmJYA)%I?3mGB2u$FXlq4gX=ZV`btTu#yeKGmlEZZ+GI*K6w-W?m?-maZp`(mS zBZMRHP*vCkn9UsboKDVpn5ijF=jR(#GbCbIVTl}Xwo}nW487pLo*6JT_!|^xU!Us# 
z3R8|Wk)3%(*4W`216eUbnZ1RJzI#B1g3hppq`b1*-8G zh|X%6og`R?80@)x4)af3UbprGikI3v+e1FD^>(3ZoMz&KDC>G7=5|qXkIPQ%ywPLy z*6{WA%SD?nxbt82E0++pxlGgolC%U0axFLGG&xOzqwW(*q{)YJ zw7bnfz1^lOb>?C3;1aY#Y};bIvjgnU0WX;&s@@_6s*t9Edcy^gxX z_*C#83CFj99l$t{{^)T#PjIHVdrXM$xzM zOMmtU%6C?ns%vwWE{Yd0+C!djKnZHN3k%NM9NJ>Irc`!2!m zJuEiqa4kU7V%@}c+wz_}s_Ly!+Wh(&OE9R8GH;p7`rx+{c0c|0p6eYSxe%^A06dK7XWKHRf@X-^(_B!;)`vTV(_b3{6F)&5| zvvaa@q7TjjdF%n%xOa^iQ>+GD*xgqMc>X%T+^J8eDf$MkrK#c3)}6AQrJ)OR$tl%0 zW1*~R?>vE|IUMzbdz5+riKN%kwCeY||UU@-Xd z^nH9h9>IQd_H}0JZD;rOgicXCFN|b4JtFY-1bvLy-RC_;u^H`~lN%4=If*4 z>wv9gdjVA&Q^e1DP^%J(_B6T-tm3Rb=2pS11Y}p$%xjKI7r8?g78_E*Fu9f#DYIi# zCvO+hEhdkVAWGt=1O(TqCu9l_o6_4`iB`HR<<|d(GO`(J{h;+ZspRxjqv3dCj4~2U z9qipG8gnxqtE(_or8aeAz#JwQ(9k{}{^?}PAfrC0Fet)ePKN{F8#=hhi5Y3*X3}xg zztAji(GuxzZ76p0>+$f3?p?$Rc zL;6OaVeE+1~UWDkOAwBob!RMq?er&!*mkISCw*_yNMC`oKMsDTcJAtWKTDpbku3 zx+Fk@dpiJ=YsA@z73%d@bO1v!giPoggawP2IYG$AofoQCS)eCDBy*W_QW3nZnb}kJXg3*HD>Vv2x_s?Hz zlZjDO@=7vB>UL_8cVl*Q|%h)EcvC(oE4cN@qEu^zCa4)UqwmQMSsJ+j7TJZ z%aYPTig3LcHKRoWbt;YH&RyZ97;BdL(tdAKb4)cav$6w7#yAo^gAR0*ch)eMY5C;F zX&R1GAXOTnzY`>NW0)qz#Jf^Dca<^R{a?i24I^8jR3^ACpMjlZre&0jR@$K=&{_#& zSTQAh=+Rh@nhnAHuiGBA@D{UVenPM0cWCN~$)FL>;_jqvghrv61;v5o^&cMAOwQ$l zt9d|Z>m}kL8!KPzItS(`B;2EnPW&FnI*EZqK7OG+s20<+B;F`1Mo>y|hr6=m))jGb zbFy=TeLsJXn=chfcm$6yCj-f`EOKi{Y3Ip(#+Qs7Au@cVTN}_YydD#kzXrPs=Re4@ zgUOWSyhh5if741p{i>3ciw-Ty0SC31nTK>Y3-TOxGI6n z7c?p*;|}F66uvDumW8Ss$k_!8h`7k*7e&4Rt-#nS*TcGK?f0=;te)2eK@9%bs^qIE zg2lu)6Q}mi5_Lz;h*od77~6n0!MAcN~MzQCaSy> za~nS{c3VqL2;Zrrk=b}(z*Q8hcpny9e`)omTA`)<%S=V#=mzXqC4k2qbu_MdvxWbp z{FEBla-Fbox+m=k&r&?4%zW9PS~pAM^r;SfCv1~zN6`&_Ikjs)2EBa($P(DuDYtIA z6>WPn*tLz|j_3Id{O61Y{Txx2CPAm-4corBr*vafF*t@rXV7qy)~y7f4c6Ap>xq*9 zS&mn@d*=+zMT;$V-+#$qZbSur^1(p!qR3*Q@g+$a9|M z2B}Av%2(gERA38o^3Ms&T+|dXRTl)kBY?2PBwLhb(5f3xuSFKmzU-JZ#6P8SLd-7a zgzuxi84^?e7Tp1u3w8Sd9z_A3Xzbp@cECKV?C#riBRWFQc(!O=l+WJBqr}EmR(A_S zBCQm1Ps(yp_xx1!TO_^t4Eo`$pZp=BqSYhi`C38bY$20idL<5RD#FV7HPI&nkF^^a zZMxG1k8?LVM$06khT4yZueB;NF}Q2_BdeOX{Ca3MZG3irFT^I&MGQK}$Z-%5J#9 z_N+rGMO_Xr!CGQ&LFi>_Hy@`U%5%ED)O>8dJOL7|=K;-AdSUCCm%W{-9~o!B(E~}& z+gdN=j(+GLm@b?sEiESg;!+m{L!$Suc+exs3|#G>hZCuLR@k zygaBiCq+Ixj9ej*D{-z8lHF02DT%sY!Jkcx3*%+AI7(S1pL-FUz9pRXLY@DT9;Lw_C=iiq@9o{cD-A1-Xf#G`f@Yc2&a-a6 zS2lXN{Fd0!PZIVZNoKJIoieZSZ_7E$m^U3{#KGJ0a{F&)AYyA4B0GPD3b~X!!>wRG zingsL>m_T^Jw^BG+vnb%Rv+yni0zr|3I%jnp#d<|1{lN9sdcql2$}(caH0k>Xv7c%)hpSoK0D`R$O?{g!o`c z5qbb}t*r~cI}QK|Kw=Uq%(Bb~oJ8<{q7L9Pyc_L9^l?Nn2_HAUX{l+6;G-fxE;@+| zjgBcH64Z?H`cI4aSY7BbP0WULc--;J%gonRR#ey26;*MMxRRkcYJxOFhbiQCC^+9! 
zx=AES@i-D`gf8Rb&<*zE&AE#t^ho7dFvxqkp6)BSUuX-qLOkkDytH{>v*!eA##1ul zD@v0#a&Cfp^<(l9wdrxfRf&OoV7_)=DuQ7tg}hS!+=1ooB4TAsxm1mli?1W0V>=ZT zW|RE*h=gUuGUPZ<ZC_YzQvPqOxt zH3PBa^b1jnzYtPw^Kp})muRIiaEuNRAg6J$pMw;w-Pdtg)YjA%f&RCn$J^3N7}w*} z$jr>YC;v%Y+wmuuxAV&>5)X))-NeXB-7#L8nq6pcK!HE3*Cfd#yr3gqWenL+-fsq+2&0;F>}Y%JNtMdGt!T!K903lqa|) zDD8-!p=AJr2^`cm&4-#)qwY9bN;RZM*p6~(wnj7-iuwR@ibjm31WU#utYcMDkn|DH zV+2i+TzS|xnZ zI1T+|s5VN_AOZ=JYN!LI@`U|C5!e({5BhhUl4V5{3&krFsG|`|_?&ngBwjX9H0}oB zDWiazc^>AWwm=Ld7$bVH`&h+RuqM4YhO>#64QEM1a@l?sT`0h*zE}^Kjd~!E8h0>} zlu~~T>%0s)s)J%bpH;uz?$gqUcQJ@yOe4$*0yKF<>K!F-XSjo(*ozWzY3XfCM7oMa zdDN*-eaT&BN}+o}TBszBGZwL;TtYx_*ajJxS&0|D>7JU`^Q1{dj7xPe)K%eFlD5$* z1_wk9afRb8Wxy*?(wi6-enzNKim6NEY0*8Pw`2vaVUlCR9>-B;41#tl{I@$Ph^Mmx zXi9ci16Kb!wK^~WOla0H*#Vgudc^S2L0O&i((A@?7&ju6wTzX3MF zsnSH)Lk8l6Pv*|@*LFlbSY_qjhgM7y3mKf4zXslDKMyNzWi0Oki_HcOl1bJx8%F62 zK0t)1Reg`fglxamKg@VL-(8oBs<8Bw5rovJ3Mk~MvtcHbhA|8s^vwcypic-!Gs4UB zO*T~cr1YX`yfxpgHtJ5&!+ik97ae$CC|&z~=9=|*B`A#NdZt$7#u0hRXUBZMH(~ks z(ozDH;K1mZP6ti-27qz}3v~|};IMMu)<$t(OW;!gQSv@#Uyzu1v&d;2Dv^iBx&6;h zmk)LQUBMV_!5DqX2uE-Pqp$?s+rQLKUQ}DGZg^bOQ%L#wLk2tGb#jUtpyeN}36Q^> zKuuJ2YDwwJjO!iI+ z3^7CGRZk1r93D+^0bT3VRcZz{j5Y{atqNn}h#RZtT{GLzfK@2}hp~5H4h3wRG-KO5 zv2EM7ofF%(ZQHhO+qRvY*qOZFPVLOMTeY)4;;DMN@9w_n*=b=NqPTRX48`;d6Nee8 zD=8JV7Ey5_`}9M4+uLUGE=udh8D}s1hm~b(b?d5@BBe=-5UtQ|F-}ts!Sx+3cz6i; zc#rA-m_on~E^M6F=sL3WmvzJ${Kc4m8t~i9N8h4TPP&h!2Jlw1BKh5UBjBz_ac{q- zyM^PGBHi3goQoa$5;2>r-JiH?oG=cVUaDUWJlmfJ{v+sXH} zgQl>Ole&j+@r-Xj9Mrfm6A$(j6hh>DNfHS*Vck@>;cZ+|=B;mgaptl!lk7&8Ggi0% z*=x>o$OY#qBQb2uXzWVZ>bEO&{ZYD;W)m3n)*_6ZfQWR^gxvfGU7sB~LaiN7Rd!&F zWQksOqtUMkFBPd7MeC%*WN;#nFVGmFg2$!5jnem5B}RSp#)uyYP`5A8Fb!+UKS!ie z3rd<`l#8r*t#EuNJN0M5GR9Q^3x@;Zw*QZV>CF>NyK6X?KXbIh*gyq=3x?C}eN0ZrVBR*pO=S7~@ag{5 zmh|eSc1Q15yI^}Ir$;xqrhmH2?XCB7i`5uPsgRl({f}5P2CCEF#Azpj#vOF?Et|P{ zz>mS%efhW3?|yUGu`qZVIukhb%ILyT=Q({#>Q>Xf1DXH&RKN5%R}8b1?$WW>EguSV z2&|VMyzX~t(Bx)PaGr*5Hx2`=qWA#e*ndi0irQl=N4pFoW2_eS(sUC+P4+x&kHz2D|4oLE@tLF zG6K1;73wOXsirzB>OsPeZfW78XgNHQKuIG-wr`KOYh;Q;e10WM~OLH)C zEsEcn?%T%^hd8ROc2k|&VL2iE`R~Ne^!Q{asUpu6nCF2DvBv!M+AOY_*MrKj$5M5c zcz_gMvwCdD%7Q@IO$*+7=kyB6cg`qF$@5{_!}Db(N)11m&X%OoZJ>c3%uXf)?5GuS zRsq55zQy5=3kj4J&D7=}n;Q9iXC8bfvAQ2V&l^3CBH zi{MAS*WUdr!(z$%6+)cUNG=ETB8B69+n?-Vo3L@vu=&|5Y`wFJ?j8X{hz_{X089LjeVmhV8kaZY0M$(-^uDqOWV3x(65s?Oe&%P z;x8!GG+jw;PC-%sacLVuheR>%lC_Oh#)ifLf9f4l^d8eS8(Fdj0-=x~X?NZmX8dtFyhO_v0pJ!x#V3 z%7#~STWf2(w<>pZapHQPdzlFjXpl#)9%2pU{|TIklT0ondV}GJp?(yR-=NQ+cZYE_ zk;00(5u_!uRXTD&l>q|G` zgMrgap^>+vMYcM~+5=P2d1bG)fFdvG>kEzQb$-^f22CnznFOK6!Ct3@32G<25cnyOnWNP zKA=iD3olOTM;#Qq%ij-=%}PzB>h|r^Zk0cwqFX9Klqwygc1hTj#fa}kkVu_o7+f(* zY+0v+J8lds%0HJVfL-U5)L<(Ad5r>`yQRy`6OX}&LXBN}k8}1z_%vqjF96c+23DS2 z0&w2^_g<==ovXc@ouf+kP2Sxb`K~D3F@3%+B(HX2$S`6A$w_JQiZOigGhLB4o;;C5 z_`pa8P*0kin_|gT-ReoxP-Akg!|FADAa&@FKd^YHm-s|3YDSgKjHysO3RF6oJwf+TVsJ9iym*pBt)TvHT@Ldi_;Po!3~|*aL8WBW^|mbpK#`H7^@H zn&8JBXXyU7jROp%x9fWgY?9Rox`a%-2(_`hk3I-%>8Uerd?`&UM<5|^PN4uRC4fwv z5;F=|_^JI@gax9C9A5kx*A5U%13813@G1TvDK@=D_e2Tjc}Jj3$55D=KJ=1e!qE<&&>FRRJ{!gWq6D>W`c z0#Y|#albA_7>MVUK5sCsKp>EKGpZ&Zw9I0(moa1~n-AIcH&%>)CC5xNAFC0f3sbxt zSLNvR?NJ>-03+d)dO~M*3I8TV3VWYvctQ#;rDIyJkMdCv^;sY>>KK7p1F1%pAnl}B zzdzfMS@_`KVDIzYdiU8GdA=({pjHXoN*qBSFC4Gd;DmT4KLo=(C=Zb|J8Pu|H@9f_ z*x6^Fki#}L18h8pNzP$bmG6UzJJVD`jUGoyO)zcX=GiAf34?d9#Q^XFdgJgM3%Fv1 zo;hwCN%jyroc2~YonKZEml}UXLmyDs0-qW0sleoVkwGRgQD90r0YS*Jpd*Ad-5Nh7 z$xkwDJ8oe{!&Ax+f;0(_Erf}TDe{)WaELmcM;DgE;W>}Ct=@rJRUsnEnvb&^M4}q& zSa;@TQCQ?OIdn;1qN9l#t$-a3Mu@Lv-sXo-a7GtNvjkgK;C^GRnQfcxU%iXADwAXo zH06i-IVkn&!TKaLW*rZHsg7WB@Z2{#e{}B4`G2Tn)&)!Fr 
z8^vT7A+_3^)vJ+oNwsp?7#6JIANRPK^VbN6hPj5%!sU^@(q<~^YCw;Ym4&CPE5U!k z!Duz-V2`hI+3#cyktiv-BFD`pbY905O1p|NZzu|2)TsOy2x}!NH5TT!W3RAT4X~KA z#P{jZl)jgcErE~dXZ?~&SX{9lgB$f&qizx&va8sMvZRRt(vS{?gHe?l5ec)=%SW-H z2lbf2xOF(T?-J_|&04Xb0;>r|yy??p?UsPSt`XNq=S0*FH&H!Ad^S*PZAK8V&A00o z7Fjtb=_M>wp>YD0h~B!AcFoxSPMN6+6F>TY+BPz(9qXg}Ci@*tEJX8XjUJVL$a9pn zl5zUAlzqNAuNcZeYdof*mOEZG@3rCxa^OUC)fT@AxYQ8}1+MNN^!n=sag8)H^zn8_ zL}^o$+LrvoRGOTr#}#4yQ|h9zRqDc-+YwyYzU5WwRClD-DMo4t{7o;pXB*f5g+gXl$4i~xEowZPp)=oK znVeqRnAdeS1#;sZ8iTbJhVy9mf_95t-OdWbW=Knj84-~0)^0|$$yRH|94^eF8)dZ; zr_+s70hbdGyrxtrPA74)Ts(1tH|tY>&ee+6u7<)obn*2j1QW^Q$o%7{h#XfUSajgD zxO6%<>71jh>uPNGnU2x8Y5bTsQ~_9qyrtU(s~+vt^3%gM)oa~owJ{7oN`|xwDuJE|)!P9zPMt z6IXuZX5dCvZ%5z`J!0l?@xm4{0qEJ;Q?RRbwfo%M*kqaJ=E~~it-PtnNzC;O&zkHK zZy>lm2+u}6CTHup9%TWN<~P&m3Rl(OnsGUAV(|h1{4&%+>2)vUYRWz2Q+OTv&_e-M zRPV`T+8#I*@j~EX9ms>ggSti!jM%$rG67s2X${}&@>sN82qNb0DRI?pu7+4kz??Y< zNOD8s>bx)5_LpnJm8$ziQ^@(Kp)tQf9R4N~tQ^du4EUXd##HMKW+O9mW>sQ~E~eJ@ zOt*DFZv`HfnYPRFd9dP&O%zIP=fP)Kw2b0-VqR87gwMO{%4AIFsjK|>ODMBjWO5p5 zckj)ODP~nj$yv<_5I;l2-P~!TfO*w*8s_1ssS|Sj@FW`aYNzu2T|1X^>Gir#7!;0w zV$@dX>M_xFhN3znWMi}Su?Kc%Zlny<>kG1dDWKy#@^?CAP1{YcGxFteo2p73I-*FzTL+u4`K+9M_~@zv%JS>&4BN{;b_Q zOu+U`$_>I~jTJb;j*OZ8+qf3gx!MqkrhfV@1*Xa~#Sg~WF9urOd$lgM?Q6M)NZPX} zd-;0ZQC6z#m-Y3rM4=m*P(Sfq?wjQ;(y;rbZoN_kmPqgD)KpSXm(LV~;peLhk3 zBg{(;pkc;xRx^YbYoacg!>Tp+`A7k#{UqwO&pn#kQ;|xemdrmAJ_miT)NWqJV#_8Dr@krkO&O|Ry?*fC3VctaH|A-rYqLFE4SGVF!3IgNcgSMMxBRl6 zI+fw_Z8~Ih<9x_mcO_uD?nVPHMuVt!EFz_;A}Ggm;rW>?TOWfHVe5nO_D*W7*9+!4 zk^A8pHu5LTj1ohpCB71R?ZCKW`bCq^%aqx0#=Cx}j3%IpZyWTAr#pD<=F-}%%5In4!&^fiO` z?kbg;@GX_~T{SHGZLd&>Wz0AHf4|Vo8m}6`!~F7OsQ&*5HP`=kqUjrhFHsC{X|iTh zsI=}LH(6aAk+m*OufEL3M@pd2hXBJjrpo?}-2mSK#U))CbuCAcBm(p}{yL0^?lcS8 zN0rW^U3R}Ky@|gU8j{kNs6=eE8f3&sRMUxD+~$%PJ=k$AtcGBO+_B5a%9m6%)Bogd z*k7n)!gDIaj3UNqBKKHfKC?rq6{rc_kr*TzScv#^dkJ=jvuNF52Vzv=uow&dZ$jpb_n(<@QwX=h14o!%forK0nz46@{x*Zru zU=cvf=QznDytJb-VQWT*>9oN-K3e-J!Nxp{sI`O>d$fP_m1stSA;VB#wR8(n zwo`0B14ix74PEn0iQarDkQRtOY|uG%_(DXJ5$f(-5)Zny=p!g7j`1h9{VVet!fbfm zP!9Chh_88RE)~qdKIkBQ=IU z!+@rDEMSDf3L8sN>UQFlbJyGsaz+et1(oZjn+!A2$}rKTpSjFa67qoEUPjBhD#+H;oXoo$FE-D0@ZAcL&&LreRkBT2-^;A@je z{S|i`tBwhL+j8M-^x(~YrX11tn;H{v$Vr*3Exrd6LXuKH88np_=nsayto+Sj?$v3x z$Xn3dvsWA~jQ_Jrd{0q=gVehqhAvSrykHnlS*wX*UPt;X{vX}O&l$FkhT_XH(npq> z_?8tEm+xoQh6c3qBYH8v7(zlgHjFG@%?u`9`)5%vWLV?JVZm3sTZB8$@*k?4S(Q*8 zoRtIopM(J4Jgc;YQ+D2947bsS=CGz*ZBs&RrDTB?>V^>6;D^52JsExL@}Q06W`W&0 z`6G&xz*y;2{Wd^dk6~4J7)uLoST|C&cDPw{l0?}%LwIWL} z`-7qO?2WWH*PDx`*gTlJjhzhdRf)~kAy0O=8%y?$r`OG0-It4^5j)_+_vfS37b7=@ zYKu%_ePsC8_@gm>@fm19Mqt_f3>IT*{}B8Ba&UU8ZCIth0JMz9@6$#H$ua!rpf1cw zI*uv2F&DHn6_Z53$r#gv!x>`9fjyHBBjuZpN~^HCO%n2+3;5)80U!}E{F%!KTB)h1_aC{2Qh zhk|@@eS_b!X+5>KvM=^&P%qr>m4m`Zfy?qiD``~!z9Z_{2)dMzye*Q+k?4strz*!0 z#jLqdh{so7tzmUkgJRAvV?M%oO{I~N^eGvY9h_pV@~OiDlayD-qMmFH$ea~O$wz1K z+>=)y6~|*VSTZJ5#G;jbMG%u(rr{SA%WJ)34^$?-xQgC-uEPR_Ey|V!*#6HS$H8BB4fkfN(F0^;cEAb z7f$mv{4>^RfKA})Z-0=U9*-u5F}&Tc_Rokj?2O6{dg=7^FFcBFn%xFeDban*zzXCy z%9Z<{IIS%!Kx`kHu?aTCS52VaNN*_+Kgb*{7_gZOMm@zb)^o@6@qovF(q&;hlLlb1 z?TMiCmqV(#zR)qS7G#SXAfd0c)<=d`|m zF1pGhhRnbn6PoGsMJF0uOuL#7w^zbYn^%S&0G=!SyY~Aq&dNmt+0{AwS}#cg!455J zm=?74sipU7V?q9Cak9Ba_Tgt)&><{4R11D_GRb7W5r+&ND$ZLV)1Y)T%zxk?{oZ|hZ zU_7qH7i2dskgMSGxn7@Hp`NM+#Jn;FcF69t(DI@!xMcs1=O|m(QhF78?&lCYkOLl+ z4x(efUJD-ss2GGYwr1p5H1YdEo|S{20wf16YKKgePdCM8XH?$+zU8miI@jxIptx{< zkgYDLi(S*Ui=kuhbgrPgumj91`ok_7EPSyt>DusQ-`!c->31yeaZYfx4cV|{M7T{1 zG~7Fr|J%EdIcOIP-==q=p*S2UHn3iBa=+B}Xq#D=Ua#bg=m6FXIPbWzae!Mf34^Od zNi~~014qRsar(J%iNU4TSDy`2DdB_Shcd{ zFB`!&{oO`68SP=M99Ri|nOo0P`5scB!gxLSZ@Vd-0MGJI#YlcV%kC-*cmChkALrkt 
zOcb2V%fFG2`qOkL$`p65OpHE?shYC=LIw343>ixgkMr@{le}pC$&TwhR?7yR`}gXJ z-QdWWw|+~ly|DH3slJXKPJx@C7)YUu2BwC?d#?jZwMvm6@Haj8i^a3MP+O`yd8lVK za1(xHE%!Yn$T;yY(g)9KY%EH@*fil$j6UF?oc>m_e=P##fQ$Exw zGThPIq2}w!nsIMlc|n-1FSP}uS?n#6Lv@Kl<=JmmQx4}V5}?5$^2r#QC<7v^FG7BXYpFPo;j13MJxr826?ykMcYPy=NaSF|@; z=T>kM=LTwjP>b(NfijW3Wi=UI$#+@QM!d4#RX7!C(0-XI$5V{`ET^#`Zfti6EOTn| z{KbGymSP>8y5`*;qP37goo3XSx?Zgo_MLkwo4THY>)BlRlAY;%>G1|odYrdJ^MNv> z&OhG(oxTA<@}r2+wLUCVbUe1lHGy1=KdYK2*1wIR9`Xn^5(=~>qvfS|)&iH;`c)H> zb$%8(>iX*}Dg6tfC1$Cr4>6^z3{rQ-DFaj1v$W(WVLMFMwpCx?t@zWla*J1A{|`Cw z&jzqy>Nm)5{>!HPzj=TE8{y!81-=Z89h~%j(LDc2eKl+TE~t$lea-0AJM*SVn{6Q3 z(P1@1RSd}zZ7solipyhw)fcGV7kC-BHyxvV-MT)D+$1DfftMo)6S_W6U2)IAq<8U4 z)EB*ZXg)0W&+b4!){x_2=eS|DTC@OEb9X7d`bU43He>>xdQDpV;2m^eNno?c?rLSh6k zF;~7>_frNv6eoSi*RC@A#v%utxFZq32?9Xo;h78GM^Gzd(?JU11eub^?n%`KoiQ1p z(l{{3#Kh5oJU=ez-a+GYqp(ce0Z%~3TtonXRL?~OXc#5NB@kr>ZmTX6)nGpl>qaL|#3hQ7SKAf}9nf@sj%ti>vNzHI+zfDRWa{L-4gc}r>P*uM^807(^R|C=f#jW@ojWT_cS?qSc7+q* z>%onlA6ofh=JHPVzWe#eE)1d#YB5$~G~h#@0L4~e)rLRBpszw6Sdl>NB!bMIIuqt> zFQyJRDnMd@SuIR&YyluC$uz-`>go|1!3~ds7A0i&THX5F#Eq9wN+^T%u$NG6lYL7d z?ON}HPC3K9H&sr`GBFuVZ_C?-xRfaC9Zra6QFutQLLm{M-GbR&4>%z{(XSYDrm?j- zHNJM=JLRk49UdjSHC4l-n@7PF5fl4+`+D^9kcfQx?8^;5`4n9a zjUvq*AL10udZtEGU=t2c)Mdi4fl~oG+X3z|Cv_B?m!Nz8H;_`Vjg69m+BzWbueh## z)&rg`)Y_hz) zW;pQO`(!HA+Cx{QE1qmjEw7-t?EJ2x+wOS8SGXgli?DJRHG%UWs?v;2&{8oK;pZGg zSmfNt=tBYuFmhJPlV{$>vDUz=RX9^G1^IP&cXv03Jm|PMoXpYn_!~*bA~jqUWea{* z4YYpyrZfZNBmKNa3Nhdx-FX=hZ(w%VZwNtkNNXD57+F3Y36Wg-L5<~iPGjVx0S)49 z`YDOVg2DjdcndDKDd?ERkd7qrFgQ^NV-VoMqu?IQ{J+DD$m-VgEPWspSzR+|{5U}A z0(gj|%2`P&g?;KP0AtDkR0^OnMjE9{B@#888uNq8-`*FR;$O_5HULmYqbarjry>eX&j{NgyFt7!6Zxss2lYIdek}g z`SF^;L+@i&VdxIK^w&YY{Mnc*nTTsL3L~8t6CHB27$ z$QZkiZN_F$eE2Pa_4l(sAy7 zN2gh5FQcCsDWP%HG4K~wwIO>2+|0+vb^EWtOt+uTuSR@AbfWxY9V9$Z|;GQ<|4+HYGj-&K7ooVMZ<{X4?` z(2)a=9qtdf1FNsB{rUANoaf9i19V(6G1}Et3DU{hS*V5}wp5jir6g(O-K@6ThE=vR z4kGMTNA6`CZTcbMm9@v9!pAz)DAe_nhXWb+HWpkK-_^0(ovwg~E?aVjF^cj;=&s6d zRp}_ARnS7NAGa&_MpXOa<6Iz+CuZhsc>Ac0i6f+?sY>PJyb+y^$d_GNY^_uXKQwFZ{6R$4h4MP0=<4xAWv`5y9c z=w%eHRo9w#*x3EK?9`$cS;X>7YVC!uU!3XfWZzbH02Dk@$AU|#AM3JhdIaxo`@+0P z&n58v*4k@%;0y*{I(YAsCLD9wxj{5`Ey%YyX5YF%9z?YN5U5qS#Inz!%eUrl zO`$V1O0HTo^jErce=McYEC%JSya8&@%-Df(Q5w_EerWi*OFj+Hlwxey7QBhPJR zKcoa-0uKLvP@3exBo)l(!g@XkdwxzDUzW-Sn3R$E%-~v1lr01cg z4Xn2DTL5@u`acx_{^yXvwAPR9Zvo(E+ZQzTTOU#hg^bO~k`*sSS4+E&7B|BwR0!?N zEV3nhB0*B|&0+BSg(m??CZfrZi%nlNbA)g=H1?JYZ&~fkM1&)Nk7-IGrUGeMuE6kN z+1`JZ38gxy?CouQJRw4bN$Pa@-RPT{NV=|K=y50qr?gP5Q0*+V!pG359f>?+^vue^ zB_j_H`;(u%gWy3$19dq8DW@Q^fZu{pEXr=+Mxq9hcY%X~giR(LIb9=I|?P-;|PS{ZRGd=!)>E%H!huv;IDoxE(jzS9bXm^iL& z-Cs|jGnCY`vjZ$0UA+8O^wrc=Nc~SI_lLt$I<~XY$k|-F8XMYOy+XO%dHCF#9i1I* zpN;@qyt(1!*)!WzwOb@%e)&2W_p0HKro2y(=O1fwjHsBHn40qgAaS~HQj5ki5V;Qtb#VN= zS$_pfSq1r+_FsTNRAh28=q(h&Qbz`?=Gmlq3#cc{iaN!>wRxY5KTUTO?EDm{-bo-5sw613+9A*3Jqde=M7=p$%n6LkbTL8-%xQvp29tOw z(RX$H&BlCO2K?=%Tpvi--)OrW%a>HRxWon^SObFWiiD7nzHl5HNqO`@1Vwz>yC^he z?sWUGP6WfotSN#bYds$z%ggez?Du~#UMMGFrE7lsIJFAQl@*48fI)3;eRA5PS}S%0 z$%0hOA_(tzswh*6%rM-Fpno+ZCicI9_>~@m6GfCT;{S@aON+uLy38mE0Y(~47G&%b zCIZb)L{xj?Si3v{qHAwd97!Wn&=5!zS@Xy!JY&Djj%amM`K=Rmt3d+JzK)+~j^izi zA5C8}u_%Djyg}1PHo(Y>q=;mpL>4)>f^ZA0dltZMGOP)2B;t5yvoAa*@n#cC+G=7* z$opmI!64~~(|tl$v2eJ;;wi-)^V!eGy+o-{b9PYQ+6y?Y63x!{{x~KDe6FYqgGTIY5q|pNm&tQMSVE?YgV2`DL&@>tZ1L+Bg z31J49;!Zc31*$(ZV1w%y2tNS%G z^NPtWcP5W9B^hAaeH5$)u(Om=>AA4LaQVNz8CNdUp_J503JXE7c+I$(ZJJr7Sp%Kn zymtq!-E@Vr=A|}}0)0eIc@7(?3Gw)0Gfokn!3DR!wBkHHfg+?cXH<{h^)-^>1_043 z%iYQQSsa2&mIXdZnoI3eVLxwuH zea$Rf$>4$17q&b;vh-*}fj6;fK%;=Mh8#N`(UB%5>$3*E+t~HKq^rcyMgAA>2_;e<)D0_PNS&T1sUq*PdbV!fKR+9K` 
zsR_p74JI0iF&*3^91sOFaZ)^x@5R=OT{X0!m(PCTw+rDh;;eN#6Gp#pMhpU&0P{Gw z{?!W0rg;STyncGNi(|q@guS@|AiMPoefG|~be-dpJo3w%t41J%wKll*+o#co0N;Os zB-ufNv**q#gej6#^*kaYFL0@Z^AzEV58e7gULM}nU12KC>*@jupkYQGgxLkYz#}o5@ zkn^4NH)KNAqYqFP7xPQpK!xy~`uRtB>3ON-&5_5vwnMUZ=z}mnqA9z4S=`I8vSQHWw1tOIYT;)-i{JBH8Fv$R>442i#ZJ+*3u5HYE8!R zq?rcUvzFn#XO|uFpV5#z*FR%i{rQ@p?Bg7gm9Ee;ErMH832UMiZ*^i{tMbE-YC_YO zKw_85!rnw3h|UmUekh?+R9xcj1cjG@5NLFn_p5qVaa3^{xHG8=`ARtRdAJ%O*UkD| zLzmiC7p;BY-wxJ2$93hGyTbd^Kmx>1PkHI2OP4jDtzoi%YEqX{i!d|K^;9< z^xLWq!lN*BNw@swlsM3s;$80or$ajA^c5B0)KWTeVwZH`zOXIy#fwrZV{ii_XXK)@ zl2LrGUDmyc_4QcT*hDpZfa`=UR0qP zb6}JY7qmnWu-vv{D<6u(ZrHZ?Ir*M;v+PrEB#L0KH`53B*{|7SoiL0TNzH zt$D^0=(Fv`UrPbr@M~GZJN?0_wP9doJR9TjSWI4e8xoMXlHW9r_hH>609%r<$rqsLJ+i;&{M$qQ+k`7-4bW>QbIjDm|Q1Q&AdQ1%K)sx$q^_ z{`|sgV7k(EFnT~u_no%uhnUG_R1@V66u*6f({(nysDUuzoFgMk{__sK7EJ|hzu47~ zPcWWD%WI)!09>ZaMZLqw*3EZmkHICQTBp$MbS=^DYR*z>Kdq-)ryPQ-aPLM_}_&AxB-uz_^&V^CkFr^ z{{JipI9NN{IoSSJML_fVcjOG~=cYH{)H}?(F}Y|EF5C7Gt>r3B8x2juV?;jHe+FM@ z8_E)rwyoB_u3xx`N#YZ(nnY&!m8860J$LlRj-ci*)pOM7~CD6kE{fJD9>~3 z9VQcGXc_BIq(uVhIEzkqrN^K%>WUFukJcwI)#z&JZ0PiK39dy_5QY9Ldx9&q&qDn< zg`7f_lE5gJPV6x__B#kXuC{23NrPIs0*Sf5FvkNo`txhH77%NB$T{RgE?mH>84WXu zu7w7nX53{F8)_M4oj?+K*06>60}HhKI3pOA|EC>Hw`YF5_@8z#b~Q8$7Y{0E7izJz zI`PpvbEEPF%2AH0LndtPLllMbtM@obSz8xb6M#8V2M8up2bmyJM+(eJ6TkUOf@MxE znQ(aWjuIq0@v)>-54E#?ktFNf8W}>KkZcG#+tIy=cu#LzdsjNZNao~bu5M2ci7jnS z?XLi!^KB!btL+z4o28oK9LbCH=_p#5wx(2-hok7 zwCbnR7(wdnrzx=#nUpT&o__+PF*9Czo%B%%# z_hJHlLHN|DD2YFHGrnB8suXYff=IB+>Vncvz6K7HK+?vomJz_B*l@UHr9%y+t9(1| zBv!JhEsdOEwZ?pm^k+3)eZeZi1>wz$J)2Ynu@T;5trd)Oq*7WQ7dVf0IZn7VG8p** zEfEGMa%V^w&|n(FLeA!5XWvoV8 zo<6eq6v4_kUJ>?I25Y@aAUfWPN--syTv1-4RXWfwhF~laID0Hzf~~>O(*fLnoN+Wk zWnel=%M==_#YRw6gl3jTdKf?qqnbioNYRZa_%pd`EDxxli=!m@ka@+&M}PG+eu~=mL-k zEQ~iTYA5;A=MV3sG6-hBB?LsN$;BUwZ!U7zjIyIjveQz^LkbjL=_$pAHQtfC*Vr;~ zwOyJ6P7@6ODX@M9Go7odHf1cHaq$;1cCXMgzo+c?`fn=f}@D! z1e3em9ccH`sy_Af^3Oik5^~^=#fRFN98pubUfxabZPd~j1*IT}B`$Ky$C=?Q4?V$u zB#Qi|_S5<_<`eN;prCbKG?;E_nEV+fc}T)dn&5>&xX}%>aQFTT8GPNZIG{#4Q35^ z3r0p2>p^QsVvQ2ZTqZ|ZGPwXBeLznu)%^XF*64jg?19%T`w^sZbS!=h?+nkT^K83) zKFz`6oA|zFLh??IIzoPYcyT`z0De1KFhW|$^Vk%V!zriAe|^y~ZWXW8^&47o?-{rPqp4}^oA~(r>p{XEyDMYo zs;|-4gby$XrAPmX*ITjJ14fd##sD(a$7C12ke(5EeK;YBGlllILqUJ)t{d7+uMh*P z!wWuYK?FBy=;KM$AAg0KkbDS85{L~>dVzyk9A8F#zOB-0=Kp&nPdu!TjhcUum{Wn> z+}8wvr6O>_eas!o+gU!ArwO(KLia_g)HhAd?!{?nGXIe1x=Nu%e(>S8dGCgn0PV)E z#B{UDk)Ttqpi_xT&t)Ck%S9JkPE}n4)m+wQh$5;nWZKx*M`R=Vm!*HhT14Vhq+kh< zjuB+9d@7QRn9TW?&lP2nouh%%cPbJ60sijpB{Ta;6sJPkd(^+5+!!R>dKy04ct^zV z*a#k}nT^%Q4- zV^v_uJ*aWO@ys71Ub5=BCO0znf7xBYq^8}KB_g9vt*p#0?7gspUVv9Ax zu=cluCd1~E4`c6~!&qLK(ObM;(m4P4!6#o2$)X>MCq4D~dI6}!Kdf<@u^&&nsBJI9TXI)j@Xb3WuYS$>@|6lIINJ=r;P8z=r-)c zf%c#k!sG59Cgt-7xCB@6x&(F5`j4)_Dv(`Q)+^H++c+D@(zn{8)$!7lof&bTorFO4 z%i7-DPJE?1*Kx2Z&rBlfY9F1Gn|1P(nY`wVH09ovDp? 
za^BGMz|5MQgo8N&AKWM&yg7sF2 zR1j#58JA9F#PdoS*+#?h9n!c}Vpsllzg(p<8X`+UXF~tG+)lg4X;aUV!o_I*uVoXD z)!6xuZI*@ow9Yq?tj$x~UI;N4OIBBAbL6oQlhAFKotx@w7{|VXNzikczAZ{bA(Z|n!a#*rJFZOK&Gi+6_&)?Vwb}>eX zMi(h2p_X}ZoNPNh+zB=L)&M(MN1wmzu}7K$O|D@)ygRE6xVEhPio8}?9&Nmh{8)k1 zzW}-%CYSb2vxQW8w>#`Yh0AqN?dbh}8qlTN>6Z%adC1&=1rs1uBw0rX5WXuQxlTfU zTe?n7gC$C^S}J9=@O*cvP{P*2>BUdEVrhIG9k0hKc80Ehf3use4AaDg=iXa3zRa%H zmP_i-krM&S%`o2;8!g-qt02~3-}F}mtibi~)oj%8|H83rWAj`@e=GZU{ua54^)*_% zYrAe2IUCkLveWG8$FE%Wr)M_Y9B^>%r+dEt{U^@J4GXsL%itiP z008*KaQwd*9RDk_@jrj!dQK+)mD%vL-Rwa9+06}b^cNYiI$qYrU+1Q7h2PTemWVH+ zqK)7$CfOjZA+8`!9&%lC-Q_6yQ$)-qk&0<%5gyw6;3I!eb+3woJ+5Xc6W}Ho0~Z5V za+I2|!z62?-ZdjashU-8_gEq3;l@aUXEuc<`1wEhdI#oEpms?&JGO1xwrzXIwr$(C zZQHhO+s;mM^VPXCbEZz!RIT6eR@dt8r)76^bi6ydI6FCdyU+_n)~NE4-vii~GJvKv zG7yCd|C0#kO6gNoe;YIWRpoJva%sdK(_^!%w`2EL){;`g7tVr!+L&sohvT~Xx1veJ zg;)ttGI6T4zk%=^#sORdRkHy{#KDn|2*6P!#ZqZ!Hi4)C@otkPpC`6k448 zjj)cD4*Am0&jP>Bf+Pm0VD0PD6v^wrtwv3o#(l>e#$ZTax^`d`G%P`=IMNcRIELa^ zVR1pVg#HXv^6T!SjMGXA1%vY2a$xKPyJ8aEqzxuT8Z5IbMDRHRvcvzx!Ffpea)0CC za(@md{}Tt#wH-_S|Ks4Vt49xz|J?3xu)D$4@W$4MZI5zK(QZrr3)w?X{J)VsCcnrY ziYoq!-zM+FFS19C}wtO4)bnY*Y=FAxHf;itYS%6~98kj46g+l%xK2$Q5yo?NKZkjSL8Bz#4D8pTY>0T{`>7i4 z4pBbkxYtNvUp}5IA&?v*&oAY(v6Z(ELH-Vr#QfxZQpLZ#kmlCAhW^9+L&xv;6_^_% zr}LA9O%szkIw|DxaFT{Qe>j}EdZlOo&%@o#%OR_-Fp=%kCHDTJX9`)9iJCP;tu#+h zQMLFe4OqLHY>_N7F<;f;=kR^=6>asl`oh#f1=Cg&?65R1!346;ADzk5LGq$jU1Ewc z5DU^lb4)U#L4MkRQbO+^Bgu*ebM{KQoh0n~75G|d)W%Sv za-)eQsr((|B@jC}SY0=P_5%9UJiuI&`r_X!xPe5ZwOU4kJX9cGaZ2!9Z8rES`Pebv zFhsutjC=_LD58{*pkurt|^dL2C zEO3JXftR;s%+=-n{;JSCjFRfmJm#tS{h}!;%1wB&c@u!~JJOiU>H%y1gbF~sO0IGl zp0LIsREQ~fiz8Kz(nTS%UZUKw8}geIi0M!4f=+?Qb)pHMEP*#Ni*3>rFGHl1g(B_R zj2J5*D<>p-3E=9EI3oTA=MI|3BmpxFbS11lr|?BeQG@Leth$pN+N)lEb{lP8Hq=L4 zUSlIDe@CGm3$3&m_)X`YY~$#_N^y&dupyU!DK`zTijWvlvyWWh1w(S@N)NX`QG+C) zX|ReM>1|_a2D1SnFxh5Qs_a|`8W2GTi6Yl(ciJurF1n6i@l>JLo^Y%<->+H$NCdF z8e0FAb<4q>1tvA@t|$YMq)}!K9{7n4xO#F@zm9cgHwE&Mn3?73wM?bKWtmx(9tPI? z#m91pI2KUNjyM!4n?q(~?XYztWMYSgm|cwbm=kU-aD7(;So*94*%i$WE;Rq}4h=mA zy7rgO00v2cE0vHTnN1i_enHaD4B(QVv7ilMCZ?m5g0fiOMU5?I>%?y_EP1eV5mrap zZ(~g>-7wdeqZwmJX!IjmpC}uJt~fmw^FX0>0(>_)k?d__6q9E*7-zS;C`N0~+t{;y zadZvA80bz8E|1eeN7EE-Rjnm@xmwd>|B8@9q=FY9bKyjlN+p@9vu}Utc@LRF>+k8; z!N+obud`)_=IRl`bi~-!{XVrTbJ~JRe^Q#V(BGWUQ zundiB9_1r(j za)#k3 za>rpEOc9@<+dL!r8lce~PhawPdZ#nE{JUN@nD*~fTcelG_YXPA7#9J+T5oQGYS?dT z{9A7QSM*9AKgklZx3Lh1%~x13KMD{ELvFAK(rJx(KM0~fW!OCi%x^icEq=V5FHsl| zlqMt7#S)*#XAU{f>sV4Y87%K>Hu2buy(w+8E$kV5bsStQK|?I08yp9BuFYH3d0APY zWuZ4qgUO^J>6l5JLV-V(^Vimos4`0YzKmsIwUFodCivCtvfVR)BMH)%QJG)2C?4y9 z9V4W;{<0D^;7!NMjBC-)qXWgAyu=c?ke=YxnE2eh11R<0$bwzsj@;mNfrD1Jef4iJ z@d>^B^Y!l>$$Wa4YZb_j9b(x*LUNvbHw&AL3I|yNxD9~dimuCz57*zbJpYcnTR(Pz5H@_23!Tq3eFx=6gQ}XYn z?rY68kPyz7=1meY*ez7wBrD?bXyUE6aFtXd3mg#K0uw_jQ@QRLXH{zfJGq7vZW)o} z;<4WTMD~|aX+H8+)A;TIIjE8jT;Fe6U-8Lo<23+zYLh7Ik-`B%asuk{A0NV5LqCOX zyS>MPjU<4`8PtzF-aN$^EJaAuX~pbe`+0HkL=Zb{o4PZ`QMP|HjG-zp$S@f5k8>;K z@PeJ1N?!B))(YL%zL~R$M`b{#c*CGZ?7QJ0*;9EG zf?!rOX4DFmDPi<7|95{e%JP|;bU(GQXr*4s8;4@_QWg!f#CH`qqn(zlw;)|PlZII; zn4B4s+$)5T5Jj3<#^o%4zgdFUr0T7*S?t4aR)|AZO_D0u0((i?F-rfA|JCcjB$HGBYo-}nEJ#s4)=*Q{Y}yUC9DeWTm&XdmVk zm)!KaZ`bo-x6{=$!r~HNq=qblzldm!lq^CcF+5HC(z_dnOfjK|&31MogsCuSx|4$e z`J-!g{xcrne^) z^O2~<+N{<;vL! zj^NMlkNMEOH}M-juE4)Y%GVOa1g%PA5Jx7?r^@iteeW2u3|P7Hzr)@V0NK}~1U7C5 zcvR#cKTDCMewLgpFI&~JZ}}|)VMs*5tM|1DQ)uinCQ`^4)v8rXj3h>{=l?{h6P&O7 zE9phBd7ZfyJX2Jph$sj;rVP2eb&+sHlu93?u9~FApY`;zEwV?A z6XgN!Rb^X=@tUpQ(V~`P)Un7DyLKZD@Vv0*;vJX!LQze!P z1q--;Ub_nLUXzB(Lpc-v11n?A2u23hd7KB3YLZ*wIF>}y0D=@shQpxF1-VQcl_I6C zCUR-1$U!;WT0^n{$scm+K1IB)QZWo47k|{DAQGc`4AvWHBe6qD9q9! 
zVod6v;45KRw+m_@3Hfmqx}fP^(O6z9rIzzJ4Jz;I^3Jf?lA~xpCn;*_ShXZzRI5p7bTFK$q|>b~dLAh}cX{?1(rs&JFN-nk z4^Rqu(K?%@NcNgB&V5S;hmmt}aAbLUiR{0D2o1iJD9(Ax4_4ADJY~Ez%8MnCJNq2c z%@7{wRFNGy_;da?Pm!5-DW$$&qsOAnmH&|GLj?cRgX(#Bg-X4yas&0RuiKo_Lts#w5E*y)NQBU^umUl*KlRHo(8W#rPCaLX6|?`}!5wR~0KQ5a!}kZvD5SnQrEnzW0@uzLo{1X+u8XaJjDWni(o=2q_JC z&dM4|YD^axIu1$9BSuy_+%#%bgGSj8flFiMLp_8E>!eosyQ_odIqza>&TCDXodenfhX)!8Z~r8aiAnqexn3_`GRueFpKZPY#CL&`)5HY&q=(hgUMQ zSRh)QSS=Og+5hnoW0wK4Lopo&WfNyN(=6h#c(;QiH!+fCOj8Bo#}=lDr|$-10r*3d z$tvDJ6SH9vOynF~BR;P`az6r?lIrORvKVzQflrB=hK5X=xXS~IP8hSv{qJIY_jAh+ zp$RcIpJB+O7{atEl07i8H9-Y2zLgZ3a_RG7-t1C!g(p5LI-!>A`$D37l0}frOpSj` zLSofmyJ01QEBu04bHvm2ng0uuyRkSElu2)E5zG6Qu&p(o z8z!Uduco7fqghWh+b4-O$!cV3WIALjgfbT-JlUenl6HZWZ6vpK;0ME$rE|v@?&^+f zT_w)L!pQY`gmqur-3^4xv80R{F|8HgLs|5VchI@0m7nvb7dBh${;^Vs3(yz1 zoDARjmPcFto9cTfXNMWkQoyL)ek39*Zb`dRmU>{!BSH=6s_js87? z<4CMo-TV6mlP*O;{=CS8gH&5KAEqYQ-^G6a!pNJ_uXp*4`Fh!k{$*V_p96)rM_3Xn zWnH+eB&~;`E$=~z_Vl|RS7wgCTmJ&5vsYwDu|4%}^($C+|CFcdhP1h=_qHK1yKe%6 zD-15wHJXxV{D?;8F+~tqG+(2AJApjO3B3-onaQwCldmmez3+2*Rdo_=>w5L~?fx<> zc`v2i=-3Q!f^3LcVw`gj=l&*|~*oYk9qt8fRt4R7>076X>ArEmas zUPGla$T={mGFW?4$}O)zp{Pk}jXs@EuUvg{+bR5)H!q;3$NghZw(&k{`9#TdQet{& zT)nU*Sc%_Hj9#(WXDUZ7x5ahWx;=00PE6gte zEG!Qm41hF{KAD;>q)0Pgm`pog(|%S~n&@{!s4SY+HO$N419O?T-4?-~@u#CoonQVt zyuuPcO0ht>+DceI<_?}NS=|rI` z;p9YAGaw*P%qDd`{{pNgPbk3j6!J{12xWA?5Nry_<2I1+$OxuvbdU8o? zYx4+uj zZ@bB8>ek{0Km&k*4{Pm-U3(J@WKDqCgQ}f8Z9HI^j1)_-X%qO#1=Q!qeNlQ78VSUX z@vivDT~{KSml%v7S~oiEYIRK1NM^g4=YiyFvJh&da zs$sXT+9WxKNicW^Q#c^U=mlU6>C}bZBOTYc)HaqK#-}RThc>QKtSnPq`F3|kTlsc& zcRj7F*5>HL82nM*`l+ivs9=6FkGJI{j)R)+OU1mFe$f88cq6NbU6}R3nV4S6>|hYCqj0lg zb!rtDDSrP0t)e>e!Wzz|Fr~aqP=4e_qgdX8>{&nrmKr>;nS^oO5_1tf4fmLU^f@fg zqn&$+tXa&9Z283&Sr3NqJHlcmXcE!(aL_q#oTq#e?jQ?JbCDvN7kB_2EE+7f#^#wi zjqov;?>6pibin?#@}T8F6ksE$|Nc%<39$xwe$h^G=BTLH6O|XMi?I$yz7daBe)!XA zO(cBySL+oHW0pXoj<20KU5oayOwQ9OXZ+dP>r)6A#}z?eYxn;47R{R?KD~`*YG&i^ zG2M6rxzth!(3X#^A&?SWW&9h@w>8n za5!%bnLAWTL&EN&RIG($BSL4qqpB1bMhce61SBQ}Jy-1YbXB_D%#no^i0qouH9(AbN9Y6t^cdJw9^7TIM`%tk*ZogBgkm_iINE$%Nhy8dCC+k~jnEAeqR4_vPjms6bi zarXFue1Q(yKbx(rn|M%pi(Ni*lrKr(hyo;-H{|{PPFg2h}g(^kt~R1tJuX# z*?@%-Jo+PjEAAXl(yBWKl#jVHnSh%w?@;Qu{k&E&sYQS75JwNLA}JuYK+JSy zxELm2Ui&=l!LHCYKawd=CpLnQOa1t5r_pKQD;WT%o2;Moyg!DDP#TvrRpt&Mz$}Tr z=nc#4%JiY9+_GFEB-!M}efG|t#IW{nCqH{X=vuS1{)EvxlP~h<*R1SZ*{_%rnPDF@BVY2%xbs9j zH$|4R&LIMc?ufIE|DDPDpKH0QC}s@{0sx={`#UH3Kdz;&uDOl5ldkT62tR5vwpeWN z-M6*qedJA-lvt;f{InUQG+t#$PlsJ4E)+DDwCyDAEz7#OVh(&HAkL0u@Zc+UC(~1z z`_M!gs#s#OnGLdt^`#jaCs`7K#I-@rqF0=wl`Pdhhbn3ha@Ulb#6h!nnJrEwIdGv6 zsXu_00Vesnp=8FK6-)uGK%p=d{%eQgy{djyJ@K=@L)U!Kusc|+f% zJ^QdRjjaN>Uk4M%#-k{7hLv0KI3*~1$rY>a4WM*u7E;kFdATOkma}vKLk#zy)okxr zf@o+k6}M3nH`;tM37tN5w`f*>pi*+heGVNXI2-4f%OWtu(PV*y#U<}gC|SEvX@oOT z^w&E}_miD_19-ZHS(ho}5?Jvv#-SOez`;FH%YVv**Ik|XH~%BC{S28z3*tVK;QvOr zF*!SYfBtel-iNjfC$LrdYUN)sV(9>t>AqDy?+hjun-JSZFje(KiBxAG=B9plg}GfX zeSH-&JJBO5{a#c`jrsP439tfc*_e4goqsrlveX=B%QRx{?d~q7f^>dni!P5hvXdsu zgR1Q5=T3!kzZ&dOo0F1i=v|T5Hi=~Q2XC1^LT!{;BWS4ynN`PQCcJn3qMP+*wKr+I zjQRk5l46-gZRVeg^tq^aSY{c{VR}1GqwJ1rUEGbfX><6?1i9uz_b>Xw_n;;zHuyXV z@j@P7-c-(JZ~Qp(XY7RF1W0^P;l}?c_^IFniRp9$o;Jxl$DhO|5}q;l!tLCSSrh~{ zT&@asZdM!HB2`Tt73oMiEftZ7e7Y_So9dF8OK1(kMD9v#-q^C26}e@MbB0(uh5=2y z(jZOwyaCEVRYgQu+VfG$8?S0Ll3w>!Lw3g)%?oI2^^>&0Pv+nGVCfTGKT|{VjnP^- zc3|XdA&{0@gd)59KLT~P^(zkIJ1P&I!zBkBXWL0A2HY&q6Zx&ATOaV7btuZ`P( zGq1}LX+CZYoO;uG%yMCATRx*hc24&;)R(0tz!|`fb-(T8v z^Ll!{;a()7LWzWqw-9a%B~g+u^v09Gjdmog;kW`L03Ap(ODFk^tJr=!+D78gMH(pBVrzxPekOuDyb#cbqxnWj6wE4#(bd9YCc%z-N@Nr1IOTM&X0ykbFfzEQ$ 
zrP|L-$vLi|zSy3^FLjr{0K)PjPP>iniq3Yn@-z*t%J*Drt_xkbYno#*Pl^*iDSuZuwE+^8@EBbZubGqI1iI2u??kEJExP}nqb$8m|0)Ng3ZYS@sg+Sc&Jb$OkBxvWS| zq~QaeWOX0J;+S5KKrueN+0O0D$(mb@V6#?HXRiD+RE9Gnpg2<3hL|(6KFZ&Jl5NpLd%)m*meM|#fmj^;Wnw!xO*Qbj?`zT z45NU^XZmIc>#(kDkF0j-ac`($mzO4g6Bh_^Au}0%5Pmb!kii}V&n#rd95^B81lErR zJGr&04JTwZ7Q_j?e+#~(l9`6XrA>!gu$;j*FQjp7u59{q;vdvZNwxQ6F>~SVU+o|M zGvk88H*%y`A_fFbV9`$wuUCY;oVS=^hIC4`=Nb-8r#08Z$uup7_ z8R{FTQ(L}$p2hGu=?Y%)Y2@QBuy#=sW2xe7L5)7=*DR zSwkb3qKSoWDV>3HLGD3CL4O}o+f>J;okhIG+^IFav;!uRi#{l`i_w^hPgUux?Ps)d zrETCflj3Q(|B_N=b)?(Kn!44s6bUfq62CR>C!q6sPp$%Jh=B1#YMz7v-g56#9yOpX zE0qMqUTqc}X4-6J?96bqd{$}FXK_A9m(qWmA~x7M2Hn`W9Rcus)~CbzP1ZrN{=VAx zo|C+q@MlZnp{-_pwrIQ}Px~8bi&0|d{G8&;Q^vaHb`AsF)jrsVA9sS*wG6>fwUA_A znx6=&e*e8j)DypnxCw#x(0v+yBqh+3?8-h%ndE6h#dzQ{V>KUl0UKBwX^*x{%8q_z zorjETaiz<^KEvcpDgY@ZNkl1m0sUL^XdP?kk7s`O2AVxmZ$i@6bm$fU?2e zH;M5vKGr>wK!74hj6Nm5Fd;JEiyYgZEfKErg>t@c_>JP4lOS;R1sK z6(j;i7|Q)ky%=|ySx_7c1}M%^(y3*m3QDZ~5gYr=K*CArE)&#?WBua-&f|B3(oB!) zrTa_l)=0L`7-G2(K9D!U?93v-+(bte-BvAXdO2NCAgwsWzOt<-j^jT4WVe+-K%5w3 z%WUu?W#}(qR{m?lq11(yAld<<6ZqK!bisIpTmexm8Yaw;3$T)3iqY{8z-)X~LqXgf zNGWN5SKpK-$P(g(H6pGBTw5t8F!vG>crYZOdD^1*6j1H3eTsg{iYjEzm=f@JPD^Za zWfc^t9o{ zX2uMhiRaIP>KJSZn?4i0Pp)!QTZ3clI)JoKQ=Xo(+a_A61vIyjdn0P{U8`9$Zv zdO=@8xxMYE=~U1wQR!>GAY2_{rTB*7{?G8Uju}HaeOmcg*rY{~jmq?V>BK`?7GQB! z{;YaXi(1{GS83|U?Fjzgx5Dyk+$yne3IgD4fQAE_6sak`QPK8zcsbW_ObeI&JL zY<QRc7Lu;(6rR$x;o-&T)YVc z@h3&f*^Yr`w2@5HIIS(!iS|9=;;i$z*kur{pfOQ#DBk~y6SjjKyQHJ^zj+xT7-+zZg%Gtio#61)kK@M|E-eIUe3fA`A_CO z(>qnYsgBj?*N0ImV)JJ(!<#iv_33bK^o#=>G1rE>lV0ShYa&m?_~w;JEnE+3#5{(& z>55fupXRzB-@9u7jA9XSXg%{CIiMHrB=V2D_BMX5zksW=@0#qX{?f_flNp}5z!8DoI> zEd{3rVYs{KBI(fq+ulNLgybcY0XQERuv4FjEggX%UcQQSO(RAbf-s3=I?I|USD8r; z#PN)E3tJ276+*%I^P9=R81JA2bB&mLSTa>uyvT#~(AnyK0L70U&e5FH+v z9V$ijyr6J$2jg;Uwoo~Fc8jy46t%Ln0fAcWf38GUDzt6j3*lc6dg3RQS&nQxgZycp z0B(z4LyPz39`mcvpeUU424Nbi>^qzdSMH;}GMXHd%Br_5BIx#0k&=bxB-R5c4iBqr zpiwOB9u(TX9&f(+7AY~A9!?xh^?E*jut?^1cX)q#EY%%}&E@%ipw!`sjK=tS4U@2Kx5ih;oo=O*k%h_g)9cQ;q-}R293fz^qP$s=J zZ|GQF)RpU#Y0O*NecY+7S+E%|8RS}Mav_5DvuISuEfl=> zXTLNT++8Ke(0_FJkKTRm4qEroB6Kn=Emt{Vp3vSB&J~plOJjfACe2pIXsCa&QyuQ; zmvLOqV^d=(0#nx>$w?PD_q8ppt$C&w_UWNsG)sF&c6%OfMoJ>WN2p_%jp}U6vkU7X zbkGT#6Po!h6FU#MBh1V3=W#mEJg9EDGO!f4}9t*3;*^ zHj>}L=aJU733aPoC%wz6Ku|Nf8)yz}pqa`3tAGbEnKg{R4YS?;MNQXy+-W`d8qMZl zG9$Bw(ECp?TapVFRxB#9u=o0#12Y(>IK9dnjE7r2|Kk!cn+E1wWO_Hs!KKS)YP%1i z#eC_AjSUGJYRcd)P}DeV`A`R-xTGhkwJ~{HUTmEB5W4F0nkysLoo9sCxu>m>=WO*W zO9BT&QpwOE%fnWgHvmMQYk;#Zu5r;sl z_s_-ZUDK^Mv5d=X^FL^GG2Jq3N{flI^_v3dU+_8de!b;M1Q~X0ttuRT8(<+2d{qW7 z!2SB|%20bm+29-Rj6;@l=B4S?vmBkzKoAMJx@0u6wh;&td`*N)?>Ig8&yx}ZNQKds zi-NK~v)$FZ9fTIT0xh5lV~ewwgWI4PNq+=)-b`!i!lYmWBR^VMt+xG-Utm&-t%YN< z_fWJ6Q6RtsgfwqdZOi)k>iv8HL@540G%h1p@5?YBdEm>kgUX1&+XBkoYSPYKLazb#Ph=0%XKSu}R}_OGdds4xp&QzN;XHgNYO-+5Wo_x@@rwwB!&u=1{yb$~ zxwu>!lc9*bdSHR0^YjnzjhDb{xz~Ala`U1g(AX>ksZKrP@QVq-LE8WT9ffj+`<&kAK42HN0-Ig8!RIcV^k zxNW4utxoCgYJQD{b~i003A0M=#z6Bqog24yNsolB_Sxd__Sn@kdFzNTIuX_3ILvV1 zoUpTDGI0kGjM@#x<0(DeiLgNvgx>c@TY)QumrL+$Z0!wN(T5zpl|s1~yJ4qLan@10 z@yEiR?z>hXa-<@-5XwFe&&a&a0o+$U4btBO?FhCI6M@6y9u&;K;~9F&llhiOr&<&a zdHAn_9{FpOP@*=6uS^gErjKzZ4FT5w`M#U*tA$#hK7eQ1BseL=qYr>1pdYM0p;TFW zm7yjTh5gKKxS#vNX1xR+zh*^G(VHv0U3&?*smUhwCLtk`uMa+miD!>W;uiKwJwEO@ z_Ez2BK#4idik~f;Y2x6XZ6EY?vCfpgl)uwqhC1^Nr7|FXL^vRT>U6b@@ePOC`jKSi zb|;FQSRxQDzQ^U?$5LVN{u7twyGcGO{eB)&^JqNmV-rcbSac3Iu(T8_30g= zC&LqU+lQSUG7tfxgq_G+@%X2pQDQwnsR9>Z*A0S~ofC(GR$Tp2In3rhT6W%v1Yfl; z1WppxY?!sgQHv{w`GddcT-8s0^gIW;&+Dv={RnI^9T@Gs1iuq4(~{ChKu|c(CoD^U zfi)tr2#u*nY@mXwTvg1r{K``tutrtGgBmH>hSB)Oyi*C38n}hO4r#dg>h!7v{9+21 
zA6~fzl0d4m1>W?%KV8^&0b*pO%`7xPZUeBn6YTGJfKBf&s})ZO4Ec3>GtMj2Y;`4b zZwa@B&cFMf)0sDlew5*^2ygC5X8Ge}k!0pumDleSa$!|t14deN##q38XH&^vr0r-L zh>aHX4zB;uXwXOipsLpnX9jF;Y-nN{A?;;5gw^cZ70_uDQ(rw>+!E=l@|dOrM?Fgi z@ZTpZ-4Oz=WF~rvN6;?sqLg7b+#<-%-Uak)1^NA^iMvG_=%8slixQPH>Fr!$mP`ve zXY@tpj{99zIl;wlI3?Sb3tBj|{YNL4awxQ}5OE_BjTAA&v8p3B1Wt$f8?aB*v52X?3_ znK`FATX5ht?d~Bh%ROw-37FjdY4>@5oA4BeH_R66@H08BCxLG>x=oxnJgSVgR;?5# zR$6fvBFNfzwWziU22OO^VxHMMX^a8*cNX#%pFZ-5E^H#nTFKYg`HL~w=<3DOI9@0d zHzpfIic$o(eZ`&zdWlT$Ti)pMAt8T-t`9sJ{XhW<1;n1 z6fC5(ApAw8%0KzQZym8D=~cwZ95F~h>FbP)N-n82vFys`StiJb8hW4{le|*H>x!T>+bR?Z?!Xr0%yV>mQy$vQ$~~2%14(+a}i#aq~Dc# z2Pr9hH?h2Zvp2d^Mg+g?FK?qpA3*WGWG|00pFGl{im-83f9Z6yYf{{&I$ixiS>p7k zo>Q~o4Xo0Uhu?R1ML7{1!RIy0)qk62m$Ce>`~bRWMNrtrnD9vuwdx2@+Vo%R#uH#R zlQC>s_v&q{c1oD`;nHSx4z}sFwX^52^p;hfj2aKreWTHO_VZP_IjN_q{ASv{z3u^5 zc11?zBkVQ-1@+ZcKM>iB9kE#jF(gV67P(V$q8spG)kL@ANL82OVW;=FlkHiZ=U35F z_~`pJFxev(QNQF>#!XIM8hZgU-A>ke-FU`y4`fK_oj0@F?g3m4UEQU}759Kd_+nKt zu0eBUH-HvGH1w)qlMndcTdD+z-Giz79_v63!_pd!JhPk%TXuMJf+Bx-58)h1Z?X_9 zb!*+Xt@He`FLia@8u81%NSIU>*+p^Q%--0aqJ-8uuh(}{zQK0vvfI`_aLHo5YNY?vnS!>&P{5O6D^q=iXagzX~buNlfSnS3bv z4YC8vqbjXuEDc*v*){Ad;j(1ZFi?`oGaMNjDf5O%nmfMbcNHyNFR}-*7zk3z6)OlI z*GN0``m4?`NQ;A-FK_R2>hX*vyfq^MLpk;MVH;1~O13H}#S_Uk@Z?UqZUI(%$=v*eHSAs4u~Zj)CShT7RZ_efQ1Qe7^skj-X+dO?8QT$g4_vU8E*y$)z-#2k}- z^?LqA?h3&f*Wan229<9zX%;pH`}(J(6v9AvWar#GPGimni>sv}0pwJh7}CM?X)L!pD%+u5e1w ztoVRKgtE2`DK*4@okj7Oz1j?2O;Ss0P*dj(hW330{bR^<)|GC@OP>OBGVLl@e1_dc z$sS|jtBGTvQ&h(b^R=^0z<;NgsW8+o2~KI9Y`rN&Of_0OEkD*C z>{jm)T$6*!r{S6yE$GkKAX!yGddNIdnYLpNm@oHkc@n z_kMb7UjOJ(i!Jw}0UY!2Xdob+w2_&}(>NC7_+74)yq|2}w!(IpF2eBu+>mx1Ur*V4 z0qDjGUPuH8jQ$yb07-XiQ+e(EEb2Xti`D)%;uR8M0zDlmwEK)}BE<}LVS&j|RSDsG zuNFs*bUbvMb*+Edd1o8h>5|@Y@-~&l+1vW`seSfzLWu+7mz&(L!bu?v<;nMc58xA4 z27v!(zc4LUEVQ~K5^E+6@9TB+Q;(k=;{DpmU6mFw0OBP8A>lmTuiS0IxBV=~4!(n$ zH65zbSCy)3B={UC`r{}Z&`drBy4DA{O7-+ZvJB#!cmA#lld>;#_-oLbXJ_S zIi!c~x}*ZvwIFJ)>joWjm+L$;mJ@(4W3)z!ClQF{T(8v~k61K{zbF6;9o&~^);I(N zK0F#L)BZQS8*@L%Ni^>{!g-a?v;ZdN!5$-+b73-xUtLj=@};Ki&xq@x2>vMU5R{$- zNKznc`Ue*%4Y#J~OaOx-se|7ZTbiR6$4Yyx01e2dUuVFKxd{n8($pqc%1zm;10@sK z+=i}?!#B=QB!y;c59J+zYfLM<8t$0uQ^kXa$L zLqE#H&3qGN+vEuzF(%tlY2=HkZpNl7|G z;!CAR!ej9|XoigB6UcB~dmWKPbLv+kV)m6WOt;qnA$nfO18v&<67 zoz^fxl>=TU(vv^mteB1J0o$-X)GU3(ZhybLejKY1+)LKk%JY77NtuqZ;r$u!PhruRI1>Tvyfc|eQ zI4MEFV*WQz1C0Ox@Y|jKuZGC~P9Od+=)IAyqq~ivu7SRjp_z@X!+&u1*=oDL93yz2 zS6zEcT`TG3glbQQJwVG&Zj^pMl(j-(sU#U;LXd=r!++16T#5~5o#q?(vR8}SuDf5v z)g3$Pa64-jy=GPC1Do2HT};n&lBu@AwJ#fMu*&kZya`9RB;~_zT^LTs$@2vu`+GL^ zVGgcK%c@|RTTeR97aA)d;gRDK_xRN**J00ey#LsFJ0trdhYKu%9ZO4vP23NfHR%9+ zce>mMf5r!QFlBE1c>eTgbv_Mt z)idUy=YdcDW!qqB)_M$GSC=rea>@H7{_>B1V8lF_MOJcE0p%I03 zAzTQj-^ckaBqt9@u%^fmF)WE;G@R4d&!Ky!sJO%ruJVio^adtQ&oSaEB6{yuM4;ec zzbuGS>(MK_;@sz14Z!l8$0H)b<32Jpu^29|iE6gDcf7r zEe-{lQ<29T+uf&bx6jQy6^7QB72O3U+rV7PzI}E4QwGfegy4QgM>^p@_qj^~m^d|> zxkighdByOW*qf{XuF^*K2^SB4-U3`EqhBWi`Gfqx4zWg#Y){kNUjYtH|1>-&5m|0s zKHpNI@|{MO#XBjw*HpEy+Vkn?v5*dXYJau@jrmXS++u`SVajV>0{ZWn2O>9Q=ANNF z13s-ZY_7aJRlmfBr+mt*=wyS@oJC(g%GP?edr<-86@^t0uywgZx}gOp=1p1Mgq*x$m>0)Rk%Q zDD^}NTEB~Y^v1!(@4+NQ+<4r>4tE;83Xm0kh`;;rbEU|yvQ065NK@`x&HWI|BVcfQ zpc~N2{HH3%=n73>_D=dK?iPr3>5MbSD{}+5t@s*?MGsLuX{~LJ5!oz4y(30kAxMji z2Vh-k3SMiKlvz;vxgpPqComXYP?4~K+mZ!Ld^rzIcwl0X&UN48pJG8LjjqW%3o6fR;x1ZqJ>U zC-jD}9y)|SZ~uHF-23#-?15A%9c6vw!f;D@7zjOo$7^@R);2;fOjBm{H24?vmAr`5 zJ2hB09EDAs#jVXL&X!pB^gpK!KM#8~SYA@_N`Tr4CEBCHx5|gO%924*Z4!X1meM*1 z(Ool+OMwnKCCd%bQ5;&1xOp+n!~WDD^%8OwXJDU7$frFYIaYASqOhFeDGgKnDP!sW 
z{}_A6AlSYL{)>wr$(C)pfq_-tKeW?!IxKi1qwibH=BfFjaEW?DiC{#%!N2Tw)t5RT25eUAtoT0uHJrLMxy8x=U?`kgQlQJb^&8#@e$D=`wr%Zq&M_Dup#&?#?blxTkMnfQGkKRVr!A zPnowQ&Az?hLy*n3)#V*%_kw2OSPC1gXSZqPrTq5+Bk8N~%kvci!X6)3;9$ zpK)L~FQ7t|I8_yYVG6N2BKK=ESG0aQlXh)gd31&t>BC<|E~JB&{m41Q8`on|wMgvy z_8ztl*c76i+%Qqc^fAG7?HE`u6U`&tZz(YqW_g^1$io>;-yc(DKcz1v(*2E+s`RNk zyuaG~*P$+FS6e7g+`B9K%lBF)?RMDAHHrEFbckx({5qwV$}>m`suw@b*lseqx^RY3 z-M?J@^|o<39Oi%DvPphV#+%RyC+TSDo)V5t4Vg8*;QgmxKw8zg@cQu!%|DcM`v2d4 zLC?y>(bmNJ$30Z4YyG$fWS{5i9eMpkMv-tPeYoCVKa-vGdzJufns|@_1*95el=GM) z8WN!2uemxT3JTa|G5J;qUEHg-TW2b4u?XUk_m>iNSGe}@Y}0oEC6KP$>vt@o2j^AhDR|Y5E;-?1Njov;%Izf+m<^d2diSWv zloRvRQ`xvWwn@*AN??tR=ts7D$x#7_SOioWML`j5+{I!jgbhI@zOiO@FdEsdDpP5Y zfoM@kjAyu{IFMqL@>`}BD7oyS+ZjWTQY3ywDR3p4ZXNjQ`M!N_1_?>HRm|Woh#t7I z5Jb#y_Ge3S{~?YOy#E@I=lHGKV0-AzNIz0R8?x+?maDkc{4jP_jK@^`kocJ@IaA9? ztsi!zE`4*vZUCwl*CLom{2e`xHP$XF?3le{qMeW7eJeZ0fsnUKCZDMul4Q|k1zzvg9i%te080{5qIA~_zc6dKF1|0BGv9ZJXHVao&u}JHtn22 z<+<7P(c z)i}l)zgtaUNYdlAubmUPo{X0hlL?KC*rfOlCZ^o(UTz~?D7n_{j2kcHc^QG@YXU?i z%ubT>IWbP{21nAGKcE<27|sEtaJ@89Wf*afPa7DZX4V@4ZE(xmF_PIa^|U~Bid~0q z*fZ@O4@ei*9x3GThzEB0JtoHr?0tI2I9FXh`d*A0eSK>9D$KZ#ViU%-aVo~z)RgyF zAwu^pH9{rP*sL-(Em51fGde1Q47=3xbYL#nu?*Kr8)nnpGRy@n{On)z2|NzsE`^7_ zd7xPy>e)z6e6q0I97TvK4cs=E$6l6Y0&ksud%@VWK+|4YbF_-1Ydkx77#CoSM;hI3 zwpXc~Unm8X$|BApnw#(C=$5(7hr1poIi63qLCqe;XU6FSj>8X^XWV~{#x2EDtmUi{ zvof?NaAv}2Z=K{TW{(wf2j-Zgs?69xAAk>l;KlrK4ZOaNP>DK*x_K3gQ%;P@%Npj1B4hA*Wr9NmLF@ zl+ZM4S;kT00*GqqnBJi4D*sW}z>g%Hc~|@%_gDL{u$yYTlQ(%K_0Fmm-`U|k1kxAf z&4@gDy*w<}b~!e)uv+<`zQj5Fb{ymTejSDqdnz7WVaKL98}${-TgYxe&^y-yueL@N zc8%gO)aH|IR#Tx2N~Q9iL2~XCvUgda;DzdnY*m~~o|0(w0zJk%W}i+nSndQXZiV)C zr?z2wPTs5Cj{IljcCCTVgbNM;5cETr$NS$b`TvIND>Y=C)|*f_&((6qLYz%_xx6>c zMN=*AYb7^j)4QnGU_s(?1{fgf04$_FA9>IKC;|#DUK-Pgn|dMxox15#Ix47sSe}Zb z263fj4c(%Oq9%z=!85ZlY@-6IWyY;2G2O+9Too@|ZO0l^38dA;bm+?${e6-mZ^Q{L zV3F6s@p>9|57;ZqvN%0PS(hd+3YWw|VU2-_&!g2z*PCgM<_azX=OF!N zt=ehX!h1QCsox}urX%#6acHl488ODHR-`e}e_)KEgD;z!CE>i1ZJR;w80=sxIKYSI zk+Gcnu_lpknf$mFP+k&kW)E2YSgDW*TA?pp=wiK{R~DNbSk#-enwHMI>i+63T`?-W znydviX!lM&7d>o8_1eA-`96?uS~oW!=EVy#E=y!g_DqsmBF{?S&aF_#xB}sWFwuaz zbhe-HcOkX8;U`EAN7RO@*Dy}?tAp#)RPJw~E?*d_I|>W)=H$ovuxK1v>)Y{1tUf1{ zi^?&L1%1}?l zXe)3L5Z;A8s*uB;=`P)U!_aapRgn{=F2IUixuJJ9 zNwBvIL}Z~1cA8xCGXK{1@k zkDi&>5^n41k!8|~Y+S6Pf-*3FW+0SGSaN(T(YPaE5QuHxc-qtG#ti@WfQxe(fQgc* z&^%}k=ogM^H;M{=Yb@7!%3ogwlGxCEGyBIGw2EgCuRl(y_;4rbvG4w9X?{Etxmy;& z%!w*Sa{6tBcl@vmGFj$sRHJGfULV@&8PG&gj@#077x(eGooCq+aJGE^)LIOK@LRdzr&yZUnKeu9Z zkFqf2$~{fv{B7F+jV5Fh7VM^N{JCQVETH@gHaY_>iFxC~*jlVW{ZGXB%(gJz330mq zD@IDqXBvSHIgk%03s?h%rBo4x!sWes4@(NzPlLZ;sWf~1MCq7IF;|pmgpzosN^Ht$ z<6cVGnzP_ORPxsjog{T2Ks1Uib};)T>*kLd>rRQxI0}(SJ_d`BoP__&%kzTn+m2I= z0qfqBtS~eLN>qi|L4S!{rp4UOHNIJ!-rGPe-;t7;>813#`lJ*U{8JYkqn45SNWIbp zj~l{KB@E#?30O}Tz2+JGq&x$EolB$_-e~BNIEpphUfW`*z6BjT;}otDB;)*UX*nFW zG3I25!tBX1K7+2Lbn4BrH3qZP=Nc8!sUcEpdLC3Fhp0lD^d~NUAmT zz6^2I$P7n;+vC{lkKX8+QeDh?2plZmyel7`A|tx}CsdVP;w(@m zoSO4~jVfoPSV4rUd2?sN90X`2C6aWnZPiNHB81X!>sH>d1;c^pKW&P)LbSO4Y)+EG zu>lYhAhHM=+$|7l7jLe=pQuj4v!wc$VGF^~>XDOuAIuv#Yy?&8gY4NHnS^UOMK08A zu8mtfNSv#CA0-sw!=3RX@r&N9)? 
zVlh^egP5MqAPS?a-ye5QDTNx@-$56=Jl85wYtYkUR*PZqr7M{ zYWn1JyR^T!cwVy|O_z}AN_+lTmq#k02UjJud2G_JY}Yu7x_sWYi|IUhw|wy&d5g9Y zkkL#wv$QtcZ$1b-rok`Eiefb&ciq}2wxr)yEo(+r88NutJ zv^vpaLN)6_%B03tja&)3xaZY^*m5nwnQf|zY2H7${hvYN1IT46z)$OP?N2k_{}jsn zpUK1jgckoRH0)HBirr_0{^@0gujeN)!#x866nlI^Q?Oo2ayI{%Q?#2 zE%#kY&l7{J69qJ%x&=$G2n`641M_MRIkUG325#!5#aCtMT)h|IQAz8Eb|@$8l*r1g z%?FHYAotF*K90C7g@IO0VDLwNyn4?!65*mJ&Yix zyFf}dkP})L@c3lT<+_vpcSd7urmb^iLmi0{Co+?2c+Zq)N`rO^Ql_>^n`xYc)yxAI zl_j34azBXJ`U-WA39Jv`1eE+BezA+Km8Cl1d$W_CUs4+7<0B2>Q_5DiwQ#!S`X5Tb z-+M8Ua9_Q)f-hF5AFCL z%e{@Q##!Gw;~k*zRV6kbUpRmKweH?#Hw7Pp-1I&B=m8F~IR-*b1d3#Z8LSe~4u9yh zuNUxDvd)vUxadN*ThXtz)~;wdfigW#l(L)R(ZWah(l?<=^|EhIARuEDk7X6gm!%i# z-2||yeyt(@o7rZ{??r1(dmWC&mh}zHCfSiRL=s951OUpP2kn1`Cmuh=BvZP7Yv&!Gpc$c~R3Mk59vh#KqC47?q#B!6nNb=E3Oi0r zR6Zd`Mm{Wd00j8Y#YwE&j{lXt68U-lSEJj1FK+AV?EddNNd4D><5c4^)MK(ViX;EM z;mE`|^{!+;WnxM^qMq(wb$I|}NorL9hSq=Vl*C&yz5EkpL7@L%cCvM~F>w4zkNv;7 zF?Lj+CSMS>-X6s>@=IRO5daZ0vp<8(SPiSazby1;5KWwDA~oY;&QU+F-!p~Ij@ajW zFL))me}2zU`Q&L^)r}~@w23o`N-{;3W+ELdi=sHfG&)VHtO&N1!Ukh=7cPy6gH>Z0 ztcfUfW5E+nRJOkD-^nXurumrZvSd@ttDHP`zE>(HGV|5tlr|P4j=;QfQ1cfi339!u zm~kkzrGM$8SJFgD2$>QS+Bgkm)Y!Wdg*`Y&jWr;bN2J5mBNVmBqx8hJlxK_NU&U0q76f{;}| z&59|*;A%fFlaNpB_VkL#jps)KGevJgOShJa9CV*%2`61!rOU~lHN?SV?b;`W*Mh68 zD;dSuUoOf2pd>M_Hvp`d^JE^C!LP+b?mE6|BabdR|JtO~uP98MVz~s}Q4hDORkQ`p zmYuRyMe#D+NiN(Xuk|zQ!w(LVQW+E>^yxtLErbGB$vqP(rAuH^Ixeuj_0o#VK=1z+AXXj1RFMU7n5o}oK+?I^*r!Mu#gc^6%xF}9Ev z({G(G45VClAw@{=XeLy%Mr&1cF+CINrj<|$EOfP8r*S!Y`^FtNzN8D4e4ln$tO`JQ za6DcBU?qlouUz-lIXrXfp0Rzfa~qFC&zW-ons@0IJ^BoR8*~*RnEfpGezq#3D`U8< z3jrESJWHJIc3x^xDIbd8(t-vmQqbN0`7>E}UP7gLJ0-gz`#VD-x8M z%k+?ZPUGDGVpQb6<_x=9hIbw6ccY&z^h0z%=0p%$Uk&Nw7Nht15g5Z)ODq@x-Uo>zn8MPiL^cEU-PwaF z4iN;3BY=C0t7A->UA9xx>lP`6^KY4<0127yQyr5(*Cp$OB>*d7_A3|>dGXWoNSGq5FA_49jfmKoz^LX$ zbd?7X4JNpVFOSqi9*Zo2aiiW^E@haZigwM5{H2$cjS@wW>t>Z`Bk1)`YOXv$CP zCn=4Kp8a>F&Y7LZbU}Blx80duT&Co-0y~I;#J<2fl8zn<32L z`a5cXUuN=eG)?~asu)kGe7_snjF)>iu69_Qa-(T*T3n8!3c1+8(|8zSs*D=7RBs2Yu5YfK6!NFdzK>ICcri~ZdJ1QJ@MSKiaQZ! z_1#Dg6BCla+vr{N=Q5WHO7{WJA~ii}O*>c!UioQ5kXzFK^7n5S_iFD30wwtE!w(wvyQa6~dXOh#0ZUg?S0i;1_Bk-~n3 z3dE7Yd(gAZK_5qkTT33&%Ob}9aRZ|WoMjb?Usy6P*&~mR4~=At%|zpw2s>Xj(gHWe z3Q(D<-1_cu&$)?mI!aajHj7~p$k=I`oWArDARl!bm{~Z@_v6BVooMAU2r$Bczy=|Q z6RQmJqi{x>maEx(8A;8lMQBNZb0e9nfn2>Mf`&BHIk&#tEVZdEQNDY5)pIZdx5n*%dyco|C{kdeP1@U0QUiT!!jnf7ZJs*?`BzV6=>i1z zc&6p}5WOX6R0cGht!FIj%Btq?V20Z1Dw}s^j{So0T>@c0U>j#iM~LH-EMVm}$^LZl zc@7qrVd`+X;fhnFpS1j$yAcSTBm;BKdyrYj(~d;-{a@2ib_lOKTeHch$NX4uQT9{T zkF`G`!sxcXN$T(Wwd=(l)}@Y4(Vo7!MbjeXEXEj|UF7Niz# zGLz{=S-6Lwd{DAv-WT}aZ>#o9zJ%A$(O~Tr(VzO@8!r~H-uC zN(5JuB7+AI4kx@SpQ(*bjh$^B+7``QH1iYIbauY&uct?shnu^w)-VoGS5JS*d!ZO* z-N(cwLc&0)nmBFs3@~XD>BegKMe6R(N153}lCz=VH%>s)X6qL8z!&+@Vz;Wa+66O> z1Hoi<_MutP$aD*jV~yj8tYqb8@{I6o?ZAXteR1f{e(giZ zotiz(<1(e3a^ZXm19pX`W>JA_28R6m=+S=>hJFwiz^{M({rqr#9s_$5cRKt3hF(7* zqLazLV=th8psCkICZyV*m)`zAUbZo?u>G$|FDx-quzmE8h--me*)nDJ}6_uAJ90NAR{$E4<9mpS&iP7OiK-NOX`mBcraL%l$cvB>4AbLp^{4ZjFQNb-i9)V)y`GY#gHy9 zkSk9@b{<9Zs)>v$8l`HXhl;j@2#jE^2w`_EA|fA{Vis(0pc1TgIG#^JU`*XEiRh6k z*OxL9V1}aCPK$G`z+v*DztF z{v$yrthfsR_i~H1El0X(MN#B(3@8ML>F((*A|oIk{=m$NqXB|&92q_Fln@%Zl2*!Y zGb^Xc6e9&nc{ziNE`BZSej6EGdXnAGw59u?_oGE`7H^8K{9){G`XyKPHx!=IP0ugE3;Qrq=5<&{jl<9lR(=$L3&7-_tHWQ_M! 
zsoTgT^tlnCR4Y*&->dOO$|*(ee$fMOxkvgisV)GoQ{7YNVXm66sIEwO~Wb>rDAmthW7Tdbr zxbehUvrKZezmcTO-5HB!cVIYpqDe+H)NdH9cK+Cro{;HIYuptZUJHDjL$n9NfHq!G ziVu8S=R_FJk!=&)xg`M2=Z6*A`yyP&GLR<<7`EFQTd`8Quq!CDGX|70%Cae`|~EzU8LYYLI8ItOnvXUlO9#sdV@^ke%jG?`h)$5CKxD~%duWsgv)2Mah(@Iol(kSINF4AR7JAE!+`C!oc!a27$?W{CFH&}d{G{8QB zp>Q<0X4pLtwt3xO?@=G7%47c(yD`e!=A0cx<#Bh)+^FOGRLZ9hOE|q!gw7KO_pgyj ze@dWn_=980$Tcy00nUl@!DSuNMD{Y}hVAuO)C%g<@Tb2CT-|X4UQ2NXQfn*+qN$Egh>T)%gEZlrB_z&n^wumzTNjee(@EGzS*?I0z zz9MxpDTC-Fk|aJ=KhrfQAxnYTsD^`GXqpR)N&)Tj^n;h4WfsMrB>4By;1wH=`t8`V zy(@YNR#FKFSUr#G-t9!Zru^_mW#~|*e)MV+)NTm$oO(*{rhlXYCi_xzg!T{O;Y;9d ze2pHLZZ6P$FB7g)dh9{LIrIv7hSIR%#nDCF=wS@&1j9AGy0ii*&12%r+Q%)(etH2* zLb^O$x{z*ETX$aB0dSDIE)acYT1Py-WMc4fN<}4;|2hD_@~8tvlxRUxBC5I{h+HJm z#7nYA`A|k)UJ9yRkzDYr^#r0YmTJ@pwk(01(dJAw^iWDN>cI_LpGT&$`bWny$Ho61 zjQdhe0K6i-*fg{k=f2l_d! z);&@C$4$`&!*!}w@P-nUrneBdzt81@a|ww?Np&|7n|)9<}IAGU?vF;)T;JvgQJgtdp=R_yujo7&>0qFdcf zf_T-Ic04-|7z7Wev(~FW0Xp8s<3|*{H1vS^aDc5ae%|mq&x@NsjQ$h6+kUY2)2(sm z6fyqnqvIFaf2Pl1$J$!xe)LU^2>*T9-oIlwTUXbAiJDewXgK}MjrU3N0gzvrAB#s9;Pn-%z0jHOT0Vc;pf!)B0)eYL+J@L%H(^Yu+%z%HT&9v0 zZjxG@*A=&Sw_Qv)a&;Av6_(GLCYE@?lE`yr=FAibq?NzdEYV`Np2+lhuhA zy*;rdEs7WVBQ&FJ)+l*66`laMe|lmK&h$#mMFu-H90->%YR{e7i6=YMucRI?dU)vu z>)^`hAs1S5rf5Qy<~CYA%N<7?q8+g+ZBmvs86ZhIE zppWY?2xizA8qgKx_*lG@^EEftI@I5Q>JcweDh#s&$u6qt9Hfc)mDnsgQV^df6nZ&x zhir1ygMCAC`-t@p&OJ?L7lL&A#?+Oq4Lk7jRm>=%nY`^vInOX2+43jXxQsL=jk4mF zsxH~9U^N)gfw7V^=7jZr#ZLp}Mh@aeKY7<3bTDzn`U!F9vV93Pu;qq^U@gtL#4I-% zp^+xk>Y&E(qf?Ah+jSYtJE|XBEk|+glFE{S>1%+tuvN6Tit1(%ic%xhC9gnZoE<2HV+6Bra|b*yHj{K{yV7Oq0S2qA~`CeB0kgq!*hg`;1uwBt4mTT zH4fHH4LGXc3#<~|XJ9|BKF+TlNj78?tZ`J4nX@x*=1PHe=E2z`L!7WGYg7SmD7?&2@S<#a3%Lg;{v7|%Lg)#m*|!J zY8Ux__d?(bR_I}2p*i86DMn_dCLfM;a`3f9qgsB8gIE4BsY5zX1%&7u|8B$QyZ-$3 zZwM@&*Ke3VPltip6;`9Njls6eFs>LrhB%WeU-sgaS3PA_%bP>%$AFezV*=+-9OsKk zhm=E!tVv*Kc%pweUFhlV5bFZ5ru~*MM3`YmgOI3ygI1u_q#Q!umiWc7!9y{?09jKT ztDM@44ZAWu{L1i@oe8OML7l0JBoQ_)x?9KBz!9M8^y%!Z3Cjs>P2 zJ`Q3&35+J%Bj?>3$RJg-p!q!i2EvsEysV_y@!S1Mg;Qp%YPTShzk)H1vN{hZz!X- zLkUWP7@i?GS*3JO?%|v1^P4Aw>~-%6iI4Yw-_6K*yREJ6w`kSFodFRY89x{$KsQ21 zrn0_YsRGF2*|tGp84=I-oL>{4MZa=MaJjINxv`{WVB8I+|_Wc`u+mLiD9 z$Q4JjI7;Sfi#^8gFmshCu|+TQhVngpz`(&(mMM=dq1Zo)X#D9ZyU{xe?o@mTOzN;j zx4Kg9(4|3!b=44|xEa9Kk-d-5NK|Z7Z7(Hs=$!A;jx&b$B5o#Jgk+p>q!YN0>4k?o z()LQF=DE2r2uFeN>;%vyIG!`5A!iE14^gHD6po%%^8JdW zhwhS{zCNRWsfFQ7m~b3CS%b7E?%0(5Rpz6Qmj~1_0O`~Q5|^mRlsUC-0#>T*Azj&C z(_ZxkLZL+#QIOp4~*6Ut{oJCRiAa+M{eC))#tWDfT z6>T4rhUqCj!8aN~7l`=tW3iNAIp7PCE3HUR@^=u`WSDRn;QiKws zT*@F<#;iVpJP;bK1k z==WRAITH!62P8DNKERv0(F!?KdRN|O&Olg&NNb3a)wr<<5|i4!!JV-}1<=U9RWlET z$GR<`Z{`97t&w~Hx+V8KK86L&yEX_UaBOKIt>I?va^zui?NDiXFRu1pk>t;OamsF! 
z`a4bZ&xm~a*~AMFD0y~diU3<;h`TSeK!5q@qtf#|qH`j|Oht$;_0PaJEl(xM9h63$ zNGwX;W&3c#*%tH zBb~-bxZ7j)f)Tb?u4T68npLMs#urd^;#xa0mS=T)58^L1Sly^iR?|@yeb!Wixdw2J888n| z?gU_O*6_)OfCdhp$hFYiKy4kv1_>3ctix_7Mo$Fzoa1HP6)@8;?p_4^!dwyhMtA$Y zXBHWWy8C#S!<2sR(I!082yYTMb#4}g^Agw*d&*T**pB9xV&D%7!|Ac>Yt(+O1kb@U zr5k#1Op-c7tBc~f@V9(hYahS4dB$eFi8koW_?Jya^T_FWQ`IUtSFH4At5LK>zwP|- zgoPayzff6#c&Ku4c!+mL1j8in?AnfS6!Lz1=L-YID z(Qc|+qf?IF0~)rI5ZoP@6j}Xy!=bmEp^M@Kw~%F|ENnwwA=>FqEu8m2mG;Z$^WQQj zi+8jykAk~Bn&jN*usH4zo#3$EI>D3Pen4Qbi^=#8bW$4$Pix=*n8sRM&)5q2(`uph zgS7uwYvsSA?Egvw{=n=+RfYfRx%+|H<=o1B+N6V9kna3TuEDdcqwx)N0{WdT<7^Ezn{gY?ooc(6+nv3N1*iL~W-o*A55UnR7Eu?u7ub zW8PFPP>k<`8mihuBNWcD&nge%KDLyHSm8)jLhl)YTs0#+qagK6^8kFsFsVG z)-y|dbR{?)(^L_V&a(OV7&Hugin4BUM>dGcGYhXU1JZ-din_n$0Ev^zR6KMpri4S4 zHVImcIa;D9f|2`W1I2RSIz9lv3$JFZjFqEU+Mu_nAJYS)W^z=QFR`zMlljyz8f*{uJnHXgVZve*-s})B&6&x7=z3z)gP;#G;+tNwkVdq zR$h`MF)8GdI(rpg3DgDK+=%;)jz&LNw_xQ2Q*PiS!VLX8*DN1JgXrwT?lBBhV!!vW0UuExo!^?n~NoR@a8gr(DdAV!K(ZuKj^cihn zp1-O$hZ#07-q>I}RAD_STgMdRznIfF-J8`JASK8Xfkn>239=BNe^*}$E|oa|1847! z7jmaMCYjbit(NsD+vdl?_-Zhd!W?`WGCximVMoxo#vJW2Af=oJDG9m?K}H37g0-r3 z*g=Uyb~z50e7(XFRXK+xmFIN4pvi=02mE05nD54W7aSfwG>>2@h8k>IE%$ z7GYITJBd{-uBeiBGRwAN310(jIi)a9_n)=UTc+t@BTnO{eni4x{cbLRM@PsYk^ZdQ z+B3Ba_^hJ%mwwv~;kRZK>~PWBUF6rOJk|B$Tf=~2jlCWoJ)L{5GON$ZEpLY$J`RG= zTu0_cH8Vr$!_Lr`AnOQ{7qLxf%?hCHn|N)AM|aB%^%*H+{LvTd<1sPOvygd&x3(XH zz4w~~ys>4Dc^=2?)ZaL|mw6iDHuViSY z$=)#f-MBGuwK?Mt8@d6=!8%vZaTFoI&F?ttqNwJ62kkwR!k*H7u;a)4CLRlm{|!ot zF=3D$JGWZAMcXi?I$7!iPK#C#Nb`kA z>Yo-=Cm=ZfUrG0W2?G3+=JQV?(SOr?l>Q%jkGKT2#DW4j`Iy}PuYc5_Ag*W5s(u!H z`M)pvZzU+MpArTZ5q5bn>n{zswiUo0#GBWDX& z17{06+aDLysLU0)K@VTm*==YKxo%)|=Z6f2HR2$VXg^uR7&%*c(8#WV7~iq+^&X|$ z)L$r`V9C7A-6e-95vYKakqCcr(GrjPQjq9yV@}Q##d}k~4#QiViG#MW>DX6&QnZ6q zYfZ5@0be`;=V)|<3)H@;0dKWSrKlZVT&j1-?AD$~J+9+`-5Q+2hbRG3zat8&X4sNO z^QRCDx;`pxUA>L!Z(r4$XwKZNk-q1lrO+!|KbWv3p*~rzSM@lC555iB*@kiH5`+Nge<$$l}EUBxvF3~LmxCJ~H288C4Yk!c_c~KOu{9RK{3z0ib$T`|2+la@U)ytI{uj5t{8iLY zyX3q^RH<+_Gs4KAfWyO~!(8+iW4A`+kfbXI)4bkfl^Ml$xMfDy(G7o64|q1Q-|_Cg z1Y~5xpJ*2F{NZX8{Dg2C6xs%mKalx?XKkPTL|VJec+1{vu$BuZTkqTImDReal;ls< zdOy4rLA(BR7r(GIzr*}zS~93F$5isCVtVv*Yk%l=|ND(KG;lKcxvSBNEHMEav5baaf?=aJu0qv(UOUI0b|Cinr)EcLvv z${(%v$9#aZcy#9f=0oJfm4?B&0x!rHH-PrFSQlhcCrPtOlNR)Fbu*?bF9lSHc6)I=tb`ib$t480O2i8yhD7vu*aV;5QJ)Rfa2fcRNI>NEVB+er z+Cwcf-EdD?BEJp3;x!z<33J;5ZAeoLKk1)1`sh-wGgh6bsA+29W+6{{6s>%cIlpMu zi_h3hzp?k){pb0g#I#H7|5yu`AGG>^K7VTir=Rn`RG0cu6NTp;?hderjaOU@+Z-eW z{{vF+;9#DBymB#04=kC7B_Jp6toZ#t6(!DNE#k0dtzf+jmVgZ#iWk}+D^xkoZ6c6G zO!!A3ONCl8vg7YX5wr+JU$X)}nbkt7jqzV$$8waf()6mI=LxVhW6ZS*nbZW$!Syj{*(?TZ|jY*&l zQ7Gc~c|;RX@x3EuM1!Owxj@C~EcXkEq8XvMkoWRZcfUY9a@WVGQO%)IDSJdn_aYeW zA*fkeghDqWD#pf3WojBR0Z3?Kx&&y-+zUV~hkx41xGApK_ccbm2smI#dqL8La74;5 zD+Mg9d~`4|i zMj&+sA?nJaGlXt1LWY)LLWGS;B@_-}uD%N|SE^vK!VI~xgvK>67jfu-2dz44nEtft zq_T#sp;7DRP?U;>IF>gY~vASQ=J7ZJLz|$Y;e=8sEi3}i!g2-QeK8;Th5`3!6r2mC27?)xj6(4LGAkMz&^fK zjhZKXmyau)hGm~~G;%6xEaUDzJWWo`u!MS{rvn`qM7xmLF9w$Z8o1b*{D|wGDActI z&dCB(Ekq~3#(KU*H5wTUPlad*R3vq?DwSMrQuE}z=d+U*h`9QVwkB*m?k#-A@3|5I zv23S-58i`2HoRz_EH&&Te^}^i12>P**C4hwPL6nwt_^M73Bxip7S@0e52J(NFPc%F zUa+rg?EYf7eRAcLZ$5yYFpTYF3`OC$aK2>%pbB@s2x~7U@XK0*p#pq3?y+O7dHBYLDePVyoLy zRa3O1?cTqA(|ol^MO+PelY28KtE%{x-~b%`gvzt2Vsi&n5XU}Z39vO3j^LN$b&<#1 zV1+S$hS{uX_OTtiQi(5)7n6K8q0GO@S`ZMP?rU7^YLIiE*jAiq!JogKSph!THX~SD zz-cY@qfUqZx%8AIY7Y(N34$2V#PB9jck1jbrso~@5B1w{+wO4L{_J{J?~L1o;YHTw ztZN7JxS_l3`JDOPseUO9M?NjUxrd9+qW38OCzBW64K9ocU_gZ*o4FNe00Ur;x;;Ku zU(6i10I@Tb`B&oY=koxiY11py2hsTCh z|J;FVKjgdA(gSnJS8>bM1SOpM_4H=8W9k+p0c|(L8R}_Wr9A*H{4xi&9NlFEsF9K^ 
z>GlS}CW&nU*!h$n@_^+EkoK9zDBGf|Xs8nTh6kdGDp}qsJk#05n~&EOmDjx6oYb{Gr5#V- zYTTqzFJX9-pFU^LUDxh1|AVe?iqf@BmMq)0ZQHhO+r}>2)-LR_ZF84x+qO0J_pF(< zx=){*@9w?&a%Dal5gG9wMBI!^yYCrV2 zn>`6BrP}&aeib3B^2+Ar5=C>>$KT(nG}POTo#FJFz58j*^uu~dOKUf^6= zuuNT&SKAd6CjJGH;-W3B8RDww1OLb5ksBg(fC4(j=$`2NkT>a#d|=mr1q&AZdIgl* z6y6ZGDqZTE($REk`h~W#*t4xnvk~fE$1JiTMJ7pgQZp&iD!e&X1Ej8I~j4*e)_cCE*+5u+-9-jvB&7_%=MaMNJ2H0w>`KSi2O+z86`b6&fgQ zq*;ona$*yJ$casMdFi>Vs3BR6X>}ykOBi)vb)GWR!tJeMBci9=5-{th)31%eOJ)#3 zdg(}kLez)ahB2{1S0^G9+~UwH3kATz zuz(5VlSm&iE~A)|xOss^RVot(@2uT+ssS$8hMM|Hb=mTRto-#IhOJ~EtiDe9d8%rO zr$b+_j|I0dIZB3_)zhJRt&%94f+LEruSY8`Za9mn!dzR)!e__(u?x9htU^OH5OX+G z=pIsLjOIXQZVVb>s3_PA2Q>`$e+4y2{Yr{D2E6ePLjdxY+OdJLf*u7G-O!>EmkdhI zDv59k&H>xG9ef{xwCP0rqe5z_MI)d^E$bMg@TV|bAZZ{!8l^p3QyW9!-4t!{EMWN@ z>1WbFtnMjFl-YoQqSU}WeG%-Ph^82@UvYUym?kaT@AQ?+8TlsvhEpX8YA;aN!o%Z* zE^k@|_hi>+$deeWe|e341x8ZZ%cv<>RO1&v<3gWqw)#>XzUYNX%`Zh&uT}wIUC)uw zQ;p)y#apEgZTiQ`2prtae{Nz>n^i`al7ltx5P(rIGawwvZyBq*SCSh{?ByH9X;730 ztp5>w_WbSHNu$Fbj?gH#B8Sc8w;iSw&rh12;b-}n6k9Eat>%A(Ahd6XApiZzvpT(L z66Z#yy=D@pC(#TOB5`kG-h8Ot&?NR$sQol&oAP|<-AcSozxj**dP05W7k^KDjYIiWMpm@=NLS=(d`Lo|6A&{$aupp;G&f4c}q(l-(2r)RZ8@KFqX% zth~YGREdQh-@UDQnmbvYHyC9`(6M0o4FjT@p5^LaC_PA#eUuV&F*4wNt-^J>Rd+;mFlshs-QEg z3Bna_!x;bCHBvT9gu}N8;%Ba#?peF5nd!0eO6pwiSv%Jc__jrU|A?afUchN5)w`vC zdPCpwTmNg_d6S9TYwMOp^2NqE^HHM}n;xEG=)Xu;Y|*YVi!pih9Y-^^h^EcL+pbWR z{*s?^mSZvaw%L5gt{7^GubgLZ82U{5P*4F=;ZSu#j-GoAD-eMn1?F z3CXQy#yez_`eEyDZ!iLCbO+O$h(}5#yDHp4^>|1$HV@CD5~!nE^4N!(sMTdK31@~p9l1I_@)KN~v!muLyi~?0YYs7;z%RBwNG|ut z>x@+>ua^qNb2*$cgOTOIYmDFO?nHh&brC+?Hj!t6^M~jqnY6y6H5>>Qt2fwRNx&)L zq2xGzMdyjmH1aqObg zm?3d3U}0If&HCZ?k-691#~s(Lk5I*rb&|>xY6u4xn$kS(QU9W+q_*g+Rg)z|QRV2+L!eo{TU+REVG(r}i$@O#Y zC4S9F_{f(~d{qsUTn$|ZF}+qOJC`7x=^R(&m%=m@MjrI3W1UO%OK{b=vjMO{BGmy2 zzy=ojxLbYPyw=>SFa2{B#%ki_<8VoU*BkG=U)$WmKOX3Jbe4@Sx4ppHYj&y~zl_=J zZe@D+)!DRumlwF9KPGn{)VDNwOdeKSi9fqZsrE3J5l8P@o*O?HnslO=o)1D?KIY77 zpc}P7Z9&3){|9z-5Okf2@w0`m{b-l}hwAko?C76?Z|daoGxJ@Q`|Q>kV0hQ{1uekW zwb2@v0oxkaZ3pO9DItIc^R12RDI}Z%K0ZY>-8!3qVMc!ax=zS1oSKE8wT!XF81cXm zlcwvHFPX`E<7&*iNN@PlpBv_&4bgMNKwdMqPt|l{Lh!7+HN!h7qS>tF1wU<~HyiQ> zoA;`OCKO)pA=fD|BVE8UG?WNLLD|~lW0v^n;GfjG1r&)J-kcVzE;_GDUN}zz{QaQh z-U&w*@_PchraNx(PM*M#Zd}L^JZ;<{)S+zw&$R`3Fb1{2rDt}~>V?X+>S?a)Lt^H1 zcf%n^MP{l}8MWXHN3k9|-lji)&RzkInoifD?NiJAOVdyZ9ZZrILKAF+&awG0WO5I} z)6Dhk9AGbeC<%Fz3^fyb!U=D7+CB+Oc2WrG+sMmK4Nx8kVAFXtd6V1&Mai753M^5jhL5d&K^r?H-opp3QY{9X}E{waliUC ztSixY_B|KwG&qSZTN0^KSpQ8O4R%H{`N_^&TW6GIonpK2;fmW|8&M-6{NEp~vAPqc6^M<{Ax)TP!--;q* z?O_&LRR0oFw94~WF_Faa;ebgjkWGn@fv~reNaaUaMAge&4xOdYL;-Mb3`c9t5VU8c zV6FhYI$P1tOfR`8MraKnn*sj;WzB)(F+RB)Z-wH@)@uVkdp4ip*asD$unueE=O(q3 zTsyUK1BD0vUc)MS{+WIU4W>yy5t^5@x)ZYMpb{(R#HCf)xMlAYl5Id*3`wGy2;$ft z&Q6CH5fXoB8Q`+!$8c!~p2=~kciLemsdH_g5Y{GUgD(2V)cb*T{?q{xE5~~fChgJ- zzsl=P1xpJzPH#)?C2T``H6K-7KRsWH_b7wQi zj8D3a{b{+{WVy~S2wAV58JQQz9$a|}7HKhoNKe>^4wr03%9vMW2MO*WKw|lM_}n}7 zK*Ty-l03Jo3lS=2OO9Y%(g&`!S@}>I(CP@ruYCi}Cyl$n!y6|Oms0@zQTcVIe{(kJ z(QKkO*6aiR-!~Lh{kWHrAL#J-$Hw-*HY^h-dk1^he;Sspa-Y=?(#>1bW3+-}6INvj z-38(V37I!jtk7ISiZO_F9XWzUIBNTRU6Ei~W9VIf+*OjnF7I}2;UC*LL`T7dw_q>| z`m#&Hl^j*fzMYv|L0PN)WTO1z-r}F&K_2EI2MXl)RGD8K;$D-|VsxDveNKym(@>TdBrJ zbJ(ZvAIVOmv2s;?;;N9Pv=D;ShWgY8W(mFtwA5YxmQBk&^y?z+zFUZy575ZTvtk`3 zzy77O$muGeSj*9b?#L6I3gJ;>PhM13q~UOL;DDgq*j3QH=wNlY)}R41^UV#Q-H~5X z?=(Al;S_kE9FP2n^gbS0OWUH-=P7pqqrzqQo-?D!z9tB4j)@xm&8^)Qd5xrnfguJh@5X9CE} zNZ6rU_LUP@LeUd#?TPXC41Pa{DC@^FO%c^r?1Fbr(V1D`$cC&xCKLF2h!=EcW3sBu zFbsmd4eTsbZnj_Srs7Yv9(mKR%A*o4L6#HqcZclHh~#9&sHmkd#>_P)l$sV5QBP!N z>J0oYo{5f`~<$sPaUpX-#iUcMcL{g1B(Z#f{!JrJWv# 
z&83oymna+w9l@Qbe;qt;RP>U3q`eA0$QPA<2HUbD1oLGEEsow^a6YkCA?#!x;b0iu zO+$q1%SI^ndD>25-g~y4sH5z0`TDw%f|KE_ZQ)pYfsli-5JsZ@A}FQX2(eidc#REp z448|-)R%mD`3`JvX}*^YUQ zdkmDXjn;S&YuPryiRd(7AVX_a&%n@>}8iH~B{rZXj-bjo1K*O1qQ zLk0&YU0xPyKqlk6AsRK;(a)Kk**hT`^h_BjOwp3rv2_A0D2D@6l{K!dhZMU-9Wn%( zoYdle%EMKAMFLm75?6@Pd_lzhF+6!9<&Z%LqGV^vP?FrCo<$|1a2ET9{gKf7aLx#$W?s0 zL;-o2oh)g&rj>r%j;3lCVqxKw&sQM3RNlsdJdMS(|oY##knR!t^`{nb~VJ zLJ&C2VIJ^mU$yDpFK}7GE&(GTEGcauhs%Yf>@X9AXCX2hZm=get$>;l*=1i!jy1DR z2f&SVF9A(y2QVE^gbzPJj~#OFpUAa!*6eOS#I--!G_oyxZJa}&?jE2$S4V^GO*hW# zV;EJ}$*3G^O_1%fq?oJ^bZ@v;VDbWIJ*0@(DkB{@#IQ9o_A!%0Nc}2M<-MKYymGd& zdP>fVB76&oA{=$ebbCx0%G)A84VMR0H!`#~W@oRG#;EL(vQOBqhpK3swhgPV;O}S* zc_@h0W|o>NUOfkX>oFLsw0yUgvtny@2;Emx{w8hlm?Z_E1A!LXQDV#3ictoZ!khW2 zx|0sXWHK#|xb{ua&L5e3Glukz6h4TcyAaBtEs5KS2S6(UdojyLwTY1m2vz(5r;Bct z0=rLBcV>t263-tv^!tXgMbjh^_AVy`|~;*S>>Ey;b&(-S0 zwKZ~c>+;^Oh5QG!W8SE{ua4k>4R_pASJS&{Jeq4r03+{)`t2M~f#(AH4!(;0n1mYW zAD3py-u)rCJ~b0D{Rb}At~hK+y0_U>;qu^)Nu1B;ps7%rPXU8IK2h(UXr6%htrx_~ ztQ+66?d)0aq$Tp)qIFM40j)63rAQ^UK&xSwk#_(Utm>hB5myT$`!<#OYh zD7@^x?l5#9t+EdMVO7(*#lccJQ9>nv@e_8zc6rlVdGGq}E_r>zcu;+mNp4ytYb2zW zx@{o3$3>hI1QN5qlvhjVCt3ROmilaURY{!US;q;_lvyZ#R$kWD+qsU7)DMo9fQ@&6AQ z;6E=D8%sM=!+-h5sO$ghfHaj4beBhCOoe#rNdaVJptE@(o(ww_1`+`T%v3RgAOeY^ zy}sx3Dz;FR)M?$+M?!Y%n|t#!c2ZTjK~`DSs31zrt+I4o%<`#ZEEs7u%&1@h#}rLl zS0!YNJHT&Zkx6+gBFKswWjW?;;#sK5A2BpD(jE6*N1{*lArfCd*pT7R^uSK>Y<>qs z5WiFLA5dxNtva)Y)$-UPBKu^5yKDTK8e|?xwUby{%^E0L4Us*gWv5j^s#~KuxalSZ zVp~#=q1K5k!WrTaq54Ku*>R{DpfVe2QX<8fEeI2g$B+nqm>dyZeMnN##X9LHhJkk) z!An3b>%3rf6HN~jX^rY~Gi9!~f`~D8I}X332m6?X!TGSj&e!NnF*_Xl zOU=pB&;C}gvz<)PKDd5>cc9o$2bnPA zgoaWiCL&lj&(SGK`V~@&IF;6jvy_!-@H!G)5MT=;Kv=X(yd$se$XDXAr!UjukP9r< zC`Fvf;v}7VyU7G|grK!toLBwCZb%y&K;({)^A#}QJo?VHd=!?DNnaJH+NFquDRQ{v z8RO`aU5ajBoNdk_b-KQ{t6mR}mS8+c6QvyH^2j3iP=(#ZltP${B8+ky7SR*X9n}5Z z==#*sysC}~6=2GxS%Eq*7S6dH2n+HHK0|j%?LN$R^E&L1%hJ#B+=xg}oPI<40y7P6 zbwj0I1p#sT80+4M%wXMJO`z4lB_X@>I0gV*m$_BY1=xGx?uhA`+J4|sgtOpCJ0Q5Q zP@1w@>9MqQR*A){z|^9S+{rn()U6p2MQYSG#`Q)-pFK@c3(f-gg5WJGNCRM{hUaTP zLJdKGrYBEZYR99NB-P(tnVRnBu7uECdh3i+Bi9DIiKZ?Ne=8M4g7n4Dnkf@@(3Pv& zfJy-({g2}6K_^aG6LRfq3tf{8abYas*xzdXOM&YwW%ql7y;VA=@P%voO`ynI6*G@R zlB0f6L&kcF4qP3~yeM?;2Eky65g`ALbZ(1t(-2SM3_=|^u~S>@4{~O{enPHV-7z)_ zB(}C=I1qIIn1j2!LC!$6P$aJ>_)=HKNd9WFt@wPkfnYO3uyHYg{H1DRMd|@XHlK9M zX3C3yX;3*aD%su36+*vsTJzXEDMjh=5jW9_CC;j^|#5Z&@vpm!sd&|;n$9t_x z-3LUb1{3`6htvQT498qu60LWqxjySx+-nQ>Hb8(x8RNVREi1u^*9 z58oUOix_0I$?J`#lJhZL<@Pz>IuxGaI26fjhKyP!!;av`n?mOi9c@KRKk4JK*Bcp3 zoL($RWn!rq1Z*hWSbxt?s5IQTTqF<9Zgg1cbcr{t#8S!vYz|2--M+GihK1Yxn>P$i zdEWC_@$g@xQ*^l#t4V=rG%mQ&QmV=MYe{(pdCACm3(9#ObsB!-Q9Ffx4IOVga(Ir% zIZ}P2m&_O5VbHwYYkmmL_0A2%xeSa5FJGajEVujGHM^Ymesk%9$C zKCH^dH3=3(-MGZ;kct4&`l@}Ysb_XaqqeFZSI=w(L3?n|lAGv!+nqeIi5V6rT4@iVa2C(RI0pJ= zr_~{FdqjA|YEH#AUMkcM;p1XJgpZMas7j3kExN-v+hJM4WOa3aU~&Mg+Yzq2Nl_IW zJla{ZS|)&LOL+6FJCbS~E|Spcd;bY8V^4OZ{Q4u`pfEwsjVYsWVU~D#C4s}(9A(71Cp@?`&e&wx^d{CyN)Jfo7DoGm+ z<>#t9qJY(UC@RPSgVAr`uWdFn@rX*otA0S+wBM{oZBb~RS;u>%s8KATLY|EMQf~?i zN}#(?gTKjtk|@}2n52;>?^P!hQG56p?i&Tue`ToxM8TSwsgfaySIS)bK?%BOv#vA=H{>lb`= zaKK2_Jb35W7HCX(2o>4g7Cgrl*lODApg9!4fv8o|N5-D_ccGHrc|{N$VBld+wcXZE z-1AP3{A>kUuBCZug$ED2&`3&TUxqkba{AV6&WT5#w>bx|&F|vV%jo*Bjx*u6_gP>% zEBdO2^|m|Yv9UW#lejal-RW>T)8g35x0%)ed8-6=|85M|cNY2F%6!>Pf(+CzaCzwy z)5)*@FdY$*3pL#QsorGx|D}3uEuH@fbGlR?|EGF+*7fTY;8L}gEu3lMh3AILMCq?a zAz-u$jhav-<5hsaecUM{)jq7^ivtNzxjKR5dpR%8F2mYMPuB*u}Krw`tJT zZsJ@|hfyufs%lIx*}HbUm!l6NITPBk3^SIWpw}V1L;iZ`NFCw)TH@L!j(Xl#Of}t~ zEduI#3z!GcO+c~AigwDO)MuvW*r*N()CSb?P+|nFa;nN>Oju;Rp$Xm34?L__YtpB4 
zCO2UPl{1dPGV_CiMknxP%(%yFVAlt3^zhb2sNqb^wqopCI>1u21wsmLs2>V> z2-{Fb0-qW~e>LID=x4KF4C&1D6glmJU~cGdQ45I3^J#;{F1wILleB#r;agM;c;%saB}8n(CqZ?wJmhy)tA5f&st(fCc9v>T{9J_J&O>Kdg#>69L@;;6cFkt$V6)VE62Wjd6&!{d**jC^84gIB4k+tig` zHZRJDJbQqSYZ>wiSaCLA&MqHvfHJ8qM zS0L8egm;;qo#JSdjIG?NtBcy1np3-F7O?Y64ap%2&z5MGz<@JWo$bw=V2>J%n$|dO z82{frpENhoG-9vf%a7+LzbyJkNn-xO{;%qG-~V`j*PHjbkN!Zz=ReRe;eW$u{t3P^ zw6XO1e^O{$G~2Ndl9Rnv=TBG%-kpVk6ym`Yh`=-V`bTJX=CKG=#6zZSv@M?vO<`i z$azl$_Zu^!=8`2MP?qS09e*YMn0w-xbSf^%(+ta4=?aUfz$U&Rk|PI9e@|W*@(zBU zv?lcTn@=Deehuoq8WLv(9q48xb>7mYz&Tko(E!{b^{$9RyVoQ$zS_!Is30}^UoNmp z?W@sLejBk#BG~)`D7pBS4R_+)To?v#nGU(wbO;7aQCo5SEeY7z(U-ZDiT<$4fH=Mry)ybINjBODn3vb zO|Bii@Hg5Xq%vUy4;4PiCo>W(w-A>Ygj*<|_<$fDD1BJ7Rv=yp#7o}`Z0IU|z&u4V zm%Mv)$k(Pd2ZztKv|(tQ-1fRtMoRoyJIGvKr8Y!ek8b;qpxotYQ(|328^ z?+BvQep+Mi&o1`QLBapf!~ZK3?WZ@U{Gh{YKj?6hJ|k=x$$IkK2K5+90sV(X ze*Y1TTc&&eq8tc?y;29a1P{z3l&Gm;|9cBTFj^@|kcNs^0ZhysDk;0-0L z{fmC_15sow>ns+sWYX5DNd?7kMYUevX>Gfwca?_rkpLpbOjeOku_kf@bEOg&L6z5e z!SzGBhR$4xC(U(`0+X=PctDE60f! zjH@Kdq&1)%0!o5g)`o|$VVGKd_8s!f?L&p#n$g`X;nT;K@x^0Zf+bsI zUBuDEY~2-^{Cg6rVG5e!-%vUQ-Idr}x<{I9nUfW8>$kvMSs{NQ7juXS=lYzi~^^L zH;YHfM!jRKjh!TtrZ8iZpWRh36%JFJbk!%aRG{bHjmwy7pD*Sb!lDcT*E#(vZ;J7x zHRJqmb4HR0l|v^070+3WS*SNqGw|552mZ0rZ%HiTSfdCV<@tZo5Zf zgZWBjpRJR>0Kv~x@bywy$FT3MUXCt$)7zjD((MbH&v?k@Lo{sRhp(R@b+1?D4-l}bh$3WUOGth+$0(e)wO zU_|%k(sC^kML|gp-Ufa=Z{kX7PC=&vy(cORALnpyVV_#9TI~y9&RYPkLDgDwV@+CF z(q_3-U+Sdd$0oAKijThL*YC8T)l*uV=CP{ommeWL;E+tXY|^Si{ze64r1fFN8#YCLX}aiQumKu(-|v%1Ai<8?RQbjh_KTw$`r=%t{)W|Rl)?E<5ZUAE+JF> z@!iPIsuc&?XjcVFrO;I^(w=dbXz4+JiO!-F%ej|&s0ZMA=>r}g^BDL;RbYlo=E4NmLIF~qLx*2FGdKhlIh;&N_CUR5%8Nj#Fm6jS~Rq5qLMg4L$N%aYZUB1T6` zEdQxwQ)mQjRl$XmU*0fu#OJNr72y)SO;fmUJTg?-g5{)Go=Fw_lB6FQqhccpx|poP z^e{m`DAo?!#Zrgh%i0alSsEu-%UAVy5sArvBlW;k2b;Cr{#f_=@p!QCmUt^vr>isH zq>T3(pq`(W_rshddS9$5O&SJ%s_pzDnCp-m z!pq+WW%s@(Rb`usYyoP&Wq%oi=*Q^mE=V>~MU!~M^Oxz)VPA2IT;Ai!^pqWDqFy`D zeW~;uPP+o6)rgO(;kgXU;N3Co5hQk|n;_GuJG7GTsvnD~Y09vOc%8pw(tASXNCopPHvHD6y$ zSh$b0!s>PhfwXGI7c8i!q6P#BX60<+ifkIR=t_!$wK;;9Ppv(K&ByGVb#}~2rrr~y z9M{ZX%yv~D;%3x%8K8k1SaiJYf;k)T9(-2_f3Ig);o|(k!3deTk(lQnHQt6|ok7S{bESTS)C&s_Q<8!CvI|w~RWBWv`k5-T(PQZ= z-k-1#JJqD!Z-PQ{V@Iaf`{$(X?yte7q#3hiJQLnPvJ6o^nrTnf3Ndrvep|5(z0&sh zh{+*3cz!Cv?FJoD9TuNvk6iwBf~XQL<4*UwlpBrJc-p_ksbv<%ZE)*sULvsA^aC2h z^}Rd_O!LW;&T6HFSQ&8Ev}2h3k?rlcrE|l?yl6-qT6!q9w27C5({dNLMYi(l|GrDE(echG2W=(Hz zD7Kx^WQNoIydc8)Y3aI&qxbqChKUq#G!(4NHZY@CzpIKTdb$&(4%DR%u?`4fI} z@%}4+?C`_hxwu>Y>j0@qR<3cWqxZGDI(w3Rb*N!<*vh0@L zt1imay)oCooYBCCvTxu7^>42{WMh|W;;b=>#;Ekn6D$A8HRh|qDrbF_b~4OqHCv4s z+cV?)Gb)!9cW{N*_S~yix$IJ~d>)krK)pg&tHZ-&n#-zGeV&^KpV`|SMpnBnCZ&2m zJi~U@n`lP3}3FXE&#uEg&98hZ(}1lSj&K(HyUEL?yF9hS6YUk z7Yc5CY2+PV5W?k&`j+Z9-NN!ZxaGyHTrU?0s8ajOz4v}2pvdbe*G0NZsWoCY|1y_u z^ABRRXr&8pATMAAZb)H2U3J~*vYdv?Hk;0l8g_1ZYP28@noZ)C7i!QDC5vny0a3&( zMeYTA0x>VePmX|@nvm`!aIH7mnCw+5f%eS5B-$6b8W8~#bzlP)4air`JqhIr=VCem z+l~;(29S#{C1Qs1Bz3d~7(86k0f__>7R^;u5Cbz0A^-$F;ZwLye3l44;!}7)kOhI> z@uc?5t`)S#KXE`~-JWeOqPws`9uHx9Y{H;KuC(q_Y`(O?v?fRv zX_%&&mrWvNVh&mlZ3}NL2~yD}T29*(+7$X8X;LlQ>VING$k=+g$uTrZQW|ibi+nB3 zP>PQBdIxO}(@Fs!I#W#OW@|mC2#Rv>+hjYZ55M zMH6@(7$k!BJ;bt=ZFOmNWCz5~N-Ad*))P*V1fM?<`et>N<5ogK5C&O`>Jv=@rZecT zBCtU>spf#l5s8}R;S&&D^*aAkRXpnJ9$zFak%Vn|?ooRuL7Kkr`*L925lFQF2b~(9 zQ*B12L3XBUwYsORjW|9~N0Z>H7-i^a`NI~y+$L}q@-@Iqw6v)U<#9d)_E`*HfLy{k zkV_-e<*DT99(S=TZ#vq?*telouJTbkNVbzu2@khV#n%}T0p)ZR z=t{N@TnE&2hM3+{Y$1CZOY7+#AA22G~G?T!>&zHIVK(+&ImGeI=`37 zbx#xJ$I|Zey%Okc%8sO|_>E35lTJ(2OvElYYmGDRYGRpO_hqS|Ua#MHb}I7I1p~m3 ziJj1-Lr7;S`q`K&&ze2kmXJ~$LNmxIy6JU_8}xQ&X(GZ*#C>gBN8?X8R)5xPe>+C3 
zW5N(z8+3Tf;Cs4|>0#47zN4Ee)w~>U=i1!bf4_$epUNip9Z){xuAQDEeFw@w=Rj_p z`(yjy+v{9p>Z*CG`2?NoCB!5)R+q9Vh(`YIAexIQlA0~xtFZr92ee5P9(AECX*g8-qGcVv_BBU)2Xn zm;u$jghP}&IOLr=L;MKI13v z?jSsPtM|~OpH_^Y`QR0xekp%lrMLC!=`9#531*sq4VnO=nMP`Kd3q9+smc;ayx}6d zCT91gv8c}HEjsia@HQpSN?RQ2!LXE499G5DvbC)HZ@N+b3n;up)M#gUC(7T%AB?S; z8Tt8ByOOryo{1#XK{CgM09)DHwyo*|qi;LaU}?&s@4gyL1+TX!rwIdhcfT;b-Z=CA zhDh?EMfWhuyaT@OPSD6z+$3@ND5Qp&q9F}o4~876%00=b#M5BEimWhKz&(u#@%kua z@3BR(h_}l6zZxV!(s0*>A3)-nR^Kprht`B&Bq$T%5oQi}M<@yy&?1|0@mx#PNzEO( zec}K6HY@YX7gh0Rn{D`6BL1u3&B5OOhgkea?G&TJYyS^YU8L_A4<6xfz~NR5aHZlx z8R;f5E64yNR%o;I57=k(yqt!wM1Un1fBABCb=$QsnVrf@qdm%LOPkaWaGtV&=4&Oy zN~*7EeNTr1o9bv*quh7xF>w$2k?;m0<3D$oTgE62PDT0JBG?GlJcGj9eZgB#_Rice z%!?e{v~a;)Hm#AAANnj6sk`N51M|_FrcbCln${yn<2804Jp+fb6spF%L|zVzF{2i# zUVi!JsgR|nfv-a~&NO_Wa3i3+R*`==MQsm7R;h02<5?%%o%&;-wr)qL0-7VE%x%Gl z)Mdoinuv8&(ks}OCppmBZ+%JXh#YyAIhT!-WvVnOPzH3wH3{=L93Gbnlr(QqWEUq* zn3|2<>bHV;Ix<7Zlm5){}{6zS2#W(t%%=EWTB!#6e~n z3nwRfR>`n1_Q$rQa1!yoGw1>&Va2H+mT>wu+<SgF|f(I5C~knA{$dEhnwXuA`$((`_ylwHZk^t11>@y$B* z&@;zpl{IyBjit+2%*ilgU9uQ_89o-ZMPy7wxkHTF4h~ukI3+2N7gu9CCdJf`qQTlW z{ex;UfLFS}WIVE2(WwthbPp?J4r@5rWzNOFezkJm$b!^+;4D>p9OarM>9E;EF4Ie$ zO4SbRaN4#ZZIb}o2tcXN7 zd#MUtInli#W)_zkvj2DW@{ngu7qd2YeBUzQyZo9c-*RINx5&n{FLr#FU9z|31;xwH zF_T~ws?I3IYu7f|0Wt@#Q8z5ZDz7AAZ*BMs0$@O|1f$1v=N`%qz7NaZ_0NZD_BzAQ z;yIjs-Z!dFOD#52dX1h#>45I`F^#jrU-`tl=6;ubo?X`M;hy9k=sUW`=gU(0{7U8^ z4B%1ScMIk2#3b=7QX@X5>&2;ye!b(W>;fz?v3}E=iJOug`&~NqtS!?@8_OGlG<2)t zv4FsarHeIlC#3Je{a2>YjBH5rb}ml57{n>w^y_SRj_%Gu>jL?WrylDcUipu48;c#- zy&{|lRqY+ey4$h8w*W8HVo9o+jS(uZ`F|}Hj5F#Etp~fNLbe%VJN#um>X%P;3Q6gq zsjgrBz}i9iMo@`Wock+uGuMinM{`8W@C)g$fq(7dTZZFtUbHtR6k(_9bB+t}=xR8x zSV$j~aS=steHD#X8I)hRE{#IWDzkQo@g?vL4^ZhU!vfOkAAGu#+^AvDY*hdNrO&;D zDm5DNB4;FK<~dlg-MZCp%>aw#?^5BFk3%G#W$ckVqHlv}HuXN&p3kXdu;FvT2qt=V z47^x7b4d54u@v5wIWEGO04laHrzqFm-z^H%AA?r90=#-mm2N<+0$r}(H%rz>%rh0u z+d(OOkkEAimq!y6s=_XGn%26<>e|dZM46*#q5%%ci=|;7_n&F3Lr&c$ufrXGEHO77 zc@S+pcEw1dfqYXhf%uzCmDq`FnA|38G3EI*;i9F?GoZ=KuG|+L~*{)=#S8w5kntcUx z=(Lx2KlAXGTXkCsDiPAB&pNaWiyVuC>~SSdXcUhlTpU|A%f7R?fqJ#vz4^rgV_2E; zpE@Fp;;mrD=Ih=t;VDe@s(0ZO@{}@u8Lv9KrbI|wyAsGIr%AV&AlV|Z6%uImu)nHj z44M*g%1f5Cy#3^++}Yont4&USbFtMM*1eVC zu$T<@Y4sljjiq7eoYi}-4dLxd26#YQdKl@Sv~SycMg97KGf-!)^yCR+?WzgqiRRA_ zMjR#=X4(lyI!ar1b;S8;?=f3W->gUtsTtFWEVe=@saR?@FoV&S7%FGKvAkOsHU54dL zNedU0G6E6|3U-Vr1JvT|Bw>wNw6vLlkQ*>CYNQ|(D}XZS*S68Vga{C%P||^ zS%oKFr$J1EtCDzv$cxtjnG>@3psY&hx|P7j6q4vd!`ib(6TU#@8^X?Bxf?jJ1mUMT zf&o|0NkZI+%4~S%)*R??Las3bm)Ga*MWsytT{gF3vo_6<9IEaPsl)#Lc6Xf&!23&- zN_p0kRTP=XY`|B4u0EGKf)Z3g%ZtJjRG|I&opBZ5XcG|7&?-juE(7(rit5c-Q6s*fiX)pQ?pjs^Dea=Ko#ZIycd|3I40hi%fM ze&{d8ANq^nztJ8}hIS_Qw#tUK4mSVjl5Le`tv7$zFaGbyLCzJr2usV6ym)Ddhq+?X z#**QIk)?LTlqR7>=&$F^$Kfd9tml&Pa>{>Ppi}B^RBz9dKYC&AktolITcw z(E9dk>dq-tk{u{<3*inZe*0`u4bNz(gXmac%6U_GD$|YLv&qt1Iz$hoS5aF%g4bBcr8b}LWnpt2)u zlpyOc7DG&5fIhC$^IDhEt891T!FGbSQbpiT`g4MZx-2(obebw0pN)ETYM0;a08Q%T zwH;^gU|!3CJDmDWI9_(F09?+WZXs?m3Hs0)PZ0c4)v9a(K>^fJ$AI43*q}7Oyl#g6 z;|uM zsZ~}N*21N;rD^Zst?WXflUMbzRVnUAfVOE)(>2$mdYTfrSew4G_@sD2haA$!Lo|vn zQ>YHqAJd{M8j}n5cLiugaGIX$X^8Wv1ZzuK{jaxsyy5ihf^v;=i!{%rbWWRa;7n%6 z06utWS3WihEw(a;YB&g4-)({O{?5{1d3GvPMWB~6C=H8hnR!yN?P-W$Gj&T=e)^@_ zL$jtOlv>TNr!`kCkkOf#>LQVo8!@fjoj>iw>aL5>+i+gADHO|AQq8YAW;ndj_;R;l z!ySX3AnVi7`KE{>@>60RQAFnN?pT#+8HTL82BAK6Al<&6FLG9kXdUkDr`fk|wP1$< z%e>nZQ<<7Iw5ALaA#$cv$~;P0IcWOIM{#V+KYnrw?(9Omy28m&yLLb?T;b-rBKx}v zV_et(qe%7!1ROne;db0OWKSbhI*Xlw1+*Kkl?}$JAJu2g2lnY$_V0`4r5Wn9BZ;{0lG(tzc!_AkClT)e zc_-26vaA3-WE(uyOXj0Eu(fntGTD+8v2e?zQgp)zniFK^_N*F)^)7Aj4@4W?89_dhM~q>HwK0#Sr8lQICWKSvI)dNvv7 
z1~9!fK{kI|_O%I5{#jNyEH7=5}z6!w*hj z*J4eiK-O8)?ij!s*bg@9l$qbcLvUWqe3Z_Um<5uRd@Sk^%b9U8JEHCtd@n_p>;Oq( zlmfw%tADs$S3P@eFHL+@7ZkztB!`{I6BWh z`(BF=;e1GpZEW?hhgjby^%h&1oRg`4SHxCT@91_oWbUv~cZglr_KA(2 z7jz3f)YN0>nr>6!rnj9ahDhCo_z_HwpQF`AEl8~DA^g$kVF=2r6XIRlDGu+`M-?q5 zFueKqy`idG>ysCYaOTntca%do?6K9(z!jecxji};mb)jYI!a4ak;ekl;V!ktW@T(1 zaHq5JSmW{RLUQuZaORctN3sGB)(FKkEbZjiB8OV^X?~aJQX84VpqgK*aYHN!Kb>{4 zl4yhahI@yzed+1dt$&8A%*92JvrUt4MWm(M<91q`Q#=FFMBSSx%;9>Qw5L7w6{&<+ zC+1%64$|dmrYg!P%n7{*jw!J)8=okHCG}KTUvee5198# zBnou)C<-4~xxtaAYj{<2EgQImGh#9)v%^kz>N@ge-2p}gu?eO<9&$L%nA23NZ5sVO z&#Y#9Lo4;v8k@ePknV|r*JGg-@{2(e366QVrj`xLJfLft4`f-8qj!T2 ztM$#umA)cWJqpQg)fCRV4+z->IKBlT*y;@^xCsHVgaMlHwhet?Pb(0zP*4d z$ZPdrhDhM+X%YoT zsAWdoM09^ScG(n~QonjCYvU4N;quC)ooo)OP@OeW0;7waCXh3dqDiYmSzM%Gy(mM7 zVV=i2%BFs3m3fLz@}>prH9ZeZ0DKpk9$r&bBg|+LC*Jw0zPEwBP@?E#8lA1#&@tZi zmjlm-^|JzkTv4PNNpd1@?`z@x7av_mZZR<^|^TsOc@#O*hAd84_qqBjha2SOd;Gi z`9y>7MdJf^Wi9`Xc;HGXiR#Gnp0zuZ`~w>EaL(sKbO z7<)?p(cry@jYSDx11OnB>{HZ}ioCrM2S#{*OgTh(NfL?|@9#YiD87!8A$%^vXOy_v zZ#>U+DDdkreG{l!0OD7kVn@ZyiHZ{N;2((Sbzsl<}gXZ`=t_%%UfQHmx)qQBuRrWR$6lX5tjW&l3zV> zzCq90o1`_Vk0#kwn~T%_67kZ2JgWknc+lAIDAoq`edFYE;Wdsf@i{#1EO9rZuH}7% z@nvT>g>oLF;qx*Ck~lNl*5V!I*{SSaLGsIho@mr}Uy6l@VrY0N_9(H(-QJ1>qL$j6 zke1@0jTRrO<0K_xP71)Xx8usr#W{}Q&&iA%y`~;6tS~FB^y!#GXzIU~PI1>`C=i@n zC;*44hO_g&Q>Vtk?ozP5B=Me27FufHteKh_nHU+B1@mMe_Wydy9LR;t(c&U3K^sc} zPiig-A=hRYQE8R1D9_R@eN1X-CjLCTpe7!t^8(G1~1Rl{Eqp`M|((HwO7z;9q)?8 zTZ0@_wst?fO~Xo!Lb&kgkSXVN{KwTW33b7fx0(qxyviDEWp2iL<8`uv)4{KwUklpc z@ML4$7;WRC9hdf!8P2jJ8A1bLsV_jpl_>F!(UW${qmfOlMKWXMcC05BuG`=I&xf{V z&!6e~34cZJO}?7)o1{tjcA|no@wDUonUM-=j7m>hI8<|QzjsZ9W~O%f-MefthN3Os~Dn&ywNU&V%B!#2XOokOS-aS{Y`qXh=wHU=UX85@4n zvnHaHMP`c?ELFs(+vbLR9CcEoU9rw+u>) z4x{W`gP6hj$CHSdi?$2|DN1}fI?8Vl^{!GbWAAD{dfwotg1kwLUsQBq(nh!<>S_giI!rJoASdbf?-2pju#lZ`hnu z0B=(P}*< z)J9k0s9N!l$dMr-flEo2TQgCBV@)!33dVUoLeR#8aKRchMB z>`6~ohg$BzSl3GWQ3ONVFwHTZ{TUGp2*WcI+ikt;728U9$d_rU&dE-+HBuGEox9eL zwP;%u+uUCo6+2p`@tw)1dP_4?_-jlyk zCeBN3Vrkt!R3+WOXJsv!X#LShL|8XhHv`)nw7)wCb96AXG;?qT+|?V!_E2EL1w^W0 zxG+q7+3TdR-B-0Pd){9?j-m}N3vLsQKXP8UeWqEeY&Z^m{9*%gZf zN5qrJh7ZwfWp_Rax+SPPV8)~cMZ3?*&f3wW2vIIq;Sj8xfM)n#TIy4Y)<+*ul@~NqHsH&IX|ap;x%0%WZ2R<8rctrN z6Py?Z9gB;@dOOCom>@57#>=5SFYj)Mtphwoxa9b5MxQ{4@LT%x%{?#e!}jM{w$3&w zh+LM??;$oTHXb5iqK(IUh}p#6%5b*$>{#D6pW8w@`I74TC3p@EzHtMU_zZD+`v^^k zkv)i62t^`0FukQc&5a}LSV!jP>yx>60pCghiF*RKb%_7k)Uq=I@+s(9895l){dt-D z`&6_WYIC5cY{;H$%ic*}MH=6Ja0(|xfa>KBE#UWMMT!{Zo(U2@P|)aTT=!e=SHU}< z<7z5BBf3gi^nra(NM{lh%w#h3QbST+&`xId#k4)G;gizRfQfIjyjQQ~&dw92#K%I# z6%=GhuLYUWnksl^)n}LHmOgExiYsHJx{`Y+g_hE8L$QAb9P$&x(lmToLabHujLp(s zD{S9785|&6gEh!Cqi^Ml( z+vBU8sm|7B-^S6|6{7Kbzhg^E3AZ_Jt)+#93~#G@u6ws>sB1jG&|o5Dio~Iipzwm6 zGFa{c33&;Xv@e{wv}tWIEIre4RL|QnGtB}6V_c}iW82IkG?yPiOL(TS<`(BrYBHwO zzfOFrmZU*PFVOeGH#&?rbS32r-ic!&t~soq;xoO@ix*H8SjH_K{A8cNTWJ=3Ke?fk zBatJSX^q6#$yCIJ>q^3NXtse!bR<=Bq8Th>{}GqOmwuj@pQq^hbZH%GZ44Jlj;|Uf@O%k`J67dXWu9|ZdMlNoZsxZ*DIup+BCzWY=uaPbBpM{LhF`MU$C(Iv<|7Kq?TEujvLk^jGJQ$Ckmp+ zebW|>F?ul(93j)8*a7yDRz!0jF`KqM~O5W?$1!L3Wokt!g5N-J^-qX|bGRI-> zE04;NYBy!{C`$27YG+-vmkD#k9N`e+2(mW*({X_tW9fIOT`ePTE5r3uH~YwsPj(SN z$5*SoOQ*RlV!_?B@2rC(tLl$?=H6{P74Ojweir!r)NZdwqILDW*>Y+i^=a}27Mr5V z7%JuUx1>{63JNBv9OE`nq>Dbn#QU>%^p9R!_J~m3s$BNwLaI(W4H+9#fr88#KR)2qFJl0r!4q+j)wo4hN*`W_{FTXb?}lC!wsuSutKFfLl5?1lmq;@ zABEAdF6<4`uxe8Zwai-ZN&ZzP{G7WDpTXv%6h?YCO*0pleZ&ToSplQ>*YBn3Ms!o= zlRxCI^*A$OM{@NnA_7QILxU-jNd=^=7g-^;_66ZeXc=R@YC*XD zw?VpzXkm`5$ML&4_}#2@JFC(jo!yP1bCo!Ko#pGJ$~`bBm~mp7sn`)KHt%C){F-Z+ zRlq4W%WSRPhRSHK2p}20W-+)CP)9owOUqxRqqE!+=0}ZEP$R6(HCDW-LT6#)6K&gD 
z*hYE7$mc~kNL-lBo2pms942}&Jl_*`FbuLx0dZ0vs>pLME6#p~7r6)5ejLxv49R(A zeo(WpctCn9_oIie57x5o25x4)0M-CB|6=R^Y5oD9Kd2@X3EZAQKC6m!Cvs0?l8|>A1G1gwGaz-dEGn>olkpFCIP?2+rD1y0;WaWtJM&~MYW=S^*2^=o_ zQiE#NSy<33=W0qx%V$#vG>&6Dlpas@%wu<{B1h40kHaf(7n7;OfoNY#iP1ne6oy(&cNZUL|9{UEvRpfkqDZx#jAd)#r@0{({MZU5TjMwM$|D?3D^VNuL<_4 zs8ysIxi0;%Y216V?kdRe;o`rJw&n$q=e(nM{MnU!Q1b+H!C>%deje;Nu2eW| z$CKJXTh822dFk!zXjDQds?AVTe;yG^UsN*3*3suvmS)S)pr;CZ3PDQyIBuWX-tn(Q zLy}J_P!@1eMtS)Scm=%C`ry2ow1{(>_DO4^ zp!FbcbV`bcMP`%KW0*NUJCPfjxkBmM5t*wOeC1wtbNuV3n3b+KLQc&G9H|f|t?Q^u zL{#SZqRk8j8k}j(MOiLs!%?cJ0in-$l4i@@PxnmXZI3!R{mVVxxMznwePNENf;nkg z6_dXDG929FK=ji-j1?}AYIn`0A2@<)uj(xxv@47JAjY~C9|R2yn=W~;y--Iw)9ujb zZc{b$=xYz2LIPvPaDBhp zO zR&<4<$v->B)#~hq852oC%I2%js2g+6TTz!KH}`}J8-CXrm$A*gtS8oDZ-i)i#E6@} zIj9PBwlMG9+O&_Z1S)Il!VB z+tJjT`Kx%5#Ua1NeNe+Gjv=)ri%5Y+6Ztp|?VBBx{!>G}0g2b`>zam!FRfSK2@hB!FWDW*xZY{wUf zh!~CvR{}(=5t`WcJ{jb`5l=zo5w2DLNtl|xLgasR2}EyeiNU8&uUvpXk^nMyP*;Z;j2-r6bRU;g6@GPX(2>UzUC|?7mk*Cks-zJptKMZj!y5R6 zpKow6vCZg05RwE9K2gu%L+ZcJ`m>-5*1VZQ5_hE?#CX&W7}CNcs@2S>McK>xz^w_cDxuy zK2~|5;~6Y(B%E_tPcHBDs_^pZZ^rg@+Q$;>5IIF?^DB}~)Ans0%P!O_rwdqq3XUFQ zr6Fx{`^Rk{WJYfi4!&Y;Tr*&qU;okptHl6wmEc(+#Hh}!$I0-K?s8^WeaOB_>r1?3V6b1mG+1~Jt6%!qkN@L zWpq5)-<~7ju8%wtz|Ep)@RgQ|8xX~Wj``3yYuVb&L_#W0MNZ-N7O_TnsL2(t1xJB^ zE1yO!KdkdqIe&7rwX((tfBJ@^bcC>ZvQdvvjZwvM=h@L>e>Xg9xjS+H4cqe_|G}jf zo=q*VOezFjr<2RYT`+Wf_EgGTgiSgFQL2f2g5e9%Csy^qGBHmv0G!n@qL}*fe2jg>Ms^`8S*z>AwLhd!$jCHLI z7EZ~c{>6OnFhot1L-=A$BeT>Bm-y7hxu57Llf(+<)R#c^K=XH{?vtq`M&y)3DDd-T zigHlTYj)`GiD2D*bneU~c~awq)Wq6F(X(w&g%ge_Lp04~)W)FAA~r9XI-j*y zGLq@hY@5hMupXpAoP9UPTn?It8Y>`}{-UHuNmuWGEe0$>ajd1P? zwF^gDn;6;plav?nk_m00a(_!Ykg8Wy*GxmcL&8qFPoPMS*YhJtEyqp^JqOB9iKh)==oL3~)DLKY^uwcx*@Vc5ZKGhVK@RWdd+JhL4+jSc0O_tlfMuVAbAy02Nw4ds0`ev5#T^ z;rJwPM&PSuid)f6Mx5cQRwua+sY}#lx*S_)k`SEX&}u<|sFxj-F$R91_d&|!F^q}0 zp#7|mH+c0u8Y#>KQN3l6?9J-?uUj)4($iri`ou`D5`z`1>S#tV8SYt*o$4EQcMAN- zo8>AA$e&UtEJROsY%ad&3H{VZAJUw2oKxV2lE7+j>_ZA1MJf_5rNN<0R_i9To{Ht+GZ8pgCiTcdu zY`js$TzS>962sbUKcUfAQxmpZ7ruwc^w<3KF=eG_~mWZt|>VpNR8$Tcha>5 zFU*C}Im$Si_%S3ce8uPmHAQ+99FTFW{h+Gf=C`Bd^}#t%E!@)TPL>MW*~iFM-ItmqdTa8oZkpq zWuq~pPo&^2f#ec8A+q8|>pH?^6mzU3-hJ)Ae2-vHrZsF0=PjJiQ?eL2I^|Ae%Kb>z zqrOX|LlWeQi-J(;b+eX(jPFnFqoZ^C-D`amzf`5qd0nGG$~LefV0Y)mpT>=bo|b`1 z&)&cYWJt?M&&Wv6%*ak_XlCy~V+Jy|rjwQs5|LFDd8hElY1HB|6=E_jzs;zV1vxOc zhxqU|M(I^#s0cX}HAB=k)aCiDIM?fO^zE@O-a~?=wFrDf=7#HF`cYGIF5X#?lViT+ zRsnrOqk)ZAmeck&ySg2u%pLo_rCYtd5Le5{V&rYyXeAudde6y0q_5jjee+zkEx+iV z>6g)nX1I2z6ZR|e>E|#=R&9}8QJw94jVl>WUfZuMQxp`fJI_l>=$PE(%!pdOH$EKW ze~nP|@f-0oz?)gTEKG9qkoao)t$oijC@po{N5Zy^bZX>Q0LrFOXvdAKBdYw_6R_@X4N1e_=82gUmL736oY6O5rY6LErhO|kREwiLm0y;G z|9MfAmcc9cdKn2H_R+mcoTYOe#(Unu<29&dtcJ=%!M3GXe9orNF7Sk7*ScTUVXx>% zRz8|We(VeGJLt5ZKiOH4j&#DJdn`k{IF4;?%&s-eoPLoSKa#i6VVSfVP^WUFg5H#M znJ^HlNW1e6^;3LyJT+Vyb478Rg;87*o9OCFikX9UAyi4tNAJl{3mJuIwdwpedTG^C zM!%hK;rK+E=fy^%*!=O#N}<*C6ikd#S-t>lz@%a;Kp7?y(xBWGWpCFqj7=3a2x1tB#gTM$bd zu+!vI$Bd?wXhz%L`b!qYYQEXTL57!V!v-&j66Ln85-$kpx5-cW3EKFt%|?1V*XVEIMSmOxHn3NMwAIn{jLN3>_)`Q zt?gw25zwnpje2dCG3#~2?>Pgpuu}77V$_hSKq7gbwB7sF>`M|KsCCl!GH$=i%JMK& zpy!U-;(#GC4t#$FAN?;wMMgwPKv+OYKC`M3gQz?Xfv+w;$zpmD7L-l^@rXXby)XYj_qw0nWc4sj04oM#p&+5Yaf;( z<7_dOg@#Ox#VBvw5-f;HpYJ9)4~@%Ek$PbaG|UY6D1h%FTEl;7oT|8ph&0@=xXj=v zf)ve*p@HE_1^Nl5Wjnb+DH=)YKDrMI;*x#Tv>|kli{wY>N0?a0na0*(`-ka9PpPL6 zC}<=H`=o*YM{0mlb^}&Ix>iA)mY#JQ8l}F0BiS_{<1i#dqz}iK?A5F9u2?SkrYhOF zd2c@KOgwz_|1(}4tZj5Gjhu`uX&qb~q^QN;ElWb79uB?g!wGTyQglceLK<=iO)B!r z6*~XWjN>wd^stLGgfzeFd(scxlzV_rQ;7wf*3*K4QU1?JD~JfmDG0wy=}9Qj_)vCz z`a)G{MP`i^xkH?=2`ZKWj?=&lGt8^=63tJ?CZIL`Cx8dgnrdn${@01(fJXB%OqS} 
zvpP`D?)5u9NQrC_JQp8iDB*VRiM{P^qcsE&pG)V=y_IHs-$z`l3s#N6JXLLG^ifq( z9@-R7c$4j+ypPp7k(D9upMC0}ym_{`U*}y9QZe!0xvSK=_pn@cdy_iE_mghrePIOWcT@9)$~ii>PDTvssjC>;aif+&>Zj9F76G zFFsjs6n!X3CCkj4PC%r7U)G1-?SC9v4J2;@Sgg$a1mX$GWoYkW; z4ngXTBy%t1;gP9;Qo=1{Lj z5>syoi=$-N0f}nf1-zW7PFho018I@*8X55Leq>G^q~#z)zUWWMDH~3?f|IR^In3UU zZ~r*>!JO1Tmt3;g5L;4r^wVVJJJ>vZGTgWLHWz_4t+%~x+Zf$r{}gt z?S3+C-jGzNbrX))s@=A&9U^ujw$ZfAJC*EeRMUJEql;16@C?@IDA(gCc9*Ey$SIV6 z>tY`Jj2hWC`DK;P@MD;Q_tTR#T4=(a2dsNh%3L+>D=!kIWLA?2U73Q(hxF1 zKcXSzDu4uahC7>8q+#tMuz;d2Y?=`p`@uvv(`Oc7M_yul3SM=adbJJHAM~m>~8&FQb-GhSo@tW@=eEQ&wE&i?nW@# zn!VBm|9zg0oSVB%l*#lAlBeNobApE|0gqNP>M5=|zI^?B+oxwGQcy>z<|p%t^^FsA zO<-!;RJJxeMq52=SKgoposDP(`O{O>PTp_*beH^(Q2YqkI%nW|OW%zPYKc&x(u&au z$fE)t+l~o`xvwXq7CWTrefRTgg&mjBl@is}iik8Nt91T82|4x|j$#eRNg`W_Ne#hK zhF3O(;zvf&_FK&bQ>P5(GH=ycrxGj2k0|>{=BiO$bgFB6mnkL7HufQ&+}=qeOE*mz zC!Y%4gAVvZ&`PQ~DDi*{(1Z3y;4^_CqBLV+u(9QPU3M-cETa2PV6Dfi6^KEe*WV;- zXPskc;c}+~`4C{o*&)psQmsM+YOCtT%S0!0EpkMry(ZAbCT(3ihu=@# zr*RXG{}fM&E6)g7>i}LteGHeP7=t0){>oR~@R5N42@rCD5dEePO{mhjt9CsC@1WEQ z?$IgoT>PMH(x{1h_&4%RaTZG!DIgj|O~=F@nE(2-b=~q>+T;63?i-0YwE5w$y|=Z* zi`@q;8}(7HwY#WGShfX~Vi+R!cIn?@#ZXVkh7jE)ODYCNA@7n8TUWiW?UI~)P~QV1o^UTaKyYW?DJaqI&Gt6in1vjqt=xwe276*B z6d`s76Vl0%$hwDVpf;s1ai0DB?4_5{0qxTd%twOLPlIYBX@=hBFZ+I8ZB(S__9ZTtyS|3ncay za4LA#bqz_tL%fBxF8vG!S>(>$U|)=(l}rVuetM2Eg+$Pq%^_-n$t@`DZPnU-n)8iB zO;sBw@9J_xIpX#1d@DVU`|a`_P2&ztMCV{)dVV&ZpzKk`Tq!dGK8QK+Bmm#KdH=;o zGUgU!P|m$Y<+IsUH@%AmFYC5GfAY!*5p`WO)y%lZ*u7|82Tvz&_aTypF8j0!dp7wy zG9}NP&#vEw-*@IPA>giwz5noOl$EXFOWKsDN_`OQEKZB_A?tjUC=@9~1`@j84c(hN z^FH!flG+`5&O26%?g~9anL-I9XY|&z@!-tN%_muuEA`xn7a3Zu)!MF}D`zs?N{g#O zTH~y1-xii*8J?%izV;~}K8xcvi}589b$&^*d-hJVLg2Z}iLOZcHe{%O>+mKUmFHx) zYcR2}4vNV)VL$5CMLqus49-{4jl+dRciB$C$a2vW2F#3l4>a zx0Rn0k{#DkKjy$CS6`_RAS~9KI%>9j32}?r&zJvYZg9~L-C?6*tV!%th>CxD+lj?% zlPtq0eQhL)H9fhd(6)2&G)pJ9v-$D9&KseVXt>+E{lLYYMGilDr8ruUcnfbYJG5(= zheraI$O(aQvx(@FEMmTO(GQX26WT%d`IOFnI;F}yYj!cy5Z&{`X?>fni3l{g2H+l{ z%@XgLXWQ{JtZEwuBaiEVv}&1=1Le$TVr#%iU}fl;GNvP!6@$Pnt1T zryZ-99ZN$rKs4Ea>$MoOW6p#CNZm=hu8}ltm=UkM|U6#H`*%!d2Ow`1Ok~UAwS4+!QKa)b!CoMB4Egdls57nY| z^^G!PPT4RBp7{v1F1y{9R4qMLHY?qSai7NZ@#KNHB&>(L2%B9Vk29A5G_S{ml{%{L&)*=1j! 
zB5S#3;p?RW&mDy9!uQNi31;%Yg0vl=0+LfmRXj?mO_^&Si_G-%-Wv-y!)+em9FbpC z8hCge>bE7$tfQ8)DZo}@bXqEYWSb*yTR*{j;YSo#Pei!#1ckb}ko`ui7N+MsnDp%q zP5uOv8O(~``UiWhg&2o3n|7nO1O0duN$R8vQ5JS>0^87Mhi4EeADy~WsT|OSUf?vZ zyya$pGP~ZWG8cOCk%bIbfk4XV&Z!Og1nJ9dZj`t5>jQ!zAy4Zwy2TMYtJvMk_aCSC zW|B2uF2%(t;a3z8$QDO*@FJ~!lp~?Wmv9G zF6J!OHe-uNytfdoHJgvOncpY`0VStYY5O%?lDqfEP%n>7lnq0Yw**3vv*F~Xd`VB; z3A2V(&O4}6Tb#Xdn-2)BRHihT7IV47s=#{p0vq1TS!-VH)Qi%9H*jCVZBkt~g?nOx zlv@zkPQH2gN3;uOT@sTmrCf66+qAGsCTFA1@}$f$F*hjoi44oPOb6Ifolvpmi1Iyv5VUs>)%Vf0m`oJ^(?GCI_8zi}EL z!tuVowoKuB{K}2UKLGZUmY%QVUWm>st=07z)7_lE*pc67)i*_#1`VHIOwP+C`;}NF zmuJzPSAQs*|CnA4JAs%9{SD)47IW(DHE%0UvG3INF6)~{NK+x_Tm#Rd$0kD)Wa}X< zJ0DlgSP4p_AL9kzVSn_d%X)SB5fv(=e(fDn_Ys6Vj;xGJ%2H`SKP+~fT|}M`Z{HWs ztw-L1ceBK?L+*PlgUv`BnUuj|Mn+K~B&)G@f+KD(F{p#5*_GLnr82O9)IRhVimmsB zo9>_kUc(+Wk)hN_@y1n_s3S}2tb|5sR{=b-rw$yUofTAEO3%BwzG)Z;WO!nxLI@~&*Zacg?A5MCb(%lePK%&%_eXy z=q(e>`SFsL+c}{L&L*R^o}7MH^C-EDts*Hp9_PZq4`IV4#aS#0{p#6B`P2Dcs}P7x z+X?A`iV}6FFONS3Zn?aIq8PJdd-2TX?qgRIA3w~MYumBZ*-gi06|IxH8t&}Iy6ony zg8@v|@-PghizzR52yknpB;xdB&%hkqyXPzybg`ePnSKVL5vh^ziBS^e24QX^8AMn$ z{#gH3xV_BXkkhdLGw>A4NgO+cCDs|auf!>;bSNUi zzF8cu(e7_)T*56Vdp5$QVsXCvlqWN~tkwDY`2<%*(^7il+e$e{OV1V+M(z?hg>|3x z;IilUj~4J(r_t!?Qr>%@A4>Lj9@1vWiuitc6Qh(vBjkynyjYo60jhk~2)|f|i#|4`yROFArkl6m@kUM=c&uZi4e`-$~5ij-o#l(*f2FstNL!bRkp8SSfBg z&yGwd@>8nOY-;ESeAgo?`iNZ+Nbpn??GDK1H-J?G1x)mksrA|Aep!GqlKGpUUCtz^ z?KIx#;kp`>_gPBPMznFctfoFnwOV4Hutj_!bp8t^w4)`i>eR{!PcoQkw`l6+%oJB* zJBGLGaL<*KUK^e%mKJv`HGaY&6{Ia}Ii^A47Jj(uql;ajZ+I!`mKC7)+XtoFX1>3;a612!U4TQCt2LlVR&Jd(hnY*FD?D1V#tCSj%eK z!n(LLcWHjPNbW3{J7=0Yk%`IC672Ak$S#fm%JKonvCOb(35qya6%$N1&NfK20N28@ zEhhIi`dxR)8#{2{WE6QDPJ33BUCJ8C)s~#u@$7-BjSO#H^*$e3)$z+U9|M>h&HdD# z0%sZF!F3Kt)1GYVw~^#NQSH^l!!o;OR{d0M{b1h z6ZP~lnbS8#+&U0cdzN!si%Ue@7SJ}>TkSQ|y$niIO8!Q8)7a{mTu@d zD+ApZiAt8Z*1SDYw*ZOtUT&cl{WT)=J+D0eRjMPbdTqB16Q@aNF#c9$p@1N(BTfz3 zL3afeepK1MarvSmx6F6CyZA?iE+wO1pgx^>dM_p$mAlSOO6NE(gq~%v;?Fj0KSLdP z+bPz>hg7MZtW9q?9yz-j*zhfDExnBF15IBqv@}~XFO1$?z=!O_82&<;o%aL+*dvI@ zkNo)zJP*RSG<7pxU|7JyzEq|~Jj{zaHmTxt$o%j$4XTvGoL}y|n>WXc>e%~O^5E3% z-GucC3A7U$Y^a0b2s)!9p`BEC!6(jFVi6fwJkN;*c97Tw2>BrW3o^7>RAtN^8^Anf zze+!?6Wk~|aa|Yy?!mieO2lpTwZvI{-EAcEYko5H*al0p{3;f$fg(|OnDzt?-)A`P zrD=IgbwPW%UK4na6};@Z)`wmH7dN2EXM90!>B0#qHLMv)H^@lMj1yrjrTUI4`^RN# z{0rr4E%bec7R~};QPw%PJ+~b@C>%?X7r8-Xd&2eLFi|~VAvd^hkKaKKw+U;LFBx3D z2h!v*N1RExg!<{ydp)QWg@cy#S z%1dbQE4{{i%Ya|+U@Fg-lEXP9BYrv-2{KIiMmQTM$(Z~kS;dz@O&d4?STHzxkPue` zsXM+<;!-6U@ztD9|Oizf@NhreM1BEz90PSneHj&i)uHnTP7XS;Dkm{VHF7CiE%&8c2Hw$#2jF0^Gv zWXrtjNPKdcp7BMJ_)8Q`S7+LYfA06!2+2_=2Z$}JM!eb!xupx7G zp1WDIcOfNT6-dtUBnEL@f8dZGd-<)tpD@c~GlZ1_<8m9W?P7feK{_fx#QWW~hUK&5IH`CXh|ab81)3bH zFC7&H`W%St$Lt#C&HH+<-NyI37p1h*iwAPaZ8$~nIvFxxEuhj)%CsX`pSWc>IUH1? 
zYHffb4eWjHQ%1Bygj9?>ccMvF2QA6MmCO7-FRj@yMPdM#g{vp=+9mzw z+J7H(d#p;Qa?`UZF6a@dNK8lrBeo^Ks90D&%<4-g7N?a*+JHshI`A0p*7CJ=DUbpn zuk##oq?hmuqB55Mn&KhefX$2IcoA4LR8V4ssBS)Ypqx>QTNk)*pEn%@*!`1RzH%g9 zubapI^zb?Km&1{+n7v=?E)yM$)n;|?n|KWMF=jun3)&4R_4Iy@{Je|foDqY!mcowG zI4LK#0vA312{->PB819fjQn%n{jT72RaSl@P9~bkXQh++t7R1ECTQNVtLU-kWQ5P2 z+<62w=K0@Ey`c6r%I7PmfZp(mUVUTCPePK?gGzN&hjUvhqt5X*0zzww+=GK=t*40i z%=toSyupZhkI`E#h>sKliw{ha8=-eHa*`wv#%uOEj=eHRXDkvkpYRou4-+0)X7Iv& zd-xDzs23;R-60ST02~4x5*^s(hXLCKjvYEdU2nBlz`*!nAi>Ci|N2+C^?mr;!-0v8 z5UqiYtAnXEh=z&HibhY*&cKwFnFfdp=IBCW0&=9axBedV0sws%tM-l|3k>Z2V~9V_ zGX6hkE1-kX6YyI{#|&iVprZra zi21(h54E+CR$YaFm%E(6IS%FzwW;j?tgRp`O9y;{dIlB`htdDUAYUHn5CH0m0pEk9 z{wh_zIiBBP{0kQuLNYRh0RLxCz`%fvQGb^zUktDB5Db7A4-aWber^P>B2{My&;{zi z^G(#hD+B3Fd<}iSt8Qg&X!Ohaukxc%V}W|C0LG6E!1w>NJ`gG7`-c@jbC9*6Rm24# z@B$xjGWB<<@?HHA0Z1Ti4?Knbr7S|}vR7?^Ug!kg{{1Kmg@Etb0C@RBSstDNe$U7} zpvqTXkM@rdz`zn1e`e%7_f--7@_ z&J2v~?REYLA`d`4HNfwm^VcAV0_7rOH>d;X5p#O+QtB(9SxMd>I_z zVuUSlG2%zTB6fUN*}&S$M(-ib?a#!U>l>c=0avJ#41N}@>c{Wu8{1if9R8`>5B>Q8 zmvNyU;9HdGXAa;Nze4~_gyA2NWWML%p#@1CauR!hOq~Fk{Mh`Hz3=J+J@^y*50yRF z!_VD-%AfwB5QsT^ov!93NL;pecRLh3~_63IeXX^wF|6O%wJH21Y|3O;-@&8dC zBF6t!s(hO)x8XVIa4dccbYw4JARcZH{#B}c&;N;I>Ft@34m`VZa!?XtJF`&Yc8<&U%(0FOifkI#R= z>%#vho;Bdr{uS;4;jgZHNa^%f0gUqVKjDCU_6~Mt`oHVA2k~(iX36IPR2I|lDfRwmqT?=68J@AqL#TxKi<(mlvtNJ57$6sQxa{j3!Oh>Z0P)%wf=Vv z+mmbBiv=8_HZd3&FvrI@E{dy-~-z6cd7C{Gx>?g-;=A~n$!n7^|FYYT@sk-nA!tT@BW5+;M&9V z_ka)gNdXVw=wS4qvG%|}^$hhMF3$gKwp?;PM851pfS8s(j(U{5ME@lV2?=A^7)E9uyf+ho7cHcgH^= z+5=DE?SXIy|K`UH8ge_efVU&_z&aQ2Z}uL>2k7K?aC-K?nv;rf&(N6zb2N7V=0^_> z4G#6AK>lXzkG~k{JeWC4vtKEXn~DxqF2EKZFxUEV@>libCr}q4*rA=Z&A+x5gkpE& z3_#rjuI5KwOo06z++XJRFZ`cW{9)d<$mIvTGX~b#fRP70^xvh*R|)=4SVPC(FO(l3 zRJHxNqJXK&3b4-paXE2|@IAx>&QBaXj0QK6iinQ@U2A}ghd@n#l`7vR^gpZrF{1uh zj{KWGL`lbKt^f$Rpr1Ru9^;=t|8|PCjgK(1il}A+_czVl^^Dvf36sG^lWS_|5dA7Y9Tp10l_Z; z909}l?^5NfF8+JKKjyYS(D((futMIe4+G@^_QxHM=aPRyKg@pq&Y1Xbs?{1sxXA+O z!UW)se_U(^{Tk~(#@bTP)yVEw%Z7(xGre8cRtoT@4~)DY_3K{#PyW8M{{OPmM$9ki zV+V{sYd|3g0l>dXmG8R3KY{*sBPMowzcSKR9=6D2fVS=d+6w&nyHxqcss9VwuVxtO z*sa7*05z)ty6i`&Q~lpT{l*zhyR&xE0G>DiFwf8b=IO!Kb6EWj3S{kM{hLOT$@u1M z1DtUHa6h_^e3#$B{nMvCNU>7ec)kMYd>mje>&IQf9{+zL{i1abApSZT!SO(!7ZLtE zA$S-1Pmn*hm;ajC0o$JrdOvCZ1LOuRUv~r0VG%EW?y$hP{{s25C?DVw9=q?F0sCDJ zz&wuVhyM4<`fqSQ8{8*1iM;w5o049?^&Zja*{teE~ z${yI1{Z%J&aw9w%1GJbFFmr$+|6QtlDd+zOh&m1r=`6lq7Cf}t1WP=~80bk=!k?`P z|JJ{O{&RTzzsAlsq^U59<16xm5sM1U`j8Yz^kSAqUwWZTsVpKb6N?yEH*30$P@5W2 z-7?EQ7@1jN;EJ%2=tUQq`w&TxMf5_1y-*NZmKC#zu=>*f-g)=zIXj!XGWPA~dCqyB z=bZc8yZ_Ph)x|AcKmUz0nc9(R;v)7X$x@*D%3fT9Tz3|?Nfa2n+pnZRl(uXYt#^z*0)$5B&W@A$S4*xT^; z!XQ-JD)%PEinWy5%3SzSk32oJX!r{f(`TSmu0bYDlIWSqls>Adzv8~1GhuezL}=wz z+maF;7TLfrgU$@BF#SiD@`l{E$Y53@BE`}2eX^r#K=WM|K5~9onI=PA?ux2tq^1>D zp7z3D@i?%RNo^d-<#sA%a6aRC&Dkgw^h?gBzNNBF+P#x4no)mVrcb#E&|iFH)Nllp zQ?c_8xDNiDm~aw{;TFOuySslGXO)$^Rmxl9^Pfi&ZAAqmmqKkTI7r0VX@1nznTkEO z2ssK9GWS`ey=SX>qCkD0tlIMU12CV zk#{o@>p$#oO2la1U^>e)(PNK5K?T8pUY+`|U@kxo3`{oXM=1l+-b7$1d6pByJ#g>< zQjKg*j*Fw9PC&zD2Grfy+OZVos4z!f@wvMhkj+_Wvswdf*J!v744PBk0+WgDe;%fz zae39AG6rRLS@8l#xx-#$vy`CsD9*a^QUrZOt<9#hb-O-S0-TL}D6hJ2RSXz!f7ldm zqx@0-?8`BX)r%cPc75}Hj)NO+#$%ZqYn6APG@^2mDFtw1pk4^%5j~kW?d4pKZ{K;Z z@*x8F6#<-ScnaTM)ltJxq#IJkDBAnoAbQn`S_Y?A4Ge~M?G~6ZbsWa%;Z3IkZ8j@7 z4fPBswDoKliu3^#sEpGr%IFG6L9~sV06jj#fdWn4D1~;L5tPyvhC+JJP#SG{B4~>* za5T~Yh2p3@zrgjjvN$2uPf=7QU7(twToE?Qg=RVhQITr_O1{c~49RPXp)$AvlYN83 z1VwQvh$^cJ(8~4^P`J=uw3>;^_X*(q&d9*<|G_LrDug7^%kM@;%d$wJT)gO=rP5%K)S2C9X literal 0 HcmV?d00001 diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/archived_changes.md 
b/PyTorch/contrib/cv/classification/convmixer/docs/archived_changes.md new file mode 100644 index 0000000000..f8d88fd78f --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/archived_changes.md @@ -0,0 +1,205 @@ +# Archived Changes + +### Dec 18, 2020 +* Add ResNet-101D, ResNet-152D, and ResNet-200D weights trained @ 256x256 + * 256x256 val, 0.94 crop (top-1) - 101D (82.33), 152D (83.08), 200D (83.25) + * 288x288 val, 1.0 crop - 101D (82.64), 152D (83.48), 200D (83.76) + * 320x320 val, 1.0 crop - 101D (83.00), 152D (83.66), 200D (84.01) + +### Dec 7, 2020 +* Simplify EMA module (ModelEmaV2), compatible with fully torchscripted models +* Misc fixes for SiLU ONNX export, default_cfg missing from Feature extraction models, Linear layer w/ AMP + torchscript +* PyPi release @ 0.3.2 (needed by EfficientDet) + + +### Oct 30, 2020 +* Test with PyTorch 1.7 and fix a small top-n metric view vs reshape issue. +* Convert newly added 224x224 Vision Transformer weights from official JAX repo. 81.8 top-1 for B/16, 83.1 L/16. +* Support PyTorch 1.7 optimized, native SiLU (aka Swish) activation. Add mapping to 'silu' name, custom swish will eventually be deprecated. +* Fix regression for loading pretrained classifier via direct model entrypoint functions. Didn't impact create_model() factory usage. +* PyPi release @ 0.3.0 version! + +### Oct 26, 2020 +* Update Vision Transformer models to be compatible with official code release at https://github.com/google-research/vision_transformer +* Add Vision Transformer weights (ImageNet-21k pretrain) for 384x384 base and large models converted from official jax impl + * ViT-B/16 - 84.2 + * ViT-B/32 - 81.7 + * ViT-L/16 - 85.2 + * ViT-L/32 - 81.5 + +### Oct 21, 2020 +* Weights added for Vision Transformer (ViT) models. 77.86 top-1 for 'small' and 79.35 for 'base'. Thanks to [Christof](https://www.kaggle.com/christofhenkel) for training the base model w/ lots of GPUs. + +### Oct 13, 2020 +* Initial impl of Vision Transformer models. Both patch and hybrid (CNN backbone) variants. Currently trying to train... +* Adafactor and AdaHessian (FP32 only, no AMP) optimizers +* EdgeTPU-M (`efficientnet_em`) model trained in PyTorch, 79.3 top-1 +* Pip release, doc updates pending a few more changes... + +### Sept 18, 2020 +* New ResNet 'D' weights. 72.7 (top-1) ResNet-18-D, 77.1 ResNet-34-D, 80.5 ResNet-50-D +* Added a few untrained defs for other ResNet models (66D, 101D, 152D, 200/200D) + +### Sept 3, 2020 +* New weights + * Wide-ResNet50 - 81.5 top-1 (vs 78.5 torchvision) + * SEResNeXt50-32x4d - 81.3 top-1 (vs 79.1 cadene) +* Support for native Torch AMP and channels_last memory format added to train/validate scripts (`--channels-last`, `--native-amp` vs `--apex-amp`) +* Models tested with channels_last on latest NGC 20.08 container. AdaptiveAvgPool in attn layers changed to mean((2,3)) to work around bug with NHWC kernel. + +### Aug 12, 2020 +* New/updated weights from training experiments + * EfficientNet-B3 - 82.1 top-1 (vs 81.6 for official with AA and 81.9 for AdvProp) + * RegNetY-3.2GF - 82.0 top-1 (78.9 from official ver) + * CSPResNet50 - 79.6 top-1 (76.6 from official ver) +* Add CutMix integrated w/ Mixup. See [pull request](https://github.com/rwightman/pytorch-image-models/pull/218) for some usage examples +* Some fixes for using pretrained weights with `in_chans` != 3 on several models. + +### Aug 5, 2020 +Universal feature extraction, new models, new weights, new test sets. 
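+A quick sketch of the universal feature extraction interface described in the bullets below (a minimal, non-authoritative example: `resnet50` is only an example model name, `pretrained=False` avoids a weight download, and the `feature_info` accessors are shown as assumed helpers on the returned wrapper):
+
+```python
+import torch
+import timm
+
+# features_only=True returns a network that outputs a list of feature maps,
+# one per output stride, instead of classification logits.
+model = timm.create_model('resnet50', features_only=True, pretrained=False)
+features = model(torch.randn(1, 3, 224, 224))
+for f in features:
+    print(f.shape)
+print(model.feature_info.channels())   # channels of each returned map (assumed helper)
+print(model.feature_info.reduction())  # stride / reduction of each map (assumed helper)
+```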
+* All models support the `features_only=True` argument for `create_model` call to return a network that extracts feature maps from the deepest layer at each stride. +* New models + * CSPResNet, CSPResNeXt, CSPDarkNet, DarkNet + * ReXNet + * (Modified Aligned) Xception41/65/71 (a proper port of TF models) +* New trained weights + * SEResNet50 - 80.3 top-1 + * CSPDarkNet53 - 80.1 top-1 + * CSPResNeXt50 - 80.0 top-1 + * DPN68b - 79.2 top-1 + * EfficientNet-Lite0 (non-TF ver) - 75.5 (submitted by [@hal-314](https://github.com/hal-314)) +* Add 'real' labels for ImageNet and ImageNet-Renditions test set, see [`results/README.md`](results/README.md) +* Test set ranking/top-n diff script by [@KushajveerSingh](https://github.com/KushajveerSingh) +* Train script and loader/transform tweaks to punch through more aug arguments +* README and documentation overhaul. See initial (WIP) documentation at https://rwightman.github.io/pytorch-image-models/ +* adamp and sgdp optimizers added by [@hellbell](https://github.com/hellbell) + +### June 11, 2020 +Bunch of changes: +* DenseNet models updated with memory efficient addition from torchvision (fixed a bug), blur pooling and deep stem additions +* VoVNet V1 and V2 models added, 39 V2 variant (ese_vovnet_39b) trained to 79.3 top-1 +* Activation factory added along with new activations: + * select act at model creation time for more flexibility in using activations compatible with scripting or tracing (ONNX export) + * hard_mish (experimental) added with memory-efficient grad, along with ME hard_swish + * context mgr for setting exportable/scriptable/no_jit states +* Norm + Activation combo layers added with initial trial support in DenseNet and VoVNet along with impl of EvoNorm and InplaceAbn wrapper that fit the interface +* Torchscript works for all but two of the model types as long as using Pytorch 1.5+, tests added for this +* Some import cleanup and classifier reset changes, all models will have classifier reset to nn.Identity on reset_classifer(0) call +* Prep for 0.1.28 pip release + +### May 12, 2020 +* Add ResNeSt models (code adapted from https://github.com/zhanghang1989/ResNeSt, paper https://arxiv.org/abs/2004.08955) + +### May 3, 2020 +* Pruned EfficientNet B1, B2, and B3 (https://arxiv.org/abs/2002.08258) contributed by [Yonathan Aflalo](https://github.com/yoniaflalo) + +### May 1, 2020 +* Merged a number of excellent contributions in the ResNet model family over the past month + * BlurPool2D and resnetblur models initiated by [Chris Ha](https://github.com/VRandme), I trained resnetblur50 to 79.3. + * TResNet models and SpaceToDepth, AntiAliasDownsampleLayer layers by [mrT23](https://github.com/mrT23) + * ecaresnet (50d, 101d, light) models and two pruned variants using pruning as per (https://arxiv.org/abs/2002.08258) by [Yonathan Aflalo](https://github.com/yoniaflalo) +* 200 pretrained models in total now with updated results csv in results folder + +### April 5, 2020 +* Add some newly trained MobileNet-V2 models trained with latest h-params, rand augment. They compare quite favourably to EfficientNet-Lite + * 3.5M param MobileNet-V2 100 @ 73% + * 4.5M param MobileNet-V2 110d @ 75% + * 6.1M param MobileNet-V2 140 @ 76.5% + * 5.8M param MobileNet-V2 120d @ 77.3% + +### March 18, 2020 +* Add EfficientNet-Lite models w/ weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite) +* Add RandAugment trained ResNeXt-50 32x4d weights with 79.8 top-1.
Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams) + +### Feb 29, 2020 +* New MobileNet-V3 Large weights trained from scratch with this code to 75.77% top-1 +* IMPORTANT CHANGE - default weight init changed for all MobilenetV3 / EfficientNet / related models + * overall results similar to a bit better training from scratch on a few smaller models tried + * performance early in training seems consistently improved but less difference by end + * set `fix_group_fanout=False` in `_init_weight_goog` fn if you need to reproduce past behaviour +* Experimental LR noise feature added, applies a random perturbation to LR each epoch in specified range of training + +### Feb 18, 2020 +* Big refactor of model layers and addition of several attention mechanisms. Several additions motivated by 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268): + * Move layer/module impl into `layers` subfolder/module of `models` and organize in a more granular fashion + * ResNet downsample paths now properly support dilation (output stride != 32) for avg_pool ('D' variant) and 3x3 (SENets) networks + * Add Selective Kernel Nets on top of ResNet base, pretrained weights + * skresnet18 - 73% top-1 + * skresnet34 - 76.9% top-1 + * skresnext50_32x4d (equiv to SKNet50) - 80.2% top-1 + * ECA and CECA (circular padding) attention layer contributed by [Chris Ha](https://github.com/VRandme) + * CBAM attention experiment (not the best results so far, may remove) + * Attention factory to allow dynamically selecting one of SE, ECA, CBAM in the `.se` position for all ResNets + * Add DropBlock and DropPath (formerly DropConnect for EfficientNet/MobileNetv3) support to all ResNet variants +* Full dataset results updated that incl NoisyStudent weights and 2 of the 3 SK weights + +### Feb 12, 2020 +* Add EfficientNet-L2 and B0-B7 NoisyStudent weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet) + +### Feb 6, 2020 +* Add RandAugment trained EfficientNet-ES (EdgeTPU-Small) weights with 78.1 top-1. Trained by [Andrew Lavin](https://github.com/andravin) (see Training section for hparams) + +### Feb 1/2, 2020 +* Port new EfficientNet-B8 (RandAugment) weights, these are different than the B8 AdvProp, different input normalization. +* Update results csv files on all models for ImageNet validation and three other test sets +* Push PyPi package update + +### Jan 31, 2020 +* Update ResNet50 weights with a new 79.038 result from further JSD / AugMix experiments. Full command line for reproduction in training section below. + +### Jan 11/12, 2020 +* Master may be a bit unstable wrt training, these changes have been tested but not all combos +* Implementations of AugMix added to existing RA and AA.
Including numerous supporting pieces like JSD loss (Jensen-Shannon divergence + CE), and AugMixDataset +* SplitBatchNorm adaptation layer added for implementing Auxiliary BN as per AdvProp paper +* ResNet-50 AugMix trained model w/ 79% top-1 added +* `seresnext26tn_32x4d` - 77.99 top-1, 93.75 top-5 added to tiered experiment, higher img/s than 't' and 'd' + +### Jan 3, 2020 +* Add RandAugment trained EfficientNet-B0 weight with 77.7 top-1. Trained by [Michael Klachko](https://github.com/michaelklachko) with this code and recent hparams (see Training section) +* Add `avg_checkpoints.py` script for post training weight averaging and update all scripts with header docstrings and shebangs. + +### Dec 30, 2019 +* Merge [Dushyant Mehta's](https://github.com/mehtadushy) PR for SelecSLS (Selective Short and Long Range Skip Connections) networks. Good GPU memory consumption and throughput. Original: https://github.com/mehtadushy/SelecSLS-Pytorch + +### Dec 28, 2019 +* Add new model weights and training hparams (see Training Hparams section) + * `efficientnet_b3` - 81.5 top-1, 95.7 top-5 at default res/crop, 81.9, 95.8 at 320x320 1.0 crop-pct + * trained with RandAugment, ended up with an interesting but less than perfect result (see training section) + * `seresnext26d_32x4d` - 77.6 top-1, 93.6 top-5 + * deep stem (32, 32, 64), avgpool downsample + * stem/downsample from bag-of-tricks paper + * `seresnext26t_32x4d` - 78.0 top-1, 93.7 top-5 + * deep tiered stem (24, 48, 64), avgpool downsample (a modified 'D' variant) + * stem sizing mods from Jeremy Howard and fastai devs discussing ResNet architecture experiments + +### Dec 23, 2019 +* Add RandAugment trained MixNet-XL weights with 80.48 top-1. +* `--dist-bn` argument added to train.py, will distribute BN stats between nodes after each train epoch, before eval + +### Dec 4, 2019 +* Added weights from the first training from scratch of an EfficientNet (B2) with my new RandAugment implementation. Much better than my previous B2 and very close to the official AdvProp ones (80.4 top-1, 95.08 top-5). + +### Nov 29, 2019 +* Brought EfficientNet and MobileNetV3 up to date with my https://github.com/rwightman/gen-efficientnet-pytorch code. Torchscript and ONNX export compat excluded. + * AdvProp weights added + * Official TF MobileNetv3 weights added +* EfficientNet and MobileNetV3 hook based 'feature extraction' classes added. Will serve as basis for using models as backbones in obj detection/segmentation tasks. Lots more to be done here... +* HRNet classification models and weights added from https://github.com/HRNet/HRNet-Image-Classification +* Consistency in global pooling, `reset_classifer`, and `forward_features` across models + * `forward_features` always returns unpooled feature maps now +* Reasonable chance I broke something... let me know + +### Nov 22, 2019 +* Add ImageNet training RandAugment implementation alongside AutoAugment. PyTorch Transform compatible format, using PIL. Currently training two EfficientNet models from scratch with promising results... will update. +* `drop-connect` cmd line arg finally added to `train.py`, no need to hack model fns. Works for efficientnet/mobilenetv3 based models, ignored otherwise.
\ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/changes.md b/PyTorch/contrib/cv/classification/convmixer/docs/changes.md new file mode 100644 index 0000000000..6ff5075639 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/changes.md @@ -0,0 +1,130 @@ +# Recent Changes + +### June 8, 2021 +* Add first ResMLP weights, trained in PyTorch XLA on TPU-VM w/ my XLA branch. 24 block variant, 79.2 top-1. +* Add ResNet51-Q model w/ pretrained weights at 82.36 top-1. + * NFNet inspired block layout with quad layer stem and no maxpool + * Same param count (35.7M) and throughput as ResNetRS-50 but +1.5 top-1 @ 224x224 and +2.5 top-1 at 288x288 + +### May 25, 2021 +* Add LeViT, Visformer, Convit (PR by Aman Arora), Twins (PR by paper authors) transformer models +* Cleanup input_size/img_size override handling and testing for all vision transformer models +* Add `efficientnetv2_rw_m` model and weights (started training before official code). 84.8 top-1, 53M params. + +### May 14, 2021 +* Add EfficientNet-V2 official model defs w/ ported weights from official [Tensorflow/Keras](https://github.com/google/automl/tree/master/efficientnetv2) impl. + * 1k trained variants: `tf_efficientnetv2_s/m/l` + * 21k trained variants: `tf_efficientnetv2_s/m/l_in21k` + * 21k pretrained -> 1k fine-tuned: `tf_efficientnetv2_s/m/l_in21ft1k` + * v2 models w/ v1 scaling: `tf_efficientnetv2_b0` through `b3` + * Rename my prev V2 guess `efficientnet_v2s` -> `efficientnetv2_rw_s` + * Some blank `efficientnetv2_*` models in-place for future native PyTorch training + +### May 5, 2021 +* Add MLP-Mixer models and port pretrained weights from [Google JAX impl](https://github.com/google-research/vision_transformer/tree/linen) +* Add CaiT models and pretrained weights from [FB](https://github.com/facebookresearch/deit) +* Add ResNet-RS models and weights from [TF](https://github.com/tensorflow/tpu/tree/master/models/official/resnet/resnet_rs). Thanks [Aman Arora](https://github.com/amaarora) +* Add CoaT models and weights. Thanks [Mohammed Rizin](https://github.com/morizin) +* Add new ImageNet-21k weights & finetuned weights for TResNet, MobileNet-V3, ViT models. Thanks [mrT](https://github.com/mrT23) +* Add GhostNet models and weights. Thanks [Kai Han](https://github.com/iamhankai) +* Update ByoaNet attention models + * Improve SA module inits + * Hack together experimental stand-alone Swin based attn module and `swinnet` + * Consistent '26t' model defs for experiments. +* Add improved Efficientnet-V2S (prelim model def) weights. 83.8 top-1. +* WandB logging support + +### April 13, 2021 +* Add Swin Transformer models and weights from https://github.com/microsoft/Swin-Transformer + +### April 12, 2021 +* Add ECA-NFNet-L1 (slimmed down F1 w/ SiLU, 41M params) trained with this code. 84% top-1 @ 320x320. Trained at 256x256. +* Add EfficientNet-V2S model (unverified model definition) weights. 83.3 top-1 @ 288x288. Only trained single res 224. Working on progressive training.
+* Add ByoaNet model definition (Bring-your-own-attention) w/ SelfAttention block and corresponding SA/SA-like modules and model defs + * Lambda Networks - https://arxiv.org/abs/2102.08602 + * Bottleneck Transformers - https://arxiv.org/abs/2101.11605 + * Halo Nets - https://arxiv.org/abs/2103.12731 +* AdaBelief optimizer contributed by Juntang Zhuang + +### April 1, 2021 +* Add snazzy `benchmark.py` script for bulk `timm` model benchmarking of train and/or inference +* Add Pooling-based Vision Transformer (PiT) models (from https://github.com/naver-ai/pit) + * Merged distilled variant into main for torchscript compatibility + * Some `timm` cleanup/style tweaks and weights have hub download support +* Cleanup Vision Transformer (ViT) models + * Merge distilled (DeiT) model into main so that torchscript can work + * Support updated weight init (defaults to old still) that closer matches original JAX impl (possibly better training from scratch) + * Separate hybrid model defs into different file and add several new model defs to fiddle with, support patch_size != 1 for hybrids + * Fix fine-tuning num_class changes (PiT and ViT) and pos_embed resizing (Vit) with distilled variants + * nn.Sequential for block stack (does not break downstream compat) +* TnT (Transformer-in-Transformer) models contributed by author (from https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT) +* Add RegNetY-160 weights from DeiT teacher model +* Add new NFNet-L0 w/ SE attn (rename `nfnet_l0b`->`nfnet_l0`) weights 82.75 top-1 @ 288x288 +* Some fixes/improvements for TFDS dataset wrapper + +### March 7, 2021 +* First 0.4.x PyPi release w/ NFNets (& related), ByoB (GPU-Efficient, RepVGG, etc). +* Change feature extraction for pre-activation nets (NFNets, ResNetV2) to return features before activation. + +### Feb 18, 2021 +* Add pretrained weights and model variants for NFNet-F* models from [DeepMind Haiku impl](https://github.com/deepmind/deepmind-research/tree/master/nfnets). + * Models are prefixed with `dm_`. They require SAME padding conv, skipinit enabled, and activation gains applied in act fn. + * These models are big, expect to run out of GPU memory. With the GELU activation + other options, they are roughly 1/2 the inference speed of my SiLU PyTorch optimized `s` variants. + * Original model results are based on pre-processing that is not the same as all other models so you'll see different results in the results csv (once updated). + * Matching the original pre-processing as closely as possible I get these results: + * `dm_nfnet_f6` - 86.352 + * `dm_nfnet_f5` - 86.100 + * `dm_nfnet_f4` - 85.834 + * `dm_nfnet_f3` - 85.676 + * `dm_nfnet_f2` - 85.178 + * `dm_nfnet_f1` - 84.696 + * `dm_nfnet_f0` - 83.464 + +### Feb 16, 2021 +* Add Adaptive Gradient Clipping (AGC) as per https://arxiv.org/abs/2102.06171. Integrated w/ PyTorch gradient clipping via mode arg that defaults to prev 'norm' mode. For backward arg compat, clip-grad arg must be specified to enable when using train.py. + * AGC w/ default clipping factor `--clip-grad .01 --clip-mode agc` + * PyTorch global norm of 1.0 (old behaviour, always norm), `--clip-grad 1.0` + * PyTorch value clipping of 10, `--clip-grad 10. --clip-mode value` + * AGC performance is definitely sensitive to the clipping factor. More experimentation needed to determine good values for smaller batch sizes and optimizers besides those in paper. So far I've found .001-.005 is necessary for stable RMSProp training w/ NFNet/NF-ResNet. A simplified sketch of the clipping rule is shown below.
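+
+A minimal, illustrative sketch of the clipping rule those flags control (not the exact implementation here; the paper clips unit-wise per output row, this sketch clips per parameter tensor for brevity):
+
+```python
+import torch
+
+def adaptive_grad_clip_(parameters, clip_factor=0.01, eps=1e-3):
+    # Scale each gradient in-place so that ||g|| <= clip_factor * max(||p||, eps).
+    for p in parameters:
+        if p.grad is None:
+            continue
+        p_norm = p.detach().norm().clamp(min=eps)  # guard against tiny/zero params
+        g_norm = p.grad.detach().norm()
+        max_norm = clip_factor * p_norm
+        if g_norm > max_norm:
+            p.grad.detach().mul_(max_norm / (g_norm + 1e-6))
+
+# hypothetical training step:
+#   loss.backward()
+#   adaptive_grad_clip_(model.parameters(), clip_factor=0.01)
+#   optimizer.step()
+```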
+ +### Feb 12, 2021 +* Update Normalization-Free nets to include new NFNet-F (https://arxiv.org/abs/2102.06171) model defs + +### Feb 10, 2021 +* More model archs, incl a flexible ByobNet backbone ('Bring-your-own-blocks') + * GPU-Efficient-Networks (https://github.com/idstcv/GPU-Efficient-Networks), impl in `byobnet.py` + * RepVGG (https://github.com/DingXiaoH/RepVGG), impl in `byobnet.py` + * classic VGG (from torchvision, impl in `vgg`) +* Refinements to normalizer layer arg handling and normalizer+act layer handling in some models +* Default AMP mode changed to native PyTorch AMP instead of APEX. Issues not being fixed with APEX. Native works with `--channels-last` and `--torchscript` model training, APEX does not. +* Fix a few bugs introduced since last pypi release + +### Feb 8, 2021 +* Add several ResNet weights with ECA attention. 26t & 50t trained @ 256, test @ 320. 269d train @ 256, fine-tune @320, test @ 352. + * `ecaresnet26t` - 79.88 top-1 @ 320x320, 79.08 @ 256x256 + * `ecaresnet50t` - 82.35 top-1 @ 320x320, 81.52 @ 256x256 + * `ecaresnet269d` - 84.93 top-1 @ 352x352, 84.87 @ 320x320 +* Remove separate tiered (`t`) vs tiered_narrow (`tn`) ResNet model defs, all `tn` changed to `t` and `t` models removed (`seresnext26t_32x4d` only model w/ weights that was removed). +* Support model default_cfgs with separate train vs test resolution `test_input_size` and remove extra `_320` suffix ResNet model defs that were just for test. + +### Jan 30, 2021 +* Add initial "Normalization Free" NF-RegNet-B* and NF-ResNet model definitions based on [paper](https://arxiv.org/abs/2101.08692) + +### Jan 25, 2021 +* Add ResNetV2 Big Transfer (BiT) models w/ ImageNet-1k and 21k weights from https://github.com/google-research/big_transfer +* Add official R50+ViT-B/16 hybrid models + weights from https://github.com/google-research/vision_transformer +* ImageNet-21k ViT weights are added w/ model defs and representation layer (pre logits) support + * NOTE: ImageNet-21k classifier heads were zero'd in original weights, they are only useful for transfer learning +* Add model defs and weights for DeiT Vision Transformer models from https://github.com/facebookresearch/deit +* Refactor dataset classes into ImageDataset/IterableImageDataset + dataset specific parser classes +* Add Tensorflow-Datasets (TFDS) wrapper to allow use of TFDS image classification sets with train script + * Ex: `train.py /data/tfds --dataset tfds/oxford_iiit_pet --val-split test --model resnet50 -b 256 --amp --num-classes 37 --opt adamw --lr 3e-4 --weight-decay .001 --pretrained -j 2` +* Add improved .tar dataset parser that reads images from .tar, folder of .tar files, or .tar within .tar + * Run validation on full ImageNet-21k directly from tar w/ BiT model: `validate.py /data/fall11_whole.tar --model resnetv2_50x1_bitm_in21k --amp` +* Models in this update should be stable w/ possible exception of ViT/BiT, possibility of some regressions with train/val scripts and dataset handling + +### Jan 3, 2021 +* Add SE-ResNet-152D weights + * 256x256 val, 0.94 crop top-1 - 83.75 + * 320x320 val, 1.0 crop - 84.36 +* Update results files diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/feature_extraction.md b/PyTorch/contrib/cv/classification/convmixer/docs/feature_extraction.md new file mode 100644 index 0000000000..b41c15597b --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/feature_extraction.md @@ -0,0 +1,173 @@ +# Feature Extraction + +All of the models in `timm` have consistent mechanisms for obtaining 
various types of features from the model for tasks besides classification. + +## Penultimate Layer Features (Pre-Classifier Features) + +The features from the penultimate model layer can be obtained in several ways without requiring model surgery (although feel free to do surgery). One must first decide if they want pooled or un-pooled features. + +### Unpooled + +There are three ways to obtain unpooled features. + +Without modifying the network, one can call `model.forward_features(input)` on any model instead of the usual `model(input)`. This will bypass the head classifier and global pooling for networks. + +If one wants to explicitly modify the network to return unpooled features, they can either create the model without a classifier and pooling, or remove it later. Both paths remove the parameters associated with the classifier from the network. + +#### forward_features() +```python hl_lines="3 6" +import torch +import timm +m = timm.create_model('xception41', pretrained=True) +o = m(torch.randn(2, 3, 299, 299)) +print(f'Original shape: {o.shape}') +o = m.forward_features(torch.randn(2, 3, 299, 299)) +print(f'Unpooled shape: {o.shape}') +``` +Output: +```text +Original shape: torch.Size([2, 1000]) +Unpooled shape: torch.Size([2, 2048, 10, 10]) +``` + +#### Create with no classifier and pooling +```python hl_lines="3" +import torch +import timm +m = timm.create_model('resnet50', pretrained=True, num_classes=0, global_pool='') +o = m(torch.randn(2, 3, 224, 224)) +print(f'Unpooled shape: {o.shape}') +``` +Output: +```text +Unpooled shape: torch.Size([2, 2048, 7, 7]) +``` + +#### Remove it later +```python hl_lines="3 6" +import torch +import timm +m = timm.create_model('densenet121', pretrained=True) +o = m(torch.randn(2, 3, 224, 224)) +print(f'Original shape: {o.shape}') +m.reset_classifier(0, '') +o = m(torch.randn(2, 3, 224, 224)) +print(f'Unpooled shape: {o.shape}') +``` +Output: +```text +Original shape: torch.Size([2, 1000]) +Unpooled shape: torch.Size([2, 1024, 7, 7]) +``` + +### Pooled + +To modify the network to return pooled features, one can use `forward_features()` and pool/flatten the result themselves, or modify the network like above but keep pooling intact. + +#### Create with no classifier +```python hl_lines="3" +import torch +import timm +m = timm.create_model('resnet50', pretrained=True, num_classes=0) +o = m(torch.randn(2, 3, 224, 224)) +print(f'Pooled shape: {o.shape}') +``` +Output: +```text +Pooled shape: torch.Size([2, 2048]) +``` + +#### Remove it later +```python hl_lines="3 6" +import torch +import timm +m = timm.create_model('ese_vovnet19b_dw', pretrained=True) +o = m(torch.randn(2, 3, 224, 224)) +print(f'Original shape: {o.shape}') +m.reset_classifier(0) +o = m(torch.randn(2, 3, 224, 224)) +print(f'Pooled shape: {o.shape}') +``` +Output: +```text +Pooled shape: torch.Size([2, 1024]) +``` + + +## Multi-scale Feature Maps (Feature Pyramid) + +Object detection, segmentation, keypoint, and a variety of dense pixel tasks require access to feature maps from the backbone network at multiple scales. This is often done by modifying the original classification network. Since each network varies quite a bit in structure, it's not uncommon to see only a few backbones supported in any given obj detection or segmentation library. + +`timm` allows a consistent interface for creating any of the included models as feature backbones that output feature maps for selected levels. 
+ +A feature backbone can be created by adding the argument `features_only=True` to any `create_model` call. By default 5 strides will be output from most models (not all have that many), with the first starting at 2 (some start at 1 or 4). + +### Create a feature map extraction model +```python hl_lines="3" +import torch +import timm +m = timm.create_model('resnest26d', features_only=True, pretrained=True) +o = m(torch.randn(2, 3, 224, 224)) +for x in o: + print(x.shape) +``` +Output: +```text +torch.Size([2, 64, 112, 112]) +torch.Size([2, 256, 56, 56]) +torch.Size([2, 512, 28, 28]) +torch.Size([2, 1024, 14, 14]) +torch.Size([2, 2048, 7, 7]) +``` + +### Query the feature information + +After a feature backbone has been created, it can be queried to provide channel or resolution reduction information to the downstream heads without requiring static config or hardcoded constants. The `.feature_info` attribute is a class encapsulating the information about the feature extraction points. + +```python hl_lines="3 4" +import torch +import timm +m = timm.create_model('regnety_032', features_only=True, pretrained=True) +print(f'Feature channels: {m.feature_info.channels()}') +o = m(torch.randn(2, 3, 224, 224)) +for x in o: + print(x.shape) +``` +Output: +```text +Feature channels: [32, 72, 216, 576, 1512] +torch.Size([2, 32, 112, 112]) +torch.Size([2, 72, 56, 56]) +torch.Size([2, 216, 28, 28]) +torch.Size([2, 576, 14, 14]) +torch.Size([2, 1512, 7, 7]) +``` + +### Select specific feature levels or limit the stride + +There are two additional creation arguments impacting the output features. + +* `out_indices` selects which indices to output +* `output_stride` limits the feature output stride of the network (also works in classification mode BTW) + +`out_indices` is supported by all models, but not all models have the same index to feature stride mapping. Look at the code or check feature_info to compare. The out indices generally correspond to the `C(i+1)th` feature level (a `2^(i+1)` reduction). For most models, index 0 is the stride 2 features, and index 4 is stride 32. + +`output_stride` is achieved by converting layers to use dilated convolutions. Doing so is not always straightforward; some networks only support `output_stride=32`. + +```python hl_lines="3 4 5" +import torch +import timm +m = timm.create_model('ecaresnet101d', features_only=True, output_stride=8, out_indices=(2, 4), pretrained=True) +print(f'Feature channels: {m.feature_info.channels()}') +print(f'Feature reduction: {m.feature_info.reduction()}') +o = m(torch.randn(2, 3, 320, 320)) +for x in o: + print(x.shape) +``` +Output: +```text +Feature channels: [512, 2048] +Feature reduction: [8, 8] +torch.Size([2, 512, 40, 40]) +torch.Size([2, 2048, 40, 40]) +``` diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/index.md b/PyTorch/contrib/cv/classification/convmixer/docs/index.md new file mode 100644 index 0000000000..95f7df642a --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/index.md @@ -0,0 +1,80 @@ +# Getting Started + +## Welcome + +Welcome to the `timm` documentation, a lean set of docs that covers the basics of `timm`. + +For a more comprehensive set of docs (currently under development), please visit [timmdocs](https://fastai.github.io/timmdocs/) by [Aman Arora](https://github.com/amaarora). + +## Install + +The library can be installed with pip: + +``` +pip install timm +``` + +I update the PyPi (pip) packages when I'm confident there are no significant model regressions from previous releases.
If you want to pip install the bleeding edge from GitHub, use: +``` +pip install git+https://github.com/rwightman/pytorch-image-models.git +``` + +!!! info "Conda Environment" + All development and testing has been done in Conda Python 3 environments on Linux x86-64 systems, specifically Python 3.6.x, 3.7.x, 3.8.x, 3.9 + + Little to no care has been taken to be Python 2.x friendly, and it will not be supported. If you run into any challenges running on Windows, or other OS, I'm definitely open to looking into those issues so long as it's in a reproducible (read Conda) environment. + + PyTorch versions 1.4, 1.5.x, 1.6, 1.7.x, and 1.8 have been tested with this code. + + I've tried to keep the dependencies minimal; the setup is as per the PyTorch default install instructions for Conda: + ``` + conda create -n torch-env + conda activate torch-env + conda install pytorch torchvision cudatoolkit=11.1 -c pytorch -c conda-forge + conda install pyyaml + ``` + +## Load a Pretrained Model + +Pretrained models can be loaded using `timm.create_model` + +```python +import timm + +m = timm.create_model('mobilenetv3_large_100', pretrained=True) +m.eval() +``` + +## List Models with Pretrained Weights +```python +import timm +from pprint import pprint +model_names = timm.list_models(pretrained=True) +pprint(model_names) +>>> ['adv_inception_v3', + 'cspdarknet53', + 'cspresnext50', + 'densenet121', + 'densenet161', + 'densenet169', + 'densenet201', + 'densenetblur121d', + 'dla34', + 'dla46_c', +... +] +``` + +## List Model Architectures by Wildcard +```python +import timm +from pprint import pprint +model_names = timm.list_models('*resne*t*') +pprint(model_names) +>>> ['cspresnet50', + 'cspresnet50d', + 'cspresnet50w', + 'cspresnext50', +... +] +``` diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/javascripts/tables.js b/PyTorch/contrib/cv/classification/convmixer/docs/javascripts/tables.js new file mode 100644 index 0000000000..5f21b4d22c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/javascripts/tables.js @@ -0,0 +1,6 @@ +app.location$.subscribe(function() { + var tables = document.querySelectorAll("article table") + tables.forEach(function(table) { + new Tablesort(table) + }) +}) \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models.md b/PyTorch/contrib/cv/classification/convmixer/docs/models.md new file mode 100644 index 0000000000..fd43805ea9 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models.md @@ -0,0 +1,171 @@ +# Model Summaries + +The model architectures included come from a wide variety of sources. Sources, including papers, original impl ("reference code") that I rewrote / adapted, and PyTorch impl that I leveraged directly ("code") are listed below. + +Most included models have pretrained weights. The weights are either: + +1. from their original sources +2. ported by myself from their original impl in a different framework (e.g. Tensorflow models) +3. trained from scratch using the included training script + +The validation results for the pretrained weights are [here](results.md). + +A more exciting view (with pretty pictures) of the models within `timm` can be found at [paperswithcode](https://paperswithcode.com/lib/timm).
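+
+To check which of the above cases applies to a particular model, the created model's pretrained configuration can be inspected; it includes the weight URL and the expected input settings. A quick sketch (assuming a reasonably recent `timm`; the exact `default_cfg` keys vary by version):
+
+```python
+import timm
+from pprint import pprint
+
+# List the pretrained variants of one of the families summarised below...
+pprint(timm.list_models('cspresnet*', pretrained=True))
+
+# ...then inspect where the default weights come from and how inputs are configured.
+m = timm.create_model('cspresnet50', pretrained=False)
+pprint(m.default_cfg)  # e.g. 'url', 'input_size', 'crop_pct', 'mean', 'std'
+```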
+ +## Big Transfer ResNetV2 (BiT) [[resnetv2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnetv2.py)] +* Paper: `Big Transfer (BiT): General Visual Representation Learning` - https://arxiv.org/abs/1912.11370 +* Reference code: https://github.com/google-research/big_transfer + +## Cross-Stage Partial Networks [[cspnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/cspnet.py)] +* Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 +* Reference impl: https://github.com/WongKinYiu/CrossStagePartialNetworks + +## DenseNet [[densenet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/densenet.py)] +* Paper: `Densely Connected Convolutional Networks` - https://arxiv.org/abs/1608.06993 +* Code: https://github.com/pytorch/vision/tree/master/torchvision/models + +## DLA [[dla.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dla.py)] +* Paper: https://arxiv.org/abs/1707.06484 +* Code: https://github.com/ucbdrive/dla + +## Dual-Path Networks [[dpn.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/dpn.py)] +* Paper: `Dual Path Networks` - https://arxiv.org/abs/1707.01629 +* My PyTorch code: https://github.com/rwightman/pytorch-dpn-pretrained +* Reference code: https://github.com/cypw/DPNs + +## GPU-Efficient Networks [[byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py)] +* Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 +* Reference code: https://github.com/idstcv/GPU-Efficient-Networks + +## HRNet [[hrnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/hrnet.py)] +* Paper: `Deep High-Resolution Representation Learning for Visual Recognition` - https://arxiv.org/abs/1908.07919 +* Code: https://github.com/HRNet/HRNet-Image-Classification + +## Inception-V3 [[inception_v3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v3.py)] +* Paper: `Rethinking the Inception Architecture for Computer Vision` - https://arxiv.org/abs/1512.00567 +* Code: https://github.com/pytorch/vision/tree/master/torchvision/models + +## Inception-V4 [[inception_v4.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_v4.py)] +* Paper: `Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261 +* Code: https://github.com/Cadene/pretrained-models.pytorch +* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets + +## Inception-ResNet-V2 [[inception_resnet_v2.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/inception_resnet_v2.py)] +* Paper: `Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning` - https://arxiv.org/abs/1602.07261 +* Code: https://github.com/Cadene/pretrained-models.pytorch +* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets + +## NASNet-A [[nasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/nasnet.py)] +* Papers: `Learning Transferable Architectures for Scalable Image Recognition` - https://arxiv.org/abs/1707.07012 +* Code: https://github.com/Cadene/pretrained-models.pytorch +* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet + +## PNasNet-5 
[[pnasnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/pnasnet.py)] +* Papers: `Progressive Neural Architecture Search` - https://arxiv.org/abs/1712.00559 +* Code: https://github.com/Cadene/pretrained-models.pytorch +* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/nasnet + +## EfficientNet [[efficientnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/efficientnet.py)] + +* Papers: + * EfficientNet NoisyStudent (B0-B7, L2) - https://arxiv.org/abs/1911.04252 + * EfficientNet AdvProp (B0-B8) - https://arxiv.org/abs/1911.09665 + * EfficientNet (B0-B7) - https://arxiv.org/abs/1905.11946 + * EfficientNet-EdgeTPU (S, M, L) - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html + * MixNet - https://arxiv.org/abs/1907.09595 + * MNASNet B1, A1 (Squeeze-Excite), and Small - https://arxiv.org/abs/1807.11626 + * MobileNet-V2 - https://arxiv.org/abs/1801.04381 + * FBNet-C - https://arxiv.org/abs/1812.03443 + * Single-Path NAS - https://arxiv.org/abs/1904.02877 +* My PyTorch code: https://github.com/rwightman/gen-efficientnet-pytorch +* Reference code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + +## MobileNet-V3 [[mobilenetv3.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mobilenetv3.py)] +* Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244 +* Reference code: https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet + +## RegNet [[regnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/regnet.py)] +* Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678 +* Reference code: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py + +## RepVGG [[byobnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/byobnet.py)] +* Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 +* Reference code: https://github.com/DingXiaoH/RepVGG + +## ResNet, ResNeXt [[resnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py)] + +* ResNet (V1B) + * Paper: `Deep Residual Learning for Image Recognition` - https://arxiv.org/abs/1512.03385 + * Code: https://github.com/pytorch/vision/tree/master/torchvision/models +* ResNeXt + * Paper: `Aggregated Residual Transformations for Deep Neural Networks` - https://arxiv.org/abs/1611.05431 + * Code: https://github.com/pytorch/vision/tree/master/torchvision/models +* 'Bag of Tricks' / Gluon C, D, E, S ResNet variants + * Paper: `Bag of Tricks for Image Classification with CNNs` - https://arxiv.org/abs/1812.01187 + * Code: https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnetv1b.py +* Instagram pretrained / ImageNet tuned ResNeXt101 + * Paper: `Exploring the Limits of Weakly Supervised Pretraining` - https://arxiv.org/abs/1805.00932 + * Weights: https://pytorch.org/hub/facebookresearch_WSL-Images_resnext (NOTE: CC BY-NC 4.0 License, NOT commercial friendly) +* Semi-supervised (SSL) / Semi-weakly Supervised (SWSL) ResNet and ResNeXts + * Paper: `Billion-scale semi-supervised learning for image classification` - https://arxiv.org/abs/1905.00546 + * Weights: https://github.com/facebookresearch/semi-supervised-ImageNet1K-models (NOTE: CC BY-NC 4.0 License, NOT commercial friendly) +* Squeeze-and-Excitation Networks + * Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 + * 
Code: Added to ResNet base, this is the current version going forward; old `senet.py` is being deprecated +* ECAResNet (ECA-Net) + * Paper: `ECA-Net: Efficient Channel Attention for Deep CNN` - https://arxiv.org/abs/1910.03151v4 + * Code: Added to ResNet base, ECA module contributed by @VRandme, reference https://github.com/BangguWu/ECANet + +## Res2Net [[res2net.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/res2net.py)] +* Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 +* Code: https://github.com/gasvn/Res2Net + +## ResNeSt [[resnest.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnest.py)] +* Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 +* Code: https://github.com/zhanghang1989/ResNeSt + +## ReXNet [[rexnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/rexnet.py)] +* Paper: `ReXNet: Diminishing Representational Bottleneck on CNN` - https://arxiv.org/abs/2007.00992 +* Code: https://github.com/clovaai/rexnet + +## Selective-Kernel Networks [[sknet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/sknet.py)] +* Paper: `Selective-Kernel Networks` - https://arxiv.org/abs/1903.06586 +* Code: https://github.com/implus/SKNet, https://github.com/clovaai/assembled-cnn + +## SelecSLS [[selecsls.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/selecsls.py)] +* Paper: `XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera` - https://arxiv.org/abs/1907.00837 +* Code: https://github.com/mehtadushy/SelecSLS-Pytorch + +## Squeeze-and-Excitation Networks [[senet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/senet.py)] +NOTE: I am deprecating this version of the networks; the new ones are part of `resnet.py` + +* Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 +* Code: https://github.com/Cadene/pretrained-models.pytorch + +## TResNet [[tresnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/tresnet.py)] +* Paper: `TResNet: High Performance GPU-Dedicated Architecture` - https://arxiv.org/abs/2003.13630 +* Code: https://github.com/mrT23/TResNet + +## VGG [[vgg.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vgg.py)] +* Paper: `Very Deep Convolutional Networks For Large-Scale Image Recognition` - https://arxiv.org/pdf/1409.1556.pdf +* Reference code: https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py + +## Vision Transformer [[vision_transformer.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py)] +* Paper: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 +* Reference code and pretrained weights: https://github.com/google-research/vision_transformer + +## VovNet V2 and V1 [[vovnet.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vovnet.py)] +* Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 +* Reference code: https://github.com/youngwanLEE/vovnet-detectron2 + +## Xception [[xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/xception.py)] +* Paper: `Xception: Deep Learning with Depthwise Separable Convolutions` - https://arxiv.org/abs/1610.02357 +* Code:
https://github.com/Cadene/pretrained-models.pytorch + +## Xception (Modified Aligned, Gluon) [[gluon_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/gluon_xception.py)] +* Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611 +* Reference code: https://github.com/dmlc/gluon-cv/tree/master/gluoncv/model_zoo, https://github.com/jfzhang95/pytorch-deeplab-xception/ + +## Xception (Modified Aligned, TF) [[aligned_xception.py](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/aligned_xception.py)] +* Paper: `Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation` - https://arxiv.org/abs/1802.02611 +* Reference code: https://github.com/tensorflow/models/tree/master/research/deeplab diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.pages b/PyTorch/contrib/cv/classification/convmixer/docs/models/.pages new file mode 100644 index 0000000000..c3c88fc4d9 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.pages @@ -0,0 +1 @@ +title: Model Pages \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/code_snippets.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/code_snippets.md new file mode 100644 index 0000000000..1dbea838ff --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/code_snippets.md @@ -0,0 +1,62 @@ +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('{{ model_name }}', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `{{ model_name }}`. You can find the IDs in the model summaries at the top of this page. 
+ +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('{{ model_name }}', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/generate_readmes.py b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/generate_readmes.py new file mode 100644 index 0000000000..0cdefd0d55 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/generate_readmes.py @@ -0,0 +1,64 @@ +""" +Run this script to generate the model-index files in `models` from the templates in `.templates/models`. +""" + +import argparse +from pathlib import Path + +from jinja2 import Environment, FileSystemLoader + +import modelindex + + +def generate_readmes(templates_path: Path, dest_path: Path): + """Add the code snippet template to the readmes""" + readme_templates_path = templates_path / "models" + code_template_path = templates_path / "code_snippets.md" + + env = Environment( + loader=FileSystemLoader([readme_templates_path, readme_templates_path.parent]), + ) + + for readme in readme_templates_path.iterdir(): + if readme.suffix == ".md": + template = env.get_template(readme.name) + + # get the first model_name for this model family + mi = modelindex.load(str(readme)) + model_name = mi.models[0].name + + full_content = template.render(model_name=model_name) + + # generate full_readme + with open(dest_path / readme.name, "w") as f: + f.write(full_content) + + +def main(): + parser = argparse.ArgumentParser(description="Model index generation config") + parser.add_argument( + "-t", + "--templates", + default=Path(__file__).parent / ".templates", + type=str, + help="Location of the markdown templates", + ) + parser.add_argument( + "-d", + "--dest", + default=Path(__file__).parent / "models", + type=str, + help="Destination folder that contains the generated model-index files.", + ) + args = parser.parse_args() + templates_path = Path(args.templates) + dest_readmes_path = Path(args.dest) + + generate_readmes( + templates_path, + dest_readmes_path, + ) + + +if __name__ == "__main__": + main() diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/adversarial-inception-v3.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/adversarial-inception-v3.md new file mode 100644 index 0000000000..b4aa036121 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/adversarial-inception-v3.md @@ -0,0 +1,98 @@ +# Adversarial Inception v3 + +**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the sidehead).
The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module). + +This particular model was trained for study of adversarial examples (adversarial training). + +The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1804-00097, + author = {Alexey Kurakin and + Ian J. Goodfellow and + Samy Bengio and + Yinpeng Dong and + Fangzhou Liao and + Ming Liang and + Tianyu Pang and + Jun Zhu and + Xiaolin Hu and + Cihang Xie and + Jianyu Wang and + Zhishuai Zhang and + Zhou Ren and + Alan L. Yuille and + Sangxia Huang and + Yao Zhao and + Yuzhe Zhao and + Zhonglin Han and + Junjiajia Long and + Yerkebulan Berdibekov and + Takuya Akiba and + Seiya Tokui and + Motoki Abe}, + title = {Adversarial Attacks and Defences Competition}, + journal = {CoRR}, + volume = {abs/1804.00097}, + year = {2018}, + url = {http://arxiv.org/abs/1804.00097}, + archivePrefix = {arXiv}, + eprint = {1804.00097}, + timestamp = {Thu, 31 Oct 2019 16:31:22 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-1804-00097.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/advprop.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/advprop.md new file mode 100644 index 0000000000..c204d871af --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/advprop.md @@ -0,0 +1,457 @@ +# AdvProp (EfficientNet) + +**AdvProp** is an adversarial training scheme which treats adversarial examples as additional examples, to prevent overfitting. Key to the method is the usage of a separate auxiliary batch norm for adversarial examples, as they have different underlying distributions to normal examples. + +The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{xie2020adversarial, + title={Adversarial Examples Improve Image Recognition}, + author={Cihang Xie and Mingxing Tan and Boqing Gong and Jiang Wang and Alan Yuille and Quoc V. Le}, + year={2020}, + eprint={1911.09665}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/big-transfer.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/big-transfer.md new file mode 100644 index 0000000000..b593b41aae --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/big-transfer.md @@ -0,0 +1,295 @@ +# Big Transfer (BiT) + +**Big Transfer (BiT)** is a type of pretraining recipe that pre-trains on a large supervised source dataset, and fine-tunes the weights on the target task. Models are trained on the JFT-300M dataset. The finetuned models contained in this collection are finetuned on ImageNet. + +{% include 'code_snippets.md' %} + +## How do I train this model? 
+ +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{kolesnikov2020big, + title={Big Transfer (BiT): General Visual Representation Learning}, + author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, + year={2020}, + eprint={1912.11370}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/csp-darknet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/csp-darknet.md new file mode 100644 index 0000000000..b6ab42d127 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/csp-darknet.md @@ -0,0 +1,81 @@ +# CSP-DarkNet + +**CSPDarknet53** is a convolutional neural network and backbone for object detection that uses [DarkNet-53](https://paperswithcode.com/method/darknet-53). It employs a CSPNet strategy to partition the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network. + +This CNN is used as the backbone for [YOLOv4](https://paperswithcode.com/method/yolov4). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{bochkovskiy2020yolov4, + title={YOLOv4: Optimal Speed and Accuracy of Object Detection}, + author={Alexey Bochkovskiy and Chien-Yao Wang and Hong-Yuan Mark Liao}, + year={2020}, + eprint={2004.10934}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/csp-resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/csp-resnet.md new file mode 100644 index 0000000000..228faa0c20 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/csp-resnet.md @@ -0,0 +1,76 @@ +# CSP-ResNet + +**CSPResNet** is a convolutional neural network where we apply the Cross Stage Partial Network (CSPNet) approach to [ResNet](https://paperswithcode.com/method/resnet). The CSPNet partitions the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
+ +## Citation + +```BibTeX +@misc{wang2019cspnet, + title={CSPNet: A New Backbone that can Enhance Learning Capability of CNN}, + author={Chien-Yao Wang and Hong-Yuan Mark Liao and I-Hau Yeh and Yueh-Hua Wu and Ping-Yang Chen and Jun-Wei Hsieh}, + year={2019}, + eprint={1911.11929}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/csp-resnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/csp-resnext.md new file mode 100644 index 0000000000..cea88183df --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/csp-resnext.md @@ -0,0 +1,77 @@ +# CSP-ResNeXt + +**CSPResNeXt** is a convolutional neural network where we apply the Cross Stage Partial Network (CSPNet) approach to [ResNeXt](https://paperswithcode.com/method/resnext). The CSPNet partitions the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{wang2019cspnet, + title={CSPNet: A New Backbone that can Enhance Learning Capability of CNN}, + author={Chien-Yao Wang and Hong-Yuan Mark Liao and I-Hau Yeh and Yueh-Hua Wu and Ping-Yang Chen and Jun-Wei Hsieh}, + year={2019}, + eprint={1911.11929}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/densenet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/densenet.md new file mode 100644 index 0000000000..9db9f32407 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/densenet.md @@ -0,0 +1,305 @@ +# DenseNet + +**DenseNet** is a type of convolutional neural network that utilises dense connections between layers, through [Dense Blocks](http://www.paperswithcode.com/method/dense-block), where we connect *all layers* (with matching feature-map sizes) directly with each other. To preserve the feed-forward nature, each layer obtains additional inputs from all preceding layers and passes on its own feature-maps to all subsequent layers. + +The **DenseNet Blur** variant in this collection by Ross Wightman employs [Blur Pooling](http://www.paperswithcode.com/method/blur-pooling) + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/HuangLW16a, + author = {Gao Huang and + Zhuang Liu and + Kilian Q. 
Weinberger}, + title = {Densely Connected Convolutional Networks}, + journal = {CoRR}, + volume = {abs/1608.06993}, + year = {2016}, + url = {http://arxiv.org/abs/1608.06993}, + archivePrefix = {arXiv}, + eprint = {1608.06993}, + timestamp = {Mon, 10 Sep 2018 15:49:32 +0200}, + biburl = {https://dblp.org/rec/journals/corr/HuangLW16a.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + +``` +@misc{rw2019timm, + author = {Ross Wightman}, + title = {PyTorch Image Models}, + year = {2019}, + publisher = {GitHub}, + journal = {GitHub repository}, + doi = {10.5281/zenodo.4414861}, + howpublished = {\url{https://github.com/rwightman/pytorch-image-models}} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/dla.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/dla.md new file mode 100644 index 0000000000..7f860a5663 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/dla.md @@ -0,0 +1,545 @@ +# Deep Layer Aggregation + +Extending “shallow” skip connections, **Deep Layer Aggregation (DLA)** incorporates more depth and sharing. The authors introduce two structures for deep layer aggregation (DLA): iterative deep aggregation (IDA) and hierarchical deep aggregation (HDA). These structures are expressed through an architectural framework, independent of the choice of backbone, for compatibility with current and future networks. + +IDA focuses on fusing resolutions and scales while HDA focuses on merging features from all modules and channels. IDA follows the base hierarchy to refine resolution and aggregate scale stage-by-stage. HDA assembles its own hierarchy of tree-structured connections that cross and merge stages to aggregate different levels of representation. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{yu2019deep, + title={Deep Layer Aggregation}, + author={Fisher Yu and Dequan Wang and Evan Shelhamer and Trevor Darrell}, + year={2019}, + eprint={1707.06484}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/dpn.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/dpn.md new file mode 100644 index 0000000000..460ee78ff5 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/dpn.md @@ -0,0 +1,256 @@ +# Dual Path Network (DPN) + +A **Dual Path Network (DPN)** is a convolutional neural network which presents a new topology of connection paths internally. The intuition is that [ResNets](https://paperswithcode.com/method/resnet) enable feature re-usage while DenseNet enables new feature exploration, and both are important for learning good representations. To enjoy the benefits from both path topologies, Dual Path Networks share common features while maintaining the flexibility to explore new features through dual path architectures. + +The principal building block is a [DPN Block](https://paperswithcode.com/method/dpn-block). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
+ +## Citation + +```BibTeX +@misc{chen2017dual, + title={Dual Path Networks}, + author={Yunpeng Chen and Jianan Li and Huaxin Xiao and Xiaojie Jin and Shuicheng Yan and Jiashi Feng}, + year={2017}, + eprint={1707.01629}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ecaresnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ecaresnet.md new file mode 100644 index 0000000000..126aaaccc3 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ecaresnet.md @@ -0,0 +1,236 @@ +# ECA-ResNet + +An **ECA ResNet** is a variant on a [ResNet](https://paperswithcode.com/method/resnet) that utilises an [Efficient Channel Attention module](https://paperswithcode.com/method/efficient-channel-attention). Efficient Channel Attention is an architectural unit based on [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) that reduces model complexity without dimensionality reduction. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{wang2020ecanet, + title={ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks}, + author={Qilong Wang and Banggu Wu and Pengfei Zhu and Peihua Li and Wangmeng Zuo and Qinghua Hu}, + year={2020}, + eprint={1910.03151}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/efficientnet-pruned.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/efficientnet-pruned.md new file mode 100644 index 0000000000..1742f12777 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/efficientnet-pruned.md @@ -0,0 +1,145 @@ +# EfficientNet (Knapsack Pruned) + +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scale network width, depth, and resolution in a principled way. + +The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. + +The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). + +This collection consists of pruned EfficientNet models. + +{% include 'code_snippets.md' %} + +## How do I train this model?
+ +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2020efficientnet, + title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, + author={Mingxing Tan and Quoc V. Le}, + year={2020}, + eprint={1905.11946}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + +``` +@misc{aflalo2020knapsack, + title={Knapsack Pruning with Inner Distillation}, + author={Yonathan Aflalo and Asaf Noy and Ming Lin and Itamar Friedman and Lihi Zelnik}, + year={2020}, + eprint={2002.08258}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/efficientnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/efficientnet.md new file mode 100644 index 0000000000..729df0cacd --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/efficientnet.md @@ -0,0 +1,325 @@ +# EfficientNet + +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scale network width, depth, and resolution in a principled way. + +The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. + +The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2020efficientnet, + title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, + author={Mingxing Tan and Quoc V.
Le}, + year={2020}, + eprint={1905.11946}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ensemble-adversarial.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ensemble-adversarial.md new file mode 100644 index 0000000000..e21fe51e9d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ensemble-adversarial.md @@ -0,0 +1,98 @@ +# Ensemble Adversarial Inception ResNet v2 + +**Inception-ResNet-v2** is a convolutional neural architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture). + +This particular model was trained for study of adversarial examples (adversarial training). + +The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1804-00097, + author = {Alexey Kurakin and + Ian J. Goodfellow and + Samy Bengio and + Yinpeng Dong and + Fangzhou Liao and + Ming Liang and + Tianyu Pang and + Jun Zhu and + Xiaolin Hu and + Cihang Xie and + Jianyu Wang and + Zhishuai Zhang and + Zhou Ren and + Alan L. Yuille and + Sangxia Huang and + Yao Zhao and + Yuzhe Zhao and + Zhonglin Han and + Junjiajia Long and + Yerkebulan Berdibekov and + Takuya Akiba and + Seiya Tokui and + Motoki Abe}, + title = {Adversarial Attacks and Defences Competition}, + journal = {CoRR}, + volume = {abs/1804.00097}, + year = {2018}, + url = {http://arxiv.org/abs/1804.00097}, + archivePrefix = {arXiv}, + eprint = {1804.00097}, + timestamp = {Thu, 31 Oct 2019 16:31:22 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-1804-00097.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ese-vovnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ese-vovnet.md new file mode 100644 index 0000000000..5f942f00f2 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ese-vovnet.md @@ -0,0 +1,92 @@ +# ESE-VoVNet + +**VoVNet** is a convolutional neural network that seeks to make [DenseNet](https://paperswithcode.com/method/densenet) more efficient by concatenating all features only once in the last feature map, which makes input size constant and enables enlarging the new output channel. + +Read about [one-shot aggregation here](https://paperswithcode.com/method/one-shot-aggregation). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
+
+## Citation
+
+```BibTeX
+@misc{lee2019energy,
+    title={An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection},
+    author={Youngwan Lee and Joong-won Hwang and Sangrok Lee and Yuseok Bae and Jongyoul Park},
+    year={2019},
+    eprint={1904.09730},
+    archivePrefix={arXiv},
+    primaryClass={cs.CV}
+}
+```
+
diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/fbnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/fbnet.md
new file mode 100644
index 0000000000..0a6de412cb
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/fbnet.md
@@ -0,0 +1,76 @@
+# FBNet
+
+**FBNet** is a family of convolutional neural architectures discovered through [DNAS](https://paperswithcode.com/method/dnas) neural architecture search. It utilises a basic type of image model block inspired by [MobileNetV2](https://paperswithcode.com/method/mobilenetv2) that uses depthwise convolutions and an inverted residual structure.
+
+The principal building block is the [FBNet Block](https://paperswithcode.com/method/fbnet-block).
+
+{% include 'code_snippets.md' %}
+
+## How do I train this model?
+
+You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
+
+## Citation
+
+```BibTeX
+@misc{wu2019fbnet,
+    title={FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search},
+    author={Bichen Wu and Xiaoliang Dai and Peizhao Zhang and Yanghan Wang and Fei Sun and Yiming Wu and Yuandong Tian and Peter Vajda and Yangqing Jia and Kurt Keutzer},
+    year={2019},
+    eprint={1812.03443},
+    archivePrefix={arXiv},
+    primaryClass={cs.CV}
+}
+```
+
diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-inception-v3.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-inception-v3.md
new file mode 100644
index 0000000000..90e25b911c
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-inception-v3.md
@@ -0,0 +1,78 @@
+# (Gluon) Inception v3
+
+**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements, including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the side head). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).
+
+The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html).
+
+{% include 'code_snippets.md' %}
+
+## How do I train this model?
+
+You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
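+
+As a hedged sketch, the evaluation transform that `timm` associates with the ported weights can be resolved from the model's default configuration; the registered name `gluon_inception_v3` is an assumption, and Inception v3 models conventionally expect 299x299 inputs.
+
+```python
+import timm
+from timm.data import resolve_data_config, create_transform
+
+# Sketch: resolve the preprocessing config attached to the (assumed) model name.
+model = timm.create_model('gluon_inception_v3', pretrained=False)
+config = resolve_data_config({}, model=model)
+transform = create_transform(**config)
+print(config['input_size'])  # typically (3, 299, 299) for Inception v3
+```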
+ +## Citation + +```BibTeX +@article{DBLP:journals/corr/SzegedyVISW15, + author = {Christian Szegedy and + Vincent Vanhoucke and + Sergey Ioffe and + Jonathon Shlens and + Zbigniew Wojna}, + title = {Rethinking the Inception Architecture for Computer Vision}, + journal = {CoRR}, + volume = {abs/1512.00567}, + year = {2015}, + url = {http://arxiv.org/abs/1512.00567}, + archivePrefix = {arXiv}, + eprint = {1512.00567}, + timestamp = {Mon, 13 Aug 2018 16:49:07 +0200}, + biburl = {https://dblp.org/rec/journals/corr/SzegedyVISW15.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-resnet.md new file mode 100644 index 0000000000..a66a658c22 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-resnet.md @@ -0,0 +1,504 @@ +# (Gluon) ResNet + +**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) ontop of each other to form network: e.g. a ResNet-50 has fifty layers using these blocks. + +The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/HeZRS15, + author = {Kaiming He and + Xiangyu Zhang and + Shaoqing Ren and + Jian Sun}, + title = {Deep Residual Learning for Image Recognition}, + journal = {CoRR}, + volume = {abs/1512.03385}, + year = {2015}, + url = {http://arxiv.org/abs/1512.03385}, + archivePrefix = {arXiv}, + eprint = {1512.03385}, + timestamp = {Wed, 17 Apr 2019 17:23:45 +0200}, + biburl = {https://dblp.org/rec/journals/corr/HeZRS15.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-resnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-resnext.md new file mode 100644 index 0000000000..b41353f07d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-resnext.md @@ -0,0 +1,142 @@ +# (Gluon) ResNeXt + +A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. + +The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/XieGDTH16, + author = {Saining Xie and + Ross B. 
Girshick and + Piotr Doll{\'{a}}r and + Zhuowen Tu and + Kaiming He}, + title = {Aggregated Residual Transformations for Deep Neural Networks}, + journal = {CoRR}, + volume = {abs/1611.05431}, + year = {2016}, + url = {http://arxiv.org/abs/1611.05431}, + archivePrefix = {arXiv}, + eprint = {1611.05431}, + timestamp = {Mon, 13 Aug 2018 16:45:58 +0200}, + biburl = {https://dblp.org/rec/journals/corr/XieGDTH16.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-senet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-senet.md new file mode 100644 index 0000000000..281a782fb9 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-senet.md @@ -0,0 +1,63 @@ +# (Gluon) SENet + +A **SENet** is a convolutional neural network architecture that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-seresnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-seresnext.md new file mode 100644 index 0000000000..d0f2de01b6 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-seresnext.md @@ -0,0 +1,136 @@ +# (Gluon) SE-ResNeXt + +**SE ResNeXt** is a variant of a [ResNext](https://www.paperswithcode.com/method/resnext) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
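+
+To see which Gluon-ported SE-ResNeXt variants a given `timm` installation actually registers, the model registry can be queried with a wildcard; the `gluon_seresnext*` prefix below is an assumption about the naming scheme.
+
+```python
+import timm
+
+# Enumerate the (assumed) Gluon SE-ResNeXt model names that ship pretrained weights.
+for name in timm.list_models('gluon_seresnext*', pretrained=True):
+    print(name)
+```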
+ +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-xception.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-xception.md new file mode 100644 index 0000000000..9dfc773aa4 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/gloun-xception.md @@ -0,0 +1,66 @@ +# (Gluon) Xception + +**Xception** is a convolutional neural network architecture that relies solely on [depthwise separable convolution](https://paperswithcode.com/method/depthwise-separable-convolution) layers. + +The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{chollet2017xception, + title={Xception: Deep Learning with Depthwise Separable Convolutions}, + author={François Chollet}, + year={2017}, + eprint={1610.02357}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/hrnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/hrnet.md new file mode 100644 index 0000000000..ab496f3db3 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/hrnet.md @@ -0,0 +1,358 @@ +# HRNet + +**HRNet**, or **High-Resolution Net**, is a general purpose convolutional neural network for tasks like semantic segmentation, object detection and image classification. It is able to maintain high resolution representations through the whole process. We start from a high-resolution convolution stream, gradually add high-to-low resolution convolution streams one by one, and connect the multi-resolution streams in parallel. The resulting network consists of several ($4$ in the paper) stages and the $n$th stage contains $n$ streams corresponding to $n$ resolutions. The authors conduct repeated multi-resolution fusions by exchanging the information across the parallel streams over and over. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
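+
+Since the defining property of HRNet is that it keeps several resolutions alive in parallel, a feature-extraction sketch is a natural illustration; `hrnet_w18` is an assumed model name, and `features_only=True` asks `timm` for a backbone that returns a pyramid of feature maps.
+
+```python
+import torch
+import timm
+
+# Sketch: extract multi-resolution feature maps from an (assumed) HRNet backbone.
+backbone = timm.create_model('hrnet_w18', pretrained=False, features_only=True)
+backbone.eval()
+
+with torch.no_grad():
+    features = backbone(torch.randn(1, 3, 224, 224))
+for f in features:
+    print(f.shape)  # one tensor per resolution level
+```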
+
+## Citation
+
+```BibTeX
+@misc{sun2019highresolution,
+    title={High-Resolution Representations for Labeling Pixels and Regions},
+    author={Ke Sun and Yang Zhao and Borui Jiang and Tianheng Cheng and Bin Xiao and Dong Liu and Yadong Mu and Xinggang Wang and Wenyu Liu and Jingdong Wang},
+    year={2019},
+    eprint={1904.04514},
+    archivePrefix={arXiv},
+    primaryClass={cs.CV}
+}
+```
+
diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ig-resnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ig-resnext.md
new file mode 100644
index 0000000000..6a317b2da7
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ig-resnext.md
@@ -0,0 +1,209 @@
+# Instagram ResNeXt WSL
+
+A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width.
+
+This model was trained on billions of Instagram images, using thousands of distinct hashtags as labels, and exhibits excellent transfer learning performance.
+
+Please note the CC-BY-NC 4.0 license on these weights: non-commercial use only.
+
+{% include 'code_snippets.md' %}
+
+## How do I train this model?
+
+You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
+
+## Citation
+
+```BibTeX
+@misc{mahajan2018exploring,
+    title={Exploring the Limits of Weakly Supervised Pretraining},
+    author={Dhruv Mahajan and Ross Girshick and Vignesh Ramanathan and Kaiming He and Manohar Paluri and Yixuan Li and Ashwin Bharambe and Laurens van der Maaten},
+    year={2018},
+    eprint={1805.00932},
+    archivePrefix={arXiv},
+    primaryClass={cs.CV}
+}
+```
+
diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/inception-resnet-v2.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/inception-resnet-v2.md
new file mode 100644
index 0000000000..99e09a1da3
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/inception-resnet-v2.md
@@ -0,0 +1,72 @@
+# Inception ResNet v2
+
+**Inception-ResNet-v2** is a convolutional neural architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture).
+
+{% include 'code_snippets.md' %}
+
+## How do I train this model?
+
+You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
+ +## Citation + +```BibTeX +@misc{szegedy2016inceptionv4, + title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning}, + author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi}, + year={2016}, + eprint={1602.07261}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/inception-v3.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/inception-v3.md new file mode 100644 index 0000000000..ec8f4c0def --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/inception-v3.md @@ -0,0 +1,85 @@ +# Inception v3 + +**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifer](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the sidehead). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/SzegedyVISW15, + author = {Christian Szegedy and + Vincent Vanhoucke and + Sergey Ioffe and + Jonathon Shlens and + Zbigniew Wojna}, + title = {Rethinking the Inception Architecture for Computer Vision}, + journal = {CoRR}, + volume = {abs/1512.00567}, + year = {2015}, + url = {http://arxiv.org/abs/1512.00567}, + archivePrefix = {arXiv}, + eprint = {1512.00567}, + timestamp = {Mon, 13 Aug 2018 16:49:07 +0200}, + biburl = {https://dblp.org/rec/journals/corr/SzegedyVISW15.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/inception-v4.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/inception-v4.md new file mode 100644 index 0000000000..3427cc85e4 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/inception-v4.md @@ -0,0 +1,71 @@ +# Inception v4 + +**Inception-v4** is a convolutional neural network architecture that builds on previous iterations of the Inception family by simplifying the architecture and using more inception modules than [Inception-v3](https://paperswithcode.com/method/inception-v3). +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
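+
+A minimal transfer-learning sketch, assuming `inception_v4` is the registered `timm` name: re-creating the model with a different `num_classes` replaces the classifier head with a freshly initialised one while keeping the backbone intact.
+
+```python
+import timm
+
+# Sketch: swap the 1000-way ImageNet head for a 10-way head before fine-tuning.
+# In practice pretrained=True would normally be used when fine-tuning.
+model = timm.create_model('inception_v4', pretrained=False, num_classes=10)
+print(model.get_classifier())  # newly initialised 10-way classifier layer
+```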
+ +## Citation + +```BibTeX +@misc{szegedy2016inceptionv4, + title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning}, + author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi}, + year={2016}, + eprint={1602.07261}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/legacy-se-resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/legacy-se-resnet.md new file mode 100644 index 0000000000..33f0c806c3 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/legacy-se-resnet.md @@ -0,0 +1,257 @@ +# (Legacy) SE-ResNet + +**SE ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/legacy-se-resnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/legacy-se-resnext.md new file mode 100644 index 0000000000..fd610b59e8 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/legacy-se-resnext.md @@ -0,0 +1,167 @@ +# (Legacy) SE-ResNeXt + +**SE ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/legacy-senet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/legacy-senet.md new file mode 100644 index 0000000000..2547999451 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/legacy-senet.md @@ -0,0 +1,74 @@ +# (Legacy) SENet + +A **SENet** is a convolutional neural network architecture that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +The weights from this model were ported from Gluon. + +{% include 'code_snippets.md' %} + +## How do I train this model? 
+ +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mixnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mixnet.md new file mode 100644 index 0000000000..f1e04e454d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mixnet.md @@ -0,0 +1,164 @@ +# MixNet + +**MixNet** is a type of convolutional neural network discovered via AutoML that utilises [MixConvs](https://paperswithcode.com/method/mixconv) instead of regular [depthwise convolutions](https://paperswithcode.com/method/depthwise-convolution). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2019mixconv, + title={MixConv: Mixed Depthwise Convolutional Kernels}, + author={Mingxing Tan and Quoc V. Le}, + year={2019}, + eprint={1907.09595}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mnasnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mnasnet.md new file mode 100644 index 0000000000..e9ad625f93 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mnasnet.md @@ -0,0 +1,109 @@ +# MnasNet + +**MnasNet** is a type of convolutional neural network optimized for mobile devices that is discovered through mobile neural architecture search, which explicitly incorporates model latency into the main objective so that the search can identify a model that achieves a good trade-off between accuracy and latency. The main building block is an [inverted residual block](https://paperswithcode.com/method/inverted-residual-block) (from [MobileNetV2](https://paperswithcode.com/method/mobilenetv2)). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2019mnasnet, + title={MnasNet: Platform-Aware Neural Architecture Search for Mobile}, + author={Mingxing Tan and Bo Chen and Ruoming Pang and Vijay Vasudevan and Mark Sandler and Andrew Howard and Quoc V. Le}, + year={2019}, + eprint={1807.11626}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mobilenet-v2.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mobilenet-v2.md new file mode 100644 index 0000000000..ef37f4e7ac --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mobilenet-v2.md @@ -0,0 +1,210 @@ +# MobileNet v2 + +**MobileNetV2** is a convolutional neural network architecture that seeks to perform well on mobile devices. 
It is based on an [inverted residual structure](https://paperswithcode.com/method/inverted-residual-block) where the residual connections are between the bottleneck layers. The intermediate expansion layer uses lightweight depthwise convolutions to filter features as a source of non-linearity. As a whole, the architecture of MobileNetV2 contains the initial fully convolution layer with 32 filters, followed by 19 residual bottleneck layers. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1801-04381, + author = {Mark Sandler and + Andrew G. Howard and + Menglong Zhu and + Andrey Zhmoginov and + Liang{-}Chieh Chen}, + title = {Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, + Detection and Segmentation}, + journal = {CoRR}, + volume = {abs/1801.04381}, + year = {2018}, + url = {http://arxiv.org/abs/1801.04381}, + archivePrefix = {arXiv}, + eprint = {1801.04381}, + timestamp = {Tue, 12 Jan 2021 15:30:06 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-1801-04381.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mobilenet-v3.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mobilenet-v3.md new file mode 100644 index 0000000000..8cc75b383b --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/mobilenet-v3.md @@ -0,0 +1,138 @@ +# MobileNet v3 + +**MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-02244, + author = {Andrew Howard and + Mark Sandler and + Grace Chu and + Liang{-}Chieh Chen and + Bo Chen and + Mingxing Tan and + Weijun Wang and + Yukun Zhu and + Ruoming Pang and + Vijay Vasudevan and + Quoc V. Le and + Hartwig Adam}, + title = {Searching for MobileNetV3}, + journal = {CoRR}, + volume = {abs/1905.02244}, + year = {2019}, + url = {http://arxiv.org/abs/1905.02244}, + archivePrefix = {arXiv}, + eprint = {1905.02244}, + timestamp = {Tue, 12 Jan 2021 15:30:06 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/nasnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/nasnet.md new file mode 100644 index 0000000000..5cc7e5b0da --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/nasnet.md @@ -0,0 +1,70 @@ +# NASNet + +**NASNet** is a type of convolutional neural network discovered through neural architecture search. The building blocks consist of normal and reduction cells. 
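+
+As a quick, illustrative size check (assuming `nasnetalarge` is the registered `timm` name for the large NASNet variant):
+
+```python
+import timm
+
+# Rough illustration of model size; the name 'nasnetalarge' is an assumption.
+model = timm.create_model('nasnetalarge', pretrained=False)
+n_params = sum(p.numel() for p in model.parameters())
+print(f'{n_params / 1e6:.1f}M parameters')
+```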
+ +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{zoph2018learning, + title={Learning Transferable Architectures for Scalable Image Recognition}, + author={Barret Zoph and Vijay Vasudevan and Jonathon Shlens and Quoc V. Le}, + year={2018}, + eprint={1707.07012}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/noisy-student.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/noisy-student.md new file mode 100644 index 0000000000..6525f6937f --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/noisy-student.md @@ -0,0 +1,510 @@ +# Noisy Student (EfficientNet) + +**Noisy Student Training** is a semi-supervised learning approach. It extends the idea of self-training +and distillation with the use of equal-or-larger student models and noise added to the student during learning. It has three main steps: + +1. train a teacher model on labeled images +2. use the teacher to generate pseudo labels on unlabeled images +3. train a student model on the combination of labeled images and pseudo labeled images. + +The algorithm is iterated a few times by treating the student as a teacher to relabel the unlabeled data and training a new student. + +Noisy Student Training seeks to improve on self-training and distillation in two ways. First, it makes the student larger than, or at least equal to, the teacher so the student can better learn from a larger dataset. Second, it adds noise to the student so the noised student is forced to learn harder from the pseudo labels. To noise the student, it uses input noise such as RandAugment data augmentation, and model noise such as dropout and stochastic depth during training. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{xie2020selftraining, + title={Self-training with Noisy Student improves ImageNet classification}, + author={Qizhe Xie and Minh-Thang Luong and Eduard Hovy and Quoc V. Le}, + year={2020}, + eprint={1911.04252}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/pnasnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/pnasnet.md new file mode 100644 index 0000000000..49d117ec7d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/pnasnet.md @@ -0,0 +1,71 @@ +# PNASNet + +**Progressive Neural Architecture Search**, or **PNAS**, is a method for learning the structure of convolutional neural networks (CNNs). It uses a sequential model-based optimization (SMBO) strategy, where we search the space of cell structures, starting with simple (shallow) models and progressing to complex ones, pruning out unpromising structures as we go. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
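+
+The sketch below is a deliberately tiny stand-in for the full recipe, run on random tensors only; `pnasnet5large` is an assumed model name, the 331x331 input size is the conventional PNASNet resolution, and real training should use the linked scripts with proper data loading, augmentation and schedules.
+
+```python
+import torch
+import torch.nn.functional as F
+import timm
+
+# Toy training loop on random data, standing in for the real timm recipe.
+model = timm.create_model('pnasnet5large', pretrained=False, num_classes=10)
+optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
+
+images = torch.randn(2, 3, 331, 331)
+labels = torch.randint(0, 10, (2,))
+
+model.train()
+for step in range(2):  # not a real schedule
+    optimizer.zero_grad()
+    loss = F.cross_entropy(model(images), labels)
+    loss.backward()
+    optimizer.step()
+    print(step, loss.item())
+```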
+ +## Citation + +```BibTeX +@misc{liu2018progressive, + title={Progressive Neural Architecture Search}, + author={Chenxi Liu and Barret Zoph and Maxim Neumann and Jonathon Shlens and Wei Hua and Li-Jia Li and Li Fei-Fei and Alan Yuille and Jonathan Huang and Kevin Murphy}, + year={2018}, + eprint={1712.00559}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/regnetx.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/regnetx.md new file mode 100644 index 0000000000..90e3a2d68c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/regnetx.md @@ -0,0 +1,492 @@ +# RegNetX + +**RegNetX** is a convolutional network design space with simple, regular models with parameters: depth $d$, initial width $w\_{0} > 0$, and slope $w\_{a} > 0$, and generates a different block width $u\_{j}$ for each block $j < d$. The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure): + +$$ u\_{j} = w\_{0} + w\_{a}\cdot{j} $$ + +For **RegNetX** we have additional restrictions: we set $b = 1$ (the bottleneck ratio), $12 \leq d \leq 28$, and $w\_{m} \geq 2$ (the width multiplier). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{radosavovic2020designing, + title={Designing Network Design Spaces}, + author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, + year={2020}, + eprint={2003.13678}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/regnety.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/regnety.md new file mode 100644 index 0000000000..2dc9fca91a --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/regnety.md @@ -0,0 +1,506 @@ +# RegNetY + +**RegNetY** is a convolutional network design space with simple, regular models with parameters: depth $d$, initial width $w\_{0} > 0$, and slope $w\_{a} > 0$, and generates a different block width $u\_{j}$ for each block $j < d$. The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure): + +$$ u\_{j} = w\_{0} + w\_{a}\cdot{j} $$ + +For **RegNetX** authors have additional restrictions: we set $b = 1$ (the bottleneck ratio), $12 \leq d \leq 28$, and $w\_{m} \geq 2$ (the width multiplier). + +For **RegNetY** authors make one change, which is to include [Squeeze-and-Excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
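+
+A small worked example of the linear block-width parameterisation quoted above, $u\_{j} = w\_{0} + w\_{a}\cdot{j}$; the numbers are illustrative placeholders, not a published RegNetX configuration (in the full design space these continuous widths are further quantised via the width multiplier $w\_{m}$).
+
+```python
+# Illustrative values only, not a published RegNetX configuration.
+d, w0, wa = 16, 48, 36.0
+
+# u_j = w_0 + w_a * j for each block j < d (continuous widths, before quantisation).
+widths = [w0 + wa * j for j in range(d)]
+print(widths[:4])
+```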
+ +## Citation + +```BibTeX +@misc{radosavovic2020designing, + title={Designing Network Design Spaces}, + author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, + year={2020}, + eprint={2003.13678}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/res2net.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/res2net.md new file mode 100644 index 0000000000..ce59015283 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/res2net.md @@ -0,0 +1,260 @@ +# Res2Net + +**Res2Net** is an image model that employs a variation on bottleneck residual blocks, [Res2Net Blocks](https://paperswithcode.com/method/res2net-block). The motivation is to be able to represent features at multiple scales. This is achieved through a novel building block for CNNs that constructs hierarchical residual-like connections within one single residual block. This represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{Gao_2021, + title={Res2Net: A New Multi-Scale Backbone Architecture}, + volume={43}, + ISSN={1939-3539}, + url={http://dx.doi.org/10.1109/TPAMI.2019.2938758}, + DOI={10.1109/tpami.2019.2938758}, + number={2}, + journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher={Institute of Electrical and Electronics Engineers (IEEE)}, + author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip}, + year={2021}, + month={Feb}, + pages={652–662} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/res2next.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/res2next.md new file mode 100644 index 0000000000..03ab96e8cd --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/res2next.md @@ -0,0 +1,75 @@ +# Res2NeXt + +**Res2NeXt** is an image model that employs a variation on [ResNeXt](https://paperswithcode.com/method/resnext) bottleneck residual blocks. The motivation is to be able to represent features at multiple scales. This is achieved through a novel building block for CNNs that constructs hierarchical residual-like connections within one single residual block. This represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
+
+## Citation
+
+```BibTeX
+@article{Gao_2021,
+    title={Res2Net: A New Multi-Scale Backbone Architecture},
+    volume={43},
+    ISSN={1939-3539},
+    url={http://dx.doi.org/10.1109/TPAMI.2019.2938758},
+    DOI={10.1109/tpami.2019.2938758},
+    number={2},
+    journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
+    publisher={Institute of Electrical and Electronics Engineers (IEEE)},
+    author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip},
+    year={2021},
+    month={Feb},
+    pages={652–662}
+}
+```
+
diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnest.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnest.md
new file mode 100644
index 0000000000..320aacb86e
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnest.md
@@ -0,0 +1,408 @@
+# ResNeSt
+
+A **ResNeSt** is a variant on a [ResNet](https://paperswithcode.com/method/resnet), which instead stacks [Split-Attention blocks](https://paperswithcode.com/method/split-attention). The cardinal group representations are then concatenated along the channel dimension: $V = \text{Concat}\{V^{1}, V^{2}, \cdots, V^{K}\}$. As in standard residual blocks, the final output $Y$ of the Split-Attention block is produced using a shortcut connection: $Y = V + X$, if the input and output feature maps share the same shape. For blocks with a stride, an appropriate transformation $\mathcal{T}$ is applied to the shortcut connection to align the output shapes: $Y = V + \mathcal{T}(X)$. For example, $\mathcal{T}$ can be a strided convolution or a combined convolution-with-pooling.
+
+{% include 'code_snippets.md' %}
+
+## How do I train this model?
+
+You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
+
+## Citation
+
+```BibTeX
+@misc{zhang2020resnest,
+    title={ResNeSt: Split-Attention Networks},
+    author={Hang Zhang and Chongruo Wu and Zhongyue Zhang and Yi Zhu and Haibin Lin and Zhi Zhang and Yue Sun and Tong He and Jonas Mueller and R. Manmatha and Mu Li and Alexander Smola},
+    year={2020},
+    eprint={2004.08955},
+    archivePrefix={arXiv},
+    primaryClass={cs.CV}
+}
+```
+
diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnet-d.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnet-d.md
new file mode 100644
index 0000000000..689f181c6f
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnet-d.md
@@ -0,0 +1,263 @@
+# ResNet-D
+
+**ResNet-D** is a modification of the [ResNet](https://paperswithcode.com/method/resnet) architecture that utilises an [average pooling](https://paperswithcode.com/method/average-pooling) tweak for downsampling. The motivation is that in the unmodified ResNet, the [1×1 convolution](https://paperswithcode.com/method/1x1-convolution) in the downsampling block ignores 3/4 of the input feature maps, so the block is modified so that no information is ignored.
+
+{% include 'code_snippets.md' %}
+
+## How do I train this model?
+
+You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
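+
+A hedged comparison sketch: `resnet50d` is assumed to be the `timm` name for the ResNet-D style tweak of ResNet-50, so the two variants can be built side by side to see that the tweak adds only a small parameter overhead.
+
+```python
+import timm
+
+# Build the plain and (assumed) '-D' tweaked variants and compare parameter counts.
+plain = timm.create_model('resnet50', pretrained=False)
+tweaked = timm.create_model('resnet50d', pretrained=False)
+
+def n_params(m):
+    return sum(p.numel() for p in m.parameters())
+
+print(n_params(plain), n_params(tweaked))
+```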
+ +## Citation + +```BibTeX +@misc{he2018bag, + title={Bag of Tricks for Image Classification with Convolutional Neural Networks}, + author={Tong He and Zhi Zhang and Hang Zhang and Zhongyue Zhang and Junyuan Xie and Mu Li}, + year={2018}, + eprint={1812.01187}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnet.md new file mode 100644 index 0000000000..58cd0599ed --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnet.md @@ -0,0 +1,378 @@ +# ResNet + +**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) ontop of each other to form network: e.g. a ResNet-50 has fifty layers using these blocks. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/HeZRS15, + author = {Kaiming He and + Xiangyu Zhang and + Shaoqing Ren and + Jian Sun}, + title = {Deep Residual Learning for Image Recognition}, + journal = {CoRR}, + volume = {abs/1512.03385}, + year = {2015}, + url = {http://arxiv.org/abs/1512.03385}, + archivePrefix = {arXiv}, + eprint = {1512.03385}, + timestamp = {Wed, 17 Apr 2019 17:23:45 +0200}, + biburl = {https://dblp.org/rec/journals/corr/HeZRS15.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnext.md new file mode 100644 index 0000000000..e2dcbb863b --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/resnext.md @@ -0,0 +1,183 @@ +# ResNeXt + +A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/XieGDTH16, + author = {Saining Xie and + Ross B. 
Girshick and + Piotr Doll{\'{a}}r and + Zhuowen Tu and + Kaiming He}, + title = {Aggregated Residual Transformations for Deep Neural Networks}, + journal = {CoRR}, + volume = {abs/1611.05431}, + year = {2016}, + url = {http://arxiv.org/abs/1611.05431}, + archivePrefix = {arXiv}, + eprint = {1611.05431}, + timestamp = {Mon, 13 Aug 2018 16:45:58 +0200}, + biburl = {https://dblp.org/rec/journals/corr/XieGDTH16.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/rexnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/rexnet.md new file mode 100644 index 0000000000..602f5c493d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/rexnet.md @@ -0,0 +1,197 @@ +# RexNet + +**Rank Expansion Networks** (ReXNets) follow a set of new design principles for designing bottlenecks in image classification models. Authors refine each layer by 1) expanding the input channel size of the convolution layer and 2) replacing the [ReLU6s](https://www.paperswithcode.com/method/relu6). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{han2020rexnet, + title={ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network}, + author={Dongyoon Han and Sangdoo Yun and Byeongho Heo and YoungJoon Yoo}, + year={2020}, + eprint={2007.00992}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/se-resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/se-resnet.md new file mode 100644 index 0000000000..e115549202 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/se-resnet.md @@ -0,0 +1,122 @@ +# SE-ResNet + +**SE ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/selecsls.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/selecsls.md new file mode 100644 index 0000000000..9a359e6492 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/selecsls.md @@ -0,0 +1,136 @@ +# SelecSLS + +**SelecSLS** uses novel selective long and short range skip connections to improve the information flow allowing for a drastically faster network without compromising accuracy. + +{% include 'code_snippets.md' %} + +## How do I train this model? 
+ +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{Mehta_2020, + title={XNect}, + volume={39}, + ISSN={1557-7368}, + url={http://dx.doi.org/10.1145/3386569.3392410}, + DOI={10.1145/3386569.3392410}, + number={4}, + journal={ACM Transactions on Graphics}, + publisher={Association for Computing Machinery (ACM)}, + author={Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian}, + year={2020}, + month={Jul} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/seresnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/seresnext.md new file mode 100644 index 0000000000..41be0451a6 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/seresnext.md @@ -0,0 +1,167 @@ +# SE-ResNeXt + +**SE ResNeXt** is a variant of a [ResNext](https://www.paperswithcode.com/method/resneXt) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/skresnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/skresnet.md new file mode 100644 index 0000000000..3df53b0335 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/skresnet.md @@ -0,0 +1,112 @@ +# SK-ResNet + +**SK ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNet are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
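+
+An inference sketch on a random tensor (a real image would first be preprocessed with the transform from `timm.data.create_transform`); the model name `skresnet18` is an assumption.
+
+```python
+import torch
+import timm
+
+# Sketch: forward pass and top-5 class indices for an (assumed) SK-ResNet-18.
+model = timm.create_model('skresnet18', pretrained=False)
+model.eval()
+
+with torch.no_grad():
+    logits = model(torch.randn(1, 3, 224, 224))
+probs = logits.softmax(dim=-1)
+top5 = probs.topk(5)
+print(top5.indices, top5.values)
+```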
+ +## Citation + +```BibTeX +@misc{li2019selective, + title={Selective Kernel Networks}, + author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang}, + year={2019}, + eprint={1903.06586}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/skresnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/skresnext.md new file mode 100644 index 0000000000..06e98b06dd --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/skresnext.md @@ -0,0 +1,70 @@ +# SK-ResNeXt + +**SK ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNext are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{li2019selective, + title={Selective Kernel Networks}, + author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang}, + year={2019}, + eprint={1903.06586}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/spnasnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/spnasnet.md new file mode 100644 index 0000000000..99cc96126b --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/spnasnet.md @@ -0,0 +1,62 @@ +# SPNASNet + +**Single-Path NAS** is a novel differentiable NAS method for designing hardware-efficient ConvNets in less than 4 hours. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{stamoulis2019singlepath, + title={Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours}, + author={Dimitrios Stamoulis and Ruizhou Ding and Di Wang and Dimitrios Lymberopoulos and Bodhi Priyantha and Jie Liu and Diana Marculescu}, + year={2019}, + eprint={1904.02877}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ssl-resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ssl-resnet.md new file mode 100644 index 0000000000..9d4392db8d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ssl-resnet.md @@ -0,0 +1,131 @@ +# SSL ResNet + +**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) ontop of each other to form network: e.g. a ResNet-50 has fifty layers using these blocks. 
+ +The model in this collection utilises semi-supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. + +Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-00546, + author = {I. Zeki Yalniz and + Herv{\'{e}} J{\'{e}}gou and + Kan Chen and + Manohar Paluri and + Dhruv Mahajan}, + title = {Billion-scale semi-supervised learning for image classification}, + journal = {CoRR}, + volume = {abs/1905.00546}, + year = {2019}, + url = {http://arxiv.org/abs/1905.00546}, + archivePrefix = {arXiv}, + eprint = {1905.00546}, + timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ssl-resnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ssl-resnext.md new file mode 100644 index 0000000000..c540e78a4e --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/ssl-resnext.md @@ -0,0 +1,217 @@ +# SSL ResNeXT + +A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. + +The model in this collection utilises semi-supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. + +Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-00546, + author = {I. 
Zeki Yalniz and + Herv{\'{e}} J{\'{e}}gou and + Kan Chen and + Manohar Paluri and + Dhruv Mahajan}, + title = {Billion-scale semi-supervised learning for image classification}, + journal = {CoRR}, + volume = {abs/1905.00546}, + year = {2019}, + url = {http://arxiv.org/abs/1905.00546}, + archivePrefix = {arXiv}, + eprint = {1905.00546}, + timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/swsl-resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/swsl-resnet.md new file mode 100644 index 0000000000..6d76422690 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/swsl-resnet.md @@ -0,0 +1,131 @@ +# SWSL ResNet + +**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) ontop of each other to form network: e.g. a ResNet-50 has fifty layers using these blocks. + +The models in this collection utilise semi-weakly supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. + +Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-00546, + author = {I. Zeki Yalniz and + Herv{\'{e}} J{\'{e}}gou and + Kan Chen and + Manohar Paluri and + Dhruv Mahajan}, + title = {Billion-scale semi-supervised learning for image classification}, + journal = {CoRR}, + volume = {abs/1905.00546}, + year = {2019}, + url = {http://arxiv.org/abs/1905.00546}, + archivePrefix = {arXiv}, + eprint = {1905.00546}, + timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/swsl-resnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/swsl-resnext.md new file mode 100644 index 0000000000..cdc59511fe --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/swsl-resnext.md @@ -0,0 +1,217 @@ +# SWSL ResNeXt + +A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. + +The models in this collection utilise semi-weakly supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. 
+ +Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-00546, + author = {I. Zeki Yalniz and + Herv{\'{e}} J{\'{e}}gou and + Kan Chen and + Manohar Paluri and + Dhruv Mahajan}, + title = {Billion-scale semi-supervised learning for image classification}, + journal = {CoRR}, + volume = {abs/1905.00546}, + year = {2019}, + url = {http://arxiv.org/abs/1905.00546}, + archivePrefix = {arXiv}, + eprint = {1905.00546}, + timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-efficientnet-condconv.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-efficientnet-condconv.md new file mode 100644 index 0000000000..a70cc8fa83 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-efficientnet-condconv.md @@ -0,0 +1,189 @@ +# (Tensorflow) EfficientNet CondConv + +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scales network width, depth, and resolution in a principled way. + +The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. + +The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to squeeze-and-excitation blocks. + +This collection of models amends EfficientNet by adding [CondConv](https://paperswithcode.com/method/condconv) convolutions. + +The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1904-04971, + author = {Brandon Yang and + Gabriel Bender and + Quoc V. 
Le and + Jiquan Ngiam}, + title = {Soft Conditional Computation}, + journal = {CoRR}, + volume = {abs/1904.04971}, + year = {2019}, + url = {http://arxiv.org/abs/1904.04971}, + archivePrefix = {arXiv}, + eprint = {1904.04971}, + timestamp = {Thu, 25 Apr 2019 13:55:01 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1904-04971.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-efficientnet-lite.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-efficientnet-lite.md new file mode 100644 index 0000000000..deb35d94e4 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-efficientnet-lite.md @@ -0,0 +1,195 @@ +# (Tensorflow) EfficientNet Lite + +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scales network width, depth, and resolution in a principled way. + +The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. + +The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2). + +EfficientNet-Lite makes EfficientNet more suitable for mobile devices by introducing [ReLU6](https://paperswithcode.com/method/relu6) activation functions and removing [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation). + +The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2020efficientnet, + title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, + author={Mingxing Tan and Quoc V. Le}, + year={2020}, + eprint={1905.11946}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-efficientnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-efficientnet.md new file mode 100644 index 0000000000..473dd6839b --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-efficientnet.md @@ -0,0 +1,602 @@ +# (Tensorflow) EfficientNet + +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. 
Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scale network width, depth, and resolution in a principled way. + +The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. + +The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). + +The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2020efficientnet, + title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, + author={Mingxing Tan and Quoc V. Le}, + year={2020}, + eprint={1905.11946}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-inception-v3.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-inception-v3.md new file mode 100644 index 0000000000..9f140b02ef --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-inception-v3.md @@ -0,0 +1,87 @@ +# (Tensorflow) Inception v3 + +**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the side head). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module). + +The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
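+
+If you would rather write your own loop than adapt the training script, a minimal sketch is shown below. It is illustrative only: the `./data/train` path and the hyperparameters are placeholders, not the recipe used to produce the pretrained weights.
+
+```python
+import timm
+import torch
+from timm.data import resolve_data_config
+from timm.data.transforms_factory import create_transform
+from torch.utils.data import DataLoader
+from torchvision import datasets
+
+model = timm.create_model('tf_inception_v3', pretrained=True, num_classes=10)
+
+# Reuse the model's preprocessing configuration for the training transforms
+config = resolve_data_config({}, model=model)
+transform = create_transform(**config, is_training=True)
+
+dataset = datasets.ImageFolder('./data/train', transform=transform)  # placeholder dataset path
+loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
+
+optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
+criterion = torch.nn.CrossEntropyLoss()
+
+model.train()
+for epoch in range(5):  # short run for illustration
+    for images, targets in loader:
+        optimizer.zero_grad()
+        loss = criterion(model(images), targets)
+        loss.backward()
+        optimizer.step()
+```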
+ +## Citation + +```BibTeX +@article{DBLP:journals/corr/SzegedyVISW15, + author = {Christian Szegedy and + Vincent Vanhoucke and + Sergey Ioffe and + Jonathon Shlens and + Zbigniew Wojna}, + title = {Rethinking the Inception Architecture for Computer Vision}, + journal = {CoRR}, + volume = {abs/1512.00567}, + year = {2015}, + url = {http://arxiv.org/abs/1512.00567}, + archivePrefix = {arXiv}, + eprint = {1512.00567}, + timestamp = {Mon, 13 Aug 2018 16:49:07 +0200}, + biburl = {https://dblp.org/rec/journals/corr/SzegedyVISW15.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-mixnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-mixnet.md new file mode 100644 index 0000000000..1cff51101a --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-mixnet.md @@ -0,0 +1,133 @@ +# (Tensorflow) MixNet + +**MixNet** is a type of convolutional neural network discovered via AutoML that utilises [MixConvs](https://paperswithcode.com/method/mixconv) instead of regular [depthwise convolutions](https://paperswithcode.com/method/depthwise-convolution). + +The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2019mixconv, + title={MixConv: Mixed Depthwise Convolutional Kernels}, + author={Mingxing Tan and Quoc V. Le}, + year={2019}, + eprint={1907.09595}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-mobilenet-v3.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-mobilenet-v3.md new file mode 100644 index 0000000000..5e93db8586 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tf-mobilenet-v3.md @@ -0,0 +1,320 @@ +# (Tensorflow) MobileNet v3 + +**MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block). + +The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-02244, + author = {Andrew Howard and + Mark Sandler and + Grace Chu and + Liang{-}Chieh Chen and + Bo Chen and + Mingxing Tan and + Weijun Wang and + Yukun Zhu and + Ruoming Pang and + Vijay Vasudevan and + Quoc V. 
Le and + Hartwig Adam}, + title = {Searching for MobileNetV3}, + journal = {CoRR}, + volume = {abs/1905.02244}, + year = {2019}, + url = {http://arxiv.org/abs/1905.02244}, + archivePrefix = {arXiv}, + eprint = {1905.02244}, + timestamp = {Tue, 12 Jan 2021 15:30:06 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tresnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tresnet.md new file mode 100644 index 0000000000..2ecf677bd1 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/tresnet.md @@ -0,0 +1,291 @@ +# TResNet + +A **TResNet** is a variant on a [ResNet](https://paperswithcode.com/method/resnet) that aims to boost accuracy while maintaining GPU training and inference efficiency. It contains several design tricks including a SpaceToDepth stem, [Anti-Alias downsampling](https://paperswithcode.com/method/anti-alias-downsampling), In-Place Activated BatchNorm, block selection and [squeeze-and-excitation layers](https://paperswithcode.com/method/squeeze-and-excitation-block). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{ridnik2020tresnet, + title={TResNet: High Performance GPU-Dedicated Architecture}, + author={Tal Ridnik and Hussam Lawen and Asaf Noy and Emanuel Ben Baruch and Gilad Sharir and Itamar Friedman}, + year={2020}, + eprint={2003.13630}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/vision-transformer.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/vision-transformer.md new file mode 100644 index 0000000000..105e985322 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/vision-transformer.md @@ -0,0 +1,319 @@ +# Vision Transformer (ViT) + +The **Vision Transformer** is a model for image classification that employs a Transformer-like architecture over patches of the image. This includes the use of [Multi-Head Attention](https://paperswithcode.com/method/multi-head-attention), [Scaled Dot-Product Attention](https://paperswithcode.com/method/scaled) and other architectural features seen in the [Transformer](https://paperswithcode.com/method/transformer) architecture traditionally used for NLP. + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
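+
+To make the patch-based design concrete, the sketch below (assuming the `vit_base_patch16_224` variant name purely as an example) runs a dummy image through the model: a 224x224 input with 16x16 patches is split into (224/16)^2 = 196 patch tokens before the Transformer encoder, while the classifier head still produces standard ImageNet logits.
+
+```python
+import timm
+import torch
+
+model = timm.create_model('vit_base_patch16_224', pretrained=False)
+model.eval()
+
+x = torch.randn(1, 3, 224, 224)  # dummy image batch
+with torch.no_grad():
+    logits = model(x)
+
+num_patches = (224 // 16) ** 2  # 14 x 14 = 196 patch tokens
+print(num_patches)   # prints: 196
+print(logits.shape)  # prints: torch.Size([1, 1000])
+```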
+ +## Citation + +```BibTeX +@misc{dosovitskiy2020image, + title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, + author={Alexey Dosovitskiy and Lucas Beyer and Alexander Kolesnikov and Dirk Weissenborn and Xiaohua Zhai and Thomas Unterthiner and Mostafa Dehghani and Matthias Minderer and Georg Heigold and Sylvain Gelly and Jakob Uszkoreit and Neil Houlsby}, + year={2020}, + eprint={2010.11929}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/wide-resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/wide-resnet.md new file mode 100644 index 0000000000..2e4bb89a39 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/wide-resnet.md @@ -0,0 +1,102 @@ +# Wide ResNet + +**Wide Residual Networks** are a variant on [ResNets](https://paperswithcode.com/method/resnet) where we decrease depth and increase the width of residual networks. This is achieved through the use of [wide residual blocks](https://paperswithcode.com/method/wide-residual-block). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/ZagoruykoK16, + author = {Sergey Zagoruyko and + Nikos Komodakis}, + title = {Wide Residual Networks}, + journal = {CoRR}, + volume = {abs/1605.07146}, + year = {2016}, + url = {http://arxiv.org/abs/1605.07146}, + archivePrefix = {arXiv}, + eprint = {1605.07146}, + timestamp = {Mon, 13 Aug 2018 16:46:42 +0200}, + biburl = {https://dblp.org/rec/journals/corr/ZagoruykoK16.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/xception.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/xception.md new file mode 100644 index 0000000000..ee5216e828 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/.templates/models/xception.md @@ -0,0 +1,163 @@ +# Xception + +**Xception** is a convolutional neural network architecture that relies solely on [depthwise separable convolution layers](https://paperswithcode.com/method/depthwise-separable-convolution). + +The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). + +{% include 'code_snippets.md' %} + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
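+
+As a refresher on the building block itself, here is a minimal PyTorch sketch (for illustration, not the exact layer used in this implementation) of a depthwise separable convolution: a per-channel depthwise convolution followed by a 1x1 pointwise convolution.
+
+```python
+import torch
+import torch.nn as nn
+
+class DepthwiseSeparableConv(nn.Module):
+    """Depthwise 3x3 conv (one filter per input channel) followed by a 1x1 pointwise conv."""
+    def __init__(self, in_ch, out_ch):
+        super().__init__()
+        self.depthwise = nn.Conv2d(in_ch, in_ch, kernel_size=3, padding=1, groups=in_ch, bias=False)
+        self.pointwise = nn.Conv2d(in_ch, out_ch, kernel_size=1, bias=False)
+
+    def forward(self, x):
+        return self.pointwise(self.depthwise(x))
+
+layer = DepthwiseSeparableConv(32, 64)
+print(layer(torch.randn(1, 32, 56, 56)).shape)  # prints: torch.Size([1, 64, 56, 56])
+```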
+ +## Citation + +```BibTeX +@misc{chollet2017xception, + title={Xception: Deep Learning with Depthwise Separable Convolutions}, + author={François Chollet}, + year={2017}, + eprint={1610.02357}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/adversarial-inception-v3.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/adversarial-inception-v3.md new file mode 100644 index 0000000000..2ee4014408 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/adversarial-inception-v3.md @@ -0,0 +1,159 @@ +# Adversarial Inception v3 + +**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the side head). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module). + +This particular model was trained for the study of adversarial examples (adversarial training). + +The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('adv_inception_v3', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `adv_inception_v3`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
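+
+For example, a minimal sketch of timm's `features_only` interface (assuming this variant exposes it, as most timm models do):
+
+```python
+import timm
+import torch
+
+# Build the backbone as a multi-scale feature extractor instead of a classifier
+feature_model = timm.create_model('adv_inception_v3', pretrained=True, features_only=True)
+feature_model.eval()
+
+with torch.no_grad():
+    features = feature_model(torch.randn(1, 3, 299, 299))
+
+for f in features:
+    print(f.shape)  # one feature map per extracted stage
+```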
+ +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('adv_inception_v3', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1804-00097, + author = {Alexey Kurakin and + Ian J. Goodfellow and + Samy Bengio and + Yinpeng Dong and + Fangzhou Liao and + Ming Liang and + Tianyu Pang and + Jun Zhu and + Xiaolin Hu and + Cihang Xie and + Jianyu Wang and + Zhishuai Zhang and + Zhou Ren and + Alan L. Yuille and + Sangxia Huang and + Yao Zhao and + Yuzhe Zhao and + Zhonglin Han and + Junjiajia Long and + Yerkebulan Berdibekov and + Takuya Akiba and + Seiya Tokui and + Motoki Abe}, + title = {Adversarial Attacks and Defences Competition}, + journal = {CoRR}, + volume = {abs/1804.00097}, + year = {2018}, + url = {http://arxiv.org/abs/1804.00097}, + archivePrefix = {arXiv}, + eprint = {1804.00097}, + timestamp = {Thu, 31 Oct 2019 16:31:22 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-1804-00097.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/advprop.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/advprop.md new file mode 100644 index 0000000000..42ffbf87a2 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/advprop.md @@ -0,0 +1,518 @@ +# AdvProp (EfficientNet) + +**AdvProp** is an adversarial training scheme which treats adversarial examples as additional examples, to prevent overfitting. Key to the method is the usage of a separate auxiliary batch norm for adversarial examples, as they have different underlying distributions to normal examples. + +The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). + +## How do I use this model on an image? 
+To load a pretrained model: + +```python +import timm +model = timm.create_model('tf_efficientnet_b0_ap', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0_ap`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('tf_efficientnet_b0_ap', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{xie2020adversarial, + title={Adversarial Examples Improve Image Recognition}, + author={Cihang Xie and Mingxing Tan and Boqing Gong and Jiang Wang and Alan Yuille and Quoc V. Le}, + year={2020}, + eprint={1911.09665}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/big-transfer.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/big-transfer.md new file mode 100644 index 0000000000..3663ff5aa9 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/big-transfer.md @@ -0,0 +1,356 @@ +# Big Transfer (BiT) + +**Big Transfer (BiT)** is a type of pretraining recipe that pre-trains on a large supervised source dataset, and fine-tunes the weights on the target task. Models are trained on the JFT-300M dataset. 
The finetuned models contained in this collection are finetuned on ImageNet. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('resnetv2_101x1_bitm', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `resnetv2_101x1_bitm`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('resnetv2_101x1_bitm', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
+ +## Citation + +```BibTeX +@misc{kolesnikov2020big, + title={Big Transfer (BiT): General Visual Representation Learning}, + author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby}, + year={2020}, + eprint={1912.11370}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/csp-darknet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/csp-darknet.md new file mode 100644 index 0000000000..7a0a1bd052 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/csp-darknet.md @@ -0,0 +1,142 @@ +# CSP-DarkNet + +**CSPDarknet53** is a convolutional neural network and backbone for object detection that uses [DarkNet-53](https://paperswithcode.com/method/darknet-53). It employs a CSPNet strategy to partition the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network. + +This CNN is used as the backbone for [YOLOv4](https://paperswithcode.com/method/yolov4). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('cspdarknet53', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `cspdarknet53`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
+```python +model = timm.create_model('cspdarknet53', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{bochkovskiy2020yolov4, + title={YOLOv4: Optimal Speed and Accuracy of Object Detection}, + author={Alexey Bochkovskiy and Chien-Yao Wang and Hong-Yuan Mark Liao}, + year={2020}, + eprint={2004.10934}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/csp-resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/csp-resnet.md new file mode 100644 index 0000000000..707f20eb3b --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/csp-resnet.md @@ -0,0 +1,137 @@ +# CSP-ResNet + +**CSPResNet** is a convolutional neural network where we apply the Cross Stage Partial Network (CSPNet) approach to [ResNet](https://paperswithcode.com/method/resnet). The CSPNet partitions the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('cspresnet50', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `cspresnet50`. You can find the IDs in the model summaries at the top of this page. 
+ +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('cspresnet50', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{wang2019cspnet, + title={CSPNet: A New Backbone that can Enhance Learning Capability of CNN}, + author={Chien-Yao Wang and Hong-Yuan Mark Liao and I-Hau Yeh and Yueh-Hua Wu and Ping-Yang Chen and Jun-Wei Hsieh}, + year={2019}, + eprint={1911.11929}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/csp-resnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/csp-resnext.md new file mode 100644 index 0000000000..ff35f0eee2 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/csp-resnext.md @@ -0,0 +1,138 @@ +# CSP-ResNeXt + +**CSPResNeXt** is a convolutional neural network where we apply the Cross Stage Partial Network (CSPNet) approach to [ResNeXt](https://paperswithcode.com/method/resnext). The CSPNet partitions the feature map of the base layer into two parts and then merges them through a cross-stage hierarchy. The use of a split and merge strategy allows for more gradient flow through the network. + +## How do I use this model on an image? 
+To load a pretrained model: + +```python +import timm +model = timm.create_model('cspresnext50', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `cspresnext50`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('cspresnext50', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
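+
+To illustrate the cross-stage partial idea in isolation (a toy sketch, not the actual block used by `cspresnext50`): the stage input is split into two channel groups, only one group passes through the stage's blocks, and the two groups are merged again afterwards, which keeps part of the gradient path short.
+
+```python
+import torch
+import torch.nn as nn
+
+class ToyCSPStage(nn.Module):
+    """Toy cross-stage partial stage: process half the channels, bypass the rest."""
+    def __init__(self, channels):
+        super().__init__()
+        half = channels // 2
+        self.blocks = nn.Sequential(  # stand-in for the stage's residual blocks
+            nn.Conv2d(half, half, 3, padding=1, bias=False),
+            nn.BatchNorm2d(half),
+            nn.ReLU(inplace=True),
+        )
+        self.transition = nn.Conv2d(channels, channels, 1, bias=False)
+
+    def forward(self, x):
+        part1, part2 = torch.chunk(x, 2, dim=1)  # partition the feature map into two parts
+        part2 = self.blocks(part2)               # only one part goes through the blocks
+        return self.transition(torch.cat([part1, part2], dim=1))  # merge across the stage
+
+stage = ToyCSPStage(64)
+print(stage(torch.randn(1, 64, 32, 32)).shape)  # prints: torch.Size([1, 64, 32, 32])
+```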
+ +## Citation + +```BibTeX +@misc{wang2019cspnet, + title={CSPNet: A New Backbone that can Enhance Learning Capability of CNN}, + author={Chien-Yao Wang and Hong-Yuan Mark Liao and I-Hau Yeh and Yueh-Hua Wu and Ping-Yang Chen and Jun-Wei Hsieh}, + year={2019}, + eprint={1911.11929}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/densenet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/densenet.md new file mode 100644 index 0000000000..2ed86503d6 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/densenet.md @@ -0,0 +1,366 @@ +# DenseNet + +**DenseNet** is a type of convolutional neural network that utilises dense connections between layers, through [Dense Blocks](http://www.paperswithcode.com/method/dense-block), where we connect *all layers* (with matching feature-map sizes) directly with each other. To preserve the feed-forward nature, each layer obtains additional inputs from all preceding layers and passes on its own feature-maps to all subsequent layers. + +The **DenseNet Blur** variant in this collection by Ross Wightman employs [Blur Pooling](http://www.paperswithcode.com/method/blur-pooling) + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('densenet121', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `densenet121`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
+```python +model = timm.create_model('densenet121', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/HuangLW16a, + author = {Gao Huang and + Zhuang Liu and + Kilian Q. Weinberger}, + title = {Densely Connected Convolutional Networks}, + journal = {CoRR}, + volume = {abs/1608.06993}, + year = {2016}, + url = {http://arxiv.org/abs/1608.06993}, + archivePrefix = {arXiv}, + eprint = {1608.06993}, + timestamp = {Mon, 10 Sep 2018 15:49:32 +0200}, + biburl = {https://dblp.org/rec/journals/corr/HuangLW16a.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + +```BibTeX +@misc{rw2019timm, + author = {Ross Wightman}, + title = {PyTorch Image Models}, + year = {2019}, + publisher = {GitHub}, + journal = {GitHub repository}, + doi = {10.5281/zenodo.4414861}, + howpublished = {\url{https://github.com/rwightman/pytorch-image-models}} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/dla.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/dla.md new file mode 100644 index 0000000000..d4d7626e5a --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/dla.md @@ -0,0 +1,606 @@ +# Deep Layer Aggregation + +Extending “shallow” skip connections, **Deep Layer Aggregation (DLA)** incorporates more depth and sharing. The authors introduce two structures for deep layer aggregation (DLA): iterative deep aggregation (IDA) and hierarchical deep aggregation (HDA). These structures are expressed through an architectural framework, independent of the choice of backbone, for compatibility with current and future networks. + +IDA focuses on fusing resolutions and scales while HDA focuses on merging features from all modules and channels. IDA follows the base hierarchy to refine resolution and aggregate scale stage-by-stage. HDA assembles its own hierarchy of tree-structured connections that cross and merge stages to aggregate different levels of representation. + +## How do I use this model on an image?
+To load a pretrained model: + +```python +import timm +model = timm.create_model('dla102', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `dla102`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('dla102', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{yu2019deep, + title={Deep Layer Aggregation}, + author={Fisher Yu and Dequan Wang and Evan Shelhamer and Trevor Darrell}, + year={2019}, + eprint={1707.06484}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/dpn.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/dpn.md new file mode 100644 index 0000000000..b18ce9b82d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/dpn.md @@ -0,0 +1,317 @@ +# Dual Path Network (DPN) + +A **Dual Path Network (DPN)** is a convolutional neural network which presents a new topology of connection paths internally. The intuition is that [ResNets](https://paperswithcode.com/method/resnet) enables feature re-usage while DenseNet enables new feature exploration, and both are important for learning good representations. 
To enjoy the benefits from both path topologies, Dual Path Networks share common features while maintaining the flexibility to explore new features through dual path architectures. + +The principal building block is an [DPN Block](https://paperswithcode.com/method/dpn-block). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('dpn107', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `dpn107`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('dpn107', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
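+
+A toy sketch of the dual-path intuition (illustrative only, not the actual DPN block): each block emits one slice of features that is added to a residual path (feature re-use) and another slice that is concatenated onto a growing dense path (new feature exploration).
+
+```python
+import torch
+import torch.nn as nn
+
+class ToyDualPathBlock(nn.Module):
+    """Toy dual-path block: one output slice is added (ResNet-style), another is concatenated (DenseNet-style)."""
+    def __init__(self, res_ch, dense_in_ch, dense_growth):
+        super().__init__()
+        self.conv = nn.Conv2d(res_ch + dense_in_ch, res_ch + dense_growth, 3, padding=1, bias=False)
+        self.res_ch = res_ch
+
+    def forward(self, res, dense):
+        out = self.conv(torch.cat([res, dense], dim=1))
+        res_out, dense_out = torch.split(out, [self.res_ch, out.shape[1] - self.res_ch], dim=1)
+        res = res + res_out                           # residual path: feature re-use
+        dense = torch.cat([dense, dense_out], dim=1)  # dense path: new feature exploration
+        return res, dense
+
+block = ToyDualPathBlock(res_ch=64, dense_in_ch=16, dense_growth=16)
+res, dense = block(torch.randn(1, 64, 28, 28), torch.randn(1, 16, 28, 28))
+print(res.shape, dense.shape)  # torch.Size([1, 64, 28, 28]) torch.Size([1, 32, 28, 28])
+```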
+ +## Citation + +```BibTeX +@misc{chen2017dual, + title={Dual Path Networks}, + author={Yunpeng Chen and Jianan Li and Huaxin Xiao and Xiaojie Jin and Shuicheng Yan and Jiashi Feng}, + year={2017}, + eprint={1707.01629}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/ecaresnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/ecaresnet.md new file mode 100644 index 0000000000..fe76b6dd51 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/ecaresnet.md @@ -0,0 +1,297 @@ +# ECA-ResNet + +An **ECA ResNet** is a variant on a [ResNet](https://paperswithcode.com/method/resnet) that utilises an [Efficient Channel Attention module](https://paperswithcode.com/method/efficient-channel-attention). Efficient Channel Attention is an architectural unit based on [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) that reduces model complexity without dimensionality reduction. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('ecaresnet101d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `ecaresnet101d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
+```python +model = timm.create_model('ecaresnet101d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{wang2020ecanet, + title={ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks}, + author={Qilong Wang and Banggu Wu and Pengfei Zhu and Peihua Li and Wangmeng Zuo and Qinghua Hu}, + year={2020}, + eprint={1910.03151}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/efficientnet-pruned.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/efficientnet-pruned.md new file mode 100644 index 0000000000..8dd8827240 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/efficientnet-pruned.md @@ -0,0 +1,206 @@ +# EfficientNet (Knapsack Pruned) + +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scale network width, depth, and resolution in a principled way. + +The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. + +The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). + +This collection consists of pruned EfficientNet models. + +## How do I use this model on an image?
+To load a pretrained model: + +```python +import timm +model = timm.create_model('efficientnet_b1_pruned', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `efficientnet_b1_pruned`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('efficientnet_b1_pruned', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2020efficientnet, + title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, + author={Mingxing Tan and Quoc V. 
Le}, + year={2020}, + eprint={1905.11946}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + +```BibTeX +@misc{aflalo2020knapsack, + title={Knapsack Pruning with Inner Distillation}, + author={Yonathan Aflalo and Asaf Noy and Ming Lin and Itamar Friedman and Lihi Zelnik}, + year={2020}, + eprint={2002.08258}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/efficientnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/efficientnet.md new file mode 100644 index 0000000000..d04d86ab1d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/efficientnet.md @@ -0,0 +1,386 @@ +# EfficientNet + +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scale network width, depth, and resolution in a principled way. + +The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. + +The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). + +## How do I use this model on an image?
+To load a pretrained model: + +```python +import timm +model = timm.create_model('efficientnet_b0', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `efficientnet_b0`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('efficientnet_b0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2020efficientnet, + title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, + author={Mingxing Tan and Quoc V. 
Le}, + year={2020}, + eprint={1905.11946}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/ensemble-adversarial.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/ensemble-adversarial.md new file mode 100644 index 0000000000..bc33b7f018 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/ensemble-adversarial.md @@ -0,0 +1,159 @@ +# Ensemble Adversarial Inception ResNet v2 + +**Inception-ResNet-v2** is a convolutional neural architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture). + +This particular model was trained for the study of adversarial examples (adversarial training). + +The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('ens_adv_inception_resnet_v2', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `ens_adv_inception_resnet_v2`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer).
+```python +model = timm.create_model('ens_adv_inception_resnet_v2', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1804-00097, + author = {Alexey Kurakin and + Ian J. Goodfellow and + Samy Bengio and + Yinpeng Dong and + Fangzhou Liao and + Ming Liang and + Tianyu Pang and + Jun Zhu and + Xiaolin Hu and + Cihang Xie and + Jianyu Wang and + Zhishuai Zhang and + Zhou Ren and + Alan L. Yuille and + Sangxia Huang and + Yao Zhao and + Yuzhe Zhao and + Zhonglin Han and + Junjiajia Long and + Yerkebulan Berdibekov and + Takuya Akiba and + Seiya Tokui and + Motoki Abe}, + title = {Adversarial Attacks and Defences Competition}, + journal = {CoRR}, + volume = {abs/1804.00097}, + year = {2018}, + url = {http://arxiv.org/abs/1804.00097}, + archivePrefix = {arXiv}, + eprint = {1804.00097}, + timestamp = {Thu, 31 Oct 2019 16:31:22 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-1804-00097.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/ese-vovnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/ese-vovnet.md new file mode 100644 index 0000000000..ca02b0a47d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/ese-vovnet.md @@ -0,0 +1,153 @@ +# ESE-VoVNet + +**VoVNet** is a convolutional neural network that seeks to make [DenseNet](https://paperswithcode.com/method/densenet) more efficient by concatenating all features only once in the last feature map, which makes input size constant and enables enlarging new output channel. + +Read about [one-shot aggregation here](https://paperswithcode.com/method/one-shot-aggregation). + +## How do I use this model on an image? 
+To load a pretrained model: + +```python +import timm +model = timm.create_model('ese_vovnet19b_dw', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `ese_vovnet19b_dw`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('ese_vovnet19b_dw', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{lee2019energy, + title={An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection}, + author={Youngwan Lee and Joong-won Hwang and Sangrok Lee and Yuseok Bae and Jongyoul Park}, + year={2019}, + eprint={1904.09730}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/fbnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/fbnet.md new file mode 100644 index 0000000000..7c5de5a652 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/fbnet.md @@ -0,0 +1,137 @@ +# FBNet + +**FBNet** is a family of convolutional neural architectures discovered through [DNAS](https://paperswithcode.com/method/dnas) neural architecture search.
It utilises a basic type of image model block inspired by [MobileNetv2](https://paperswithcode.com/method/mobilenetv2) that utilises depthwise convolutions and an inverted residual structure (see components). + +The principal building block is the [FBNet Block](https://paperswithcode.com/method/fbnet-block). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('fbnetc_100', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `fbnetc_100`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('fbnetc_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
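For reference, the depthwise-separable, inverted-residual pattern referred to in the description at the top of this page can be sketched in plain PyTorch as below. This is an illustrative, generic MobileNetV2-style block (the expansion ratio, channel counts and layer order are assumptions), not the actual FBNet block implementation used by timm.

```python
import torch
import torch.nn as nn

class InvertedResidualSketch(nn.Module):
    """Generic inverted-residual block: 1x1 expand -> 3x3 depthwise -> 1x1 project."""
    def __init__(self, in_ch, out_ch, stride=1, expansion=6):
        super().__init__()
        hidden = in_ch * expansion
        self.use_skip = stride == 1 and in_ch == out_ch
        self.block = nn.Sequential(
            nn.Conv2d(in_ch, hidden, 1, bias=False),              # pointwise expansion
            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
            nn.Conv2d(hidden, hidden, 3, stride, 1,
                      groups=hidden, bias=False),                 # depthwise convolution
            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
            nn.Conv2d(hidden, out_ch, 1, bias=False),             # pointwise projection
            nn.BatchNorm2d(out_ch),
        )

    def forward(self, x):
        out = self.block(x)
        return x + out if self.use_skip else out                  # residual only when shapes match

x = torch.randn(1, 32, 56, 56)
print(InvertedResidualSketch(32, 32)(x).shape)  # torch.Size([1, 32, 56, 56])
```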
+ +## Citation + +```BibTeX +@misc{wu2019fbnet, + title={FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search}, + author={Bichen Wu and Xiaoliang Dai and Peizhao Zhang and Yanghan Wang and Fei Sun and Yiming Wu and Yuandong Tian and Peter Vajda and Yangqing Jia and Kurt Keutzer}, + year={2019}, + eprint={1812.03443}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-inception-v3.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-inception-v3.md new file mode 100644 index 0000000000..3f1988f349 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-inception-v3.md @@ -0,0 +1,139 @@ +# (Gluon) Inception v3 + +**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the side head). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module). + +The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('gluon_inception_v3', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `gluon_inception_v3`. You can find the IDs in the model summaries at the top of this page.
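If the summary tables are not handy, the variant IDs can also be listed programmatically. A quick sketch, assuming the `timm` package bundled with this repo:

```python
import timm

# List Gluon-ported Inception variants that ship pretrained weights.
print(timm.list_models('gluon_inception*', pretrained=True))
```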
+ +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('gluon_inception_v3', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/SzegedyVISW15, + author = {Christian Szegedy and + Vincent Vanhoucke and + Sergey Ioffe and + Jonathon Shlens and + Zbigniew Wojna}, + title = {Rethinking the Inception Architecture for Computer Vision}, + journal = {CoRR}, + volume = {abs/1512.00567}, + year = {2015}, + url = {http://arxiv.org/abs/1512.00567}, + archivePrefix = {arXiv}, + eprint = {1512.00567}, + timestamp = {Mon, 13 Aug 2018 16:49:07 +0200}, + biburl = {https://dblp.org/rec/journals/corr/SzegedyVISW15.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-resnet.md new file mode 100644 index 0000000000..e1bfad3120 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-resnet.md @@ -0,0 +1,565 @@ +# (Gluon) ResNet + +**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) on top of each other to form networks: e.g. a ResNet-50 has fifty layers using these blocks. + +The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). + +## How do I use this model on an image?
+To load a pretrained model: + +```python +import timm +model = timm.create_model('gluon_resnet101_v1b', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `gluon_resnet101_v1b`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('gluon_resnet101_v1b', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
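To make the residual mapping described at the top of this page concrete, here is a minimal basic residual block in plain PyTorch. It is a simplified sketch (no downsampling path, fixed 3x3 kernels), not the exact block used by the Gluon-ported weights.

```python
import torch
import torch.nn as nn

class BasicResidualBlockSketch(nn.Module):
    """y = F(x) + x, where F is two 3x3 conv-BN layers."""
    def __init__(self, channels):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(x + out)   # add the identity (skip) connection

x = torch.randn(1, 64, 56, 56)
print(BasicResidualBlockSketch(64)(x).shape)  # torch.Size([1, 64, 56, 56])
```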
+ +## Citation + +```BibTeX +@article{DBLP:journals/corr/HeZRS15, + author = {Kaiming He and + Xiangyu Zhang and + Shaoqing Ren and + Jian Sun}, + title = {Deep Residual Learning for Image Recognition}, + journal = {CoRR}, + volume = {abs/1512.03385}, + year = {2015}, + url = {http://arxiv.org/abs/1512.03385}, + archivePrefix = {arXiv}, + eprint = {1512.03385}, + timestamp = {Wed, 17 Apr 2019 17:23:45 +0200}, + biburl = {https://dblp.org/rec/journals/corr/HeZRS15.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-resnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-resnext.md new file mode 100644 index 0000000000..c446510b0c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-resnext.md @@ -0,0 +1,203 @@ +# (Gluon) ResNeXt + +A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. + +The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('gluon_resnext101_32x4d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `gluon_resnext101_32x4d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? 
+You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('gluon_resnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/XieGDTH16, + author = {Saining Xie and + Ross B. Girshick and + Piotr Doll{\'{a}}r and + Zhuowen Tu and + Kaiming He}, + title = {Aggregated Residual Transformations for Deep Neural Networks}, + journal = {CoRR}, + volume = {abs/1611.05431}, + year = {2016}, + url = {http://arxiv.org/abs/1611.05431}, + archivePrefix = {arXiv}, + eprint = {1611.05431}, + timestamp = {Mon, 13 Aug 2018 16:45:58 +0200}, + biburl = {https://dblp.org/rec/journals/corr/XieGDTH16.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-senet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-senet.md new file mode 100644 index 0000000000..833307857d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-senet.md @@ -0,0 +1,124 @@ +# (Gluon) SENet + +A **SENet** is a convolutional neural network architecture that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). + +## How do I use this model on an image? 
+To load a pretrained model: + +```python +import timm +model = timm.create_model('gluon_senet154', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `gluon_senet154`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('gluon_senet154', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
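The dynamic channel-wise feature recalibration performed by a squeeze-and-excitation block can be sketched as follows. This is a generic illustration (the reduction ratio of 16 is an assumption), not the exact module inside the Gluon SENet weights.

```python
import torch
import torch.nn as nn

class SEBlockSketch(nn.Module):
    """Squeeze (global average pool) -> excite (two FC layers) -> rescale channels."""
    def __init__(self, channels, reduction=16):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        n, c, _, _ = x.shape
        w = x.mean(dim=(2, 3))            # squeeze: per-channel global average
        w = self.fc(w).view(n, c, 1, 1)   # excite: per-channel weights in [0, 1]
        return x * w                      # recalibrate the feature map

x = torch.randn(1, 64, 32, 32)
print(SEBlockSketch(64)(x).shape)  # torch.Size([1, 64, 32, 32])
```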
+ +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-seresnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-seresnext.md new file mode 100644 index 0000000000..bef4b11944 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-seresnext.md @@ -0,0 +1,197 @@ +# (Gluon) SE-ResNeXt + +**SE ResNeXt** is a variant of a [ResNext](https://www.paperswithcode.com/method/resnext) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('gluon_seresnext101_32x4d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `gluon_seresnext101_32x4d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
+```python +model = timm.create_model('gluon_seresnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-xception.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-xception.md new file mode 100644 index 0000000000..ae1e464815 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/gloun-xception.md @@ -0,0 +1,127 @@ +# (Gluon) Xception + +**Xception** is a convolutional neural network architecture that relies solely on [depthwise separable convolution](https://paperswithcode.com/method/depthwise-separable-convolution) layers. + +The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('gluon_xception65', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `gluon_xception65`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? 
+You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('gluon_xception65', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{chollet2017xception, + title={Xception: Deep Learning with Depthwise Separable Convolutions}, + author={François Chollet}, + year={2017}, + eprint={1610.02357}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/hrnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/hrnet.md new file mode 100644 index 0000000000..5ef6499e10 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/hrnet.md @@ -0,0 +1,419 @@ +# HRNet + +**HRNet**, or **High-Resolution Net**, is a general purpose convolutional neural network for tasks like semantic segmentation, object detection and image classification. It is able to maintain high resolution representations through the whole process. We start from a high-resolution convolution stream, gradually add high-to-low resolution convolution streams one by one, and connect the multi-resolution streams in parallel. The resulting network consists of several ($4$ in the paper) stages and the $n$th stage contains $n$ streams corresponding to $n$ resolutions. The authors conduct repeated multi-resolution fusions by exchanging the information across the parallel streams over and over. + +## How do I use this model on an image? 
+To load a pretrained model: + +```python +import timm +model = timm.create_model('hrnet_w18', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `hrnet_w18`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('hrnet_w18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{sun2019highresolution, + title={High-Resolution Representations for Labeling Pixels and Regions}, + author={Ke Sun and Yang Zhao and Borui Jiang and Tianheng Cheng and Bin Xiao and Dong Liu and Yadong Mu and Xinggang Wang and Wenyu Liu and Jingdong Wang}, + year={2019}, + eprint={1904.04514}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/ig-resnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/ig-resnext.md new file mode 100644 index 0000000000..6461b4eb5d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/ig-resnext.md @@ -0,0 +1,270 @@ +# Instagram ResNeXt WSL + +A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. 
Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. + +These models were trained on billions of Instagram images using thousands of distinct hashtags as labels, and they exhibit excellent transfer learning performance. + +Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('ig_resnext101_32x16d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension
``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `ig_resnext101_32x16d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('ig_resnext101_32x16d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
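The cardinality dimension mentioned at the top of this page is typically realised with a grouped convolution, as in the rough sketch below. The channel counts and cardinality of 32 are illustrative assumptions rather than the exact configuration of `ig_resnext101_32x16d`, and the residual addition around the block is omitted for brevity.

```python
import torch
import torch.nn as nn

# A ResNeXt-style bottleneck transform: the 3x3 convolution is split into
# `cardinality` parallel groups, equivalent to aggregating that many paths.
def resnext_bottleneck_sketch(in_ch=256, bottleneck_ch=128, cardinality=32):
    return nn.Sequential(
        nn.Conv2d(in_ch, bottleneck_ch, 1, bias=False),
        nn.BatchNorm2d(bottleneck_ch), nn.ReLU(inplace=True),
        nn.Conv2d(bottleneck_ch, bottleneck_ch, 3, padding=1,
                  groups=cardinality, bias=False),   # grouped 3x3 = cardinality paths
        nn.BatchNorm2d(bottleneck_ch), nn.ReLU(inplace=True),
        nn.Conv2d(bottleneck_ch, in_ch, 1, bias=False),
        nn.BatchNorm2d(in_ch),
    )

x = torch.randn(1, 256, 56, 56)
print(resnext_bottleneck_sketch()(x).shape)  # torch.Size([1, 256, 56, 56])
```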
+ +## Citation + +```BibTeX +@misc{mahajan2018exploring, + title={Exploring the Limits of Weakly Supervised Pretraining}, + author={Dhruv Mahajan and Ross Girshick and Vignesh Ramanathan and Kaiming He and Manohar Paluri and Yixuan Li and Ashwin Bharambe and Laurens van der Maaten}, + year={2018}, + eprint={1805.00932}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/inception-resnet-v2.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/inception-resnet-v2.md new file mode 100644 index 0000000000..9cb597ebed --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/inception-resnet-v2.md @@ -0,0 +1,133 @@ +# Inception ResNet v2 + +**Inception-ResNet-v2** is a convolutional neural architecture that builds on the Inception family of architectures but incorporates [residual connections](https://paperswithcode.com/method/residual-connection) (replacing the filter concatenation stage of the Inception architecture). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('inception_resnet_v2', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `inception_resnet_v2`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
+```python +model = timm.create_model('inception_resnet_v2', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{szegedy2016inceptionv4, + title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning}, + author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi}, + year={2016}, + eprint={1602.07261}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/inception-v3.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/inception-v3.md new file mode 100644 index 0000000000..d51c52470c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/inception-v3.md @@ -0,0 +1,146 @@ +# Inception v3 + +**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), Factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the side head). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('inception_v3', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `inception_v3`.
You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('inception_v3', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/SzegedyVISW15, + author = {Christian Szegedy and + Vincent Vanhoucke and + Sergey Ioffe and + Jonathon Shlens and + Zbigniew Wojna}, + title = {Rethinking the Inception Architecture for Computer Vision}, + journal = {CoRR}, + volume = {abs/1512.00567}, + year = {2015}, + url = {http://arxiv.org/abs/1512.00567}, + archivePrefix = {arXiv}, + eprint = {1512.00567}, + timestamp = {Mon, 13 Aug 2018 16:49:07 +0200}, + biburl = {https://dblp.org/rec/journals/corr/SzegedyVISW15.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/inception-v4.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/inception-v4.md new file mode 100644 index 0000000000..5d752b7456 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/inception-v4.md @@ -0,0 +1,132 @@ +# Inception v4 + +**Inception-v4** is a convolutional neural network architecture that builds on previous iterations of the Inception family by simplifying the architecture and using more inception modules than [Inception-v3](https://paperswithcode.com/method/inception-v3). +## How do I use this model on an image? 
+To load a pretrained model: + +```python +import timm +model = timm.create_model('inception_v4', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `inception_v4`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('inception_v4', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
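+
+Inception-v4 is built from inception modules: blocks that run several convolutional branches with different receptive fields in parallel and concatenate their outputs along the channel dimension. The toy block below is a deliberately simplified, illustrative sketch of that pattern (it is not one of the actual Inception-v4 cells, whose branch layouts are more elaborate):
+
+```python
+import torch
+import torch.nn as nn
+
+# Toy Inception-style block: parallel branches concatenated on the channel axis.
+class ToyInceptionBlock(nn.Module):
+    def __init__(self, in_ch):
+        super().__init__()
+        self.branch1 = nn.Conv2d(in_ch, 32, kernel_size=1)
+        self.branch3 = nn.Sequential(
+            nn.Conv2d(in_ch, 32, kernel_size=1),
+            nn.Conv2d(32, 32, kernel_size=3, padding=1),
+        )
+        self.branch_pool = nn.Sequential(
+            nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
+            nn.Conv2d(in_ch, 32, kernel_size=1),
+        )
+
+    def forward(self, x):
+        return torch.cat([self.branch1(x), self.branch3(x), self.branch_pool(x)], dim=1)
+
+block = ToyInceptionBlock(64)
+print(block(torch.randn(1, 64, 32, 32)).shape)  # torch.Size([1, 96, 32, 32])
+```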
+ +## Citation + +```BibTeX +@misc{szegedy2016inceptionv4, + title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning}, + author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi}, + year={2016}, + eprint={1602.07261}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/legacy-se-resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/legacy-se-resnet.md new file mode 100644 index 0000000000..78e4a126b0 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/legacy-se-resnet.md @@ -0,0 +1,318 @@ +# (Legacy) SE-ResNet + +**SE ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('legacy_seresnet101', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `legacy_seresnet101`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('legacy_seresnet101', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? 
+ +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/legacy-se-resnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/legacy-se-resnext.md new file mode 100644 index 0000000000..d9524aab9d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/legacy-se-resnext.md @@ -0,0 +1,228 @@ +# (Legacy) SE-ResNeXt + +**SE ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('legacy_seresnext101_32x4d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `legacy_seresnext101_32x4d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
+```python +model = timm.create_model('legacy_seresnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/legacy-senet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/legacy-senet.md new file mode 100644 index 0000000000..c3a49da5ca --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/legacy-senet.md @@ -0,0 +1,135 @@ +# (Legacy) SENet + +A **SENet** is a convolutional neural network architecture that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +The weights from this model were ported from Gluon. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('legacy_senet154', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `legacy_senet154`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? 
+You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('legacy_senet154', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/mixnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/mixnet.md new file mode 100644 index 0000000000..a283de8a23 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/mixnet.md @@ -0,0 +1,225 @@ +# MixNet + +**MixNet** is a type of convolutional neural network discovered via AutoML that utilises [MixConvs](https://paperswithcode.com/method/mixconv) instead of regular [depthwise convolutions](https://paperswithcode.com/method/depthwise-convolution). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('mixnet_l', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `mixnet_l`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? 
+You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('mixnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2019mixconv, + title={MixConv: Mixed Depthwise Convolutional Kernels}, + author={Mingxing Tan and Quoc V. Le}, + year={2019}, + eprint={1907.09595}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/mnasnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/mnasnet.md new file mode 100644 index 0000000000..bf638da25b --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/mnasnet.md @@ -0,0 +1,170 @@ +# MnasNet + +**MnasNet** is a type of convolutional neural network optimized for mobile devices that is discovered through mobile neural architecture search, which explicitly incorporates model latency into the main objective so that the search can identify a model that achieves a good trade-off between accuracy and latency. The main building block is an [inverted residual block](https://paperswithcode.com/method/inverted-residual-block) (from [MobileNetV2](https://paperswithcode.com/method/mobilenetv2)). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('mnasnet_100', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `mnasnet_100`. You can find the IDs in the model summaries at the top of this page. 
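+
+The inverted residual (MBConv) block named in the MnasNet description above follows a narrow-wide-narrow pattern: a 1x1 convolution expands the channels, a depthwise 3x3 convolution filters them, and a final 1x1 convolution projects back down, with a shortcut between the two narrow ends. The sketch below is a minimal illustration of that structure, not the exact block configuration used by `mnasnet_100`:
+
+```python
+import torch
+import torch.nn as nn
+
+# Toy inverted residual (MBConv-style) block: expand -> depthwise conv -> project -> add.
+class ToyInvertedResidual(nn.Module):
+    def __init__(self, channels=32, expansion=6):
+        super().__init__()
+        hidden = channels * expansion
+        self.block = nn.Sequential(
+            nn.Conv2d(channels, hidden, kernel_size=1, bias=False),
+            nn.BatchNorm2d(hidden),
+            nn.ReLU6(inplace=True),
+            nn.Conv2d(hidden, hidden, kernel_size=3, padding=1, groups=hidden, bias=False),
+            nn.BatchNorm2d(hidden),
+            nn.ReLU6(inplace=True),
+            nn.Conv2d(hidden, channels, kernel_size=1, bias=False),
+            nn.BatchNorm2d(channels),
+        )
+
+    def forward(self, x):
+        return x + self.block(x)  # residual connection between the narrow ends
+
+block = ToyInvertedResidual()
+print(block(torch.randn(1, 32, 56, 56)).shape)  # torch.Size([1, 32, 56, 56])
+```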
+ +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('mnasnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2019mnasnet, + title={MnasNet: Platform-Aware Neural Architecture Search for Mobile}, + author={Mingxing Tan and Bo Chen and Ruoming Pang and Vijay Vasudevan and Mark Sandler and Andrew Howard and Quoc V. Le}, + year={2019}, + eprint={1807.11626}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/mobilenet-v2.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/mobilenet-v2.md new file mode 100644 index 0000000000..d6532e2fbf --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/mobilenet-v2.md @@ -0,0 +1,271 @@ +# MobileNet v2 + +**MobileNetV2** is a convolutional neural network architecture that seeks to perform well on mobile devices. It is based on an [inverted residual structure](https://paperswithcode.com/method/inverted-residual-block) where the residual connections are between the bottleneck layers. The intermediate expansion layer uses lightweight depthwise convolutions to filter features as a source of non-linearity. As a whole, the architecture of MobileNetV2 contains the initial fully convolution layer with 32 filters, followed by 19 residual bottleneck layers. + +## How do I use this model on an image? 
+To load a pretrained model: + +```python +import timm +model = timm.create_model('mobilenetv2_100', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `mobilenetv2_100`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('mobilenetv2_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1801-04381, + author = {Mark Sandler and + Andrew G. 
Howard and + Menglong Zhu and + Andrey Zhmoginov and + Liang{-}Chieh Chen}, + title = {Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, + Detection and Segmentation}, + journal = {CoRR}, + volume = {abs/1801.04381}, + year = {2018}, + url = {http://arxiv.org/abs/1801.04381}, + archivePrefix = {arXiv}, + eprint = {1801.04381}, + timestamp = {Tue, 12 Jan 2021 15:30:06 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-1801-04381.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/mobilenet-v3.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/mobilenet-v3.md new file mode 100644 index 0000000000..e68b5259a3 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/mobilenet-v3.md @@ -0,0 +1,199 @@ +# MobileNet v3 + +**MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('mobilenetv3_large_100', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `mobilenetv3_large_100`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
+```python +model = timm.create_model('mobilenetv3_large_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-02244, + author = {Andrew Howard and + Mark Sandler and + Grace Chu and + Liang{-}Chieh Chen and + Bo Chen and + Mingxing Tan and + Weijun Wang and + Yukun Zhu and + Ruoming Pang and + Vijay Vasudevan and + Quoc V. Le and + Hartwig Adam}, + title = {Searching for MobileNetV3}, + journal = {CoRR}, + volume = {abs/1905.02244}, + year = {2019}, + url = {http://arxiv.org/abs/1905.02244}, + archivePrefix = {arXiv}, + eprint = {1905.02244}, + timestamp = {Tue, 12 Jan 2021 15:30:06 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/nasnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/nasnet.md new file mode 100644 index 0000000000..76b29fc9fe --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/nasnet.md @@ -0,0 +1,131 @@ +# NASNet + +**NASNet** is a type of convolutional neural network discovered through neural architecture search. The building blocks consist of normal and reduction cells. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('nasnetalarge', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `nasnetalarge`. You can find the IDs in the model summaries at the top of this page. 
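+
+As described at the top of this page, NASNet stacks two kinds of searched cells: normal cells, which keep the spatial resolution, and reduction cells, which halve it. The snippet below uses plain convolutions purely as schematic stand-ins for those cells; the real NASNet cells are multi-branch structures found by the architecture search:
+
+```python
+import torch
+import torch.nn as nn
+
+# Schematic stand-ins only: a "normal" cell keeps the resolution, a "reduction" cell halves it.
+normal_cell = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
+reduction_cell = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1)
+
+x = torch.randn(1, 32, 56, 56)
+print(normal_cell(x).shape)     # torch.Size([1, 32, 56, 56])
+print(reduction_cell(x).shape)  # torch.Size([1, 64, 28, 28])
+```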
+ +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('nasnetalarge', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{zoph2018learning, + title={Learning Transferable Architectures for Scalable Image Recognition}, + author={Barret Zoph and Vijay Vasudevan and Jonathon Shlens and Quoc V. Le}, + year={2018}, + eprint={1707.07012}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/noisy-student.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/noisy-student.md new file mode 100644 index 0000000000..5a92d9282f --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/noisy-student.md @@ -0,0 +1,571 @@ +# Noisy Student (EfficientNet) + +**Noisy Student Training** is a semi-supervised learning approach. It extends the idea of self-training +and distillation with the use of equal-or-larger student models and noise added to the student during learning. It has three main steps: + +1. train a teacher model on labeled images +2. use the teacher to generate pseudo labels on unlabeled images +3. train a student model on the combination of labeled images and pseudo labeled images. + +The algorithm is iterated a few times by treating the student as a teacher to relabel the unlabeled data and training a new student. + +Noisy Student Training seeks to improve on self-training and distillation in two ways. First, it makes the student larger than, or at least equal to, the teacher so the student can better learn from a larger dataset. Second, it adds noise to the student so the noised student is forced to learn harder from the pseudo labels. To noise the student, it uses input noise such as RandAugment data augmentation, and model noise such as dropout and stochastic depth during training. + +## How do I use this model on an image? 
+To load a pretrained model: + +```python +import timm +model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0_ns`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{xie2020selftraining, + title={Self-training with Noisy Student improves ImageNet classification}, + author={Qizhe Xie and Minh-Thang Luong and Eduard Hovy and Quoc V. Le}, + year={2020}, + eprint={1911.04252}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/pnasnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/pnasnet.md new file mode 100644 index 0000000000..52dfd946da --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/pnasnet.md @@ -0,0 +1,132 @@ +# PNASNet + +**Progressive Neural Architecture Search**, or **PNAS**, is a method for learning the structure of convolutional neural networks (CNNs). 
It uses a sequential model-based optimization (SMBO) strategy, where we search the space of cell structures, starting with simple (shallow) models and progressing to complex ones, pruning out unpromising structures as we go. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('pnasnet5large', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `pnasnet5large`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('pnasnet5large', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
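+
+The progressive SMBO search described above grows candidate cells one operation at a time and uses a learned surrogate predictor to decide which partial cells are worth expanding, instead of fully training every candidate. The toy loop below is only a schematic illustration of that idea, with an invented operation set and a random stand-in for the surrogate predictor:
+
+```python
+import random
+
+# Toy sketch of SMBO-style progressive search (illustrative only, not the PNAS search space).
+OPS = ["sep3x3", "sep5x5", "maxpool", "identity"]
+K = 4  # number of candidates kept at each step
+
+def surrogate_score(cell):
+    # Stand-in for the learned accuracy predictor used in PNAS (random here).
+    random.seed(hash(tuple(cell)) % (2 ** 32))
+    return random.random()
+
+candidates = [[op] for op in OPS]          # start from one-operation cells
+for depth in range(2, 4):                  # progressively add one operation at a time
+    expanded = [cell + [op] for cell in candidates for op in OPS]
+    candidates = sorted(expanded, key=surrogate_score, reverse=True)[:K]
+
+print(candidates[0])  # highest-scoring three-operation cell under the toy surrogate
+```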
+ +## Citation + +```BibTeX +@misc{liu2018progressive, + title={Progressive Neural Architecture Search}, + author={Chenxi Liu and Barret Zoph and Maxim Neumann and Jonathon Shlens and Wei Hua and Li-Jia Li and Li Fei-Fei and Alan Yuille and Jonathan Huang and Kevin Murphy}, + year={2018}, + eprint={1712.00559}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/regnetx.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/regnetx.md new file mode 100644 index 0000000000..0842a60174 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/regnetx.md @@ -0,0 +1,553 @@ +# RegNetX + +**RegNetX** is a convolutional network design space with simple, regular models with parameters: depth $d$, initial width $w\_{0} > 0$, and slope $w\_{a} > 0$, and generates a different block width $u\_{j}$ for each block $j < d$. The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure): + +$$ u\_{j} = w\_{0} + w\_{a}\cdot{j} $$ + +For **RegNetX** we have additional restrictions: we set $b = 1$ (the bottleneck ratio), $12 \leq d \leq 28$, and $w\_{m} \geq 2$ (the width multiplier). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('regnetx_002', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `regnetx_002`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
+```python +model = timm.create_model('regnetx_002', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{radosavovic2020designing, + title={Designing Network Design Spaces}, + author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, + year={2020}, + eprint={2003.13678}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/regnety.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/regnety.md new file mode 100644 index 0000000000..ba73f73cb5 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/regnety.md @@ -0,0 +1,567 @@ +# RegNetY + +**RegNetY** is a convolutional network design space with simple, regular models with parameters: depth $d$, initial width $w\_{0} > 0$, and slope $w\_{a} > 0$, and generates a different block width $u\_{j}$ for each block $j < d$. The key restriction for the RegNet types of model is that there is a linear parameterisation of block widths (the design space only contains models with this linear structure): + +$$ u\_{j} = w\_{0} + w\_{a}\cdot{j} $$ + +For **RegNetX** the authors impose additional restrictions: $b = 1$ (the bottleneck ratio), $12 \leq d \leq 28$, and $w\_{m} \geq 2$ (the width multiplier). + +For **RegNetY** the authors make one further change: they include [Squeeze-and-Excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). + +## How do I use this model on an image?
+To load a pretrained model: + +```python +import timm +model = timm.create_model('regnety_002', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `regnety_002`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('regnety_002', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{radosavovic2020designing, + title={Designing Network Design Spaces}, + author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, + year={2020}, + eprint={2003.13678}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/res2net.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/res2net.md new file mode 100644 index 0000000000..4fb0a6b7e2 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/res2net.md @@ -0,0 +1,321 @@ +# Res2Net + +**Res2Net** is an image model that employs a variation on bottleneck residual blocks, [Res2Net Blocks](https://paperswithcode.com/method/res2net-block). The motivation is to be able to represent features at multiple scales. 
This is achieved through a novel building block for CNNs that constructs hierarchical residual-like connections within one single residual block. This represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('res2net101_26w_4s', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `res2net101_26w_4s`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('res2net101_26w_4s', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
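+
+The hierarchical residual-like connections described above can be pictured as splitting the channels of a bottleneck into several groups, where each 3x3 convolution also receives the output of the previous group alongside its own group of channels. The sketch below is a minimal, illustrative version of that split; it omits the surrounding 1x1 convolutions and normalisation of the real Res2Net block:
+
+```python
+import torch
+import torch.nn as nn
+
+# Toy Res2Net-style split: each 3x3 conv also sees the previous group's output,
+# widening the range of receptive fields within a single block.
+class ToyRes2NetSplit(nn.Module):
+    def __init__(self, channels=64, scale=4):
+        super().__init__()
+        self.scale = scale
+        width = channels // scale
+        self.convs = nn.ModuleList(
+            nn.Conv2d(width, width, kernel_size=3, padding=1) for _ in range(scale - 1)
+        )
+
+    def forward(self, x):
+        splits = torch.chunk(x, self.scale, dim=1)
+        outs = [splits[0]]                    # first group passes through untouched
+        prev = None
+        for i, conv in enumerate(self.convs):
+            inp = splits[i + 1] if prev is None else splits[i + 1] + prev
+            prev = conv(inp)
+            outs.append(prev)
+        return torch.cat(outs, dim=1)
+
+block = ToyRes2NetSplit()
+print(block(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 64, 56, 56])
+```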
+ +## Citation + +```BibTeX +@article{Gao_2021, + title={Res2Net: A New Multi-Scale Backbone Architecture}, + volume={43}, + ISSN={1939-3539}, + url={http://dx.doi.org/10.1109/TPAMI.2019.2938758}, + DOI={10.1109/tpami.2019.2938758}, + number={2}, + journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, + publisher={Institute of Electrical and Electronics Engineers (IEEE)}, + author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip}, + year={2021}, + month={Feb}, + pages={652–662} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/res2next.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/res2next.md new file mode 100644 index 0000000000..336d1277c1 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/res2next.md @@ -0,0 +1,136 @@ +# Res2NeXt + +**Res2NeXt** is an image model that employs a variation on [ResNeXt](https://paperswithcode.com/method/resnext) bottleneck residual blocks. The motivation is to be able to represent features at multiple scales. This is achieved through a novel building block for CNNs that constructs hierarchical residual-like connections within one single residual block. This represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('res2next50', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `res2next50`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? 
+You can finetune any of the pre-trained models just by changing the classifier (the last layer).
+```python
+model = timm.create_model('res2next50', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
+```
+To finetune on your own dataset, you have to write a training loop or adapt [timm's training
+script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
+
+## How do I train this model?
+
+You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
+
+## Citation
+
+```BibTeX
+@article{Gao_2021,
+  title={Res2Net: A New Multi-Scale Backbone Architecture},
+  volume={43},
+  ISSN={1939-3539},
+  url={http://dx.doi.org/10.1109/TPAMI.2019.2938758},
+  DOI={10.1109/tpami.2019.2938758},
+  number={2},
+  journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
+  publisher={Institute of Electrical and Electronics Engineers (IEEE)},
+  author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip},
+  year={2021},
+  month={Feb},
+  pages={652–662}
+}
+```
+
+ 
\ No newline at end of file
diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/resnest.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/resnest.md
new file mode 100644
index 0000000000..7b2bf0ae66
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/resnest.md
@@ -0,0 +1,469 @@
+# ResNeSt
+
+A **ResNeSt** is a variant on a [ResNet](https://paperswithcode.com/method/resnet), which instead stacks [Split-Attention blocks](https://paperswithcode.com/method/split-attention). The cardinal group representations are then concatenated along the channel dimension: $V = \text{Concat}\{V^{1},V^{2},\cdots,V^{K}\}$. As in standard residual blocks, the final output $Y$ of the Split-Attention block is produced using a shortcut connection: $Y=V+X$, if the input and output feature maps share the same shape. For blocks with a stride, an appropriate transformation $\mathcal{T}$ is applied to the shortcut connection to align the output shapes: $Y=V+\mathcal{T}(X)$. For example, $\mathcal{T}$ can be strided convolution or combined convolution-with-pooling.
+
+## How do I use this model on an image?
+To load a pretrained model: + +```python +import timm +model = timm.create_model('resnest101e', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `resnest101e`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('resnest101e', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{zhang2020resnest, + title={ResNeSt: Split-Attention Networks}, + author={Hang Zhang and Chongruo Wu and Zhongyue Zhang and Yi Zhu and Haibin Lin and Zhi Zhang and Yue Sun and Tong He and Jonas Mueller and R. 
Manmatha and Mu Li and Alexander Smola}, + year={2020}, + eprint={2004.08955}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/resnet-d.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/resnet-d.md new file mode 100644 index 0000000000..323dc629b8 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/resnet-d.md @@ -0,0 +1,324 @@ +# ResNet-D + +**ResNet-D** is a modification on the [ResNet](https://paperswithcode.com/method/resnet) architecture that utilises an [average pooling](https://paperswithcode.com/method/average-pooling) tweak for downsampling. The motivation is that in the unmodified ResNet, the [1×1 convolution](https://paperswithcode.com/method/1x1-convolution) for the downsampling block ignores 3/4 of input feature maps, so this is modified so no information will be ignored + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('resnet101d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `resnet101d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('resnet101d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? 
+ +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{he2018bag, + title={Bag of Tricks for Image Classification with Convolutional Neural Networks}, + author={Tong He and Zhi Zhang and Hang Zhang and Zhongyue Zhang and Junyuan Xie and Mu Li}, + year={2018}, + eprint={1812.01187}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/resnet.md new file mode 100644 index 0000000000..f770d3bea5 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/resnet.md @@ -0,0 +1,439 @@ +# ResNet + +**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) ontop of each other to form network: e.g. a ResNet-50 has fifty layers using these blocks. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('resnet18', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `resnet18`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
+```python +model = timm.create_model('resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/HeZRS15, + author = {Kaiming He and + Xiangyu Zhang and + Shaoqing Ren and + Jian Sun}, + title = {Deep Residual Learning for Image Recognition}, + journal = {CoRR}, + volume = {abs/1512.03385}, + year = {2015}, + url = {http://arxiv.org/abs/1512.03385}, + archivePrefix = {arXiv}, + eprint = {1512.03385}, + timestamp = {Wed, 17 Apr 2019 17:23:45 +0200}, + biburl = {https://dblp.org/rec/journals/corr/HeZRS15.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/resnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/resnext.md new file mode 100644 index 0000000000..5d6451c2bc --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/resnext.md @@ -0,0 +1,244 @@ +# ResNeXt + +A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('resnext101_32x8d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `resnext101_32x8d`. 
You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('resnext101_32x8d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/XieGDTH16, + author = {Saining Xie and + Ross B. Girshick and + Piotr Doll{\'{a}}r and + Zhuowen Tu and + Kaiming He}, + title = {Aggregated Residual Transformations for Deep Neural Networks}, + journal = {CoRR}, + volume = {abs/1611.05431}, + year = {2016}, + url = {http://arxiv.org/abs/1611.05431}, + archivePrefix = {arXiv}, + eprint = {1611.05431}, + timestamp = {Mon, 13 Aug 2018 16:45:58 +0200}, + biburl = {https://dblp.org/rec/journals/corr/XieGDTH16.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/rexnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/rexnet.md new file mode 100644 index 0000000000..cb009f83d4 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/rexnet.md @@ -0,0 +1,258 @@ +# RexNet + +**Rank Expansion Networks** (ReXNets) follow a set of new design principles for designing bottlenecks in image classification models. Authors refine each layer by 1) expanding the input channel size of the convolution layer and 2) replacing the [ReLU6s](https://www.paperswithcode.com/method/relu6). + +## How do I use this model on an image? 
+To load a pretrained model: + +```python +import timm +model = timm.create_model('rexnet_100', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `rexnet_100`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('rexnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
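+
+If you would rather not adapt the full training script, a bare-bones fine-tuning loop can look like the sketch below. The dataset path, class count, optimizer settings and epoch count are placeholders for illustration; real training would normally also add augmentation (e.g. `is_training=True` in `create_transform`), a learning-rate schedule and validation:
+
+```python
+import timm
+import torch
+from timm.data import resolve_data_config
+from timm.data.transforms_factory import create_transform
+from torch.utils.data import DataLoader
+from torchvision import datasets
+
+NUM_FINETUNE_CLASSES = 10  # placeholder: set to your dataset's class count
+
+model = timm.create_model('rexnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
+config = resolve_data_config({}, model=model)
+transform = create_transform(**config)  # eval transform reused here for brevity
+
+train_set = datasets.ImageFolder('path/to/train', transform=transform)  # placeholder path
+loader = DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4)
+
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+model = model.to(device).train()
+optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
+criterion = torch.nn.CrossEntropyLoss()
+
+for epoch in range(3):  # a handful of epochs as a smoke test
+    for images, targets in loader:
+        images, targets = images.to(device), targets.to(device)
+        optimizer.zero_grad()
+        loss = criterion(model(images), targets)
+        loss.backward()
+        optimizer.step()
+```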
+ +## Citation + +```BibTeX +@misc{han2020rexnet, + title={ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network}, + author={Dongyoon Han and Sangdoo Yun and Byeongho Heo and YoungJoon Yoo}, + year={2020}, + eprint={2007.00992}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/se-resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/se-resnet.md new file mode 100644 index 0000000000..206b9394f6 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/se-resnet.md @@ -0,0 +1,183 @@ +# SE-ResNet + +**SE ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('seresnet152d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `seresnet152d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('seresnet152d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? 
+ +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/selecsls.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/selecsls.md new file mode 100644 index 0000000000..741275db65 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/selecsls.md @@ -0,0 +1,197 @@ +# SelecSLS + +**SelecSLS** uses novel selective long and short range skip connections to improve the information flow allowing for a drastically faster network without compromising accuracy. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('selecsls42b', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `selecsls42b`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('selecsls42b', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? 
+ +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{Mehta_2020, + title={XNect}, + volume={39}, + ISSN={1557-7368}, + url={http://dx.doi.org/10.1145/3386569.3392410}, + DOI={10.1145/3386569.3392410}, + number={4}, + journal={ACM Transactions on Graphics}, + publisher={Association for Computing Machinery (ACM)}, + author={Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian}, + year={2020}, + month={Jul} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/seresnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/seresnext.md new file mode 100644 index 0000000000..2a85a842bb --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/seresnext.md @@ -0,0 +1,228 @@ +# SE-ResNeXt + +**SE ResNeXt** is a variant of a [ResNext](https://www.paperswithcode.com/method/resneXt) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('seresnext26d_32x4d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `seresnext26d_32x4d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
+```python +model = timm.create_model('seresnext26d_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{hu2019squeezeandexcitation, + title={Squeeze-and-Excitation Networks}, + author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, + year={2019}, + eprint={1709.01507}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/skresnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/skresnet.md new file mode 100644 index 0000000000..fd2dcd7557 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/skresnet.md @@ -0,0 +1,173 @@ +# SK-ResNet + +**SK ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNet are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('skresnet18', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `skresnet18`. You can find the IDs in the model summaries at the top of this page. 
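+
+You can also query your installed `timm` for the available SK-ResNet IDs directly; the exact list depends on the `timm` version, so treat the output as indicative only:
+
+```python
+import timm
+
+# list SK-ResNet variants that ship with pretrained weights
+print(timm.list_models('skresnet*', pretrained=True))
+# e.g. ['skresnet18', 'skresnet34', ...] depending on the installed version
+```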
+ +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('skresnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{li2019selective, + title={Selective Kernel Networks}, + author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang}, + year={2019}, + eprint={1903.06586}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/skresnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/skresnext.md new file mode 100644 index 0000000000..582a8d5572 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/skresnext.md @@ -0,0 +1,131 @@ +# SK-ResNeXt + +**SK ResNeXt** is a variant of a [ResNeXt](https://www.paperswithcode.com/method/resnext) that employs a [Selective Kernel](https://paperswithcode.com/method/selective-kernel) unit. In general, all the large kernel convolutions in the original bottleneck blocks in ResNext are replaced by the proposed [SK convolutions](https://paperswithcode.com/method/selective-kernel-convolution), enabling the network to choose appropriate receptive field sizes in an adaptive manner. + +## How do I use this model on an image? 
+To load a pretrained model: + +```python +import timm +model = timm.create_model('skresnext50_32x4d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `skresnext50_32x4d`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('skresnext50_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{li2019selective, + title={Selective Kernel Networks}, + author={Xiang Li and Wenhai Wang and Xiaolin Hu and Jian Yang}, + year={2019}, + eprint={1903.06586}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/spnasnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/spnasnet.md new file mode 100644 index 0000000000..6b54f2f7e5 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/spnasnet.md @@ -0,0 +1,123 @@ +# SPNASNet + +**Single-Path NAS** is a novel differentiable NAS method for designing hardware-efficient ConvNets in less than 4 hours. + +## How do I use this model on an image? 
+To load a pretrained model: + +```python +import timm +model = timm.create_model('spnasnet_100', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `spnasnet_100`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('spnasnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{stamoulis2019singlepath, + title={Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours}, + author={Dimitrios Stamoulis and Ruizhou Ding and Di Wang and Dimitrios Lymberopoulos and Bodhi Priyantha and Jie Liu and Diana Marculescu}, + year={2019}, + eprint={1904.02877}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/ssl-resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/ssl-resnet.md new file mode 100644 index 0000000000..02bc8d285a --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/ssl-resnet.md @@ -0,0 +1,192 @@ +# SSL ResNet + +**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. 
Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) ontop of each other to form network: e.g. a ResNet-50 has fifty layers using these blocks. + +The model in this collection utilises semi-supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. + +Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('ssl_resnet18', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `ssl_resnet18`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('ssl_resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-00546, + author = {I. 
Zeki Yalniz and + Herv{\'{e}} J{\'{e}}gou and + Kan Chen and + Manohar Paluri and + Dhruv Mahajan}, + title = {Billion-scale semi-supervised learning for image classification}, + journal = {CoRR}, + volume = {abs/1905.00546}, + year = {2019}, + url = {http://arxiv.org/abs/1905.00546}, + archivePrefix = {arXiv}, + eprint = {1905.00546}, + timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/ssl-resnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/ssl-resnext.md new file mode 100644 index 0000000000..878ed00308 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/ssl-resnext.md @@ -0,0 +1,278 @@ +# SSL ResNeXT + +A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. + +The model in this collection utilises semi-supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. + +Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('ssl_resnext101_32x16d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `ssl_resnext101_32x16d`. You can find the IDs in the model summaries at the top of this page. 
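+
+The larger SSL ResNeXt variants also expect specific preprocessing settings. Assuming your `timm` version exposes the usual `default_cfg` dictionary on created models, you can inspect those settings directly; this is the same information that `resolve_data_config` reads above:
+
+```python
+import timm
+
+model = timm.create_model('ssl_resnext101_32x16d', pretrained=False)
+cfg = model.default_cfg  # per-variant defaults (input size, normalization, crop)
+print(cfg['input_size'], cfg['crop_pct'])
+print(cfg['mean'], cfg['std'])
+```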
+ +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('ssl_resnext101_32x16d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-00546, + author = {I. Zeki Yalniz and + Herv{\'{e}} J{\'{e}}gou and + Kan Chen and + Manohar Paluri and + Dhruv Mahajan}, + title = {Billion-scale semi-supervised learning for image classification}, + journal = {CoRR}, + volume = {abs/1905.00546}, + year = {2019}, + url = {http://arxiv.org/abs/1905.00546}, + archivePrefix = {arXiv}, + eprint = {1905.00546}, + timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/swsl-resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/swsl-resnet.md new file mode 100644 index 0000000000..4c935aed52 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/swsl-resnet.md @@ -0,0 +1,192 @@ +# SWSL ResNet + +**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) ontop of each other to form network: e.g. a ResNet-50 has fifty layers using these blocks. + +The models in this collection utilise semi-weakly supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. + +Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + +## How do I use this model on an image? 
+To load a pretrained model: + +```python +import timm +model = timm.create_model('swsl_resnet18', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `swsl_resnet18`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('swsl_resnet18', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-00546, + author = {I. 
Zeki Yalniz and + Herv{\'{e}} J{\'{e}}gou and + Kan Chen and + Manohar Paluri and + Dhruv Mahajan}, + title = {Billion-scale semi-supervised learning for image classification}, + journal = {CoRR}, + volume = {abs/1905.00546}, + year = {2019}, + url = {http://arxiv.org/abs/1905.00546}, + archivePrefix = {arXiv}, + eprint = {1905.00546}, + timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/swsl-resnext.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/swsl-resnext.md new file mode 100644 index 0000000000..1c0ce03122 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/swsl-resnext.md @@ -0,0 +1,278 @@ +# SWSL ResNeXt + +A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) $C$, as an essential factor in addition to the dimensions of depth and width. + +The models in this collection utilise semi-weakly supervised learning to improve the performance of the model. The approach brings important gains to standard architectures for image, video and fine-grained classification. + +Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('swsl_resnext101_32x16d', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `swsl_resnext101_32x16d`. You can find the IDs in the model summaries at the top of this page. 
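+
+If you are unsure which variant IDs are available in your installed version of timm, you can also list them programmatically. This is a minimal sketch; the exact names returned depend on the timm release you have installed:
+
+```python
+import timm
+
+# Wildcard filter over timm's model registry; lists the registered SWSL ResNeXt
+# variants that have pretrained weights available.
+print(timm.list_models('swsl_resnext*', pretrained=True))
+```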
+ +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('swsl_resnext101_32x16d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1905-00546, + author = {I. Zeki Yalniz and + Herv{\'{e}} J{\'{e}}gou and + Kan Chen and + Manohar Paluri and + Dhruv Mahajan}, + title = {Billion-scale semi-supervised learning for image classification}, + journal = {CoRR}, + volume = {abs/1905.00546}, + year = {2019}, + url = {http://arxiv.org/abs/1905.00546}, + archivePrefix = {arXiv}, + eprint = {1905.00546}, + timestamp = {Mon, 28 Sep 2020 08:19:37 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1905-00546.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-efficientnet-condconv.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-efficientnet-condconv.md new file mode 100644 index 0000000000..50c46665b8 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-efficientnet-condconv.md @@ -0,0 +1,250 @@ +# (Tensorflow) EfficientNet CondConv + +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scales network width, depth, and resolution in a principled way. + +The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. + +The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to squeeze-and-excitation blocks. + +This collection of models amends EfficientNet by adding [CondConv](https://paperswithcode.com/method/condconv) convolutions. + +The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). + +## How do I use this model on an image? 
+To load a pretrained model: + +```python +import timm +model = timm.create_model('tf_efficientnet_cc_b0_4e', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tf_efficientnet_cc_b0_4e`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('tf_efficientnet_cc_b0_4e', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/abs-1904-04971, + author = {Brandon Yang and + Gabriel Bender and + Quoc V. 
Le and + Jiquan Ngiam}, + title = {Soft Conditional Computation}, + journal = {CoRR}, + volume = {abs/1904.04971}, + year = {2019}, + url = {http://arxiv.org/abs/1904.04971}, + archivePrefix = {arXiv}, + eprint = {1904.04971}, + timestamp = {Thu, 25 Apr 2019 13:55:01 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-1904-04971.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-efficientnet-lite.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-efficientnet-lite.md new file mode 100644 index 0000000000..6c0ad9158e --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-efficientnet-lite.md @@ -0,0 +1,256 @@ +# (Tensorflow) EfficientNet Lite + +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scales network width, depth, and resolution in a principled way. + +The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. + +The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2). + +EfficientNet-Lite makes EfficientNet more suitable for mobile devices by introducing [ReLU6](https://paperswithcode.com/method/relu6) activation functions and removing [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation). + +The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). + +## How do I use this model on an image? 
+To load a pretrained model: + +```python +import timm +model = timm.create_model('tf_efficientnet_lite0', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tf_efficientnet_lite0`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('tf_efficientnet_lite0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2020efficientnet, + title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, + author={Mingxing Tan and Quoc V. Le}, + year={2020}, + eprint={1905.11946}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-efficientnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-efficientnet.md new file mode 100644 index 0000000000..39a981ee1c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-efficientnet.md @@ -0,0 +1,663 @@ +# (Tensorflow) EfficientNet + +**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. 
Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scales network width, depth, and resolution in a principled way. + +The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. + +The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). + +The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('tf_efficientnet_b0', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
+```python
+model = timm.create_model('tf_efficientnet_b0', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
+```
+To finetune on your own dataset, you have to write a training loop or adapt [timm's training
+script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
+
+## How do I train this model?
+
+You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
+
+## Citation
+
+```BibTeX
+@misc{tan2020efficientnet,
+      title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
+      author={Mingxing Tan and Quoc V. Le},
+      year={2020},
+      eprint={1905.11946},
+      archivePrefix={arXiv},
+      primaryClass={cs.LG}
+}
+```
+
+ 
\ No newline at end of file
diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-inception-v3.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-inception-v3.md
new file mode 100644
index 0000000000..cbba35c8d1
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-inception-v3.md
@@ -0,0 +1,148 @@
+# (Tensorflow) Inception v3
+
+**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements, including using [Label Smoothing](https://paperswithcode.com/method/label-smoothing), factorized 7 x 7 convolutions, and the use of an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the side head). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).
+
+The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).
+
+## How do I use this model on an image?
+To load a pretrained model: + +```python +import timm +model = timm.create_model('tf_inception_v3', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tf_inception_v3`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('tf_inception_v3', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
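+
+Beyond classification, the model can also serve as a feature backbone. As a complement to the feature-extraction guide linked above, the sketch below uses timm's `features_only` argument; this is an illustrative example that assumes the variant exposes intermediate feature maps through that interface, and the number and shapes of the returned tensors depend on the model and input size:
+
+```python
+import timm
+import torch
+
+# Build the backbone so that forward() returns a list of intermediate feature maps
+# instead of classification logits.
+model = timm.create_model('tf_inception_v3', pretrained=True, features_only=True)
+model.eval()
+
+with torch.no_grad():
+    features = model(torch.randn(1, 3, 299, 299))  # 299x299 is the usual Inception v3 input size
+for f in features:
+    print(f.shape)
+```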
+ +## Citation + +```BibTeX +@article{DBLP:journals/corr/SzegedyVISW15, + author = {Christian Szegedy and + Vincent Vanhoucke and + Sergey Ioffe and + Jonathon Shlens and + Zbigniew Wojna}, + title = {Rethinking the Inception Architecture for Computer Vision}, + journal = {CoRR}, + volume = {abs/1512.00567}, + year = {2015}, + url = {http://arxiv.org/abs/1512.00567}, + archivePrefix = {arXiv}, + eprint = {1512.00567}, + timestamp = {Mon, 13 Aug 2018 16:49:07 +0200}, + biburl = {https://dblp.org/rec/journals/corr/SzegedyVISW15.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-mixnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-mixnet.md new file mode 100644 index 0000000000..862c09d11e --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-mixnet.md @@ -0,0 +1,194 @@ +# (Tensorflow) MixNet + +**MixNet** is a type of convolutional neural network discovered via AutoML that utilises [MixConvs](https://paperswithcode.com/method/mixconv) instead of regular [depthwise convolutions](https://paperswithcode.com/method/depthwise-convolution). + +The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('tf_mixnet_l', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tf_mixnet_l`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
+```python +model = timm.create_model('tf_mixnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{tan2019mixconv, + title={MixConv: Mixed Depthwise Convolutional Kernels}, + author={Mingxing Tan and Quoc V. Le}, + year={2019}, + eprint={1907.09595}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-mobilenet-v3.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-mobilenet-v3.md new file mode 100644 index 0000000000..51946e8acf --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/tf-mobilenet-v3.md @@ -0,0 +1,381 @@ +# (Tensorflow) MobileNet v3 + +**MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block). + +The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tf_mobilenetv3_large_075`. You can find the IDs in the model summaries at the top of this page. 
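+
+Because these weights were ported from Tensorflow, the preprocessing they expect (input size, interpolation, mean/std) can differ from PyTorch-native variants. A minimal sketch for inspecting the resolved data configuration, using the same helpers as the snippet above, is shown below; the exact values depend on your timm version:
+
+```python
+import timm
+from timm.data import resolve_data_config
+
+# Print the preprocessing configuration associated with the pretrained weights.
+model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True)
+print(resolve_data_config({}, model=model))
+```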
+
+To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
+
+## How do I finetune this model?
+You can finetune any of the pre-trained models just by changing the classifier (the last layer).
+```python
+model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
+```
+To finetune on your own dataset, you have to write a training loop or adapt [timm's training
+script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
+
+## How do I train this model?
+
+You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
+
+## Citation
+
+```BibTeX
+@article{DBLP:journals/corr/abs-1905-02244,
+  author    = {Andrew Howard and
+               Mark Sandler and
+               Grace Chu and
+               Liang{-}Chieh Chen and
+               Bo Chen and
+               Mingxing Tan and
+               Weijun Wang and
+               Yukun Zhu and
+               Ruoming Pang and
+               Vijay Vasudevan and
+               Quoc V. Le and
+               Hartwig Adam},
+  title     = {Searching for MobileNetV3},
+  journal   = {CoRR},
+  volume    = {abs/1905.02244},
+  year      = {2019},
+  url       = {http://arxiv.org/abs/1905.02244},
+  archivePrefix = {arXiv},
+  eprint    = {1905.02244},
+  timestamp = {Tue, 12 Jan 2021 15:30:06 +0100},
+  biburl    = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib},
+  bibsource = {dblp computer science bibliography, https://dblp.org}
+}
+```
+
+ 
\ No newline at end of file
diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/tresnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/tresnet.md
new file mode 100644
index 0000000000..89d01dec88
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/tresnet.md
@@ -0,0 +1,352 @@
+# TResNet
+
+A **TResNet** is a variant of a [ResNet](https://paperswithcode.com/method/resnet) that aims to boost accuracy while maintaining GPU training and inference efficiency. TResNets contain several design tricks, including a SpaceToDepth stem, [Anti-Alias downsampling](https://paperswithcode.com/method/anti-alias-downsampling), In-Place Activated BatchNorm, block selection and [squeeze-and-excitation layers](https://paperswithcode.com/method/squeeze-and-excitation-block).
+
+## How do I use this model on an image?
+To load a pretrained model: + +```python +import timm +model = timm.create_model('tresnet_l', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `tresnet_l`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('tresnet_l', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@misc{ridnik2020tresnet, + title={TResNet: High Performance GPU-Dedicated Architecture}, + author={Tal Ridnik and Hussam Lawen and Asaf Noy and Emanuel Ben Baruch and Gilad Sharir and Itamar Friedman}, + year={2020}, + eprint={2003.13630}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/vision-transformer.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/vision-transformer.md new file mode 100644 index 0000000000..8e631977db --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/vision-transformer.md @@ -0,0 +1,380 @@ +# Vision Transformer (ViT) + +The **Vision Transformer** is a model for image classification that employs a Transformer-like architecture over patches of the image. 
This includes the use of [Multi-Head Attention](https://paperswithcode.com/method/multi-head-attention), [Scaled Dot-Product Attention](https://paperswithcode.com/method/scaled) and other architectural features seen in the [Transformer](https://paperswithcode.com/method/transformer) architecture traditionally used for NLP. + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('vit_base_patch16_224', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `vit_base_patch16_224`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). +```python +model = timm.create_model('vit_base_patch16_224', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. 
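+
+To make the patch-based formulation described at the top of this page concrete, here is a quick back-of-the-envelope check (plain Python, no timm calls) of the token sequence length that `vit_base_patch16_224` processes:
+
+```python
+# A 224x224 image split into non-overlapping 16x16 patches yields 14 x 14 = 196 patches;
+# ViT prepends one learnable class token, giving a sequence length of 197.
+img_size, patch_size = 224, 16
+num_patches = (img_size // patch_size) ** 2
+print(num_patches, num_patches + 1)  # 196 197
+```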
+ +## Citation + +```BibTeX +@misc{dosovitskiy2020image, + title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, + author={Alexey Dosovitskiy and Lucas Beyer and Alexander Kolesnikov and Dirk Weissenborn and Xiaohua Zhai and Thomas Unterthiner and Mostafa Dehghani and Matthias Minderer and Georg Heigold and Sylvain Gelly and Jakob Uszkoreit and Neil Houlsby}, + year={2020}, + eprint={2010.11929}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/wide-resnet.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/wide-resnet.md new file mode 100644 index 0000000000..98a166550a --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/wide-resnet.md @@ -0,0 +1,163 @@ +# Wide ResNet + +**Wide Residual Networks** are a variant on [ResNets](https://paperswithcode.com/method/resnet) where we decrease depth and increase the width of residual networks. This is achieved through the use of [wide residual blocks](https://paperswithcode.com/method/wide-residual-block). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('wide_resnet101_2', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `wide_resnet101_2`. You can find the IDs in the model summaries at the top of this page. + +To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use. + +## How do I finetune this model? +You can finetune any of the pre-trained models just by changing the classifier (the last layer). 
+```python +model = timm.create_model('wide_resnet101_2', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) +``` +To finetune on your own dataset, you have to write a training loop or adapt [timm's training +script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. + +## How do I train this model? + +You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. + +## Citation + +```BibTeX +@article{DBLP:journals/corr/ZagoruykoK16, + author = {Sergey Zagoruyko and + Nikos Komodakis}, + title = {Wide Residual Networks}, + journal = {CoRR}, + volume = {abs/1605.07146}, + year = {2016}, + url = {http://arxiv.org/abs/1605.07146}, + archivePrefix = {arXiv}, + eprint = {1605.07146}, + timestamp = {Mon, 13 Aug 2018 16:46:42 +0200}, + biburl = {https://dblp.org/rec/journals/corr/ZagoruykoK16.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/models/xception.md b/PyTorch/contrib/cv/classification/convmixer/docs/models/xception.md new file mode 100644 index 0000000000..1c7abff827 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/models/xception.md @@ -0,0 +1,224 @@ +# Xception + +**Xception** is a convolutional neural network architecture that relies solely on [depthwise separable convolution layers](https://paperswithcode.com/method/depthwise-separable-convolution). + +The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). + +## How do I use this model on an image? +To load a pretrained model: + +```python +import timm +model = timm.create_model('xception', pretrained=True) +model.eval() +``` + +To load and preprocess the image: +```python +import urllib +from PIL import Image +from timm.data import resolve_data_config +from timm.data.transforms_factory import create_transform + +config = resolve_data_config({}, model=model) +transform = create_transform(**config) + +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") +urllib.request.urlretrieve(url, filename) +img = Image.open(filename).convert('RGB') +tensor = transform(img).unsqueeze(0) # transform and add batch dimension +``` + +To get the model predictions: +```python +import torch +with torch.no_grad(): + out = model(tensor) +probabilities = torch.nn.functional.softmax(out[0], dim=0) +print(probabilities.shape) +# prints: torch.Size([1000]) +``` + +To get the top-5 predictions class names: +```python +# Get imagenet class mappings +url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") +urllib.request.urlretrieve(url, filename) +with open("imagenet_classes.txt", "r") as f: + categories = [s.strip() for s in f.readlines()] + +# Print top categories per image +top5_prob, top5_catid = torch.topk(probabilities, 5) +for i in range(top5_prob.size(0)): + print(categories[top5_catid[i]], top5_prob[i].item()) +# prints class names and probabilities like: +# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] +``` + +Replace the model name with the variant you want to use, e.g. `xception`. You can find the IDs in the model summaries at the top of this page. 
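+
+If you only need the single most likely class rather than the top 5, a minimal follow-on to the prediction snippet above (reusing the `probabilities` and `categories` variables defined there) is:
+
+```python
+# Top-1 prediction from the softmax probabilities computed above.
+top1_id = probabilities.argmax().item()
+print(categories[top1_id], probabilities[top1_id].item())
+```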
+
+To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
+
+## How do I finetune this model?
+You can finetune any of the pre-trained models just by changing the classifier (the last layer).
+```python
+model = timm.create_model('xception', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
+```
+To finetune on your own dataset, you have to write a training loop or adapt [timm's training
+script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
+
+## How do I train this model?
+
+You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
+
+## Citation
+
+```BibTeX
+@misc{chollet2017xception,
+      title={Xception: Deep Learning with Depthwise Separable Convolutions},
+      author={François Chollet},
+      year={2017},
+      eprint={1610.02357},
+      archivePrefix={arXiv},
+      primaryClass={cs.CV}
+}
+```
+
+ 
\ No newline at end of file
diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/results.md b/PyTorch/contrib/cv/classification/convmixer/docs/results.md
new file mode 100644
index 0000000000..a06ff33278
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/docs/results.md
@@ -0,0 +1,67 @@
+# Results
+
+CSV files containing ImageNet-1K and out-of-distribution (OOD) test set validation results for all models with pretrained weights are located in the repository [results folder](https://github.com/rwightman/pytorch-image-models/tree/master/results).
+
+## Self-trained Weights
+
+The table below includes ImageNet-1k validation results of model weights that I've trained myself. It is not updated as frequently as the CSV results outputs linked above.
+ +|Model | Acc@1 (Err) | Acc@5 (Err) | Param # (M) | Interpolation | Image Size | +|---|---|---|---|---|---| +| efficientnet_b3a | 82.242 (17.758) | 96.114 (3.886) | 12.23 | bicubic | 320 (1.0 crop) | +| efficientnet_b3 | 82.076 (17.924) | 96.020 (3.980) | 12.23 | bicubic | 300 | +| regnet_32 | 82.002 (17.998) | 95.906 (4.094) | 19.44 | bicubic | 224 | +| skresnext50d_32x4d | 81.278 (18.722) | 95.366 (4.634) | 27.5 | bicubic | 288 (1.0 crop) | +| seresnext50d_32x4d | 81.266 (18.734) | 95.620 (4.380) | 27.6 | bicubic | 224 | +| efficientnet_b2a | 80.608 (19.392) | 95.310 (4.690) | 9.11 | bicubic | 288 (1.0 crop) | +| resnet50d | 80.530 (19.470) | 95.160 (4.840) | 25.6 | bicubic | 224 | +| mixnet_xl | 80.478 (19.522) | 94.932 (5.068) | 11.90 | bicubic | 224 | +| efficientnet_b2 | 80.402 (19.598) | 95.076 (4.924) | 9.11 | bicubic | 260 | +| seresnet50 | 80.274 (19.726) | 95.070 (4.930) | 28.1 | bicubic | 224 | +| skresnext50d_32x4d | 80.156 (19.844) | 94.642 (5.358) | 27.5 | bicubic | 224 | +| cspdarknet53 | 80.058 (19.942) | 95.084 (4.916) | 27.6 | bicubic | 256 | +| cspresnext50 | 80.040 (19.960) | 94.944 (5.056) | 20.6 | bicubic | 224 | +| resnext50_32x4d | 79.762 (20.238) | 94.600 (5.400) | 25 | bicubic | 224 | +| resnext50d_32x4d | 79.674 (20.326) | 94.868 (5.132) | 25.1 | bicubic | 224 | +| cspresnet50 | 79.574 (20.426) | 94.712 (5.288) | 21.6 | bicubic | 256 | +| ese_vovnet39b | 79.320 (20.680) | 94.710 (5.290) | 24.6 | bicubic | 224 | +| resnetblur50 | 79.290 (20.710) | 94.632 (5.368) | 25.6 | bicubic | 224 | +| dpn68b | 79.216 (20.784) | 94.414 (5.586) | 12.6 | bicubic | 224 | +| resnet50 | 79.038 (20.962) | 94.390 (5.610) | 25.6 | bicubic | 224 | +| mixnet_l | 78.976 (21.024 | 94.184 (5.816) | 7.33 | bicubic | 224 | +| efficientnet_b1 | 78.692 (21.308) | 94.086 (5.914) | 7.79 | bicubic | 240 | +| efficientnet_es | 78.066 (21.934) | 93.926 (6.074) | 5.44 | bicubic | 224 | +| seresnext26t_32x4d | 77.998 (22.002) | 93.708 (6.292) | 16.8 | bicubic | 224 | +| seresnext26tn_32x4d | 77.986 (22.014) | 93.746 (6.254) | 16.8 | bicubic | 224 | +| efficientnet_b0 | 77.698 (22.302) | 93.532 (6.468) | 5.29 | bicubic | 224 | +| seresnext26d_32x4d | 77.602 (22.398) | 93.608 (6.392) | 16.8 | bicubic | 224 | +| mobilenetv2_120d | 77.294 (22.706 | 93.502 (6.498) | 5.8 | bicubic | 224 | +| mixnet_m | 77.256 (22.744) | 93.418 (6.582) | 5.01 | bicubic | 224 | +| resnet34d | 77.116 (22.884) | 93.382 (6.618) | 21.8 | bicubic | 224 | +| seresnext26_32x4d | 77.104 (22.896) | 93.316 (6.684) | 16.8 | bicubic | 224 | +| skresnet34 | 76.912 (23.088) | 93.322 (6.678) | 22.2 | bicubic | 224 | +| ese_vovnet19b_dw | 76.798 (23.202) | 93.268 (6.732) | 6.5 | bicubic | 224 | +| resnet26d | 76.68 (23.32) | 93.166 (6.834) | 16 | bicubic | 224 | +| densenetblur121d | 76.576 (23.424) | 93.190 (6.810) | 8.0 | bicubic | 224 | +| mobilenetv2_140 | 76.524 (23.476) | 92.990 (7.010) | 6.1 | bicubic | 224 | +| mixnet_s | 75.988 (24.012) | 92.794 (7.206) | 4.13 | bicubic | 224 | +| mobilenetv3_large_100 | 75.766 (24.234) | 92.542 (7.458) | 5.5 | bicubic | 224 | +| mobilenetv3_rw | 75.634 (24.366) | 92.708 (7.292) | 5.5 | bicubic | 224 | +| mnasnet_a1 | 75.448 (24.552) | 92.604 (7.396) | 3.89 | bicubic | 224 | +| resnet26 | 75.292 (24.708) | 92.57 (7.43) | 16 | bicubic | 224 | +| fbnetc_100 | 75.124 (24.876) | 92.386 (7.614) | 5.6 | bilinear | 224 | +| resnet34 | 75.110 (24.890) | 92.284 (7.716) | 22 | bilinear | 224 | +| mobilenetv2_110d | 75.052 (24.948) | 92.180 (7.820) | 4.5 | bicubic | 224 | +| seresnet34 | 74.808 (25.192) | 
92.124 (7.876) | 22 | bilinear | 224 |
+| mnasnet_b1 | 74.658 (25.342) | 92.114 (7.886) | 4.38 | bicubic | 224 |
+| spnasnet_100 | 74.084 (25.916) | 91.818 (8.182) | 4.42 | bilinear | 224 |
+| skresnet18 | 73.038 (26.962) | 91.168 (8.832) | 11.9 | bicubic | 224 |
+| mobilenetv2_100 | 72.978 (27.022) | 91.016 (8.984) | 3.5 | bicubic | 224 |
+| resnet18d | 72.260 (27.740) | 90.696 (9.304) | 11.7 | bicubic | 224 |
+| seresnet18 | 71.742 (28.258) | 90.334 (9.666) | 11.8 | bicubic | 224 |
+
+## Ported and Other Weights
+
+For weights ported from other deep learning frameworks (Tensorflow, MXNet GluonCV) or copied from other PyTorch sources, please see the full results tables for ImageNet and various OOD test sets in the [results tables](https://github.com/rwightman/pytorch-image-models/tree/master/results).
+
+Model code .py files contain links to the original sources of models and weights.
diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/scripts.md b/PyTorch/contrib/cv/classification/convmixer/docs/scripts.md
new file mode 100644
index 0000000000..f48eec0d75
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/docs/scripts.md
@@ -0,0 +1,27 @@
+# Scripts
+Train, validation, inference, and checkpoint cleaning scripts are included in the github root folder. Scripts are not currently packaged in the pip release.
+
+The training and validation scripts evolved from early versions of the [PyTorch Imagenet Examples](https://github.com/pytorch/examples). I have added significant functionality over time, including CUDA specific performance enhancements based on
+[NVIDIA's APEX Examples](https://github.com/NVIDIA/apex/tree/master/examples).
+
+## Training Script
+
+The variety of training args is large and not all combinations of options (or even individual options) have been fully tested. For the training dataset argument, specify the base folder that contains a `train` and a `validation` folder.
+
+To train an SE-ResNet34 on ImageNet, locally distributed, 4 GPUs, one process per GPU w/ cosine schedule, random-erasing prob of 50% and per-pixel random value:
+
+`./distributed_train.sh 4 /data/imagenet --model seresnet34 --sched cosine --epochs 150 --warmup-epochs 5 --lr 0.4 --reprob 0.5 --remode pixel --batch-size 256 --amp -j 4`
+
+NOTE: It is recommended to use PyTorch 1.7+ w/ PyTorch native AMP and DDP instead of APEX AMP. `--amp` defaults to native AMP as of timm ver 0.4.3. `--apex-amp` will force use of APEX components if they are installed.
+
+## Validation / Inference Scripts
+
+Validation and inference scripts are similar in usage. One outputs metrics on a validation set and the other outputs top-k class ids in a csv. Specify the folder containing the validation images, not the base folder as in the training script.
+ +To validate with the model's pretrained weights (if they exist): + +`python validate.py /imagenet/validation/ --model seresnext26_32x4d --pretrained` + +To run inference from a checkpoint: + +`python inference.py /imagenet/validation/ --model mobilenetv3_large_100 --checkpoint ./output/train/model_best.pth.tar` \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/docs/training_hparam_examples.md b/PyTorch/contrib/cv/classification/convmixer/docs/training_hparam_examples.md new file mode 100644 index 0000000000..c2afc2b108 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/docs/training_hparam_examples.md @@ -0,0 +1,47 @@ +# Training Examples + +## EfficientNet-B2 with RandAugment - 80.4 top-1, 95.1 top-5 +These params are for dual Titan RTX cards with NVIDIA Apex installed: + +`./distributed_train.sh 2 /imagenet/ --model efficientnet_b2 -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.3 --drop-connect 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .016` + +## MixNet-XL with RandAugment - 80.5 top-1, 94.9 top-5 +This params are for dual Titan RTX cards with NVIDIA Apex installed: + +`./distributed_train.sh 2 /imagenet/ --model mixnet_xl -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .969 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.3 --drop-connect 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.3 --amp --lr .016 --dist-bn reduce` + +## SE-ResNeXt-26-D and SE-ResNeXt-26-T +These hparams (or similar) work well for a wide range of ResNet architecture, generally a good idea to increase the epoch # as the model size increases... ie approx 180-200 for ResNe(X)t50, and 220+ for larger. Increase batch size and LR proportionally for better GPUs or with AMP enabled. These params were for 2 1080Ti cards: + +`./distributed_train.sh 2 /imagenet/ --model seresnext26t_32x4d --lr 0.1 --warmup-epochs 5 --epochs 160 --weight-decay 1e-4 --sched cosine --reprob 0.4 --remode pixel -b 112` + +## EfficientNet-B3 with RandAugment - 81.5 top-1, 95.7 top-5 +The training of this model started with the same command line as EfficientNet-B2 w/ RA above. After almost three weeks of training the process crashed. The results weren't looking amazing so I resumed the training several times with tweaks to a few params (increase RE prob, decrease rand-aug, increase ema-decay). Nothing looked great. I ended up averaging the best checkpoints from all restarts. The result is mediocre at default res/crop but oddly performs much better with a full image test crop of 1.0. + +## EfficientNet-B0 with RandAugment - 77.7 top-1, 95.3 top-5 +[Michael Klachko](https://github.com/michaelklachko) achieved these results with the command line for B2 adapted for larger batch size, with the recommended B0 dropout rate of 0.2. + +`./distributed_train.sh 2 /imagenet/ --model efficientnet_b0 -b 384 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-connect 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .048` + +## ResNet50 with JSD loss and RandAugment (clean + 2x RA augs) - 79.04 top-1, 94.39 top-5 + +Trained on two older 1080Ti cards, this took a while. 
Only a slightly better (not statistically significant) ImageNet validation result than my first good AugMix training of 78.99. However, these weights are more robust on tests with ImageNetV2, ImageNet-Sketch, etc. Unlike my first AugMix runs, I've enabled SplitBatchNorm, disabled random erasing on the clean split, and cranked up random erasing prob on the 2 augmented paths.
+
+`./distributed_train.sh 2 /imagenet -b 64 --model resnet50 --sched cosine --epochs 200 --lr 0.05 --amp --remode pixel --reprob 0.6 --aug-splits 3 --aa rand-m9-mstd0.5-inc1 --resplit --split-bn --jsd --dist-bn reduce`
+
+## EfficientNet-ES (EdgeTPU-Small) with RandAugment - 78.066 top-1, 93.926 top-5
+Trained by [Andrew Lavin](https://github.com/andravin) with 8 V100 cards. Model EMA was not used; the final checkpoint is the average of the 8 best checkpoints during training.
+
+`./distributed_train.sh 8 /imagenet --model efficientnet_es -b 128 --sched step --epochs 450 --decay-epochs 2.4 --decay-rate .97 --opt rmsproptf --opt-eps .001 -j 8 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-connect 0.2 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .064`
+
+## MobileNetV3-Large-100 - 75.766 top-1, 92.542 top-5
+
+`./distributed_train.sh 2 /imagenet/ --model mobilenetv3_large_100 -b 512 --sched step --epochs 600 --decay-epochs 2.4 --decay-rate .973 --opt rmsproptf --opt-eps .001 -j 7 --warmup-lr 1e-6 --weight-decay 1e-5 --drop 0.2 --drop-connect 0.2 --model-ema --model-ema-decay 0.9999 --aa rand-m9-mstd0.5 --remode pixel --reprob 0.2 --amp --lr .064 --lr-noise 0.42 0.9`
+
+
+## ResNeXt-50 32x4d w/ RandAugment - 79.762 top-1, 94.60 top-5
+These params will also work well for SE-ResNeXt-50 and SK-ResNeXt-50 and likely 101. I used them for the SK-ResNeXt-50 32x4d that I trained with 2 GPUs using a slightly higher LR per effective batch size (lr=0.18, b=192 per GPU). The cmd line below is tuned for 8 GPU training.
+
+
+`./distributed_train.sh 8 /imagenet --model resnext50_32x4d --lr 0.6 --warmup-epochs 5 --epochs 240 --weight-decay 1e-4 --sched cosine --reprob 0.4 --recount 3 --remode pixel --aa rand-m7-mstd0.5-inc1 -b 192 -j 6 --amp --dist-bn reduce`
+
diff --git a/PyTorch/contrib/cv/classification/convmixer/hubconf.py b/PyTorch/contrib/cv/classification/convmixer/hubconf.py
new file mode 100644
index 0000000000..70fed79a27
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/hubconf.py
@@ -0,0 +1,4 @@
+dependencies = ['torch']
+from timm.models import registry
+
+globals().update(registry._model_entrypoints)
diff --git a/PyTorch/contrib/cv/classification/convmixer/inference.py b/PyTorch/contrib/cv/classification/convmixer/inference.py
new file mode 100644
index 0000000000..5fcf1e6024
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/inference.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python3
+"""PyTorch Inference Script
+
+An example inference script that outputs top-k class ids for images in a folder into a csv.
+
+Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
+"""
+import os
+import time
+import argparse
+import logging
+import numpy as np
+import torch
+
+from timm.models import create_model, apply_test_time_pool
+from timm.data import ImageDataset, create_loader, resolve_data_config
+from timm.utils import AverageMeter, setup_default_logging
+
+torch.backends.cudnn.benchmark = True
+_logger = logging.getLogger('inference')
+
+
+parser = argparse.ArgumentParser(description='PyTorch ImageNet Inference')
+parser.add_argument('data', metavar='DIR',
+                    help='path to dataset')
+parser.add_argument('--output_dir', metavar='DIR', default='./',
+                    help='path to output files')
+parser.add_argument('--model', '-m', metavar='MODEL', default='dpn92',
+                    help='model architecture (default: dpn92)')
+parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
+                    help='number of data loading workers (default: 2)')
+parser.add_argument('-b', '--batch-size', default=256, type=int,
+                    metavar='N', help='mini-batch size (default: 256)')
+parser.add_argument('--img-size', default=None, type=int,
+                    metavar='N', help='Input image dimension')
+parser.add_argument('--input-size', default=None, nargs=3, type=int,
+                    metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
+parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
+                    help='Override mean pixel value of dataset')
+parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
+                    help='Override std deviation of dataset')
+parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
+                    help='Image resize interpolation type (overrides model)')
+parser.add_argument('--num-classes', type=int, default=1000,
+                    help='Number of classes in dataset')
+parser.add_argument('--log-freq', default=10, type=int,
+                    metavar='N', help='batch logging frequency (default: 10)')
+parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
+                    help='path to latest checkpoint (default: none)')
+parser.add_argument('--pretrained', dest='pretrained', action='store_true',
+                    help='use pre-trained model')
+parser.add_argument('--num-gpu', type=int, default=1,
+                    help='Number of GPUs to use')
+parser.add_argument('--no-test-pool', dest='no_test_pool', action='store_true',
+                    help='disable test time pool')
+parser.add_argument('--topk', default=5, type=int,
+                    metavar='N', help='Top-k to output to CSV')
+
+
+def main():
+    setup_default_logging()
+    args = parser.parse_args()
+    # might as well try to do something useful...
+ args.pretrained = args.pretrained or not args.checkpoint + + # create model + model = create_model( + args.model, + num_classes=args.num_classes, + in_chans=3, + pretrained=args.pretrained, + checkpoint_path=args.checkpoint) + + _logger.info('Model %s created, param count: %d' % + (args.model, sum([m.numel() for m in model.parameters()]))) + + config = resolve_data_config(vars(args), model=model) + model, test_time_pool = (model, False) if args.no_test_pool else apply_test_time_pool(model, config) + + if args.num_gpu > 1: + model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda() + else: + model = model.cuda() + + loader = create_loader( + ImageDataset(args.data), + input_size=config['input_size'], + batch_size=args.batch_size, + use_prefetcher=True, + interpolation=config['interpolation'], + mean=config['mean'], + std=config['std'], + num_workers=args.workers, + crop_pct=1.0 if test_time_pool else config['crop_pct']) + + model.eval() + + k = min(args.topk, args.num_classes) + batch_time = AverageMeter() + end = time.time() + topk_ids = [] + with torch.no_grad(): + for batch_idx, (input, _) in enumerate(loader): + input = input.cuda() + labels = model(input) + topk = labels.topk(k)[1] + topk_ids.append(topk.cpu().numpy()) + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if batch_idx % args.log_freq == 0: + _logger.info('Predict: [{0}/{1}] Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format( + batch_idx, len(loader), batch_time=batch_time)) + + topk_ids = np.concatenate(topk_ids, axis=0) + + with open(os.path.join(args.output_dir, './topk_ids.csv'), 'w') as out_file: + filenames = loader.dataset.filenames(basename=True) + for filename, label in zip(filenames, topk_ids): + out_file.write('{0},{1}\n'.format( + filename, ','.join([ str(v) for v in label]))) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/contrib/cv/classification/convmixer/mkdocs.yml b/PyTorch/contrib/cv/classification/convmixer/mkdocs.yml new file mode 100644 index 0000000000..a72436c67d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/mkdocs.yml @@ -0,0 +1,46 @@ +site_name: 'Pytorch Image Models' +site_description: 'Pretained Image Recognition Models' +repo_name: 'rwightman/pytorch-image-models' +repo_url: 'https://github.com/rwightman/pytorch-image-models' +nav: + - index.md + - models.md + - ... 
| models/*.md + - results.md + - scripts.md + - training_hparam_examples.md + - feature_extraction.md + - changes.md + - archived_changes.md +theme: + name: 'material' + feature: + tabs: false +extra_javascript: + - 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-MML-AM_CHTML' + - https://cdnjs.cloudflare.com/ajax/libs/tablesort/5.2.1/tablesort.min.js + - javascripts/tables.js +markdown_extensions: + - codehilite: + linenums: true + - admonition + - pymdownx.arithmatex + - pymdownx.betterem: + smart_enable: all + - pymdownx.caret + - pymdownx.critic + - pymdownx.details + - pymdownx.emoji: + emoji_generator: !!python/name:pymdownx.emoji.to_svg + - pymdownx.inlinehilite + - pymdownx.magiclink + - pymdownx.mark + - pymdownx.smartsymbols + - pymdownx.superfences + - pymdownx.tasklist: + custom_checkbox: true + - pymdownx.tilde + - mdx_truly_sane_lists +plugins: + - search + - awesome-pages diff --git a/PyTorch/contrib/cv/classification/convmixer/model-index.yml b/PyTorch/contrib/cv/classification/convmixer/model-index.yml new file mode 100644 index 0000000000..38fb78d2a7 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/model-index.yml @@ -0,0 +1,14 @@ +Import: +- ./docs/models/*.md +Library: + Name: PyTorch Image Models + Headline: PyTorch image models, scripts, pretrained weights + Website: https://rwightman.github.io/pytorch-image-models/ + Repository: https://github.com/rwightman/pytorch-image-models + Docs: https://rwightman.github.io/pytorch-image-models/ + README: "# PyTorch Image Models\r\n\r\nPyTorch Image Models (TIMM) is a library\ + \ for state-of-the-art image classification. With this library you can:\r\n\r\n\ + - Choose from 300+ pre-trained state-of-the-art image classification models.\r\ + \n- Train models afresh on research datasets such as ImageNet using provided scripts.\r\ + \n- Finetune pre-trained models on your own datasets, including the latest cutting\ + \ edge models." diff --git a/PyTorch/contrib/cv/classification/convmixer/read.md b/PyTorch/contrib/cv/classification/convmixer/read.md new file mode 100644 index 0000000000..48b3656f09 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/read.md @@ -0,0 +1,83 @@ +# Patches Are All You Need? 🤷 +This repository contains an implementation of ConvMixer for the ICLR 2022 submission ["Patches Are All You Need?"](https://openreview.net/forum?id=TVHS5Y4dNvM) by Asher Trockman and Zico Kolter. + +🔎 New: Check out [this repository](https://github.com/locuslab/convmixer-cifar10) for training ConvMixers on CIFAR-10. + +### Code overview +The most important code is in `convmixer.py`. We trained ConvMixers using the `timm` framework, which we copied from [here](http://github.com/rwightman/pytorch-image-models). + +__**Update:**__ ConvMixer is now integrated into the [`timm` framework itself](https://github.com/rwightman/pytorch-image-models). You can see the PR [here](https://github.com/rwightman/pytorch-image-models/pull/910). + +Inside `pytorch-image-models`, we have made the following modifications. (Though one could look at the diff, we think it is convenient to summarize them here.) 
+
+- Added ConvMixers
+  - added `timm/models/convmixer.py`
+  - modified `timm/models/__init__.py`
+- Added "OneCycle" LR Schedule
+  - added `timm/scheduler/onecycle_lr.py`
+  - modified `timm/scheduler/scheduler.py`
+  - modified `timm/scheduler/scheduler_factory.py`
+  - modified `timm/scheduler/__init__.py`
+  - modified `train.py` (added two lines to support this LR schedule)
+
+We are confident that the use of the OneCycle schedule here is not critical, and one could likely just as well
+train ConvMixers with the built-in cosine schedule.
+
+### Evaluation
+We provide some model weights below:
+
+| Model Name | Kernel Size | Patch Size | File Size |
+|------------|:-----------:|:----------:|----------:|
+|[ConvMixer-1536/20](https://github.com/tmp-iclr/convmixer/releases/download/v1.0/convmixer_1536_20_ks9_p7.pth.tar)| 9 | 7 | 207MB |
+|[ConvMixer-768/32](https://github.com/tmp-iclr/convmixer/releases/download/v1.0/convmixer_768_32_ks7_p7_relu.pth.tar)\*| 7 | 7 | 85MB |
+|[ConvMixer-1024/20](https://github.com/tmp-iclr/convmixer/releases/download/v1.0/convmixer_1024_20_ks9_p14.pth.tar)| 9 | 14 | 98MB |
+
+\* **Important:** ConvMixer-768/32 here uses ReLU instead of GELU, so you would have to change `convmixer.py` accordingly (we will fix this later).
+
+You can evaluate ConvMixer-1536/20 as follows:
+
+```
+python validate.py --model convmixer_1536_20 --b 64 --num-classes 1000 --checkpoint [/path/to/convmixer_1536_20_ks9_p7.pth.tar] [/path/to/ImageNet1k-val]
+```
+
+You should get an `81.37%` accuracy.
+
+### Training
+If you had a node with 10 GPUs, you could train a ConvMixer-1536/20 as follows (these are exactly the settings we used):
+
+```
+sh distributed_train.sh 10 [/path/to/ImageNet1k] \
+    --train-split [your_train_dir] \
+    --val-split [your_val_dir] \
+    --model convmixer_1536_20 \
+    -b 64 \
+    -j 10 \
+    --opt adamw \
+    --epochs 150 \
+    --sched onecycle \
+    --amp \
+    --input-size 3 224 224 \
+    --lr 0.01 \
+    --aa rand-m9-mstd0.5-inc1 \
+    --cutmix 0.5 \
+    --mixup 0.5 \
+    --reprob 0.25 \
+    --remode pixel \
+    --num-classes 1000 \
+    --warmup-epochs 0 \
+    --opt-eps=1e-3 \
+    --clip-grad 1.0
+```
+
+We also included a ConvMixer-768/32 in timm/models/convmixer.py (though it is simple to add more ConvMixers). We trained that one with the above settings but with 300 epochs instead of 150 epochs.
+
+__**Note:**__ If you are training on CIFAR-10 instead of ImageNet-1k, we recommend setting `--scale 0.75 1.0` as well, since the default of `0.08 1.0` does not make sense for 32x32 inputs.
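+
+For readers who prefer it spelled out, here is a conventionally formatted sketch of the architecture that the tweetable definition further down compresses. The names `Residual` and `conv_mixer` are ours for illustration and do not necessarily match the ones used in `timm/models/convmixer.py`:
+
+```
+import torch.nn as nn
+
+
+class Residual(nn.Module):
+    """Wraps a block fn so the output is fn(x) + x."""
+    def __init__(self, fn):
+        super().__init__()
+        self.fn = fn
+
+    def forward(self, x):
+        return self.fn(x) + x
+
+
+def conv_mixer(dim, depth, kernel_size=9, patch_size=7, n_classes=1000):
+    layers = [
+        # patch embedding: non-overlapping patch_size x patch_size convolution
+        nn.Conv2d(3, dim, kernel_size=patch_size, stride=patch_size),
+        nn.GELU(), nn.BatchNorm2d(dim),
+    ]
+    for _ in range(depth):
+        layers += [
+            # depthwise ("spatial mixing") convolution with a residual connection
+            Residual(nn.Sequential(
+                nn.Conv2d(dim, dim, kernel_size, groups=dim, padding=kernel_size // 2),
+                nn.GELU(), nn.BatchNorm2d(dim))),
+            # pointwise ("channel mixing") convolution
+            nn.Conv2d(dim, dim, kernel_size=1),
+            nn.GELU(), nn.BatchNorm2d(dim),
+        ]
+    layers += [nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(dim, n_classes)]
+    return nn.Sequential(*layers)
+
+
+# e.g. ConvMixer-1536/20 with kernel size 9 and patch size 7, as in the table above
+model = conv_mixer(1536, 20, kernel_size=9, patch_size=7)
+```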
+ +The tweetable version of ConvMixer, which requires `from torch.nn import *`: + +``` +def ConvMixer(h,d,k,p,n): + S,C,A=Sequential,Conv2d,lambda x:S(x,GELU(),BatchNorm2d(h)) + R=type('',(S,),{'forward':lambda s,x:s[0](x)+x}) + return S(A(C(3,h,p,p)),*[S(R(A(C(h,h,k,groups=h,padding=k//2))),A(C(h,h,1))) for i in range(d)],AdaptiveAvgPool2d(1),Flatten(),Linear(h,n)) +``` diff --git a/PyTorch/contrib/cv/classification/convmixer/requirements-docs.txt b/PyTorch/contrib/cv/classification/convmixer/requirements-docs.txt new file mode 100644 index 0000000000..716a3bf73c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/requirements-docs.txt @@ -0,0 +1,4 @@ +mkdocs +mkdocs-material +mdx_truly_sane_lists +mkdocs-awesome-pages-plugin \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/requirements-modelindex.txt b/PyTorch/contrib/cv/classification/convmixer/requirements-modelindex.txt new file mode 100644 index 0000000000..d0a1470cce --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/requirements-modelindex.txt @@ -0,0 +1,2 @@ +model-index==0.1.10 +jinja2==2.11.3 diff --git a/PyTorch/contrib/cv/classification/convmixer/requirements.txt b/PyTorch/contrib/cv/classification/convmixer/requirements.txt new file mode 100644 index 0000000000..8541afb3d5 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/requirements.txt @@ -0,0 +1,7 @@ +torch>=1.4.0 +torchvision>=0.5.0 +pyyaml + + + + diff --git a/PyTorch/contrib/cv/classification/convmixer/results/README.md b/PyTorch/contrib/cv/classification/convmixer/results/README.md new file mode 100644 index 0000000000..b3fcec0708 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/README.md @@ -0,0 +1,59 @@ +# Validation Results + +This folder contains validation results for the models in this collection having pretrained weights. Since the focus for this repository is currently ImageNet-1k classification, all of the results are based on datasets compatible with ImageNet-1k classes. + +## Datasets + +There are currently results for the ImageNet validation set and 5 additional test / label sets. + +The test set results include rank and top-1/top-5 differences from clean validation. For the "Real Labels", ImageNetV2, and Sketch test sets, the differences were calculated against the full 1000 class ImageNet-1k validation set. For both the Adversarial and Rendition sets, the differences were calculated against 'clean' runs on the ImageNet-1k validation set with the same 200 classes used in each test set respectively. + +### ImageNet Validation - [`results-imagenet.csv`](results-imagenet.csv) + +The standard 50,000 image ImageNet-1k validation set. Model selection during training utilizes this validation set, so it is not a true test set. Question: Does anyone have the official ImageNet-1k test set classification labels now that challenges are done? + +* Source: http://image-net.org/challenges/LSVRC/2012/index +* Paper: "ImageNet Large Scale Visual Recognition Challenge" - https://arxiv.org/abs/1409.0575 + +### ImageNet-"Real Labels" - [`results-imagenet-real.csv`](results-imagenet-real.csv) + +The usual ImageNet-1k validation set with a fresh new set of labels intended to improve on mistakes in the original annotation process. + +* Source: https://github.com/google-research/reassessed-imagenet +* Paper: "Are we done with ImageNet?" 
- https://arxiv.org/abs/2006.07159
+
+### ImageNetV2 Matched Frequency - [`results-imagenetv2-matched-frequency.csv`](results-imagenetv2-matched-frequency.csv)
+
+An ImageNet test set of 10,000 images sampled from new images roughly 10 years after the original. Care was taken to replicate the original ImageNet curation/sampling process.
+
+* Source: https://github.com/modestyachts/ImageNetV2
+* Paper: "Do ImageNet Classifiers Generalize to ImageNet?" - https://arxiv.org/abs/1902.10811
+
+### ImageNet-Sketch - [`results-sketch.csv`](results-sketch.csv)
+
+50,000 non-photographic (or photos of such) images (sketches, doodles, mostly monochromatic) covering all 1000 ImageNet classes.
+
+* Source: https://github.com/HaohanWang/ImageNet-Sketch
+* Paper: "Learning Robust Global Representations by Penalizing Local Predictive Power" - https://arxiv.org/abs/1905.13549
+
+### ImageNet-Adversarial - [`results-imagenet-a.csv`](results-imagenet-a.csv)
+
+A collection of 7500 images covering 200 of the 1000 ImageNet classes. Images are naturally occurring adversarial examples that confuse typical ImageNet classifiers. This is a challenging dataset; your typical ResNet-50 will score 0% top-1.
+
+For clean validation with the same 200 classes, see [`results-imagenet-a-clean.csv`](results-imagenet-a-clean.csv)
+
+* Source: https://github.com/hendrycks/natural-adv-examples
+* Paper: "Natural Adversarial Examples" - https://arxiv.org/abs/1907.07174
+
+
+### ImageNet-Rendition - [`results-imagenet-r.csv`](results-imagenet-r.csv)
+
+Renditions of 200 ImageNet classes resulting in 30,000 images for testing robustness.
+
+For clean validation with the same 200 classes, see [`results-imagenet-r-clean.csv`](results-imagenet-r-clean.csv)
+
+* Source: https://github.com/hendrycks/imagenet-r
+* Paper: "The Many Faces of Robustness" - https://arxiv.org/abs/2006.16241
+
+## TODO
+* Explore adding a reduced version of ImageNet-C (Corruptions) and ImageNet-P (Perturbations) from https://github.com/hendrycks/robustness. The originals are huge and image size specific.
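+
+## Working with the CSVs
+
+The CSVs in this folder are plain tables with one row per model; the `generate_csv_results.py` script in this folder relies on the `model`, `top1`, `top5` and `param_count` columns. As a minimal sketch (assuming pandas is installed and the files are read from this folder), the clean and test-set results can be joined per model like so:
+
+```
+import pandas as pd
+
+# Join clean ImageNet top-1 with ImageNet-Sketch top-1 for every model present
+# in both files, then rank models by how little accuracy they lose.
+clean = pd.read_csv('results-imagenet.csv')[['model', 'top1']]
+sketch = pd.read_csv('results-sketch.csv')[['model', 'top1']]
+merged = clean.merge(sketch, on='model', suffixes=('_clean', '_sketch'))
+merged['top1_drop'] = merged['top1_clean'] - merged['top1_sketch']
+print(merged.sort_values('top1_drop').head(10))
+```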
diff --git a/PyTorch/contrib/cv/classification/convmixer/results/generate_csv_results.py b/PyTorch/contrib/cv/classification/convmixer/results/generate_csv_results.py new file mode 100644 index 0000000000..04cf710ad7 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/generate_csv_results.py @@ -0,0 +1,75 @@ +import numpy as np +import pandas as pd + + +results = { + 'results-imagenet.csv': [ + 'results-imagenet-real.csv', + 'results-imagenetv2-matched-frequency.csv', + 'results-sketch.csv' + ], + 'results-imagenet-a-clean.csv': [ + 'results-imagenet-a.csv', + ], + 'results-imagenet-r-clean.csv': [ + 'results-imagenet-r.csv', + ], +} + + +def diff(base_df, test_csv): + base_models = base_df['model'].values + test_df = pd.read_csv(test_csv) + test_models = test_df['model'].values + + rank_diff = np.zeros_like(test_models, dtype='object') + top1_diff = np.zeros_like(test_models, dtype='object') + top5_diff = np.zeros_like(test_models, dtype='object') + + for rank, model in enumerate(test_models): + if model in base_models: + base_rank = int(np.where(base_models == model)[0]) + top1_d = test_df['top1'][rank] - base_df['top1'][base_rank] + top5_d = test_df['top5'][rank] - base_df['top5'][base_rank] + + # rank_diff + if rank == base_rank: + rank_diff[rank] = f'0' + elif rank > base_rank: + rank_diff[rank] = f'-{rank - base_rank}' + else: + rank_diff[rank] = f'+{base_rank - rank}' + + # top1_diff + if top1_d >= .0: + top1_diff[rank] = f'+{top1_d:.3f}' + else: + top1_diff[rank] = f'-{abs(top1_d):.3f}' + + # top5_diff + if top5_d >= .0: + top5_diff[rank] = f'+{top5_d:.3f}' + else: + top5_diff[rank] = f'-{abs(top5_d):.3f}' + + else: + rank_diff[rank] = '' + top1_diff[rank] = '' + top5_diff[rank] = '' + + test_df['top1_diff'] = top1_diff + test_df['top5_diff'] = top5_diff + test_df['rank_diff'] = rank_diff + + test_df['param_count'] = test_df['param_count'].map('{:,.2f}'.format) + test_df.sort_values('top1', ascending=False, inplace=True) + test_df.to_csv(test_csv, index=False, float_format='%.3f') + + +for base_results, test_results in results.items(): + base_df = pd.read_csv(base_results) + base_df.sort_values('top1', ascending=False, inplace=True) + for test_csv in test_results: + diff(base_df, test_csv) + base_df['param_count'] = base_df['param_count'].map('{:,.2f}'.format) + base_df.to_csv(base_results, index=False, float_format='%.3f') diff --git a/PyTorch/contrib/cv/classification/convmixer/results/imagenet21k_goog_synsets.txt b/PyTorch/contrib/cv/classification/convmixer/results/imagenet21k_goog_synsets.txt new file mode 100644 index 0000000000..e276a97bdd --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/imagenet21k_goog_synsets.txt @@ -0,0 +1,21843 @@ +n00004475 +n00005787 +n00006024 +n00006484 +n00007846 +n00015388 +n00017222 +n00021265 +n00021939 +n00120010 +n00141669 +n00288000 +n00288190 +n00288384 +n00324978 +n00326094 +n00433458 +n00433661 +n00433802 +n00434075 +n00439826 +n00440039 +n00440218 +n00440382 +n00440509 +n00440643 +n00440747 +n00440941 +n00441073 +n00441824 +n00442115 +n00442437 +n00442847 +n00442981 +n00443231 +n00443375 +n00443517 +n00443692 +n00443803 +n00443917 +n00444142 +n00444340 +n00444490 +n00444651 +n00444846 +n00444937 +n00445055 +n00445226 +n00445351 +n00445685 +n00445802 +n00446311 +n00446411 +n00446493 +n00446632 +n00446804 +n00446980 +n00447073 +n00447221 +n00447361 +n00447463 +n00447540 +n00447957 +n00448126 +n00448232 +n00448466 +n00448640 +n00448748 +n00448872 +n00448958 +n00449054 +n00449168 +n00449295 
+n00449517 +n00449695 +n00449796 +n00449892 +n00449977 +n00450070 +n00450335 +n00450700 +n00450866 +n00450998 +n00451186 +n00451370 +n00451563 +n00451635 +n00451768 +n00451866 +n00452034 +n00452152 +n00452293 +n00452734 +n00452864 +n00453126 +n00453313 +n00453396 +n00453478 +n00453631 +n00453935 +n00454237 +n00454395 +n00454493 +n00454624 +n00454855 +n00454983 +n00455076 +n00455173 +n00456465 +n00463246 +n00463543 +n00464277 +n00464478 +n00464651 +n00464894 +n00466273 +n00466377 +n00466524 +n00466630 +n00466712 +n00466880 +n00467320 +n00467536 +n00467719 +n00467995 +n00468299 +n00468480 +n00469651 +n00470554 +n00470682 +n00470830 +n00470966 +n00471437 +n00471613 +n00474568 +n00474657 +n00474769 +n00474881 +n00475014 +n00475142 +n00475273 +n00475403 +n00475535 +n00475661 +n00475787 +n00476140 +n00476235 +n00476389 +n00477392 +n00477639 +n00477827 +n00478262 +n00479076 +n00479440 +n00479616 +n00479734 +n00479887 +n00480211 +n00480366 +n00480508 +n00480885 +n00480993 +n00481803 +n00481938 +n00482122 +n00482298 +n00483205 +n00483313 +n00483409 +n00483508 +n00483605 +n00483705 +n00483848 +n00523513 +n00812526 +n00825773 +n00887544 +n01035504 +n01035667 +n01055165 +n01314388 +n01314663 +n01314781 +n01314910 +n01315213 +n01315330 +n01315581 +n01315805 +n01316422 +n01316579 +n01316734 +n01316949 +n01317089 +n01317294 +n01317391 +n01317541 +n01317813 +n01317916 +n01318053 +n01318279 +n01318381 +n01318478 +n01318660 +n01318894 +n01319001 +n01319187 +n01319467 +n01319685 +n01320872 +n01321123 +n01321230 +n01321456 +n01321579 +n01321770 +n01321854 +n01322221 +n01322343 +n01322508 +n01322604 +n01322685 +n01322898 +n01322983 +n01323068 +n01323155 +n01323261 +n01323355 +n01323493 +n01323599 +n01323781 +n01324305 +n01324431 +n01324610 +n01324799 +n01324916 +n01325060 +n01326291 +n01327909 +n01329186 +n01330126 +n01330497 +n01332181 +n01333082 +n01333483 +n01333610 +n01334217 +n01334690 +n01335218 +n01337191 +n01337734 +n01338685 +n01339083 +n01339336 +n01339471 +n01339801 +n01340014 +n01340522 +n01340785 +n01340935 +n01341090 +n01342269 +n01347583 +n01349735 +n01350226 +n01350701 +n01351170 +n01351315 +n01357328 +n01357507 +n01358572 +n01359762 +n01362336 +n01363719 +n01365474 +n01365885 +n01366700 +n01367772 +n01368672 +n01369358 +n01369484 +n01374703 +n01374846 +n01375204 +n01376237 +n01376437 +n01376543 +n01377278 +n01377510 +n01377694 +n01378545 +n01379389 +n01380610 +n01380754 +n01381044 +n01382033 +n01384084 +n01384164 +n01384687 +n01385017 +n01385330 +n01386007 +n01386182 +n01386354 +n01387065 +n01389507 +n01390123 +n01390763 +n01392275 +n01392380 +n01393486 +n01394040 +n01394492 +n01394771 +n01395254 +n01396048 +n01396617 +n01397114 +n01397690 +n01397871 +n01400247 +n01400391 +n01402600 +n01403457 +n01404365 +n01404495 +n01405007 +n01405616 +n01407798 +n01410457 +n01411450 +n01412694 +n01413457 +n01414216 +n01415626 +n01415920 +n01416213 +n01418498 +n01418620 +n01419332 +n01419573 +n01419888 +n01421333 +n01421807 +n01422185 +n01422335 +n01422450 +n01423302 +n01423617 +n01424420 +n01425223 +n01427399 +n01429172 +n01438208 +n01438581 +n01439121 +n01439514 +n01439808 +n01440160 +n01440242 +n01440467 +n01440764 +n01441117 +n01441272 +n01441425 +n01441910 +n01442450 +n01442710 +n01442972 +n01443243 +n01443537 +n01443831 +n01444339 +n01444783 +n01445429 +n01445593 +n01445857 +n01446152 +n01446589 +n01446760 +n01447139 +n01447331 +n01447658 +n01447946 +n01448291 +n01448594 +n01448951 +n01449374 +n01449712 +n01449980 +n01450661 +n01450950 +n01451115 +n01451295 +n01451426 +n01451863 +n01452345 +n01453087 
+n01453475 +n01453742 +n01454545 +n01454856 +n01455317 +n01455461 +n01455778 +n01456137 +n01456454 +n01456756 +n01457082 +n01457407 +n01457852 +n01458746 +n01458842 +n01459791 +n01460303 +n01461315 +n01461646 +n01462042 +n01462544 +n01462803 +n01464844 +n01466257 +n01467336 +n01467804 +n01468238 +n01468712 +n01469103 +n01469723 +n01470145 +n01470479 +n01470733 +n01470895 +n01471682 +n01472303 +n01472502 +n01473806 +n01474283 +n01474864 +n01475232 +n01475940 +n01476418 +n01477080 +n01477525 +n01477875 +n01478511 +n01478969 +n01479213 +n01479820 +n01480106 +n01480516 +n01480880 +n01481331 +n01481498 +n01482071 +n01482330 +n01483021 +n01483522 +n01483830 +n01484097 +n01484285 +n01484447 +n01484562 +n01484850 +n01485479 +n01486010 +n01486540 +n01486838 +n01487506 +n01488038 +n01488918 +n01489501 +n01489709 +n01489920 +n01490112 +n01490360 +n01490670 +n01491006 +n01491361 +n01491661 +n01491874 +n01492357 +n01492569 +n01492708 +n01492860 +n01493146 +n01493541 +n01493829 +n01494041 +n01494475 +n01494757 +n01494882 +n01495006 +n01495493 +n01495701 +n01496331 +n01497118 +n01497413 +n01497738 +n01498041 +n01498406 +n01498699 +n01498989 +n01499396 +n01499732 +n01500091 +n01500476 +n01500854 +n01501160 +n01501641 +n01501777 +n01501948 +n01502101 +n01503061 +n01503976 +n01504179 +n01504344 +n01514668 +n01514752 +n01514859 +n01514926 +n01515078 +n01515217 +n01515303 +n01516212 +n01517389 +n01517565 +n01517966 +n01518878 +n01519563 +n01519873 +n01520576 +n01521399 +n01521756 +n01522450 +n01523105 +n01524359 +n01524761 +n01525720 +n01526521 +n01526766 +n01527194 +n01527347 +n01527617 +n01527917 +n01528396 +n01528654 +n01528845 +n01529672 +n01530439 +n01530575 +n01531178 +n01531344 +n01531512 +n01531639 +n01531811 +n01531971 +n01532325 +n01532511 +n01532829 +n01533000 +n01533339 +n01533481 +n01533651 +n01533893 +n01534155 +n01534433 +n01534582 +n01534762 +n01535140 +n01535469 +n01535690 +n01536035 +n01536186 +n01536334 +n01536644 +n01536780 +n01537134 +n01537544 +n01537895 +n01538059 +n01538200 +n01538362 +n01538630 +n01538955 +n01539272 +n01539573 +n01539925 +n01540090 +n01540233 +n01540566 +n01540832 +n01541102 +n01541386 +n01541760 +n01541922 +n01542168 +n01542433 +n01542786 +n01543175 +n01543383 +n01543632 +n01543936 +n01544208 +n01544389 +n01544704 +n01545574 +n01546039 +n01546506 +n01546921 +n01547832 +n01548301 +n01548492 +n01548694 +n01548865 +n01549053 +n01549430 +n01549641 +n01549886 +n01550172 +n01550761 +n01551080 +n01551300 +n01551711 +n01552034 +n01552333 +n01552813 +n01553142 +n01553527 +n01553762 +n01554017 +n01554448 +n01555004 +n01555305 +n01555809 +n01556182 +n01556514 +n01557185 +n01557962 +n01558149 +n01558307 +n01558461 +n01558594 +n01558765 +n01558993 +n01559160 +n01559477 +n01559639 +n01559804 +n01560105 +n01560280 +n01560419 +n01560636 +n01560793 +n01560935 +n01561181 +n01561452 +n01561732 +n01562014 +n01562265 +n01562451 +n01563128 +n01563449 +n01563746 +n01563945 +n01564101 +n01564217 +n01564394 +n01564773 +n01564914 +n01565078 +n01565345 +n01565599 +n01565930 +n01566207 +n01566645 +n01567133 +n01567678 +n01567879 +n01568132 +n01568294 +n01568720 +n01568892 +n01569060 +n01569262 +n01569423 +n01569566 +n01569836 +n01569971 +n01570267 +n01570421 +n01570676 +n01570839 +n01571410 +n01571904 +n01572328 +n01572489 +n01572654 +n01572782 +n01573074 +n01573240 +n01573360 +n01573627 +n01573898 +n01574045 +n01574390 +n01574560 +n01574801 +n01575117 +n01575401 +n01575745 +n01576076 +n01576358 +n01576695 +n01577035 +n01577458 +n01577659 +n01577941 +n01578180 +n01578575 +n01579028 +n01579149 
+n01579260 +n01579410 +n01579578 +n01579729 +n01580077 +n01580379 +n01580490 +n01580772 +n01580870 +n01581166 +n01581434 +n01581730 +n01581874 +n01581984 +n01582220 +n01582398 +n01582498 +n01582856 +n01583209 +n01583495 +n01583828 +n01584225 +n01584695 +n01584853 +n01585121 +n01585287 +n01585422 +n01585715 +n01586020 +n01586374 +n01586941 +n01587278 +n01587526 +n01587834 +n01588002 +n01588431 +n01588725 +n01588996 +n01589286 +n01589718 +n01589893 +n01590220 +n01591005 +n01591123 +n01591301 +n01591697 +n01592084 +n01592257 +n01592387 +n01592540 +n01592694 +n01593028 +n01593282 +n01593553 +n01594004 +n01594372 +n01594787 +n01594968 +n01595168 +n01595450 +n01595624 +n01595974 +n01596273 +n01596608 +n01597022 +n01597336 +n01597737 +n01597906 +n01598074 +n01598271 +n01598588 +n01598988 +n01599159 +n01599269 +n01599388 +n01599556 +n01599741 +n01600085 +n01600341 +n01600657 +n01601068 +n01601410 +n01601694 +n01602080 +n01602209 +n01602630 +n01602832 +n01603000 +n01603152 +n01603600 +n01603812 +n01603953 +n01604330 +n01604968 +n01605630 +n01606097 +n01606177 +n01606522 +n01606672 +n01606809 +n01606978 +n01607309 +n01607429 +n01607600 +n01607812 +n01607962 +n01608265 +n01608432 +n01608814 +n01609062 +n01609391 +n01609751 +n01609956 +n01610100 +n01610226 +n01610552 +n01610955 +n01611472 +n01611674 +n01611800 +n01611969 +n01612122 +n01612275 +n01612476 +n01612628 +n01612955 +n01613177 +n01613294 +n01613615 +n01613807 +n01614038 +n01614343 +n01614556 +n01614925 +n01615121 +n01615303 +n01615458 +n01615703 +n01616086 +n01616318 +n01616551 +n01616764 +n01617095 +n01617443 +n01617766 +n01618082 +n01618503 +n01618922 +n01619310 +n01619536 +n01619835 +n01620135 +n01620414 +n01620735 +n01621127 +n01621635 +n01622120 +n01622352 +n01622483 +n01622779 +n01622959 +n01623110 +n01623425 +n01623615 +n01623706 +n01623880 +n01624115 +n01624212 +n01624305 +n01624537 +n01624833 +n01625121 +n01625562 +n01627424 +n01628331 +n01628770 +n01629276 +n01629819 +n01629962 +n01630148 +n01630284 +n01630670 +n01630901 +n01631175 +n01631354 +n01631512 +n01631663 +n01632047 +n01632308 +n01632458 +n01632601 +n01632777 +n01632952 +n01633406 +n01633781 +n01634227 +n01634522 +n01635027 +n01635176 +n01635480 +n01636127 +n01636352 +n01636510 +n01636829 +n01637112 +n01637338 +n01637615 +n01637932 +n01638194 +n01638329 +n01638722 +n01639187 +n01639765 +n01640846 +n01641206 +n01641391 +n01641577 +n01641739 +n01641930 +n01642097 +n01642257 +n01642391 +n01642539 +n01642943 +n01643255 +n01643507 +n01643896 +n01644373 +n01644900 +n01645466 +n01645776 +n01646292 +n01646388 +n01646555 +n01646648 +n01646802 +n01646902 +n01647033 +n01647180 +n01647303 +n01647466 +n01647640 +n01648139 +n01648356 +n01648620 +n01649170 +n01649412 +n01649556 +n01649726 +n01650167 +n01650690 +n01650901 +n01651059 +n01651285 +n01651487 +n01651641 +n01651778 +n01652026 +n01652297 +n01653026 +n01653223 +n01653509 +n01653773 +n01654083 +n01654637 +n01654863 +n01655344 +n01661091 +n01661592 +n01661818 +n01662060 +n01662622 +n01662784 +n01663401 +n01663782 +n01664065 +n01664369 +n01664492 +n01664674 +n01664990 +n01665541 +n01665932 +n01666228 +n01666585 +n01667114 +n01667432 +n01667778 +n01668091 +n01668436 +n01668665 +n01668892 +n01669191 +n01669372 +n01669654 +n01670092 +n01670535 +n01670802 +n01671125 +n01671479 +n01671705 +n01672032 +n01672432 +n01672611 +n01673282 +n01674216 +n01674464 +n01674990 +n01675352 +n01675722 +n01676755 +n01677366 +n01677747 +n01678043 +n01678343 +n01678657 +n01679005 +n01679307 +n01679626 +n01679962 +n01680264 +n01680478 +n01680655 +n01680813 
+n01680983 +n01681328 +n01681653 +n01681940 +n01682172 +n01682435 +n01682714 +n01683201 +n01683558 +n01684133 +n01684578 +n01684741 +n01685439 +n01685808 +n01686044 +n01686220 +n01686403 +n01686609 +n01686808 +n01687128 +n01687290 +n01687665 +n01687978 +n01688243 +n01688961 +n01689081 +n01689411 +n01689811 +n01690149 +n01690466 +n01691217 +n01691652 +n01691951 +n01692333 +n01692523 +n01692864 +n01693175 +n01693334 +n01693783 +n01694178 +n01694311 +n01694709 +n01694955 +n01695060 +n01696633 +n01697178 +n01697457 +n01697611 +n01697749 +n01697978 +n01698434 +n01698640 +n01698782 +n01699040 +n01699254 +n01699675 +n01701551 +n01701859 +n01702256 +n01702479 +n01703011 +n01703161 +n01703569 +n01704103 +n01704323 +n01704626 +n01705010 +n01705591 +n01705934 +n01707294 +n01708106 +n01708998 +n01709484 +n01709876 +n01710177 +n01711160 +n01712008 +n01712752 +n01713170 +n01713764 +n01714231 +n01715888 +n01717016 +n01717229 +n01717467 +n01718096 +n01718414 +n01719403 +n01721174 +n01721898 +n01722670 +n01722998 +n01723579 +n01724231 +n01724840 +n01725086 +n01725713 +n01726203 +n01726692 +n01727646 +n01728266 +n01728572 +n01728920 +n01729322 +n01729672 +n01729977 +n01730185 +n01730307 +n01730563 +n01730812 +n01730960 +n01731137 +n01731277 +n01731545 +n01731764 +n01731941 +n01732093 +n01732244 +n01732614 +n01732789 +n01732989 +n01733214 +n01733466 +n01733757 +n01733957 +n01734104 +n01734418 +n01734637 +n01734808 +n01735189 +n01735439 +n01735577 +n01735728 +n01736032 +n01736375 +n01736796 +n01737021 +n01737472 +n01737728 +n01737875 +n01738065 +n01738306 +n01738601 +n01738731 +n01739094 +n01739381 +n01739647 +n01739871 +n01740131 +n01740551 +n01740885 +n01741232 +n01741442 +n01741562 +n01741943 +n01742172 +n01742447 +n01742821 +n01743086 +n01743605 +n01743936 +n01744100 +n01744270 +n01744401 +n01744555 +n01745125 +n01745484 +n01745902 +n01746191 +n01746359 +n01746952 +n01747285 +n01747589 +n01747885 +n01748264 +n01748389 +n01748686 +n01748906 +n01749244 +n01749582 +n01749742 +n01749939 +n01750167 +n01750437 +n01750743 +n01751036 +n01751215 +n01751472 +n01751748 +n01752165 +n01752585 +n01752736 +n01753032 +n01753180 +n01753488 +n01753959 +n01754370 +n01754533 +n01754876 +n01755581 +n01755740 +n01755952 +n01756089 +n01756291 +n01756508 +n01756733 +n01756916 +n01757115 +n01757343 +n01757677 +n01757901 +n01758141 +n01758757 +n01758895 +n01767661 +n01768244 +n01769347 +n01770081 +n01770393 +n01770795 +n01771100 +n01771417 +n01771766 +n01772222 +n01772664 +n01773157 +n01773549 +n01773797 +n01774097 +n01774384 +n01774750 +n01775062 +n01775370 +n01775730 +n01776192 +n01776313 +n01776705 +n01777304 +n01777467 +n01777649 +n01777909 +n01778217 +n01778487 +n01778621 +n01778801 +n01779148 +n01779463 +n01779629 +n01779939 +n01780142 +n01780426 +n01780696 +n01781071 +n01781570 +n01781698 +n01781875 +n01782209 +n01782516 +n01783017 +n01783706 +n01784293 +n01784675 +n01785667 +n01786646 +n01787006 +n01787191 +n01787835 +n01788291 +n01788579 +n01788864 +n01789386 +n01789740 +n01790171 +n01790304 +n01790398 +n01790557 +n01790711 +n01790812 +n01791107 +n01791314 +n01791388 +n01791463 +n01791625 +n01791954 +n01792042 +n01792158 +n01792429 +n01792530 +n01792640 +n01792808 +n01792955 +n01793085 +n01793159 +n01793249 +n01793340 +n01793435 +n01793565 +n01793715 +n01794158 +n01794344 +n01794651 +n01795088 +n01795545 +n01795735 +n01795900 +n01796019 +n01796105 +n01796340 +n01796519 +n01796729 +n01797020 +n01797307 +n01797601 +n01797886 +n01798168 +n01798484 +n01798706 +n01798839 +n01798979 +n01799302 +n01799679 +n01800195 +n01800424 
+n01800633 +n01801088 +n01801479 +n01801672 +n01801876 +n01802159 +n01802721 +n01803078 +n01803362 +n01803641 +n01803893 +n01804163 +n01804478 +n01804653 +n01804921 +n01805070 +n01805321 +n01805801 +n01806061 +n01806143 +n01806297 +n01806364 +n01806467 +n01806567 +n01806847 +n01807105 +n01807496 +n01807828 +n01808140 +n01808291 +n01808596 +n01809106 +n01809371 +n01809752 +n01810268 +n01810700 +n01811243 +n01811909 +n01812187 +n01812337 +n01812662 +n01812866 +n01813088 +n01813385 +n01813532 +n01813658 +n01813948 +n01814217 +n01814370 +n01814549 +n01814620 +n01814755 +n01814921 +n01815036 +n01815270 +n01815601 +n01816017 +n01816140 +n01816474 +n01816887 +n01817263 +n01817346 +n01817953 +n01818299 +n01818515 +n01818832 +n01819115 +n01819313 +n01819465 +n01819734 +n01820052 +n01820348 +n01820546 +n01820801 +n01821076 +n01821203 +n01821554 +n01821869 +n01822300 +n01822602 +n01823013 +n01823414 +n01823740 +n01824035 +n01824344 +n01824575 +n01824749 +n01825278 +n01825930 +n01826364 +n01826680 +n01826844 +n01827403 +n01827793 +n01828096 +n01828556 +n01828970 +n01829413 +n01829869 +n01830042 +n01830479 +n01830915 +n01831360 +n01831712 +n01832167 +n01832493 +n01832813 +n01833112 +n01833415 +n01833805 +n01834177 +n01834540 +n01835276 +n01835769 +n01835918 +n01836087 +n01836673 +n01837072 +n01837526 +n01838038 +n01838598 +n01839086 +n01839330 +n01839598 +n01839750 +n01839949 +n01840120 +n01840412 +n01840775 +n01841102 +n01841288 +n01841441 +n01841679 +n01841943 +n01842235 +n01842504 +n01842788 +n01843065 +n01843383 +n01843719 +n01844231 +n01844551 +n01844746 +n01844917 +n01845132 +n01845477 +n01846331 +n01847000 +n01847089 +n01847170 +n01847253 +n01847407 +n01847806 +n01847978 +n01848123 +n01848323 +n01848453 +n01848555 +n01848648 +n01848840 +n01848976 +n01849157 +n01849466 +n01849676 +n01849863 +n01850192 +n01850373 +n01850553 +n01850873 +n01851038 +n01851207 +n01851375 +n01851573 +n01851731 +n01851895 +n01852142 +n01852329 +n01852400 +n01852671 +n01852861 +n01853195 +n01853498 +n01853666 +n01853870 +n01854415 +n01854700 +n01854838 +n01855032 +n01855188 +n01855476 +n01855672 +n01856072 +n01856155 +n01856380 +n01856553 +n01856890 +n01857079 +n01857325 +n01857512 +n01857632 +n01857851 +n01858281 +n01858441 +n01858780 +n01858845 +n01858906 +n01859190 +n01859325 +n01859496 +n01859689 +n01859852 +n01860002 +n01860187 +n01860497 +n01860864 +n01861148 +n01861330 +n01861778 +n01862399 +n01871265 +n01871543 +n01871875 +n01872401 +n01872772 +n01873310 +n01874434 +n01874928 +n01875313 +n01875610 +n01876034 +n01876326 +n01876667 +n01877134 +n01877606 +n01877812 +n01878061 +n01878335 +n01878639 +n01878929 +n01879217 +n01879509 +n01879837 +n01880152 +n01880473 +n01880716 +n01880813 +n01881171 +n01881564 +n01881857 +n01882125 +n01882714 +n01883070 +n01883513 +n01883920 +n01884104 +n01884203 +n01884476 +n01884834 +n01885158 +n01885498 +n01886045 +n01886756 +n01887474 +n01887623 +n01887787 +n01887896 +n01888045 +n01888181 +n01888264 +n01888411 +n01889074 +n01889520 +n01889849 +n01890144 +n01890564 +n01890860 +n01891013 +n01891274 +n01891633 +n01892030 +n01892145 +n01892385 +n01892551 +n01892744 +n01893021 +n01893164 +n01893399 +n01893825 +n01894207 +n01894522 +n01894956 +n01896844 +n01897257 +n01897426 +n01897536 +n01897667 +n01898593 +n01899894 +n01900150 +n01903234 +n01903346 +n01903498 +n01904029 +n01904806 +n01904886 +n01905321 +n01905661 +n01906749 +n01907287 +n01907738 +n01908042 +n01908958 +n01909422 +n01909788 +n01909906 +n01910252 +n01910747 +n01911063 +n01911403 +n01911839 +n01912152 +n01912454 +n01912809 
+n01913166 +n01913346 +n01913440 +n01914163 +n01914609 +n01914830 +n01915700 +n01915811 +n01916187 +n01916388 +n01916481 +n01916588 +n01916925 +n01917289 +n01917611 +n01917882 +n01918744 +n01919385 +n01920051 +n01920438 +n01921059 +n01922303 +n01922717 +n01922948 +n01923025 +n01923404 +n01923890 +n01924800 +n01924916 +n01925270 +n01925695 +n01925916 +n01926379 +n01926689 +n01927159 +n01927456 +n01927928 +n01928215 +n01928517 +n01928865 +n01929186 +n01930112 +n01930852 +n01931140 +n01931520 +n01931714 +n01932151 +n01932936 +n01933151 +n01933478 +n01933988 +n01934440 +n01934844 +n01935176 +n01935395 +n01936391 +n01936671 +n01936858 +n01937579 +n01937909 +n01938454 +n01938735 +n01940736 +n01941223 +n01941340 +n01942177 +n01942869 +n01943087 +n01943541 +n01943899 +n01944118 +n01944390 +n01944812 +n01944955 +n01945143 +n01945340 +n01945685 +n01945845 +n01946277 +n01946630 +n01946827 +n01947139 +n01947396 +n01947997 +n01948446 +n01948573 +n01949085 +n01949499 +n01949973 +n01950731 +n01951274 +n01951613 +n01952029 +n01952712 +n01953361 +n01953594 +n01953762 +n01954516 +n01955084 +n01955933 +n01956344 +n01956481 +n01956764 +n01957335 +n01958038 +n01958346 +n01958435 +n01958531 +n01959029 +n01959492 +n01959985 +n01960177 +n01960459 +n01961234 +n01961600 +n01961985 +n01962506 +n01962788 +n01963317 +n01963479 +n01963571 +n01964049 +n01964271 +n01964441 +n01964957 +n01965252 +n01965529 +n01965889 +n01966377 +n01966586 +n01967094 +n01967308 +n01967963 +n01968315 +n01968897 +n01969726 +n01970164 +n01970667 +n01971094 +n01971280 +n01971620 +n01971850 +n01972131 +n01972541 +n01973148 +n01974773 +n01975687 +n01976146 +n01976868 +n01976957 +n01977485 +n01978010 +n01978136 +n01978287 +n01978455 +n01978587 +n01978930 +n01979269 +n01979526 +n01979874 +n01980166 +n01980655 +n01981276 +n01981702 +n01982068 +n01982347 +n01982650 +n01983048 +n01983481 +n01983674 +n01983829 +n01984245 +n01984695 +n01985128 +n01985493 +n01985797 +n01986214 +n01986806 +n01987076 +n01987545 +n01987727 +n01988203 +n01988701 +n01988869 +n01989516 +n01989869 +n01990007 +n01990516 +n01990800 +n01991028 +n01991520 +n01992262 +n01992423 +n01992773 +n01993525 +n01993830 +n01994910 +n01995514 +n01995686 +n01996280 +n01996585 +n01997119 +n01997825 +n01998183 +n01998741 +n01999186 +n01999767 +n02000954 +n02002075 +n02002556 +n02002724 +n02003037 +n02003204 +n02003577 +n02003839 +n02004131 +n02004492 +n02004855 +n02005399 +n02005790 +n02006063 +n02006364 +n02006656 +n02006985 +n02007284 +n02007558 +n02008041 +n02008497 +n02008643 +n02008796 +n02009229 +n02009380 +n02009508 +n02009750 +n02009912 +n02010272 +n02010453 +n02010728 +n02011016 +n02011281 +n02011460 +n02011805 +n02011943 +n02012185 +n02012849 +n02013177 +n02013567 +n02013706 +n02014237 +n02014524 +n02014941 +n02015357 +n02015554 +n02015797 +n02016066 +n02016358 +n02016659 +n02016816 +n02016956 +n02017213 +n02017475 +n02017725 +n02018027 +n02018207 +n02018368 +n02018795 +n02019190 +n02019438 +n02019929 +n02020219 +n02020578 +n02021050 +n02021281 +n02021795 +n02022684 +n02023341 +n02023855 +n02023992 +n02024185 +n02024479 +n02024763 +n02025043 +n02025239 +n02025389 +n02026059 +n02026629 +n02026948 +n02027075 +n02027357 +n02027492 +n02027897 +n02028035 +n02028175 +n02028342 +n02028451 +n02028727 +n02028900 +n02029087 +n02029378 +n02029706 +n02030035 +n02030224 +n02030287 +n02030568 +n02030837 +n02030996 +n02031298 +n02031585 +n02031934 +n02032222 +n02032355 +n02032480 +n02032769 +n02033041 +n02033208 +n02033324 +n02033561 +n02033779 +n02033882 +n02034129 +n02034295 +n02034661 +n02034971 
+n02035210 +n02035402 +n02035656 +n02036053 +n02036228 +n02036711 +n02037110 +n02037464 +n02037869 +n02038141 +n02038466 +n02038993 +n02039171 +n02039497 +n02039780 +n02040266 +n02040505 +n02041085 +n02041246 +n02041678 +n02041875 +n02042046 +n02042180 +n02042472 +n02042759 +n02043063 +n02043333 +n02043808 +n02044178 +n02044517 +n02044778 +n02044908 +n02045369 +n02045596 +n02045864 +n02046171 +n02046759 +n02046939 +n02047045 +n02047260 +n02047411 +n02047517 +n02047614 +n02047975 +n02048115 +n02048353 +n02048698 +n02049088 +n02049532 +n02050004 +n02050313 +n02050442 +n02050586 +n02050809 +n02051059 +n02051474 +n02051845 +n02052204 +n02052365 +n02052775 +n02053083 +n02053425 +n02053584 +n02054036 +n02054502 +n02054711 +n02055107 +n02055658 +n02055803 +n02056228 +n02056570 +n02056728 +n02057035 +n02057330 +n02057731 +n02057898 +n02058221 +n02058594 +n02058747 +n02059162 +n02059541 +n02059852 +n02060133 +n02060411 +n02060569 +n02060889 +n02061217 +n02061560 +n02061853 +n02062017 +n02062430 +n02062744 +n02063224 +n02063662 +n02064000 +n02064338 +n02064816 +n02065026 +n02065263 +n02065407 +n02065726 +n02066245 +n02066707 +n02067240 +n02067603 +n02067768 +n02068206 +n02068541 +n02068974 +n02069412 +n02069701 +n02069974 +n02070174 +n02070430 +n02070624 +n02070776 +n02071028 +n02071294 +n02071636 +n02072040 +n02072493 +n02072798 +n02073250 +n02073831 +n02074367 +n02074726 +n02075296 +n02075612 +n02075927 +n02076196 +n02076402 +n02076779 +n02077152 +n02077384 +n02077658 +n02077787 +n02077923 +n02078292 +n02078574 +n02078738 +n02079005 +n02079389 +n02079851 +n02080146 +n02080415 +n02080713 +n02081060 +n02081571 +n02081798 +n02081927 +n02082056 +n02082190 +n02082791 +n02083346 +n02083672 +n02083780 +n02084071 +n02084732 +n02084861 +n02085019 +n02085118 +n02085272 +n02085374 +n02085620 +n02085782 +n02085936 +n02086079 +n02086240 +n02086346 +n02086478 +n02086646 +n02086753 +n02086910 +n02087046 +n02087122 +n02087314 +n02087394 +n02087551 +n02088094 +n02088238 +n02088364 +n02088466 +n02088632 +n02088745 +n02088839 +n02088992 +n02089078 +n02089232 +n02089468 +n02089555 +n02089725 +n02089867 +n02089973 +n02090129 +n02090253 +n02090379 +n02090475 +n02090622 +n02090721 +n02090827 +n02091032 +n02091134 +n02091244 +n02091467 +n02091635 +n02091831 +n02092002 +n02092173 +n02092339 +n02092468 +n02093056 +n02093256 +n02093428 +n02093647 +n02093754 +n02093859 +n02093991 +n02094114 +n02094258 +n02094433 +n02094562 +n02094721 +n02094931 +n02095050 +n02095212 +n02095314 +n02095412 +n02095570 +n02095727 +n02095889 +n02096051 +n02096177 +n02096294 +n02096437 +n02096585 +n02096756 +n02097047 +n02097130 +n02097209 +n02097298 +n02097474 +n02097658 +n02097786 +n02097967 +n02098105 +n02098286 +n02098413 +n02098550 +n02098806 +n02098906 +n02099029 +n02099267 +n02099429 +n02099601 +n02099712 +n02099849 +n02099997 +n02100236 +n02100399 +n02100583 +n02100735 +n02100877 +n02101006 +n02101108 +n02101388 +n02101556 +n02101670 +n02101861 +n02102040 +n02102177 +n02102318 +n02102480 +n02102605 +n02102806 +n02102973 +n02103181 +n02103406 +n02103841 +n02104029 +n02104184 +n02104280 +n02104365 +n02104523 +n02104882 +n02105056 +n02105162 +n02105251 +n02105412 +n02105505 +n02105641 +n02105855 +n02106030 +n02106166 +n02106382 +n02106550 +n02106662 +n02106854 +n02106966 +n02107142 +n02107312 +n02107420 +n02107574 +n02107683 +n02107908 +n02108000 +n02108089 +n02108254 +n02108422 +n02108551 +n02108672 +n02108915 +n02109047 +n02109150 +n02109256 +n02109391 +n02109525 +n02109687 +n02109811 +n02109961 +n02110063 +n02110185 +n02110341 +n02110532 
+n02110627 +n02110806 +n02110958 +n02111129 +n02111277 +n02111500 +n02111626 +n02111889 +n02112018 +n02112137 +n02112350 +n02112497 +n02112706 +n02112826 +n02113023 +n02113186 +n02113335 +n02113624 +n02113712 +n02113799 +n02113892 +n02113978 +n02114100 +n02114367 +n02114548 +n02114712 +n02114855 +n02115012 +n02115096 +n02115335 +n02115641 +n02115913 +n02116185 +n02116450 +n02116738 +n02117135 +n02117512 +n02117646 +n02117900 +n02118176 +n02118333 +n02118643 +n02118707 +n02119022 +n02119247 +n02119359 +n02119477 +n02119634 +n02119789 +n02120079 +n02120278 +n02120505 +n02120997 +n02121620 +n02121808 +n02122298 +n02122430 +n02122510 +n02122580 +n02122725 +n02122810 +n02122878 +n02122948 +n02123045 +n02123159 +n02123242 +n02123394 +n02123478 +n02123597 +n02123785 +n02123917 +n02124075 +n02124157 +n02124313 +n02124484 +n02124623 +n02125010 +n02125081 +n02125311 +n02125494 +n02125689 +n02125872 +n02126028 +n02126139 +n02126317 +n02126640 +n02126787 +n02127052 +n02127292 +n02127381 +n02127482 +n02127586 +n02127678 +n02127808 +n02128385 +n02128598 +n02128669 +n02128757 +n02128925 +n02129165 +n02129463 +n02129530 +n02129604 +n02129837 +n02129923 +n02129991 +n02130086 +n02130308 +n02130545 +n02130925 +n02131653 +n02132136 +n02132320 +n02132466 +n02132580 +n02132788 +n02133161 +n02133400 +n02133704 +n02134084 +n02134418 +n02134971 +n02135220 +n02135610 +n02135844 +n02136103 +n02136285 +n02136452 +n02136794 +n02137015 +n02137302 +n02137549 +n02137722 +n02137888 +n02138169 +n02138441 +n02138647 +n02138777 +n02139199 +n02139671 +n02140049 +n02140179 +n02140268 +n02140491 +n02140858 +n02141306 +n02141611 +n02141713 +n02142407 +n02142734 +n02142898 +n02143142 +n02143439 +n02143891 +n02144251 +n02144593 +n02144936 +n02145424 +n02145910 +n02146201 +n02146371 +n02146700 +n02146879 +n02147173 +n02147328 +n02147591 +n02147947 +n02148088 +n02148512 +n02148835 +n02148991 +n02149420 +n02149653 +n02149861 +n02150134 +n02150482 +n02150885 +n02151230 +n02152740 +n02152881 +n02152991 +n02153109 +n02153203 +n02153809 +n02156732 +n02156871 +n02157206 +n02157285 +n02159955 +n02160947 +n02161225 +n02161338 +n02161457 +n02161588 +n02162561 +n02163008 +n02163297 +n02164464 +n02165105 +n02165456 +n02165877 +n02166229 +n02166567 +n02166826 +n02167151 +n02167505 +n02167820 +n02167944 +n02168245 +n02168427 +n02168699 +n02169023 +n02169218 +n02169497 +n02169705 +n02169974 +n02170400 +n02170599 +n02170738 +n02170993 +n02171164 +n02171453 +n02171869 +n02172182 +n02172518 +n02172678 +n02172761 +n02172870 +n02173113 +n02173373 +n02173784 +n02174001 +n02174355 +n02174659 +n02175014 +n02175569 +n02175916 +n02176261 +n02176439 +n02176747 +n02176916 +n02177196 +n02177506 +n02177775 +n02177972 +n02178411 +n02178717 +n02179012 +n02179192 +n02179340 +n02179891 +n02180233 +n02180427 +n02180875 +n02181235 +n02181477 +n02181724 +n02182045 +n02182355 +n02182642 +n02182930 +n02183096 +n02183507 +n02183857 +n02184473 +n02184589 +n02184720 +n02185167 +n02185481 +n02186153 +n02186717 +n02187150 +n02187279 +n02187554 +n02187900 +n02188699 +n02189363 +n02189670 +n02190166 +n02190790 +n02191273 +n02191773 +n02191979 +n02192252 +n02192513 +n02192814 +n02193009 +n02193163 +n02194249 +n02194750 +n02195091 +n02195526 +n02195819 +n02196119 +n02196344 +n02196896 +n02197185 +n02197689 +n02197877 +n02198129 +n02198532 +n02198859 +n02199170 +n02199502 +n02200198 +n02200509 +n02200630 +n02200850 +n02201000 +n02201497 +n02201626 +n02202006 +n02202124 +n02202287 +n02202678 +n02203152 +n02203592 +n02203978 +n02204249 +n02204722 +n02204907 +n02205219 +n02205673 
+n02206270 +n02206856 +n02207179 +n02207345 +n02207449 +n02207647 +n02207805 +n02208280 +n02208498 +n02208848 +n02208979 +n02209111 +n02209354 +n02209624 +n02209964 +n02210427 +n02210921 +n02211444 +n02211627 +n02211896 +n02212062 +n02212602 +n02212958 +n02213107 +n02213239 +n02213543 +n02213663 +n02213788 +n02214096 +n02214341 +n02214499 +n02214660 +n02214773 +n02215161 +n02215621 +n02215770 +n02216211 +n02216365 +n02216740 +n02217563 +n02217839 +n02218134 +n02218371 +n02218713 +n02219015 +n02219486 +n02220055 +n02220225 +n02220518 +n02220804 +n02221083 +n02221414 +n02221571 +n02221715 +n02221820 +n02222035 +n02222321 +n02222582 +n02223266 +n02223520 +n02224023 +n02224713 +n02225081 +n02225798 +n02226183 +n02226429 +n02226821 +n02226970 +n02227247 +n02227604 +n02227966 +n02228341 +n02228697 +n02229156 +n02229544 +n02229765 +n02230023 +n02230187 +n02230480 +n02230634 +n02231052 +n02231487 +n02231803 +n02232223 +n02233338 +n02233943 +n02234355 +n02234570 +n02234848 +n02235205 +n02236044 +n02236241 +n02236355 +n02236896 +n02237424 +n02237581 +n02237868 +n02238235 +n02238358 +n02238594 +n02238887 +n02239192 +n02239528 +n02239774 +n02240068 +n02240517 +n02241008 +n02241426 +n02241569 +n02241799 +n02242137 +n02242455 +n02243209 +n02243562 +n02243878 +n02244173 +n02244515 +n02244797 +n02245111 +n02245443 +n02246011 +n02246628 +n02246941 +n02247216 +n02247511 +n02247655 +n02248062 +n02248368 +n02248510 +n02248887 +n02249134 +n02249515 +n02249809 +n02250280 +n02250822 +n02251067 +n02251233 +n02251593 +n02251775 +n02252226 +n02252799 +n02252972 +n02253127 +n02253264 +n02253494 +n02253715 +n02253913 +n02254246 +n02254697 +n02254901 +n02255023 +n02255391 +n02256172 +n02256656 +n02257003 +n02257284 +n02257715 +n02257985 +n02258198 +n02258508 +n02258629 +n02259212 +n02259377 +n02259708 +n02259987 +n02260421 +n02260863 +n02261063 +n02261419 +n02261757 +n02262178 +n02262449 +n02262803 +n02263378 +n02264021 +n02264232 +n02264363 +n02264591 +n02264885 +n02265330 +n02266050 +n02266269 +n02266421 +n02266864 +n02267208 +n02267483 +n02268148 +n02268443 +n02268853 +n02269196 +n02269340 +n02269522 +n02269657 +n02270011 +n02270200 +n02270623 +n02270945 +n02271222 +n02271570 +n02271897 +n02272286 +n02272552 +n02272871 +n02273392 +n02274024 +n02274259 +n02274822 +n02275560 +n02275773 +n02276078 +n02276258 +n02276355 +n02276749 +n02276902 +n02277094 +n02277268 +n02277422 +n02277742 +n02278024 +n02278210 +n02278463 +n02278839 +n02278980 +n02279257 +n02279637 +n02279972 +n02280458 +n02280649 +n02281015 +n02281136 +n02281267 +n02281406 +n02281787 +n02282257 +n02282385 +n02282553 +n02282903 +n02283077 +n02283201 +n02283617 +n02283951 +n02284224 +n02284611 +n02284884 +n02285179 +n02285548 +n02285801 +n02286089 +n02286425 +n02286654 +n02287004 +n02287352 +n02287622 +n02287799 +n02287987 +n02288122 +n02288268 +n02288789 +n02289307 +n02289610 +n02289988 +n02290340 +n02290664 +n02290870 +n02291220 +n02291572 +n02291748 +n02292085 +n02292401 +n02292692 +n02293352 +n02293868 +n02294097 +n02294407 +n02294577 +n02295064 +n02295390 +n02295870 +n02296021 +n02296276 +n02296612 +n02296912 +n02297294 +n02297442 +n02297819 +n02297938 +n02298095 +n02298218 +n02298541 +n02299039 +n02299157 +n02299378 +n02299505 +n02299846 +n02300173 +n02300554 +n02300797 +n02301452 +n02301935 +n02302244 +n02302459 +n02302620 +n02302969 +n02303284 +n02303585 +n02303777 +n02304036 +n02304432 +n02304657 +n02304797 +n02305085 +n02305407 +n02305636 +n02305929 +n02306433 +n02306825 +n02307176 +n02307325 +n02307515 +n02307681 +n02307910 +n02308033 +n02308139 
+n02308471 +n02308618 +n02308735 +n02309120 +n02309242 +n02309337 +n02309841 +n02310000 +n02310149 +n02310334 +n02310585 +n02310717 +n02310941 +n02311060 +n02311617 +n02311748 +n02312006 +n02312175 +n02312325 +n02312427 +n02312640 +n02312912 +n02313008 +n02313360 +n02313709 +n02315487 +n02315821 +n02316707 +n02317335 +n02317781 +n02318167 +n02318687 +n02319095 +n02319308 +n02319555 +n02319829 +n02320127 +n02320465 +n02321170 +n02321529 +n02322047 +n02322992 +n02323449 +n02323902 +n02324045 +n02324431 +n02324514 +n02324587 +n02324850 +n02325366 +n02325722 +n02325884 +n02326074 +n02326432 +n02326763 +n02326862 +n02327028 +n02327175 +n02327435 +n02327656 +n02327842 +n02328009 +n02328150 +n02328429 +n02328820 +n02328942 +n02329401 +n02330245 +n02331046 +n02331309 +n02331842 +n02332156 +n02332447 +n02332755 +n02332954 +n02333190 +n02333546 +n02333733 +n02333819 +n02333909 +n02334201 +n02334460 +n02334728 +n02335127 +n02335231 +n02336011 +n02336275 +n02336641 +n02336826 +n02337001 +n02337171 +n02337332 +n02337598 +n02337902 +n02338145 +n02338449 +n02338722 +n02338901 +n02339282 +n02339376 +n02339922 +n02340186 +n02340358 +n02340640 +n02340930 +n02341288 +n02341475 +n02341616 +n02341974 +n02342250 +n02342534 +n02342885 +n02343058 +n02343320 +n02343772 +n02344175 +n02344270 +n02344408 +n02344528 +n02344918 +n02345078 +n02345340 +n02345600 +n02345774 +n02345997 +n02346170 +n02346627 +n02346998 +n02347274 +n02347573 +n02347744 +n02348173 +n02348788 +n02349205 +n02349390 +n02349557 +n02349847 +n02350105 +n02350357 +n02350670 +n02350989 +n02351343 +n02351870 +n02352002 +n02352290 +n02352591 +n02352932 +n02353172 +n02353411 +n02353861 +n02354162 +n02354320 +n02354621 +n02354781 +n02355227 +n02355477 +n02356381 +n02356612 +n02356798 +n02356977 +n02357111 +n02357401 +n02357585 +n02357911 +n02358091 +n02358390 +n02358584 +n02358712 +n02358890 +n02359047 +n02359324 +n02359556 +n02359667 +n02359915 +n02360282 +n02360480 +n02360781 +n02360933 +n02361090 +n02361337 +n02361587 +n02361706 +n02361850 +n02362194 +n02363005 +n02363245 +n02363351 +n02363996 +n02364520 +n02364673 +n02364840 +n02365108 +n02365480 +n02366002 +n02366301 +n02366579 +n02366959 +n02367492 +n02367812 +n02368116 +n02368399 +n02368821 +n02369293 +n02369555 +n02369680 +n02369935 +n02370137 +n02370525 +n02370806 +n02371344 +n02372140 +n02372584 +n02372952 +n02373336 +n02374149 +n02374451 +n02375302 +n02375438 +n02375757 +n02375862 +n02376542 +n02376679 +n02376791 +n02376918 +n02377063 +n02377181 +n02377291 +n02377388 +n02377480 +n02377603 +n02377703 +n02378149 +n02378299 +n02378415 +n02378541 +n02378625 +n02378755 +n02378870 +n02378969 +n02379081 +n02379183 +n02379329 +n02379430 +n02379630 +n02379743 +n02379908 +n02380052 +n02380335 +n02380464 +n02380583 +n02380745 +n02380875 +n02381004 +n02381119 +n02381261 +n02381364 +n02381460 +n02381609 +n02381831 +n02382039 +n02382132 +n02382204 +n02382338 +n02382437 +n02382635 +n02382750 +n02382850 +n02382948 +n02383231 +n02384741 +n02384858 +n02385002 +n02385098 +n02385214 +n02385580 +n02385676 +n02385776 +n02385898 +n02386014 +n02386141 +n02386224 +n02386310 +n02386496 +n02386746 +n02386853 +n02386968 +n02387093 +n02387254 +n02387346 +n02387452 +n02387722 +n02387887 +n02387983 +n02388143 +n02388276 +n02388453 +n02388588 +n02388735 +n02388832 +n02388917 +n02389026 +n02389128 +n02389261 +n02389346 +n02389559 +n02389779 +n02389865 +n02389943 +n02390015 +n02390101 +n02390258 +n02390454 +n02390640 +n02390738 +n02390834 +n02390938 +n02391049 +n02391234 +n02391373 +n02391508 +n02391617 +n02391994 +n02392434 
+n02392555 +n02392824 +n02393161 +n02393580 +n02393807 +n02393940 +n02394477 +n02395003 +n02395406 +n02395694 +n02395855 +n02395931 +n02396014 +n02396088 +n02396157 +n02396427 +n02396796 +n02397096 +n02397529 +n02397744 +n02397987 +n02398521 +n02399000 +n02401031 +n02402010 +n02402175 +n02402425 +n02403003 +n02403153 +n02403231 +n02403325 +n02403454 +n02403740 +n02403820 +n02403920 +n02404028 +n02404186 +n02404432 +n02404573 +n02404906 +n02405101 +n02405302 +n02405440 +n02405577 +n02405692 +n02405799 +n02405929 +n02406046 +n02406174 +n02406432 +n02406533 +n02406647 +n02406749 +n02406859 +n02406952 +n02407071 +n02407172 +n02407276 +n02407390 +n02407521 +n02407625 +n02407763 +n02407959 +n02408429 +n02408660 +n02408817 +n02409038 +n02409202 +n02409508 +n02409870 +n02410011 +n02410141 +n02410509 +n02410702 +n02410900 +n02411206 +n02411705 +n02411999 +n02412080 +n02412210 +n02412440 +n02412629 +n02412700 +n02412787 +n02412909 +n02412977 +n02413050 +n02413131 +n02413484 +n02413593 +n02413717 +n02413824 +n02413917 +n02414043 +n02414209 +n02414290 +n02414442 +n02414578 +n02414763 +n02414904 +n02415130 +n02415253 +n02415435 +n02415577 +n02415829 +n02416104 +n02416519 +n02416820 +n02416880 +n02416964 +n02417070 +n02417242 +n02417387 +n02417534 +n02417663 +n02417785 +n02417914 +n02418064 +n02418465 +n02418770 +n02419056 +n02419336 +n02419634 +n02419796 +n02420509 +n02420828 +n02421136 +n02421449 +n02421792 +n02422106 +n02422391 +n02422699 +n02423022 +n02423218 +n02423362 +n02423589 +n02424085 +n02424305 +n02424486 +n02424589 +n02424695 +n02424909 +n02425086 +n02425228 +n02425532 +n02425887 +n02426176 +n02426481 +n02426813 +n02427032 +n02427183 +n02427470 +n02427576 +n02427724 +n02428089 +n02428349 +n02428508 +n02428842 +n02429456 +n02430045 +n02430559 +n02430643 +n02430748 +n02430830 +n02431122 +n02431337 +n02431441 +n02431542 +n02431628 +n02431785 +n02431976 +n02432291 +n02432511 +n02432704 +n02432983 +n02433318 +n02433546 +n02433729 +n02433925 +n02434190 +n02434415 +n02434712 +n02434954 +n02435216 +n02435517 +n02435853 +n02436224 +n02436353 +n02436645 +n02437136 +n02437312 +n02437482 +n02437616 +n02437971 +n02438173 +n02438272 +n02438580 +n02439033 +n02439398 +n02441326 +n02441942 +n02442172 +n02442336 +n02442446 +n02442572 +n02442668 +n02442845 +n02443015 +n02443114 +n02443346 +n02443484 +n02443808 +n02443959 +n02444251 +n02444819 +n02445004 +n02445171 +n02445394 +n02445715 +n02446206 +n02446352 +n02446645 +n02447021 +n02447366 +n02447762 +n02448060 +n02448318 +n02448633 +n02448885 +n02449183 +n02449350 +n02449699 +n02450034 +n02450295 +n02450426 +n02450561 +n02450677 +n02450829 +n02451125 +n02451415 +n02451575 +n02453108 +n02453611 +n02454379 +n02454794 +n02455135 +n02455428 +n02455720 +n02456008 +n02456275 +n02456962 +n02457408 +n02457945 +n02458135 +n02458517 +n02459190 +n02460009 +n02460451 +n02460817 +n02461128 +n02461830 +n02462213 +n02469248 +n02469472 +n02469914 +n02470238 +n02470325 +n02470709 +n02470899 +n02471300 +n02471762 +n02472293 +n02472987 +n02473307 +n02473554 +n02473720 +n02473857 +n02473983 +n02474110 +n02474282 +n02474605 +n02474777 +n02475078 +n02475358 +n02475669 +n02476219 +n02476567 +n02476870 +n02477028 +n02477187 +n02477329 +n02477516 +n02477782 +n02478239 +n02478875 +n02479332 +n02480153 +n02480495 +n02480855 +n02481103 +n02481235 +n02481366 +n02481500 +n02481823 +n02482060 +n02482286 +n02482474 +n02482650 +n02483092 +n02483362 +n02483708 +n02484322 +n02484473 +n02484975 +n02485225 +n02485371 +n02485536 +n02485688 +n02485988 +n02486261 +n02486410 +n02486657 +n02486908 
+n02487079 +n02487347 +n02487547 +n02487675 +n02487847 +n02488003 +n02488291 +n02488415 +n02488702 +n02488894 +n02489166 +n02489589 +n02490219 +n02490597 +n02490811 +n02491107 +n02491329 +n02491474 +n02492035 +n02492356 +n02492660 +n02492948 +n02493224 +n02493509 +n02493793 +n02494079 +n02494383 +n02495242 +n02496052 +n02496913 +n02497673 +n02498153 +n02498743 +n02499022 +n02499316 +n02499568 +n02499808 +n02500267 +n02500596 +n02501583 +n02501923 +n02502006 +n02502514 +n02502807 +n02503127 +n02503517 +n02503756 +n02504013 +n02504458 +n02504770 +n02505063 +n02505238 +n02505485 +n02505998 +n02506947 +n02507148 +n02507649 +n02508021 +n02508213 +n02508346 +n02508742 +n02509197 +n02509515 +n02509815 +n02510455 +n02511730 +n02512053 +n02512752 +n02512830 +n02512938 +n02513248 +n02513355 +n02513560 +n02513727 +n02513805 +n02513939 +n02514041 +n02515214 +n02515713 +n02516188 +n02516776 +n02517442 +n02517938 +n02518324 +n02518622 +n02519148 +n02519340 +n02519472 +n02519686 +n02519862 +n02520147 +n02520525 +n02520810 +n02521646 +n02522399 +n02522637 +n02522722 +n02522866 +n02523110 +n02523427 +n02523877 +n02524202 +n02524524 +n02524659 +n02524928 +n02525382 +n02525703 +n02526121 +n02526425 +n02526818 +n02527057 +n02527271 +n02527622 +n02528163 +n02529293 +n02529772 +n02530052 +n02530188 +n02530421 +n02530637 +n02530831 +n02530999 +n02531114 +n02531625 +n02532028 +n02532272 +n02532451 +n02532602 +n02532786 +n02532918 +n02533209 +n02533545 +n02533834 +n02534165 +n02534559 +n02534734 +n02535080 +n02535163 +n02535258 +n02535537 +n02535759 +n02536165 +n02536456 +n02536864 +n02537085 +n02537319 +n02537525 +n02537716 +n02538010 +n02538216 +n02538406 +n02538562 +n02538985 +n02539424 +n02539573 +n02539894 +n02540412 +n02540983 +n02541257 +n02541687 +n02542017 +n02542432 +n02542958 +n02543255 +n02543565 +n02544274 +n02545841 +n02546028 +n02546331 +n02546627 +n02547014 +n02547733 +n02548247 +n02548689 +n02548884 +n02549248 +n02549376 +n02549989 +n02550203 +n02550460 +n02550655 +n02551134 +n02551668 +n02552171 +n02553028 +n02554730 +n02555863 +n02556373 +n02556846 +n02557182 +n02557318 +n02557591 +n02557749 +n02557909 +n02558206 +n02558860 +n02559144 +n02559383 +n02559862 +n02560110 +n02561108 +n02561381 +n02561514 +n02561661 +n02561803 +n02561937 +n02562315 +n02562796 +n02562971 +n02563079 +n02563182 +n02563648 +n02563792 +n02563949 +n02564270 +n02564403 +n02564720 +n02564935 +n02565072 +n02565324 +n02565573 +n02566109 +n02566489 +n02566665 +n02567334 +n02567633 +n02568087 +n02568447 +n02568959 +n02569484 +n02569631 +n02569905 +n02570164 +n02570484 +n02570838 +n02571167 +n02571652 +n02571810 +n02572196 +n02572484 +n02573249 +n02573704 +n02574271 +n02574910 +n02575325 +n02575590 +n02576223 +n02576575 +n02576906 +n02577041 +n02577164 +n02577403 +n02577662 +n02577952 +n02578233 +n02578454 +n02578771 +n02578928 +n02579303 +n02579557 +n02579762 +n02579928 +n02580336 +n02580679 +n02580830 +n02581108 +n02581482 +n02581642 +n02581957 +n02582220 +n02582349 +n02582721 +n02583567 +n02583890 +n02584145 +n02584449 +n02585872 +n02586238 +n02586543 +n02587051 +n02587300 +n02587479 +n02587618 +n02587877 +n02588286 +n02588794 +n02588945 +n02589062 +n02589196 +n02589316 +n02589623 +n02589796 +n02590094 +n02590495 +n02590702 +n02590987 +n02591330 +n02591613 +n02591911 +n02592055 +n02592371 +n02592734 +n02593019 +n02593191 +n02593453 +n02593679 +n02594250 +n02594942 +n02595056 +n02595339 +n02595702 +n02596067 +n02596252 +n02596381 +n02596720 +n02597004 +n02597367 +n02597608 +n02597818 +n02597972 +n02598134 +n02598573 +n02598878 
+n02599052 +n02599347 +n02599557 +n02599958 +n02600298 +n02600503 +n02600798 +n02601344 +n02601767 +n02601921 +n02602059 +n02602405 +n02602760 +n02603317 +n02603540 +n02603862 +n02604157 +n02604480 +n02604954 +n02605316 +n02605703 +n02605936 +n02606052 +n02606384 +n02606751 +n02607072 +n02607201 +n02607470 +n02607862 +n02608284 +n02608547 +n02608860 +n02608996 +n02609302 +n02609823 +n02610066 +n02610373 +n02610664 +n02610980 +n02611561 +n02611898 +n02612167 +n02613181 +n02613572 +n02613820 +n02614140 +n02614482 +n02614653 +n02614978 +n02615298 +n02616128 +n02616397 +n02616851 +n02617537 +n02618094 +n02618513 +n02618827 +n02619165 +n02619550 +n02619861 +n02620167 +n02620578 +n02621258 +n02621908 +n02622249 +n02622547 +n02622712 +n02622955 +n02623445 +n02624167 +n02624551 +n02624807 +n02624987 +n02625258 +n02625612 +n02625851 +n02626089 +n02626265 +n02626471 +n02626762 +n02627037 +n02627292 +n02627532 +n02627835 +n02628062 +n02628259 +n02628600 +n02629230 +n02629716 +n02630281 +n02630615 +n02630739 +n02631041 +n02631330 +n02631475 +n02631628 +n02631775 +n02632039 +n02632494 +n02633422 +n02633677 +n02633977 +n02634545 +n02635154 +n02635580 +n02636170 +n02636405 +n02636550 +n02636854 +n02637179 +n02637475 +n02637977 +n02638596 +n02639087 +n02639605 +n02639922 +n02640242 +n02640626 +n02640857 +n02641379 +n02642107 +n02642644 +n02643112 +n02643316 +n02643566 +n02643836 +n02644113 +n02644360 +n02644501 +n02644665 +n02644817 +n02645538 +n02645691 +n02645953 +n02646667 +n02646892 +n02648035 +n02648625 +n02648916 +n02649218 +n02649546 +n02650050 +n02650413 +n02650541 +n02651060 +n02652132 +n02652668 +n02653145 +n02653497 +n02653786 +n02654112 +n02654425 +n02654745 +n02655020 +n02655523 +n02655848 +n02656032 +n02656301 +n02656670 +n02656969 +n02657368 +n02657694 +n02658079 +n02658531 +n02658811 +n02659176 +n02659478 +n02659808 +n02660091 +n02660208 +n02660519 +n02660640 +n02661017 +n02661473 +n02661618 +n02662239 +n02662397 +n02662559 +n02662825 +n02662993 +n02663211 +n02663485 +n02663849 +n02664285 +n02664642 +n02665250 +n02665985 +n02666196 +n02666501 +n02666624 +n02666943 +n02667093 +n02667244 +n02667379 +n02667478 +n02667576 +n02667693 +n02668393 +n02668613 +n02669295 +n02669442 +n02669534 +n02669723 +n02670186 +n02670382 +n02670683 +n02670935 +n02671780 +n02672152 +n02672371 +n02672831 +n02675077 +n02675219 +n02675522 +n02676097 +n02676261 +n02676566 +n02676670 +n02676938 +n02677028 +n02677136 +n02677436 +n02677718 +n02678010 +n02678384 +n02678897 +n02679142 +n02679257 +n02679961 +n02680110 +n02680512 +n02680638 +n02680754 +n02681392 +n02682311 +n02682407 +n02682569 +n02682811 +n02682922 +n02683183 +n02683323 +n02683454 +n02683558 +n02683791 +n02684248 +n02684356 +n02684515 +n02684649 +n02684962 +n02685082 +n02685253 +n02685365 +n02685701 +n02685995 +n02686121 +n02686227 +n02686379 +n02686568 +n02687172 +n02687423 +n02687682 +n02687821 +n02687992 +n02688273 +n02688443 +n02689144 +n02689274 +n02689434 +n02689748 +n02689819 +n02690373 +n02690715 +n02691156 +n02692086 +n02692232 +n02692513 +n02692680 +n02692877 +n02693246 +n02693413 +n02693540 +n02694045 +n02694279 +n02694426 +n02694662 +n02694966 +n02695627 +n02695762 +n02696165 +n02696246 +n02696569 +n02696843 +n02697022 +n02697221 +n02697576 +n02697675 +n02697876 +n02698244 +n02698473 +n02698634 +n02699494 +n02699629 +n02699770 +n02699915 +n02700064 +n02700258 +n02700895 +n02701002 +n02701260 +n02701730 +n02702989 +n02703124 +n02703275 +n02704645 +n02704792 +n02704949 +n02705201 +n02705429 +n02705944 +n02706221 +n02706806 +n02708093 +n02708224 
+n02708433 +n02708555 +n02708711 +n02708885 +n02709101 +n02709367 +n02709637 +n02709763 +n02709908 +n02710044 +n02710201 +n02710324 +n02710429 +n02710600 +n02711237 +n02711780 +n02712545 +n02712643 +n02713003 +n02713218 +n02713364 +n02713496 +n02714315 +n02714535 +n02714751 +n02715229 +n02715513 +n02715712 +n02716626 +n02720048 +n02720576 +n02721813 +n02723165 +n02724722 +n02725872 +n02726017 +n02726210 +n02726305 +n02726681 +n02727016 +n02727141 +n02727426 +n02727825 +n02728440 +n02729222 +n02729837 +n02729965 +n02730265 +n02730568 +n02730930 +n02731251 +n02731398 +n02731629 +n02731900 +n02732072 +n02732572 +n02732827 +n02733213 +n02733524 +n02734725 +n02734835 +n02735268 +n02735361 +n02735538 +n02735688 +n02736396 +n02736798 +n02737351 +n02737660 +n02738031 +n02738271 +n02738449 +n02738535 +n02738741 +n02738859 +n02738978 +n02739123 +n02739427 +n02739550 +n02739668 +n02739889 +n02740061 +n02740300 +n02740533 +n02740764 +n02741367 +n02741475 +n02742070 +n02742194 +n02742322 +n02742468 +n02742753 +n02743426 +n02744323 +n02744844 +n02744961 +n02745492 +n02745611 +n02745816 +n02746008 +n02746225 +n02746365 +n02746595 +n02746683 +n02746978 +n02747063 +n02747177 +n02747672 +n02747802 +n02748183 +n02748359 +n02748491 +n02749169 +n02749292 +n02749479 +n02749670 +n02749790 +n02749953 +n02750070 +n02750169 +n02750320 +n02750652 +n02751067 +n02751215 +n02751295 +n02751490 +n02752199 +n02752496 +n02752615 +n02752810 +n02752917 +n02753044 +n02753394 +n02753710 +n02754103 +n02754656 +n02755140 +n02755352 +n02755529 +n02755675 +n02755823 +n02755984 +n02756098 +n02756854 +n02756977 +n02757061 +n02757337 +n02757462 +n02757714 +n02757810 +n02757927 +n02758134 +n02758490 +n02758863 +n02758960 +n02759257 +n02759387 +n02759700 +n02759963 +n02760099 +n02760199 +n02760298 +n02760429 +n02760658 +n02760855 +n02761034 +n02761206 +n02761392 +n02761557 +n02761696 +n02761834 +n02762169 +n02762371 +n02762508 +n02762725 +n02762909 +n02763083 +n02763198 +n02763306 +n02763604 +n02763714 +n02763901 +n02764044 +n02764398 +n02764505 +n02764614 +n02764779 +n02764935 +n02765028 +n02766168 +n02766320 +n02766534 +n02766792 +n02767038 +n02767147 +n02767433 +n02767665 +n02767956 +n02768114 +n02768226 +n02768433 +n02768655 +n02768973 +n02769075 +n02769290 +n02769669 +n02769748 +n02769963 +n02770078 +n02770211 +n02770585 +n02770721 +n02770830 +n02771004 +n02771166 +n02771286 +n02771547 +n02771750 +n02772101 +n02772435 +n02772554 +n02772700 +n02773037 +n02773838 +n02774152 +n02774630 +n02774921 +n02775039 +n02775178 +n02775483 +n02775689 +n02775813 +n02775897 +n02776007 +n02776205 +n02776505 +n02776631 +n02776825 +n02776978 +n02777100 +n02777292 +n02777402 +n02777638 +n02777734 +n02777927 +n02778131 +n02778294 +n02778456 +n02778588 +n02778669 +n02779435 +n02779609 +n02779719 +n02779971 +n02780315 +n02780445 +n02780588 +n02780704 +n02780815 +n02781121 +n02781213 +n02781338 +n02781517 +n02781764 +n02782093 +n02782432 +n02782602 +n02782681 +n02782778 +n02783035 +n02783161 +n02783324 +n02783459 +n02783900 +n02783994 +n02784124 +n02784998 +n02785648 +n02786058 +n02786198 +n02786331 +n02786463 +n02786611 +n02786736 +n02786837 +n02787120 +n02787269 +n02787435 +n02787622 +n02788021 +n02788148 +n02788386 +n02788462 +n02788572 +n02788689 +n02789487 +n02790669 +n02790823 +n02790996 +n02791124 +n02791270 +n02791532 +n02791665 +n02791795 +n02792409 +n02792552 +n02792948 +n02793089 +n02793199 +n02793296 +n02793414 +n02793495 +n02793684 +n02793842 +n02793930 +n02794008 +n02794156 +n02794368 +n02794474 +n02794664 +n02794779 +n02794972 +n02795169 
+n02795528 +n02795670 +n02795783 +n02795978 +n02796207 +n02796318 +n02796412 +n02796623 +n02796995 +n02797295 +n02797535 +n02797692 +n02797881 +n02799071 +n02799175 +n02799323 +n02799897 +n02800213 +n02800497 +n02800675 +n02800940 +n02801047 +n02801184 +n02801450 +n02801525 +n02801823 +n02801938 +n02802215 +n02802426 +n02802544 +n02802721 +n02802990 +n02803349 +n02803539 +n02803666 +n02803809 +n02803934 +n02804123 +n02804252 +n02804414 +n02804515 +n02804610 +n02805283 +n02805845 +n02805983 +n02806088 +n02806379 +n02806530 +n02806762 +n02806875 +n02806992 +n02807133 +n02807523 +n02807616 +n02807731 +n02808185 +n02808304 +n02808440 +n02808829 +n02808968 +n02809105 +n02809241 +n02809364 +n02809491 +n02809605 +n02809736 +n02810139 +n02810270 +n02810471 +n02810782 +n02811059 +n02811204 +n02811350 +n02811468 +n02811618 +n02811719 +n02811936 +n02812201 +n02812342 +n02812631 +n02812785 +n02812949 +n02813252 +n02813399 +n02813544 +n02813645 +n02813752 +n02813981 +n02814116 +n02814338 +n02814428 +n02814533 +n02814774 +n02814860 +n02815478 +n02815749 +n02815834 +n02815950 +n02816494 +n02816656 +n02816768 +n02817031 +n02817251 +n02817386 +n02817516 +n02817650 +n02817799 +n02818135 +n02818254 +n02818687 +n02818832 +n02819697 +n02820085 +n02820210 +n02820556 +n02820675 +n02821202 +n02821415 +n02821543 +n02821627 +n02821943 +n02822064 +n02822220 +n02822399 +n02822579 +n02822762 +n02822865 +n02823124 +n02823335 +n02823428 +n02823510 +n02823586 +n02823750 +n02823848 +n02823964 +n02824058 +n02824152 +n02824319 +n02824448 +n02825153 +n02825240 +n02825442 +n02825657 +n02825872 +n02825961 +n02826068 +n02826259 +n02826459 +n02826589 +n02826683 +n02826812 +n02826886 +n02827148 +n02827606 +n02828115 +n02828299 +n02828427 +n02828884 +n02829246 +n02829353 +n02829510 +n02829596 +n02830157 +n02831237 +n02831335 +n02831595 +n02831724 +n02831894 +n02831998 +n02833040 +n02833140 +n02833275 +n02833403 +n02833793 +n02834027 +n02834397 +n02834506 +n02834642 +n02834778 +n02835271 +n02835412 +n02835551 +n02835724 +n02835829 +n02835915 +n02836035 +n02836174 +n02836268 +n02836392 +n02836513 +n02836607 +n02836900 +n02837134 +n02837567 +n02837789 +n02837887 +n02838014 +n02838178 +n02838345 +n02838577 +n02838728 +n02838958 +n02839110 +n02839351 +n02839592 +n02839910 +n02840134 +n02840245 +n02840515 +n02840619 +n02841063 +n02841187 +n02841315 +n02841506 +n02841641 +n02841847 +n02842133 +n02842573 +n02842809 +n02843029 +n02843158 +n02843276 +n02843465 +n02843553 +n02843684 +n02843777 +n02843909 +n02844056 +n02844214 +n02844307 +n02844714 +n02845130 +n02845293 +n02845985 +n02846141 +n02846260 +n02846511 +n02846619 +n02846733 +n02846874 +n02847461 +n02847631 +n02847852 +n02848118 +n02848216 +n02848523 +n02848806 +n02848921 +n02849154 +n02849885 +n02850060 +n02850358 +n02850732 +n02850950 +n02851099 +n02851795 +n02851939 +n02852043 +n02852173 +n02852360 +n02853016 +n02853218 +n02853336 +n02853745 +n02853870 +n02854378 +n02854532 +n02854630 +n02854739 +n02854926 +n02855089 +n02855390 +n02855701 +n02855793 +n02855925 +n02856013 +n02856237 +n02856362 +n02857365 +n02857477 +n02857644 +n02857907 +n02858304 +n02859184 +n02859343 +n02859443 +n02859557 +n02859729 +n02859955 +n02860415 +n02860640 +n02860847 +n02861022 +n02861147 +n02861286 +n02861387 +n02861509 +n02861658 +n02861777 +n02861886 +n02862048 +n02862916 +n02863014 +n02863176 +n02863340 +n02863426 +n02863536 +n02863638 +n02863750 +n02864122 +n02864504 +n02864593 +n02864987 +n02865351 +n02865665 +n02865931 +n02866106 +n02866386 +n02866578 +n02867401 +n02867592 +n02867715 +n02867966 
+n02868240 +n02868429 +n02868546 +n02868638 +n02868975 +n02869155 +n02869249 +n02869563 +n02869737 +n02869837 +n02870526 +n02870676 +n02870772 +n02870880 +n02871005 +n02871147 +n02871314 +n02871439 +n02871525 +n02871631 +n02871824 +n02871963 +n02872333 +n02872529 +n02872752 +n02873520 +n02873623 +n02873733 +n02873839 +n02874086 +n02874214 +n02874336 +n02874442 +n02874537 +n02874642 +n02874750 +n02875436 +n02875626 +n02875948 +n02876084 +n02876326 +n02876457 +n02876657 +n02877266 +n02877513 +n02877642 +n02877765 +n02877962 +n02878107 +n02878222 +n02878425 +n02878534 +n02878628 +n02878796 +n02879087 +n02879309 +n02879422 +n02879517 +n02879718 +n02880189 +n02880393 +n02880546 +n02880842 +n02880940 +n02881193 +n02881546 +n02881757 +n02881906 +n02882190 +n02882301 +n02882483 +n02882647 +n02882894 +n02883004 +n02883101 +n02883205 +n02883344 +n02884225 +n02884450 +n02884859 +n02884994 +n02885108 +n02885233 +n02885338 +n02885462 +n02885882 +n02886321 +n02886434 +n02886599 +n02887079 +n02887209 +n02887489 +n02887832 +n02887970 +n02888270 +n02888429 +n02888569 +n02888898 +n02889425 +n02889646 +n02889856 +n02889996 +n02890188 +n02890351 +n02890513 +n02890662 +n02890804 +n02890940 +n02891188 +n02891788 +n02892201 +n02892304 +n02892392 +n02892499 +n02892626 +n02892767 +n02892948 +n02893269 +n02893418 +n02893608 +n02893692 +n02893941 +n02894024 +n02894158 +n02894337 +n02894605 +n02894847 +n02895008 +n02895154 +n02895328 +n02895438 +n02896074 +n02896294 +n02896442 +n02896694 +n02896856 +n02896949 +n02897097 +n02897389 +n02897820 +n02898093 +n02898173 +n02898269 +n02898369 +n02898585 +n02898711 +n02899439 +n02900160 +n02900459 +n02900594 +n02900705 +n02900857 +n02900987 +n02901114 +n02901259 +n02901377 +n02901481 +n02901620 +n02901793 +n02901901 +n02902079 +n02902687 +n02902816 +n02902916 +n02903006 +n02903126 +n02903204 +n02903727 +n02903852 +n02904109 +n02904233 +n02904505 +n02904640 +n02904803 +n02904927 +n02905036 +n02905152 +n02905886 +n02906734 +n02906963 +n02907082 +n02907296 +n02907391 +n02907656 +n02907873 +n02908123 +n02908217 +n02908773 +n02908951 +n02909053 +n02909165 +n02909285 +n02909706 +n02909870 +n02910145 +n02910241 +n02910353 +n02910542 +n02910701 +n02910864 +n02910964 +n02911332 +n02911485 +n02912065 +n02912319 +n02912557 +n02912894 +n02913152 +n02914991 +n02915904 +n02916065 +n02916179 +n02916350 +n02916936 +n02917067 +n02917377 +n02917521 +n02917607 +n02917742 +n02917964 +n02918112 +n02918330 +n02918455 +n02918595 +n02918831 +n02918964 +n02919148 +n02919308 +n02919414 +n02919648 +n02919792 +n02919890 +n02919976 +n02920083 +n02920164 +n02920259 +n02920369 +n02920503 +n02920658 +n02921029 +n02921195 +n02921292 +n02921406 +n02921592 +n02921756 +n02921884 +n02922159 +n02922292 +n02922461 +n02922578 +n02922798 +n02922877 +n02923129 +n02923535 +n02923682 +n02923915 +n02924116 +n02925009 +n02925107 +n02925385 +n02925519 +n02925666 +n02926426 +n02926591 +n02927053 +n02927161 +n02927764 +n02927887 +n02928049 +n02928299 +n02928413 +n02928608 +n02929184 +n02929289 +n02929462 +n02929582 +n02929923 +n02930080 +n02930214 +n02930339 +n02930645 +n02930766 +n02931013 +n02931148 +n02931294 +n02931417 +n02931836 +n02932019 +n02932400 +n02932523 +n02932693 +n02932891 +n02933112 +n02933340 +n02933462 +n02933649 +n02933750 +n02933990 +n02934168 +n02934451 +n02935017 +n02935387 +n02935490 +n02935658 +n02935891 +n02936176 +n02936281 +n02936402 +n02936570 +n02936714 +n02936921 +n02937010 +n02937336 +n02937958 +n02938218 +n02938321 +n02938886 +n02939185 +n02939763 +n02939866 +n02940289 +n02940385 +n02940570 
+n02940706 +n02941095 +n02941228 +n02941845 +n02942015 +n02942147 +n02942349 +n02942460 +n02942699 +n02943241 +n02943465 +n02943686 +n02943871 +n02943964 +n02944075 +n02944146 +n02944256 +n02944459 +n02944579 +n02944826 +n02945161 +n02945813 +n02945964 +n02946127 +n02946270 +n02946348 +n02946509 +n02946753 +n02946824 +n02946921 +n02947212 +n02947660 +n02947818 +n02947977 +n02948072 +n02948293 +n02948403 +n02948557 +n02948834 +n02948942 +n02949084 +n02949202 +n02949356 +n02949542 +n02950018 +n02950120 +n02950186 +n02950256 +n02950482 +n02950632 +n02950826 +n02950943 +n02951358 +n02951585 +n02951703 +n02951843 +n02952109 +n02952237 +n02952374 +n02952485 +n02952585 +n02952674 +n02952798 +n02952935 +n02953056 +n02953197 +n02953455 +n02953552 +n02953673 +n02953850 +n02954163 +n02954340 +n02954938 +n02955065 +n02955247 +n02955540 +n02955767 +n02956393 +n02956699 +n02956795 +n02956883 +n02957008 +n02957135 +n02957252 +n02957427 +n02957755 +n02957862 +n02958343 +n02959942 +n02960352 +n02960690 +n02960903 +n02961035 +n02961225 +n02961451 +n02961544 +n02961947 +n02962061 +n02962200 +n02962414 +n02962843 +n02962938 +n02963159 +n02963302 +n02963503 +n02963692 +n02963821 +n02963987 +n02964075 +n02964196 +n02964295 +n02964634 +n02964843 +n02964934 +n02965024 +n02965122 +n02965216 +n02965300 +n02965529 +n02965783 +n02966068 +n02966193 +n02966545 +n02966687 +n02966786 +n02966942 +n02967081 +n02967170 +n02967294 +n02967407 +n02967540 +n02967626 +n02967782 +n02967991 +n02968074 +n02968210 +n02968333 +n02968473 +n02969010 +n02969163 +n02969323 +n02969527 +n02969634 +n02969886 +n02970408 +n02970534 +n02970685 +n02970849 +n02971167 +n02971356 +n02971473 +n02971579 +n02971691 +n02971940 +n02972397 +n02972714 +n02972934 +n02973017 +n02973236 +n02973805 +n02973904 +n02974003 +n02974348 +n02974454 +n02974565 +n02974697 +n02975212 +n02975589 +n02975994 +n02976123 +n02976249 +n02976350 +n02976455 +n02976552 +n02976641 +n02976815 +n02976939 +n02977058 +n02977330 +n02977438 +n02977619 +n02977936 +n02978055 +n02978205 +n02978367 +n02978478 +n02978753 +n02978881 +n02979074 +n02979186 +n02979290 +n02979399 +n02979516 +n02979836 +n02980036 +n02980203 +n02980441 +n02980625 +n02981024 +n02981198 +n02981321 +n02981565 +n02981792 +n02981911 +n02982232 +n02982416 +n02982515 +n02982599 +n02983072 +n02983189 +n02983357 +n02983507 +n02983904 +n02984061 +n02984203 +n02984469 +n02984699 +n02985137 +n02985606 +n02985828 +n02985963 +n02986066 +n02986160 +n02986348 +n02987047 +n02987379 +n02987492 +n02987706 +n02987823 +n02987950 +n02988066 +n02988156 +n02988304 +n02988486 +n02988679 +n02988963 +n02989099 +n02990373 +n02990758 +n02991048 +n02991302 +n02991847 +n02992032 +n02992211 +n02992368 +n02992529 +n02992795 +n02993194 +n02993368 +n02993546 +n02994573 +n02994743 +n02995345 +n02995871 +n02995998 +n02997391 +n02997607 +n02997910 +n02998003 +n02998107 +n02998563 +n02998696 +n02998841 +n02999138 +n02999410 +n02999936 +n03000134 +n03000247 +n03000530 +n03000684 +n03001115 +n03001282 +n03001540 +n03001627 +n03002096 +n03002210 +n03002341 +n03002555 +n03002711 +n03002816 +n03002948 +n03003091 +n03003633 +n03004275 +n03004409 +n03004531 +n03004620 +n03004713 +n03004824 +n03005033 +n03005147 +n03005285 +n03005515 +n03005619 +n03006626 +n03006788 +n03006903 +n03007130 +n03007297 +n03007444 +n03007591 +n03008177 +n03008817 +n03008976 +n03009111 +n03009269 +n03009794 +n03010473 +n03010656 +n03010795 +n03010915 +n03011018 +n03011355 +n03011741 +n03012013 +n03012159 +n03012373 +n03012499 +n03012644 +n03012734 +n03012897 +n03013006 +n03013438 
+n03013580 +n03013850 +n03014440 +n03014705 +n03015149 +n03015254 +n03015478 +n03015631 +n03015851 +n03016209 +n03016389 +n03016609 +n03016737 +n03016868 +n03016953 +n03017070 +n03017168 +n03017698 +n03017835 +n03018209 +n03018349 +n03018614 +n03018712 +n03018848 +n03019198 +n03019304 +n03019434 +n03019685 +n03019806 +n03019938 +n03020034 +n03020416 +n03020692 +n03021228 +n03024064 +n03024233 +n03024333 +n03024518 +n03025070 +n03025165 +n03025250 +n03025886 +n03026506 +n03026907 +n03027001 +n03027108 +n03027250 +n03027505 +n03027625 +n03028079 +n03028596 +n03028785 +n03029066 +n03029197 +n03029296 +n03029445 +n03029925 +n03030262 +n03030353 +n03030557 +n03030880 +n03031012 +n03031152 +n03031422 +n03031756 +n03032252 +n03032453 +n03032811 +n03033267 +n03033362 +n03033986 +n03034244 +n03034405 +n03034516 +n03034663 +n03035252 +n03035510 +n03035715 +n03035832 +n03036022 +n03036149 +n03036244 +n03036341 +n03036469 +n03036701 +n03036866 +n03037108 +n03037228 +n03037404 +n03037590 +n03037709 +n03038041 +n03038281 +n03038480 +n03038685 +n03038870 +n03039015 +n03039259 +n03039353 +n03039493 +n03039827 +n03039947 +n03040229 +n03040376 +n03040836 +n03041114 +n03041265 +n03041449 +n03041632 +n03041810 +n03042139 +n03042384 +n03042490 +n03042697 +n03042829 +n03042984 +n03043173 +n03043274 +n03043423 +n03043693 +n03043798 +n03043958 +n03044671 +n03044801 +n03044934 +n03045074 +n03045228 +n03045337 +n03045698 +n03045800 +n03046029 +n03046133 +n03046257 +n03046802 +n03046921 +n03047052 +n03047171 +n03047690 +n03047799 +n03047941 +n03048883 +n03049066 +n03049326 +n03049457 +n03049782 +n03049924 +n03050026 +n03050453 +n03050546 +n03050655 +n03050864 +n03051041 +n03051249 +n03051396 +n03051540 +n03052464 +n03052917 +n03053047 +n03053976 +n03054491 +n03054605 +n03054901 +n03055159 +n03055418 +n03055670 +n03055857 +n03056097 +n03056215 +n03056288 +n03056493 +n03056583 +n03056873 +n03057021 +n03057541 +n03057636 +n03057724 +n03057841 +n03057920 +n03058107 +n03058603 +n03058949 +n03059103 +n03059236 +n03059366 +n03059685 +n03059934 +n03060728 +n03061050 +n03061211 +n03061345 +n03061505 +n03061674 +n03061819 +n03061893 +n03062015 +n03062122 +n03062245 +n03062336 +n03062651 +n03062798 +n03062985 +n03063073 +n03063199 +n03063338 +n03063485 +n03063599 +n03063689 +n03063834 +n03063968 +n03064250 +n03064350 +n03064562 +n03064758 +n03064935 +n03065243 +n03065424 +n03065708 +n03066232 +n03066359 +n03066464 +n03066849 +n03067093 +n03067212 +n03067339 +n03067518 +n03068181 +n03068998 +n03069752 +n03070059 +n03070193 +n03070396 +n03070587 +n03070854 +n03071021 +n03071160 +n03071288 +n03071552 +n03072056 +n03072201 +n03072440 +n03072682 +n03073296 +n03073384 +n03073545 +n03073694 +n03073977 +n03074380 +n03074855 +n03075097 +n03075248 +n03075370 +n03075500 +n03075634 +n03075768 +n03075946 +n03076411 +n03076623 +n03076708 +n03077442 +n03077616 +n03077741 +n03078287 +n03078506 +n03078670 +n03078802 +n03078995 +n03079136 +n03079230 +n03079494 +n03079616 +n03079741 +n03080309 +n03080497 +n03080633 +n03080731 +n03080904 +n03081859 +n03081986 +n03082127 +n03082280 +n03082450 +n03082656 +n03082807 +n03082979 +n03084420 +n03084834 +n03085013 +n03085219 +n03085333 +n03085602 +n03085781 +n03085915 +n03086183 +n03086457 +n03086580 +n03086670 +n03086868 +n03087069 +n03087245 +n03087366 +n03087521 +n03087643 +n03087816 +n03088389 +n03088580 +n03088707 +n03089477 +n03089624 +n03089753 +n03089879 +n03090000 +n03090172 +n03090437 +n03090710 +n03090856 +n03091044 +n03091223 +n03091374 +n03091907 +n03092053 +n03092166 +n03092314 +n03092476 
+n03092656 +n03092883 +n03093427 +n03093792 +n03094159 +n03094503 +n03095699 +n03095965 +n03096439 +n03096960 +n03097362 +n03097535 +n03097673 +n03098140 +n03098515 +n03098688 +n03098806 +n03098959 +n03099147 +n03099274 +n03099454 +n03099622 +n03099771 +n03099945 +n03100240 +n03100346 +n03100490 +n03100897 +n03101156 +n03101302 +n03101375 +n03101517 +n03101664 +n03101796 +n03101986 +n03102371 +n03102516 +n03102654 +n03102859 +n03103128 +n03103396 +n03103563 +n03103904 +n03104019 +n03104512 +n03105088 +n03105214 +n03105306 +n03105467 +n03105645 +n03105810 +n03105974 +n03106722 +n03106898 +n03107046 +n03107488 +n03107716 +n03108455 +n03108624 +n03108759 +n03108853 +n03109033 +n03109150 +n03109253 +n03109693 +n03109881 +n03110202 +n03110669 +n03111041 +n03111177 +n03111296 +n03111690 +n03112240 +n03112719 +n03112869 +n03113152 +n03113505 +n03113657 +n03113835 +n03114041 +n03114236 +n03114379 +n03114504 +n03114743 +n03114839 +n03115014 +n03115180 +n03115400 +n03115663 +n03115762 +n03115897 +n03116008 +n03116163 +n03116530 +n03116767 +n03117199 +n03117642 +n03118346 +n03118969 +n03119203 +n03119396 +n03119510 +n03120198 +n03120491 +n03120778 +n03121040 +n03121190 +n03121298 +n03121431 +n03121897 +n03122073 +n03122202 +n03122295 +n03122748 +n03123553 +n03123666 +n03123809 +n03123917 +n03124043 +n03124170 +n03124313 +n03124474 +n03124590 +n03125057 +n03125588 +n03125729 +n03125870 +n03126090 +n03126385 +n03126580 +n03126707 +n03126927 +n03127024 +n03127203 +n03127408 +n03127531 +n03127747 +n03127925 +n03128085 +n03128248 +n03128427 +n03128519 +n03129001 +n03129471 +n03129636 +n03129753 +n03129848 +n03130066 +n03130233 +n03130563 +n03130761 +n03130866 +n03131193 +n03131574 +n03131669 +n03131967 +n03132076 +n03132261 +n03132438 +n03132666 +n03132776 +n03133050 +n03133415 +n03133878 +n03134118 +n03134232 +n03134394 +n03134739 +n03134853 +n03135030 +n03135532 +n03135656 +n03135788 +n03135917 +n03136051 +n03136254 +n03136369 +n03136504 +n03137473 +n03137579 +n03138128 +n03138217 +n03138344 +n03138669 +n03139089 +n03139464 +n03139640 +n03139998 +n03140126 +n03140292 +n03140431 +n03140546 +n03140652 +n03140771 +n03140900 +n03141065 +n03141327 +n03141455 +n03141612 +n03141702 +n03141823 +n03142099 +n03142205 +n03142325 +n03142431 +n03142679 +n03143400 +n03143572 +n03143754 +n03144156 +n03144873 +n03144982 +n03145147 +n03145277 +n03145384 +n03145522 +n03145719 +n03145843 +n03146219 +n03146342 +n03146449 +n03146560 +n03146687 +n03146777 +n03146846 +n03147084 +n03147156 +n03147280 +n03147509 +n03148324 +n03148518 +n03148727 +n03148808 +n03149135 +n03149401 +n03149686 +n03149810 +n03150232 +n03150511 +n03150661 +n03150795 +n03151077 +n03152303 +n03152951 +n03153246 +n03153585 +n03153948 +n03154073 +n03154316 +n03154446 +n03154616 +n03154745 +n03154895 +n03155178 +n03155502 +n03155915 +n03156071 +n03156279 +n03156405 +n03156767 +n03157348 +n03158186 +n03158414 +n03158668 +n03158796 +n03158885 +n03159535 +n03159640 +n03160001 +n03160186 +n03160309 +n03160740 +n03161016 +n03161450 +n03161893 +n03162297 +n03162460 +n03162556 +n03162714 +n03162818 +n03163222 +n03163381 +n03163488 +n03163798 +n03163973 +n03164192 +n03164344 +n03164605 +n03164722 +n03164929 +n03165096 +n03165211 +n03165466 +n03165616 +n03165823 +n03165955 +n03166120 +n03166514 +n03166600 +n03166685 +n03166809 +n03166951 +n03167153 +n03167978 +n03168107 +n03168217 +n03168543 +n03168663 +n03168774 +n03168933 +n03169063 +n03169176 +n03170292 +n03170459 +n03170635 +n03170872 +n03171228 +n03171356 +n03171635 +n03171910 +n03172038 +n03172738 +n03172965 
+n03173270 +n03173387 +n03173929 +n03174079 +n03174450 +n03174731 +n03175081 +n03175189 +n03175301 +n03175457 +n03175604 +n03175843 +n03175983 +n03176238 +n03176386 +n03176594 +n03176763 +n03177059 +n03177165 +n03177708 +n03178000 +n03178173 +n03178430 +n03178538 +n03178674 +n03179701 +n03179910 +n03180011 +n03180384 +n03180504 +n03180732 +n03180865 +n03180969 +n03181293 +n03181667 +n03182140 +n03182232 +n03182912 +n03183080 +n03185868 +n03186199 +n03186285 +n03186818 +n03187037 +n03187153 +n03187268 +n03187595 +n03187751 +n03188290 +n03188531 +n03188725 +n03188871 +n03189083 +n03189311 +n03189818 +n03190458 +n03191286 +n03191451 +n03191561 +n03191776 +n03192543 +n03192907 +n03193107 +n03193260 +n03193423 +n03193597 +n03193754 +n03194170 +n03194297 +n03194812 +n03194992 +n03195332 +n03195485 +n03195799 +n03195959 +n03196062 +n03196217 +n03196324 +n03196598 +n03196990 +n03197201 +n03197337 +n03197446 +n03198223 +n03198500 +n03199358 +n03199488 +n03199647 +n03199775 +n03199901 +n03200231 +n03200357 +n03200539 +n03200701 +n03200906 +n03201035 +n03201208 +n03201529 +n03201638 +n03201776 +n03201895 +n03201996 +n03202354 +n03202481 +n03202760 +n03202940 +n03203089 +n03203806 +n03204134 +n03204306 +n03204436 +n03204558 +n03204955 +n03205143 +n03205304 +n03205458 +n03205574 +n03205669 +n03205903 +n03206023 +n03206158 +n03206282 +n03206405 +n03206602 +n03206718 +n03206908 +n03207305 +n03207548 +n03207630 +n03207743 +n03207835 +n03207941 +n03208556 +n03208938 +n03209359 +n03209477 +n03209666 +n03209910 +n03210245 +n03210372 +n03210552 +n03210683 +n03211117 +n03211413 +n03211616 +n03211789 +n03212114 +n03212247 +n03212406 +n03212811 +n03213014 +n03213361 +n03213538 +n03213715 +n03213826 +n03214253 +n03214450 +n03214582 +n03214966 +n03215076 +n03215191 +n03215337 +n03215508 +n03215749 +n03215930 +n03216199 +n03216402 +n03216562 +n03216710 +n03216828 +n03217653 +n03217739 +n03217889 +n03218198 +n03218446 +n03219010 +n03219135 +n03219483 +n03219612 +n03219859 +n03219966 +n03220095 +n03220237 +n03220513 +n03220692 +n03221059 +n03221351 +n03221540 +n03221720 +n03222176 +n03222318 +n03222516 +n03222722 +n03222857 +n03223162 +n03223299 +n03223441 +n03223553 +n03223686 +n03223923 +n03224490 +n03224603 +n03224753 +n03224893 +n03225108 +n03225458 +n03225616 +n03225777 +n03225988 +n03226090 +n03226254 +n03226375 +n03226538 +n03226880 +n03227010 +n03227184 +n03227317 +n03227721 +n03227856 +n03228016 +n03228254 +n03228365 +n03228533 +n03228692 +n03228796 +n03228967 +n03229115 +n03229244 +n03229526 +n03231160 +n03231368 +n03231819 +n03232309 +n03232417 +n03232543 +n03232815 +n03232923 +n03233123 +n03233624 +n03233744 +n03233905 +n03234164 +n03234952 +n03235042 +n03235180 +n03235327 +n03235796 +n03235979 +n03236093 +n03236217 +n03236423 +n03236580 +n03236735 +n03237212 +n03237340 +n03237416 +n03237639 +n03237839 +n03237992 +n03238131 +n03238286 +n03238586 +n03238762 +n03238879 +n03239054 +n03239259 +n03239607 +n03239726 +n03240140 +n03240683 +n03240892 +n03241093 +n03241335 +n03241496 +n03241903 +n03242120 +n03242264 +n03242390 +n03242506 +n03242995 +n03243218 +n03243625 +n03244047 +n03244231 +n03244388 +n03244775 +n03244919 +n03245271 +n03245421 +n03245724 +n03245889 +n03246197 +n03246312 +n03246454 +n03246653 +n03246933 +n03247083 +n03247351 +n03247495 +n03248835 +n03249342 +n03249569 +n03249956 +n03250089 +n03250279 +n03250405 +n03250588 +n03250847 +n03250952 +n03251100 +n03251280 +n03251533 +n03251766 +n03251932 +n03252231 +n03252324 +n03252422 +n03252637 +n03252787 +n03253071 +n03253187 +n03253279 +n03253714 
+n03253796 +n03253886 +n03254046 +n03254189 +n03254374 +n03254625 +n03254737 +n03254862 +n03255030 +n03255167 +n03255322 +n03255488 +n03255899 +n03256032 +n03256166 +n03256472 +n03256631 +n03256788 +n03256928 +n03257065 +n03257210 +n03257586 +n03258192 +n03258330 +n03258456 +n03258577 +n03258905 +n03259009 +n03259280 +n03259401 +n03259505 +n03260206 +n03260504 +n03260733 +n03260849 +n03261019 +n03261263 +n03261395 +n03261603 +n03261776 +n03262072 +n03262248 +n03262519 +n03262717 +n03262809 +n03262932 +n03263076 +n03263338 +n03263640 +n03263758 +n03264906 +n03265032 +n03265754 +n03266195 +n03266371 +n03266620 +n03266749 +n03267113 +n03267468 +n03267696 +n03267821 +n03268142 +n03268311 +n03268645 +n03268790 +n03268918 +n03269073 +n03269203 +n03269401 +n03270165 +n03270695 +n03270854 +n03271030 +n03271260 +n03271376 +n03271574 +n03271765 +n03271865 +n03272010 +n03272125 +n03272239 +n03272383 +n03272562 +n03272810 +n03272940 +n03273061 +n03273551 +n03273740 +n03273913 +n03274265 +n03274435 +n03274561 +n03274796 +n03275125 +n03275311 +n03275566 +n03275681 +n03275864 +n03276179 +n03276696 +n03276839 +n03277004 +n03277149 +n03277459 +n03277602 +n03277771 +n03278248 +n03278914 +n03279153 +n03279364 +n03279508 +n03279804 +n03279918 +n03280216 +n03280394 +n03280644 +n03281145 +n03281524 +n03281673 +n03282060 +n03282295 +n03282401 +n03283221 +n03283413 +n03283827 +n03284308 +n03284482 +n03284743 +n03284886 +n03284981 +n03285578 +n03285730 +n03285912 +n03286572 +n03287351 +n03287733 +n03288003 +n03288500 +n03288643 +n03288742 +n03288886 +n03289660 +n03289985 +n03290096 +n03290195 +n03290653 +n03291413 +n03291551 +n03291741 +n03291819 +n03291963 +n03292085 +n03292362 +n03292475 +n03292603 +n03292736 +n03292960 +n03293095 +n03293741 +n03293863 +n03294048 +n03294604 +n03294833 +n03295012 +n03295140 +n03295246 +n03295928 +n03296081 +n03296217 +n03296328 +n03296478 +n03296963 +n03297103 +n03297226 +n03297495 +n03297644 +n03297735 +n03298089 +n03298352 +n03298716 +n03298858 +n03299406 +n03300216 +n03300443 +n03301175 +n03301291 +n03301389 +n03301568 +n03301833 +n03301940 +n03302671 +n03302790 +n03302938 +n03303217 +n03303669 +n03303831 +n03304197 +n03304323 +n03304465 +n03305300 +n03305522 +n03305953 +n03306385 +n03306869 +n03307037 +n03307573 +n03307792 +n03308152 +n03308481 +n03308614 +n03309110 +n03309356 +n03309465 +n03309687 +n03309808 +n03313333 +n03314227 +n03314378 +n03314608 +n03314780 +n03314884 +n03315644 +n03315805 +n03315990 +n03316105 +n03316406 +n03316873 +n03317233 +n03317510 +n03317673 +n03317788 +n03317889 +n03318136 +n03318294 +n03318865 +n03318983 +n03319167 +n03319457 +n03319576 +n03319745 +n03320046 +n03320262 +n03320421 +n03320519 +n03320845 +n03320959 +n03321103 +n03321419 +n03321563 +n03321843 +n03321954 +n03322570 +n03322704 +n03322836 +n03322940 +n03323096 +n03323211 +n03323319 +n03323703 +n03324629 +n03324814 +n03324928 +n03325088 +n03325288 +n03325403 +n03325584 +n03325691 +n03325941 +n03326073 +n03326371 +n03326475 +n03326660 +n03326795 +n03326948 +n03327133 +n03327234 +n03327553 +n03327691 +n03327841 +n03328201 +n03329302 +n03329536 +n03329663 +n03330002 +n03330665 +n03330792 +n03330947 +n03331077 +n03331244 +n03331599 +n03332005 +n03332173 +n03332271 +n03332393 +n03332591 +n03332784 +n03332989 +n03333129 +n03333252 +n03333349 +n03333610 +n03333711 +n03333851 +n03334017 +n03334291 +n03334382 +n03334492 +n03334912 +n03335030 +n03335333 +n03335461 +n03335846 +n03336168 +n03336282 +n03336575 +n03336742 +n03336839 +n03337140 +n03337383 +n03337494 +n03337822 +n03338287 +n03338821 
+n03339296 +n03339529 +n03339643 +n03340009 +n03340723 +n03340923 +n03341035 +n03341153 +n03341297 +n03341606 +n03342015 +n03342127 +n03342262 +n03342432 +n03342657 +n03342863 +n03342961 +n03343047 +n03343234 +n03343354 +n03343560 +n03343737 +n03343853 +n03344305 +n03344393 +n03344509 +n03344642 +n03344784 +n03344935 +n03345487 +n03345837 +n03346135 +n03346289 +n03346455 +n03347037 +n03347472 +n03347617 +n03348142 +n03348868 +n03349020 +n03349296 +n03349367 +n03349469 +n03349599 +n03349771 +n03349892 +n03350204 +n03350352 +n03350456 +n03350602 +n03351151 +n03351262 +n03351434 +n03351979 +n03352232 +n03352366 +n03352628 +n03352961 +n03353281 +n03353951 +n03354207 +n03354903 +n03355468 +n03355768 +n03355925 +n03356038 +n03356279 +n03356446 +n03356559 +n03356858 +n03356982 +n03357081 +n03357267 +n03357716 +n03358172 +n03358380 +n03358726 +n03358841 +n03359137 +n03359285 +n03359436 +n03359566 +n03360133 +n03360300 +n03360431 +n03360622 +n03360731 +n03361109 +n03361297 +n03361380 +n03361550 +n03361683 +n03362639 +n03362771 +n03362890 +n03363363 +n03363549 +n03363749 +n03364008 +n03364156 +n03364599 +n03364937 +n03365231 +n03365374 +n03365592 +n03365991 +n03366464 +n03366721 +n03366823 +n03366974 +n03367059 +n03367321 +n03367410 +n03367545 +n03367875 +n03367969 +n03368048 +n03368352 +n03369276 +n03369407 +n03369512 +n03369866 +n03370387 +n03370646 +n03371875 +n03372029 +n03372549 +n03372822 +n03372933 +n03373237 +n03373611 +n03373943 +n03374102 +n03374282 +n03374372 +n03374473 +n03374570 +n03374649 +n03374838 +n03375171 +n03375329 +n03375575 +n03376159 +n03376279 +n03376595 +n03376771 +n03376938 +n03378005 +n03378174 +n03378342 +n03378442 +n03378593 +n03378765 +n03379051 +n03379204 +n03379343 +n03379719 +n03379828 +n03379989 +n03380301 +n03380647 +n03380724 +n03380867 +n03381126 +n03381231 +n03381450 +n03381565 +n03381776 +n03382104 +n03382292 +n03382413 +n03382533 +n03382708 +n03382856 +n03382969 +n03383099 +n03383211 +n03383378 +n03383468 +n03383562 +n03383821 +n03384167 +n03384352 +n03384891 +n03385295 +n03385557 +n03386011 +n03386343 +n03386544 +n03386726 +n03386870 +n03387323 +n03387653 +n03388043 +n03388183 +n03388323 +n03388549 +n03388711 +n03388990 +n03389611 +n03389761 +n03389889 +n03389983 +n03390075 +n03390327 +n03390673 +n03390786 +n03390983 +n03391301 +n03391613 +n03391770 +n03392648 +n03392741 +n03393017 +n03393199 +n03393324 +n03393761 +n03393912 +n03394149 +n03394272 +n03394480 +n03394649 +n03394916 +n03395256 +n03395401 +n03395514 +n03395859 +n03396074 +n03396580 +n03396654 +n03396997 +n03397087 +n03397266 +n03397412 +n03397532 +n03397947 +n03398153 +n03398228 +n03399579 +n03399677 +n03399761 +n03399971 +n03400231 +n03400972 +n03401129 +n03401279 +n03401721 +n03402188 +n03402369 +n03402511 +n03402785 +n03402941 +n03403643 +n03404012 +n03404149 +n03404251 +n03404360 +n03404449 +n03404900 +n03405111 +n03405265 +n03405595 +n03405725 +n03406759 +n03406966 +n03407369 +n03407865 +n03408054 +n03408264 +n03408340 +n03408444 +n03409297 +n03409393 +n03409591 +n03409920 +n03410022 +n03410147 +n03410303 +n03410423 +n03410571 +n03410740 +n03410938 +n03411079 +n03411208 +n03411339 +n03411927 +n03412058 +n03412220 +n03412387 +n03412511 +n03412906 +n03413124 +n03413264 +n03413428 +n03413684 +n03413828 +n03414029 +n03414162 +n03414676 +n03415252 +n03415486 +n03415626 +n03415749 +n03415868 +n03416094 +n03416489 +n03416640 +n03416775 +n03416900 +n03417042 +n03417202 +n03417345 +n03417749 +n03417970 +n03418158 +n03418242 +n03418402 +n03418618 +n03418749 +n03418915 +n03419014 +n03420345 +n03420801 
+n03420935 +n03421117 +n03421324 +n03421485 +n03421669 +n03421768 +n03421960 +n03422072 +n03422484 +n03422589 +n03422771 +n03423099 +n03423224 +n03423306 +n03423479 +n03423568 +n03423719 +n03423877 +n03424204 +n03424325 +n03424489 +n03424630 +n03424862 +n03425241 +n03425325 +n03425413 +n03425595 +n03425769 +n03426134 +n03426285 +n03426462 +n03426574 +n03426871 +n03427202 +n03427296 +n03428090 +n03428226 +n03428349 +n03429003 +n03429137 +n03429288 +n03429682 +n03429771 +n03429914 +n03430091 +n03430313 +n03430418 +n03430551 +n03430959 +n03431243 +n03431570 +n03431745 +n03432061 +n03432129 +n03432360 +n03432509 +n03433247 +n03433637 +n03433877 +n03434188 +n03434285 +n03434830 +n03435593 +n03435743 +n03435991 +n03436075 +n03436182 +n03436417 +n03436549 +n03436656 +n03436772 +n03436891 +n03436990 +n03437184 +n03437295 +n03437430 +n03437581 +n03437741 +n03437829 +n03437941 +n03438071 +n03438257 +n03438661 +n03438780 +n03438863 +n03439348 +n03439631 +n03439814 +n03440216 +n03440682 +n03440876 +n03441112 +n03441345 +n03441465 +n03441582 +n03442288 +n03442487 +n03442597 +n03442756 +n03443005 +n03443149 +n03443371 +n03443543 +n03443912 +n03444034 +n03445326 +n03445617 +n03445777 +n03445924 +n03446070 +n03446268 +n03446832 +n03447075 +n03447358 +n03447447 +n03447721 +n03447894 +n03448031 +n03448590 +n03448696 +n03448956 +n03449217 +n03449309 +n03449451 +n03449564 +n03449858 +n03450230 +n03450516 +n03450734 +n03450881 +n03450974 +n03451120 +n03451253 +n03451365 +n03451711 +n03451798 +n03452267 +n03452449 +n03452594 +n03452741 +n03453231 +n03453320 +n03453443 +n03454110 +n03454211 +n03454442 +n03454536 +n03454707 +n03454885 +n03455355 +n03455488 +n03455642 +n03455802 +n03456024 +n03456186 +n03456299 +n03456447 +n03456548 +n03456665 +n03457008 +n03457451 +n03457686 +n03457902 +n03458271 +n03458422 +n03459328 +n03459591 +n03459775 +n03459914 +n03460040 +n03460147 +n03460297 +n03460455 +n03460899 +n03461288 +n03461385 +n03461651 +n03461882 +n03461988 +n03462110 +n03462315 +n03462747 +n03462972 +n03463185 +n03463381 +n03463666 +n03464053 +n03464467 +n03464628 +n03464952 +n03465040 +n03465151 +n03465320 +n03465426 +n03465500 +n03465605 +n03465718 +n03465818 +n03466162 +n03466493 +n03466600 +n03466839 +n03466947 +n03467068 +n03467254 +n03467380 +n03467517 +n03467796 +n03467887 +n03467984 +n03468570 +n03468696 +n03468821 +n03469031 +n03469175 +n03469493 +n03469832 +n03469903 +n03470005 +n03470222 +n03470387 +n03470629 +n03470948 +n03471030 +n03471190 +n03471347 +n03471779 +n03472232 +n03472535 +n03472672 +n03472796 +n03472937 +n03473078 +n03473227 +n03473465 +n03473817 +n03473966 +n03474167 +n03474352 +n03474779 +n03474896 +n03475581 +n03475674 +n03475823 +n03475961 +n03476083 +n03476313 +n03476542 +n03476684 +n03476991 +n03477143 +n03477303 +n03477410 +n03477512 +n03477773 +n03477902 +n03478589 +n03478756 +n03478907 +n03479121 +n03479266 +n03479397 +n03479502 +n03480579 +n03480719 +n03480973 +n03481172 +n03481521 +n03482001 +n03482128 +n03482252 +n03482405 +n03482523 +n03482877 +n03483086 +n03483230 +n03483316 +n03483531 +n03483637 +n03483823 +n03483971 +n03484083 +n03484487 +n03484576 +n03484809 +n03484931 +n03485198 +n03485309 +n03485407 +n03485575 +n03485794 +n03487090 +n03487331 +n03487444 +n03487533 +n03487642 +n03487774 +n03487886 +n03488111 +n03488188 +n03488438 +n03488603 +n03488784 +n03488887 +n03489048 +n03489162 +n03490006 +n03490119 +n03490324 +n03490449 +n03490649 +n03490784 +n03490884 +n03491032 +n03491724 +n03491988 +n03492087 +n03492250 +n03492542 +n03492922 +n03493219 +n03493792 +n03493911 
+n03494278 +n03494537 +n03494706 +n03495039 +n03495258 +n03495570 +n03495671 +n03495941 +n03496183 +n03496296 +n03496486 +n03496612 +n03496892 +n03497100 +n03497352 +n03497657 +n03498441 +n03498536 +n03498662 +n03498781 +n03498866 +n03498962 +n03499354 +n03499468 +n03499907 +n03500090 +n03500209 +n03500295 +n03500389 +n03500457 +n03500557 +n03500699 +n03500838 +n03500971 +n03501152 +n03501288 +n03501520 +n03501614 +n03502200 +n03502331 +n03502509 +n03502777 +n03502897 +n03503097 +n03503233 +n03503358 +n03503477 +n03503567 +n03503718 +n03503997 +n03504205 +n03504293 +n03504723 +n03505015 +n03505133 +n03505383 +n03505504 +n03505667 +n03505764 +n03506028 +n03506184 +n03506370 +n03506560 +n03506727 +n03506880 +n03507241 +n03507458 +n03507658 +n03507963 +n03508101 +n03508485 +n03508881 +n03509394 +n03509608 +n03509843 +n03510072 +n03510244 +n03510384 +n03510487 +n03510583 +n03510866 +n03510987 +n03511175 +n03511333 +n03512030 +n03512147 +n03512452 +n03512624 +n03512911 +n03513137 +n03513376 +n03514129 +n03514340 +n03514451 +n03514693 +n03514894 +n03515338 +n03515934 +n03516266 +n03516367 +n03516647 +n03516844 +n03516996 +n03517509 +n03517647 +n03517760 +n03517899 +n03517982 +n03518135 +n03518230 +n03518305 +n03518445 +n03518631 +n03518829 +n03518943 +n03519081 +n03519226 +n03519387 +n03519674 +n03519848 +n03520493 +n03521076 +n03521431 +n03521544 +n03521675 +n03521771 +n03521899 +n03522003 +n03522100 +n03522634 +n03522863 +n03522990 +n03523134 +n03523398 +n03523506 +n03523987 +n03524150 +n03524287 +n03524425 +n03524574 +n03524745 +n03524976 +n03525074 +n03525252 +n03525454 +n03525693 +n03525827 +n03526062 +n03527149 +n03527444 +n03527565 +n03527675 +n03528100 +n03528263 +n03528523 +n03528901 +n03529175 +n03529444 +n03529629 +n03529860 +n03530189 +n03530511 +n03530642 +n03530910 +n03531281 +n03531447 +n03531546 +n03531691 +n03531982 +n03532342 +n03532672 +n03532919 +n03533014 +n03533392 +n03533486 +n03533654 +n03533845 +n03534580 +n03534695 +n03534776 +n03535024 +n03535284 +n03535647 +n03535780 +n03536122 +n03536568 +n03536761 +n03537085 +n03537241 +n03537412 +n03537550 +n03538037 +n03538179 +n03538300 +n03538406 +n03538542 +n03538634 +n03538817 +n03538957 +n03539103 +n03539293 +n03539433 +n03539546 +n03539678 +n03539754 +n03540090 +n03540267 +n03540476 +n03540595 +n03540914 +n03541091 +n03541269 +n03541393 +n03541537 +n03541696 +n03541923 +n03542333 +n03542605 +n03542727 +n03542860 +n03543012 +n03543112 +n03543254 +n03543394 +n03543511 +n03543603 +n03543735 +n03543945 +n03544143 +n03544238 +n03544360 +n03545150 +n03545470 +n03545585 +n03545756 +n03545961 +n03546112 +n03546235 +n03546340 +n03547054 +n03547229 +n03547397 +n03547530 +n03547861 +n03548086 +n03548195 +n03548320 +n03548402 +n03548533 +n03548626 +n03548930 +n03549199 +n03549350 +n03549473 +n03549589 +n03549732 +n03549897 +n03550153 +n03550289 +n03550420 +n03551084 +n03551395 +n03551582 +n03551790 +n03552001 +n03552449 +n03552749 +n03553019 +n03553248 +n03553486 +n03554375 +n03554460 +n03554645 +n03555006 +n03555217 +n03555426 +n03555564 +n03555662 +n03555862 +n03555996 +n03556173 +n03556679 +n03556811 +n03556992 +n03557270 +n03557360 +n03557590 +n03557692 +n03557840 +n03558007 +n03558176 +n03558404 +n03558633 +n03558739 +n03559373 +n03559531 +n03559999 +n03560430 +n03560860 +n03561047 +n03561169 +n03561573 +n03562565 +n03563200 +n03563460 +n03563710 +n03563967 +n03564849 +n03565288 +n03565565 +n03565710 +n03565830 +n03565991 +n03566193 +n03566329 +n03566555 +n03566730 +n03566860 +n03567066 +n03567635 +n03567788 +n03567912 +n03568117 
+n03568818 +n03569014 +n03569174 +n03569293 +n03569494 +n03571280 +n03571439 +n03571625 +n03571853 +n03571942 +n03572107 +n03572205 +n03572321 +n03572631 +n03573574 +n03573848 +n03574243 +n03574416 +n03574555 +n03574816 +n03575958 +n03576215 +n03576443 +n03576955 +n03577090 +n03577312 +n03577474 +n03577672 +n03577818 +n03578055 +n03578251 +n03578656 +n03578981 +n03579538 +n03579982 +n03580518 +n03580615 +n03580845 +n03580990 +n03581125 +n03581531 +n03581897 +n03582508 +n03582959 +n03583419 +n03583621 +n03584254 +n03584400 +n03584829 +n03585073 +n03585337 +n03585438 +n03585551 +n03585682 +n03585778 +n03585875 +n03586219 +n03586631 +n03586911 +n03587205 +n03588216 +n03588841 +n03588951 +n03589313 +n03589513 +n03589672 +n03589791 +n03590306 +n03590475 +n03590588 +n03590841 +n03590932 +n03591116 +n03591313 +n03591592 +n03591798 +n03591901 +n03592245 +n03592669 +n03592773 +n03592931 +n03593122 +n03593222 +n03593526 +n03593862 +n03594010 +n03594148 +n03594277 +n03594523 +n03594734 +n03594945 +n03595055 +n03595264 +n03595409 +n03595523 +n03595614 +n03595860 +n03596099 +n03596285 +n03596543 +n03597147 +n03597317 +n03597916 +n03598151 +n03598299 +n03598385 +n03598515 +n03598646 +n03598783 +n03598930 +n03599486 +n03599964 +n03600285 +n03600475 +n03600722 +n03600977 +n03601442 +n03601638 +n03601840 +n03602081 +n03602194 +n03602365 +n03602686 +n03602790 +n03602883 +n03603442 +n03603594 +n03603722 +n03604156 +n03604311 +n03604400 +n03604536 +n03604629 +n03604763 +n03604843 +n03605417 +n03605504 +n03605598 +n03605722 +n03605915 +n03606106 +n03606251 +n03606347 +n03606465 +n03607029 +n03607186 +n03607527 +n03607659 +n03607923 +n03608504 +n03609147 +n03609235 +n03609397 +n03609542 +n03609786 +n03609959 +n03610098 +n03610418 +n03610524 +n03610682 +n03610836 +n03610992 +n03612010 +n03612814 +n03612965 +n03613294 +n03613592 +n03614007 +n03614383 +n03614532 +n03614782 +n03614887 +n03615300 +n03615406 +n03615563 +n03615655 +n03615790 +n03616091 +n03616225 +n03616428 +n03616763 +n03616979 +n03617095 +n03617312 +n03617480 +n03617594 +n03617834 +n03618101 +n03618339 +n03618546 +n03618678 +n03618797 +n03618982 +n03619050 +n03619196 +n03619275 +n03619396 +n03619650 +n03619793 +n03619890 +n03620052 +n03620353 +n03620967 +n03621049 +n03621377 +n03621694 +n03622058 +n03622401 +n03622526 +n03622839 +n03622931 +n03623198 +n03623338 +n03623556 +n03624134 +n03624400 +n03624767 +n03625355 +n03625539 +n03625646 +n03625943 +n03626115 +n03626272 +n03626418 +n03626502 +n03626760 +n03627232 +n03627954 +n03628071 +n03628215 +n03628421 +n03628511 +n03628728 +n03628831 +n03628984 +n03629100 +n03629231 +n03629520 +n03629643 +n03630262 +n03630383 +n03631177 +n03631811 +n03631922 +n03632100 +n03632577 +n03632729 +n03632852 +n03632963 +n03633091 +n03633341 +n03633632 +n03633886 +n03634034 +n03634899 +n03635032 +n03635108 +n03635330 +n03635516 +n03635668 +n03635932 +n03636248 +n03636649 +n03637027 +n03637181 +n03637318 +n03637480 +n03637787 +n03637898 +n03638014 +n03638180 +n03638623 +n03638743 +n03638883 +n03639077 +n03639230 +n03639497 +n03639675 +n03639880 +n03640850 +n03640988 +n03641569 +n03641947 +n03642144 +n03642341 +n03642444 +n03642573 +n03642806 +n03643149 +n03643253 +n03643491 +n03643737 +n03643907 +n03644073 +n03644378 +n03644858 +n03645011 +n03645168 +n03645290 +n03645577 +n03646020 +n03646148 +n03646296 +n03646809 +n03646916 +n03647423 +n03647520 +n03648219 +n03648431 +n03648667 +n03649003 +n03649161 +n03649288 +n03649674 +n03649797 +n03649909 +n03650551 +n03651388 +n03651605 +n03651843 +n03652100 +n03652389 +n03652729 
+n03652826 +n03652932 +n03653110 +n03653220 +n03653454 +n03653583 +n03653740 +n03653833 +n03653975 +n03654576 +n03654826 +n03655072 +n03655470 +n03655720 +n03656484 +n03656957 +n03657121 +n03657239 +n03657511 +n03658102 +n03658185 +n03658635 +n03658858 +n03659292 +n03659686 +n03659809 +n03659950 +n03660124 +n03660562 +n03660909 +n03661043 +n03661340 +n03662301 +n03662452 +n03662601 +n03662719 +n03662887 +n03663433 +n03663531 +n03663910 +n03664159 +n03664675 +n03664840 +n03664943 +n03665232 +n03665366 +n03665851 +n03665924 +n03666238 +n03666362 +n03666591 +n03666917 +n03667060 +n03667235 +n03667552 +n03667664 +n03667829 +n03668067 +n03668279 +n03668488 +n03668803 +n03669245 +n03669534 +n03669886 +n03670208 +n03671914 +n03672521 +n03672827 +n03673027 +n03673270 +n03673450 +n03673767 +n03674270 +n03674440 +n03674731 +n03674842 +n03675076 +n03675235 +n03675445 +n03675558 +n03675907 +n03676087 +n03676483 +n03676623 +n03676759 +n03677115 +n03677682 +n03677766 +n03678558 +n03678729 +n03678879 +n03679384 +n03679712 +n03680248 +n03680355 +n03680512 +n03680734 +n03680858 +n03680942 +n03681477 +n03681813 +n03682380 +n03682487 +n03682877 +n03683079 +n03683341 +n03683457 +n03683606 +n03683708 +n03683995 +n03684143 +n03684224 +n03684489 +n03684611 +n03684740 +n03684823 +n03685307 +n03685486 +n03685640 +n03685820 +n03686130 +n03686363 +n03686470 +n03686924 +n03687137 +n03687928 +n03688066 +n03688192 +n03688405 +n03688504 +n03688605 +n03688707 +n03688832 +n03688943 +n03689157 +n03689570 +n03690168 +n03690279 +n03690473 +n03690851 +n03690938 +n03691459 +n03691817 +n03692004 +n03692136 +n03692272 +n03692379 +n03692522 +n03692842 +n03693293 +n03693474 +n03693707 +n03693860 +n03694196 +n03694356 +n03694639 +n03694761 +n03694949 +n03695122 +n03695452 +n03695616 +n03695753 +n03695857 +n03695957 +n03696065 +n03696301 +n03696445 +n03696568 +n03696746 +n03696909 +n03697007 +n03697366 +n03697552 +n03697812 +n03697913 +n03698123 +n03698226 +n03698360 +n03698604 +n03698723 +n03698815 +n03699280 +n03699591 +n03699754 +n03699975 +n03700963 +n03701191 +n03701391 +n03701640 +n03701790 +n03702248 +n03702440 +n03702582 +n03703075 +n03703203 +n03703463 +n03703590 +n03703730 +n03703862 +n03703945 +n03704549 +n03704834 +n03705379 +n03705808 +n03706229 +n03706415 +n03706653 +n03706939 +n03707171 +n03707372 +n03707597 +n03707766 +n03708036 +n03708425 +n03708843 +n03708962 +n03709206 +n03709363 +n03709545 +n03709644 +n03709823 +n03709960 +n03710079 +n03710193 +n03710294 +n03710421 +n03710528 +n03710637 +n03710721 +n03710937 +n03711044 +n03711711 +n03711999 +n03712111 +n03712337 +n03712444 +n03712887 +n03712981 +n03713069 +n03713151 +n03713436 +n03714235 +n03715114 +n03715275 +n03715386 +n03715669 +n03715892 +n03716228 +n03716887 +n03716966 +n03717131 +n03717285 +n03717447 +n03717622 +n03718212 +n03718335 +n03718458 +n03718581 +n03718699 +n03718789 +n03718935 +n03719053 +n03719343 +n03719560 +n03719743 +n03720005 +n03720163 +n03720665 +n03720891 +n03721047 +n03721252 +n03721384 +n03721590 +n03722007 +n03722288 +n03722646 +n03722944 +n03723153 +n03723267 +n03723439 +n03723781 +n03723885 +n03724066 +n03724176 +n03724417 +n03724538 +n03724623 +n03724756 +n03724870 +n03725035 +n03725506 +n03725600 +n03725717 +n03725869 +n03726116 +n03726233 +n03726371 +n03726516 +n03726760 +n03726993 +n03727067 +n03727465 +n03727605 +n03727837 +n03727946 +n03728437 +n03728982 +n03729131 +n03729308 +n03729402 +n03729482 +n03729647 +n03729826 +n03729951 +n03730153 +n03730334 +n03730494 +n03730655 +n03730788 +n03730893 +n03731019 +n03731483 +n03731695 
+n03731882 +n03732020 +n03732114 +n03732458 +n03732543 +n03732658 +n03733131 +n03733281 +n03733465 +n03733547 +n03733644 +n03733805 +n03733925 +n03735637 +n03735963 +n03736064 +n03736147 +n03736269 +n03736372 +n03736470 +n03736970 +n03738066 +n03738241 +n03738472 +n03739518 +n03739693 +n03742019 +n03742115 +n03742238 +n03743016 +n03743279 +n03743902 +n03744276 +n03744684 +n03744840 +n03745146 +n03745487 +n03745571 +n03746005 +n03746155 +n03746330 +n03746486 +n03748162 +n03749504 +n03749634 +n03749807 +n03750206 +n03750437 +n03750614 +n03751065 +n03751269 +n03751458 +n03751590 +n03751757 +n03752071 +n03752185 +n03752398 +n03752922 +n03753077 +n03753514 +n03757604 +n03758089 +n03758220 +n03758894 +n03758992 +n03759243 +n03759432 +n03759661 +n03759954 +n03760310 +n03760671 +n03760944 +n03761084 +n03761588 +n03761731 +n03762238 +n03762332 +n03762434 +n03762602 +n03762982 +n03763727 +n03763968 +n03764276 +n03764606 +n03764736 +n03764822 +n03764995 +n03765128 +n03765467 +n03765561 +n03765934 +n03766044 +n03766218 +n03766322 +n03766508 +n03766600 +n03766697 +n03766935 +n03767112 +n03767203 +n03767459 +n03767745 +n03767966 +n03768132 +n03768683 +n03768823 +n03768916 +n03769610 +n03769722 +n03769881 +n03770085 +n03770224 +n03770316 +n03770439 +n03770520 +n03770679 +n03770834 +n03770954 +n03772077 +n03772269 +n03772584 +n03772674 +n03773035 +n03773504 +n03773835 +n03774327 +n03774461 +n03775071 +n03775199 +n03775388 +n03775546 +n03775636 +n03775747 +n03775847 +n03776167 +n03776460 +n03776877 +n03776997 +n03777126 +n03777568 +n03777754 +n03778459 +n03778817 +n03779000 +n03779128 +n03779246 +n03779370 +n03779884 +n03780047 +n03780799 +n03781055 +n03781244 +n03781467 +n03781594 +n03781683 +n03781787 +n03782006 +n03782190 +n03782794 +n03782929 +n03783304 +n03783430 +n03783575 +n03783873 +n03784139 +n03784270 +n03784793 +n03784896 +n03785016 +n03785142 +n03785237 +n03785499 +n03785721 +n03786096 +n03786194 +n03786313 +n03786621 +n03786715 +n03786901 +n03787032 +n03787523 +n03788047 +n03788195 +n03788365 +n03788498 +n03788601 +n03788914 +n03789171 +n03789400 +n03789603 +n03789794 +n03789946 +n03790230 +n03790512 +n03790755 +n03790953 +n03791053 +n03791235 +n03792048 +n03792334 +n03792526 +n03792782 +n03792972 +n03793489 +n03793850 +n03794056 +n03794136 +n03794798 +n03795123 +n03795269 +n03795758 +n03795976 +n03796181 +n03796401 +n03796522 +n03796605 +n03796848 +n03796974 +n03797062 +n03797182 +n03797264 +n03797390 +n03797896 +n03798061 +n03798442 +n03798610 +n03798982 +n03799113 +n03799240 +n03799375 +n03799610 +n03799876 +n03800371 +n03800485 +n03800563 +n03800772 +n03800933 +n03801353 +n03801533 +n03801671 +n03801760 +n03801880 +n03802007 +n03802228 +n03802393 +n03802643 +n03802800 +n03802973 +n03803116 +n03803284 +n03803780 +n03804211 +n03804744 +n03805180 +n03805280 +n03805374 +n03805503 +n03805725 +n03805933 +n03807334 +n03809211 +n03809312 +n03809603 +n03809686 +n03809802 +n03810412 +n03810952 +n03811295 +n03811444 +n03811847 +n03811965 +n03812263 +n03812382 +n03812789 +n03812924 +n03813078 +n03813176 +n03813946 +n03814528 +n03814639 +n03814727 +n03814817 +n03814906 +n03815149 +n03815278 +n03815482 +n03815615 +n03816005 +n03816136 +n03816394 +n03816530 +n03816849 +n03817191 +n03817331 +n03817522 +n03817647 +n03818001 +n03818343 +n03819047 +n03819336 +n03819448 +n03819595 +n03819994 +n03820154 +n03820318 +n03820728 +n03820950 +n03821145 +n03821424 +n03821518 +n03822171 +n03822361 +n03822504 +n03822656 +n03822767 +n03823111 +n03823216 +n03823312 +n03823673 +n03823906 +n03824197 +n03824284 +n03824381 
+n03824589 +n03824713 +n03824999 +n03825080 +n03825271 +n03825442 +n03825673 +n03825788 +n03825913 +n03826039 +n03826186 +n03827420 +n03827536 +n03828020 +n03829340 +n03829857 +n03829954 +n03831203 +n03831382 +n03831757 +n03832144 +n03832673 +n03833907 +n03834040 +n03834472 +n03834604 +n03835197 +n03835729 +n03835941 +n03836062 +n03836451 +n03836602 +n03836906 +n03836976 +n03837422 +n03837606 +n03837698 +n03837869 +n03838024 +n03838298 +n03838748 +n03838899 +n03839172 +n03839276 +n03839424 +n03839671 +n03839795 +n03840327 +n03840681 +n03840823 +n03841011 +n03841143 +n03841290 +n03841666 +n03842012 +n03842156 +n03842276 +n03842377 +n03842585 +n03842754 +n03842986 +n03843092 +n03843316 +n03843438 +n03843555 +n03843883 +n03844045 +n03844233 +n03844550 +n03844673 +n03844815 +n03844965 +n03845107 +n03845190 +n03845990 +n03846100 +n03846234 +n03846431 +n03846677 +n03846772 +n03846970 +n03847471 +n03847823 +n03848033 +n03848168 +n03848348 +n03848537 +n03849275 +n03849412 +n03849679 +n03849814 +n03849943 +n03850053 +n03850245 +n03850492 +n03850613 +n03851341 +n03851787 +n03852280 +n03852544 +n03852688 +n03853291 +n03853924 +n03854065 +n03854421 +n03854506 +n03854722 +n03854815 +n03855214 +n03855333 +n03855464 +n03855604 +n03855756 +n03855908 +n03856012 +n03856335 +n03856465 +n03856728 +n03857026 +n03857156 +n03857291 +n03857687 +n03857828 +n03858085 +n03858183 +n03858418 +n03858533 +n03858837 +n03859000 +n03859170 +n03859280 +n03859495 +n03859608 +n03859958 +n03860234 +n03860404 +n03861048 +n03861271 +n03861430 +n03861596 +n03861842 +n03862379 +n03862676 +n03862862 +n03863108 +n03863262 +n03863657 +n03863783 +n03863923 +n03864139 +n03864356 +n03864692 +n03865288 +n03865371 +n03865557 +n03865820 +n03865949 +n03866082 +n03867854 +n03868044 +n03868242 +n03868324 +n03868406 +n03868643 +n03868763 +n03868863 +n03869838 +n03869976 +n03870105 +n03870290 +n03870546 +n03870672 +n03870980 +n03871083 +n03871371 +n03871524 +n03871628 +n03871724 +n03871860 +n03872016 +n03872167 +n03872273 +n03873416 +n03873699 +n03873848 +n03873996 +n03874138 +n03874293 +n03874487 +n03874599 +n03874823 +n03875218 +n03875806 +n03875955 +n03876111 +n03876231 +n03877351 +n03877472 +n03877674 +n03877845 +n03878066 +n03878211 +n03878294 +n03878418 +n03878511 +n03878674 +n03878828 +n03878963 +n03879456 +n03879705 +n03880032 +n03880129 +n03880323 +n03880531 +n03881305 +n03881404 +n03881534 +n03882611 +n03882960 +n03883054 +n03883385 +n03883524 +n03883664 +n03883773 +n03883944 +n03884397 +n03884554 +n03884639 +n03884778 +n03884926 +n03885028 +n03885194 +n03885293 +n03885410 +n03885535 +n03885669 +n03885788 +n03885904 +n03886053 +n03886641 +n03886762 +n03886940 +n03887185 +n03887330 +n03887512 +n03887697 +n03887899 +n03888022 +n03888257 +n03888605 +n03888808 +n03888998 +n03889397 +n03889503 +n03889626 +n03889726 +n03889871 +n03890093 +n03890233 +n03890358 +n03890514 +n03891051 +n03891251 +n03891332 +n03891538 +n03892178 +n03892425 +n03892557 +n03892728 +n03893935 +n03894051 +n03894379 +n03894677 +n03894933 +n03895038 +n03895170 +n03895866 +n03896103 +n03896233 +n03896419 +n03896526 +n03896628 +n03896984 +n03897130 +n03897634 +n03897943 +n03898129 +n03898271 +n03898395 +n03898633 +n03898787 +n03899100 +n03899612 +n03899768 +n03899933 +n03900028 +n03900194 +n03900301 +n03900393 +n03900979 +n03901229 +n03901338 +n03901750 +n03901974 +n03902125 +n03902220 +n03902482 +n03902756 +n03903133 +n03903290 +n03903424 +n03903733 +n03903868 +n03904060 +n03904183 +n03904433 +n03904657 +n03904782 +n03904909 +n03905361 +n03905540 +n03905730 +n03905947 
+n03906106 +n03906224 +n03906463 +n03906590 +n03906789 +n03906894 +n03906997 +n03907475 +n03907654 +n03907908 +n03908111 +n03908204 +n03908456 +n03908618 +n03908714 +n03909020 +n03909160 +n03909406 +n03909516 +n03909658 +n03911406 +n03911513 +n03911658 +n03911767 +n03911866 +n03912218 +n03912821 +n03913343 +n03913930 +n03914106 +n03914337 +n03914438 +n03914583 +n03914831 +n03915118 +n03915320 +n03915437 +n03915900 +n03916031 +n03916289 +n03916385 +n03916470 +n03916720 +n03917048 +n03917198 +n03917327 +n03917814 +n03918074 +n03918480 +n03918737 +n03919096 +n03919289 +n03919430 +n03919808 +n03920288 +n03920384 +n03920641 +n03920737 +n03920867 +n03923379 +n03923564 +n03923692 +n03923918 +n03924069 +n03924407 +n03924532 +n03924679 +n03926148 +n03926412 +n03926876 +n03927091 +n03927299 +n03927539 +n03927792 +n03928116 +n03928589 +n03928814 +n03928994 +n03929091 +n03929202 +n03929443 +n03929660 +n03929855 +n03930229 +n03930313 +n03930431 +n03930515 +n03930630 +n03931044 +n03931765 +n03931885 +n03931980 +n03932080 +n03932670 +n03933391 +n03933933 +n03934042 +n03934229 +n03934311 +n03934565 +n03934656 +n03934890 +n03935116 +n03935234 +n03935335 +n03935883 +n03936269 +n03936466 +n03937543 +n03937835 +n03937931 +n03938037 +n03938244 +n03938401 +n03938522 +n03938725 +n03939062 +n03939178 +n03939281 +n03939440 +n03939565 +n03939677 +n03939844 +n03940256 +n03940894 +n03941013 +n03941231 +n03941417 +n03941586 +n03941684 +n03941887 +n03942028 +n03942600 +n03942813 +n03942920 +n03943115 +n03943266 +n03943623 +n03943714 +n03943833 +n03943920 +n03944024 +n03944138 +n03944341 +n03945459 +n03945615 +n03945817 +n03945928 +n03946076 +n03946162 +n03947111 +n03947343 +n03947466 +n03947798 +n03947888 +n03948242 +n03948459 +n03948830 +n03948950 +n03949145 +n03949317 +n03949761 +n03950228 +n03950359 +n03950537 +n03950647 +n03950899 +n03951068 +n03951213 +n03951453 +n03951800 +n03951971 +n03952150 +n03952576 +n03953020 +n03953416 +n03953901 +n03954393 +n03954731 +n03955296 +n03955489 +n03955809 +n03955941 +n03956157 +n03956331 +n03956531 +n03956623 +n03956785 +n03956922 +n03957315 +n03957420 +n03957762 +n03957991 +n03958227 +n03958338 +n03958630 +n03958752 +n03959014 +n03959123 +n03959227 +n03959701 +n03960374 +n03960490 +n03961394 +n03961630 +n03961711 +n03961828 +n03961939 +n03962525 +n03962685 +n03962852 +n03962932 +n03963028 +n03963198 +n03963294 +n03963483 +n03963645 +n03964495 +n03964611 +n03965456 +n03965907 +n03966206 +n03966325 +n03966582 +n03966751 +n03966976 +n03967270 +n03967396 +n03967562 +n03967942 +n03968293 +n03968479 +n03968581 +n03968728 +n03969510 +n03970156 +n03970363 +n03970546 +n03971218 +n03971321 +n03971960 +n03972146 +n03972372 +n03972524 +n03973003 +n03973285 +n03973402 +n03973520 +n03973628 +n03973839 +n03973945 +n03974070 +n03974915 +n03975035 +n03975657 +n03975788 +n03975926 +n03976105 +n03976268 +n03976467 +n03976657 +n03977158 +n03977266 +n03977430 +n03977592 +n03977966 +n03978421 +n03978575 +n03978686 +n03978815 +n03978966 +n03979377 +n03979492 +n03980026 +n03980478 +n03980874 +n03980986 +n03981094 +n03981340 +n03981566 +n03981760 +n03981924 +n03982232 +n03982331 +n03982430 +n03982642 +n03982767 +n03982895 +n03983396 +n03983499 +n03983612 +n03983712 +n03983928 +n03984125 +n03984234 +n03984381 +n03984643 +n03984759 +n03985069 +n03985232 +n03985441 +n03985881 +n03986071 +n03986224 +n03986355 +n03986562 +n03986704 +n03986857 +n03986949 +n03987266 +n03987376 +n03987674 +n03987865 +n03987990 +n03988170 +n03988758 +n03988926 +n03989199 +n03989349 +n03989447 +n03989665 +n03989777 +n03989898 
+n03990474 +n03991062 +n03991202 +n03991321 +n03991443 +n03991646 +n03991837 +n03992325 +n03992436 +n03992509 +n03992703 +n03992975 +n03993053 +n03993180 +n03993403 +n03993703 +n03993878 +n03994008 +n03994297 +n03994417 +n03994614 +n03994757 +n03995018 +n03995265 +n03995372 +n03995535 +n03995661 +n03995856 +n03996004 +n03996145 +n03996416 +n03996849 +n03997274 +n03997484 +n03997875 +n03998194 +n03998333 +n03998673 +n03999064 +n03999160 +n03999621 +n03999992 +n04000311 +n04000480 +n04000592 +n04000716 +n04000998 +n04001132 +n04001265 +n04001397 +n04001499 +n04001661 +n04001845 +n04002262 +n04002371 +n04002629 +n04003241 +n04003359 +n04003856 +n04004099 +n04004210 +n04004475 +n04004767 +n04004990 +n04005197 +n04005630 +n04005912 +n04006067 +n04006227 +n04006330 +n04006411 +n04007415 +n04007664 +n04008385 +n04008634 +n04009552 +n04009801 +n04009923 +n04010057 +n04010779 +n04010927 +n04011827 +n04012084 +n04012482 +n04012665 +n04013060 +n04013176 +n04013600 +n04013729 +n04014297 +n04015204 +n04015786 +n04015908 +n04016240 +n04016479 +n04016576 +n04016684 +n04016846 +n04017571 +n04017807 +n04018155 +n04018399 +n04018667 +n04019101 +n04019335 +n04019541 +n04019696 +n04019881 +n04020087 +n04020298 +n04020744 +n04020912 +n04021028 +n04021164 +n04021362 +n04021503 +n04021704 +n04021798 +n04022332 +n04022434 +n04022708 +n04022866 +n04023021 +n04023119 +n04023249 +n04023422 +n04023695 +n04023962 +n04024137 +n04024274 +n04024862 +n04024983 +n04025508 +n04025633 +n04026053 +n04026180 +n04026417 +n04026813 +n04026918 +n04027023 +n04027367 +n04027706 +n04027820 +n04027935 +n04028074 +n04028221 +n04028315 +n04028581 +n04028764 +n04029416 +n04029647 +n04029734 +n04029913 +n04030054 +n04030161 +n04030274 +n04030414 +n04030518 +n04030846 +n04030965 +n04031884 +n04032509 +n04032603 +n04032936 +n04033287 +n04033425 +n04033557 +n04033801 +n04033901 +n04033995 +n04034262 +n04034367 +n04035231 +n04035634 +n04035748 +n04035836 +n04035912 +n04036155 +n04036303 +n04036776 +n04036963 +n04037076 +n04037220 +n04037298 +n04037443 +n04037873 +n04037964 +n04038231 +n04038338 +n04038440 +n04038727 +n04039041 +n04039209 +n04039381 +n04039742 +n04039848 +n04040247 +n04040373 +n04040540 +n04040759 +n04041069 +n04041243 +n04041408 +n04041544 +n04041747 +n04042076 +n04042204 +n04042358 +n04042632 +n04042795 +n04042985 +n04043168 +n04043411 +n04043733 +n04044307 +n04044498 +n04044716 +n04044955 +n04045085 +n04045255 +n04045397 +n04045644 +n04045787 +n04045941 +n04046091 +n04046277 +n04046400 +n04046590 +n04046974 +n04047139 +n04047401 +n04047733 +n04047834 +n04048441 +n04049303 +n04049405 +n04049585 +n04049753 +n04050066 +n04050313 +n04050600 +n04050933 +n04051269 +n04051439 +n04051549 +n04051705 +n04051825 +n04052235 +n04052346 +n04052442 +n04052658 +n04052757 +n04053508 +n04053677 +n04053767 +n04054361 +n04054566 +n04054670 +n04055180 +n04055447 +n04055700 +n04055861 +n04056073 +n04056180 +n04056413 +n04056932 +n04057047 +n04057215 +n04057435 +n04057673 +n04057846 +n04057981 +n04058096 +n04058239 +n04058486 +n04058594 +n04058721 +n04059157 +n04059298 +n04059399 +n04059516 +n04059947 +n04060198 +n04060448 +n04060647 +n04060904 +n04061681 +n04061793 +n04061969 +n04062179 +n04062428 +n04062644 +n04062807 +n04063154 +n04063373 +n04063868 +n04064213 +n04064401 +n04064747 +n04064862 +n04065272 +n04065464 +n04065789 +n04065909 +n04066023 +n04066270 +n04066388 +n04066476 +n04066767 +n04067143 +n04067231 +n04067353 +n04067472 +n04067658 +n04067818 +n04067921 +n04068441 +n04068601 +n04069166 +n04069276 +n04069434 +n04069582 +n04069777 
+n04070003 +n04070207 +n04070415 +n04070545 +n04070727 +n04070964 +n04071102 +n04071263 +n04071393 +n04072193 +n04072551 +n04072960 +n04073425 +n04073948 +n04074185 +n04074963 +n04075291 +n04075468 +n04075715 +n04075813 +n04075916 +n04076052 +n04076284 +n04076713 +n04077430 +n04077594 +n04077734 +n04077889 +n04078002 +n04078574 +n04078955 +n04079106 +n04079244 +n04079603 +n04079933 +n04080138 +n04080454 +n04080705 +n04080833 +n04081281 +n04081699 +n04081844 +n04082344 +n04082562 +n04082710 +n04082886 +n04083113 +n04083309 +n04083649 +n04083800 +n04084517 +n04084682 +n04084889 +n04085017 +n04085574 +n04085873 +n04086066 +n04086273 +n04086446 +n04086663 +n04086794 +n04086937 +n04087126 +n04087432 +n04087709 +n04087826 +n04088229 +n04088343 +n04088441 +n04088696 +n04088797 +n04089152 +n04089376 +n04089666 +n04089836 +n04089976 +n04090263 +n04090548 +n04090781 +n04091097 +n04091466 +n04091584 +n04091693 +n04092168 +n04093157 +n04093223 +n04093625 +n04093775 +n04093915 +n04094060 +n04094250 +n04094438 +n04094608 +n04094720 +n04094859 +n04095109 +n04095210 +n04095342 +n04095577 +n04095938 +n04096066 +n04096733 +n04096848 +n04097085 +n04097373 +n04097622 +n04097760 +n04097866 +n04098169 +n04098260 +n04098399 +n04098513 +n04098795 +n04099003 +n04099175 +n04099429 +n04099969 +n04100174 +n04100519 +n04101375 +n04101497 +n04101701 +n04101860 +n04102037 +n04102162 +n04102285 +n04102406 +n04102618 +n04102760 +n04102872 +n04102962 +n04103094 +n04103206 +n04103364 +n04103665 +n04103769 +n04103918 +n04104147 +n04104384 +n04104500 +n04104770 +n04104925 +n04105068 +n04105438 +n04105704 +n04105893 +n04107598 +n04107743 +n04107984 +n04108268 +n04108822 +n04108999 +n04110068 +n04110178 +n04110281 +n04110439 +n04110654 +n04110841 +n04110955 +n04111190 +n04111414 +n04111531 +n04111668 +n04111962 +n04112147 +n04112252 +n04112430 +n04112579 +n04112654 +n04112752 +n04112921 +n04113038 +n04113194 +n04113316 +n04113406 +n04113641 +n04113765 +n04113968 +n04114069 +n04114301 +n04114428 +n04114719 +n04114844 +n04114996 +n04115144 +n04115256 +n04115456 +n04115542 +n04115802 +n04115996 +n04116098 +n04116294 +n04116389 +n04116512 +n04117216 +n04117464 +n04117639 +n04118021 +n04118538 +n04118635 +n04118776 +n04119091 +n04119230 +n04119360 +n04119478 +n04119630 +n04119751 +n04120489 +n04120695 +n04120842 +n04121228 +n04121342 +n04121426 +n04121511 +n04121728 +n04122262 +n04122349 +n04122492 +n04122578 +n04122685 +n04122825 +n04123026 +n04123123 +n04123228 +n04123317 +n04123448 +n04123567 +n04123740 +n04124098 +n04124202 +n04124370 +n04124488 +n04124573 +n04124887 +n04125021 +n04125116 +n04125257 +n04125541 +n04125692 +n04125853 +n04126066 +n04126244 +n04126541 +n04126659 +n04126852 +n04126980 +n04127117 +n04127249 +n04127395 +n04127521 +n04127633 +n04127904 +n04128413 +n04128499 +n04128710 +n04128837 +n04129490 +n04129688 +n04129766 +n04130143 +n04130257 +n04130566 +n04130907 +n04131015 +n04131113 +n04131208 +n04131368 +n04131499 +n04131690 +n04131811 +n04131929 +n04132158 +n04132465 +n04132603 +n04132829 +n04132985 +n04133114 +n04133789 +n04134008 +n04134170 +n04134523 +n04134632 +n04135024 +n04135118 +n04135315 +n04135710 +n04135933 +n04136045 +n04136161 +n04136333 +n04136510 +n04136800 +n04137089 +n04137217 +n04137355 +n04137444 +n04137773 +n04137897 +n04138131 +n04138261 +n04138869 +n04138977 +n04139140 +n04139395 +n04139859 +n04140064 +n04140539 +n04140631 +n04140777 +n04140853 +n04141076 +n04141198 +n04141327 +n04141712 +n04141838 +n04141975 +n04142175 +n04142327 +n04142434 +n04142731 +n04142999 +n04143140 +n04143365 
+n04143897 +n04144241 +n04144539 +n04144651 +n04145863 +n04146050 +n04146343 +n04146504 +n04146614 +n04146862 +n04146976 +n04147183 +n04147291 +n04147495 +n04147793 +n04147916 +n04148054 +n04148285 +n04148464 +n04148579 +n04148703 +n04149083 +n04149374 +n04149813 +n04150153 +n04150273 +n04150371 +n04150980 +n04151108 +n04151581 +n04151940 +n04152387 +n04152593 +n04153025 +n04153330 +n04153751 +n04154152 +n04154340 +n04154565 +n04154753 +n04154854 +n04154938 +n04155068 +n04155177 +n04155457 +n04155625 +n04155735 +n04155889 +n04156040 +n04156140 +n04156297 +n04156411 +n04156591 +n04156814 +n04156946 +n04157099 +n04157320 +n04158002 +n04158138 +n04158250 +n04158672 +n04158807 +n04158956 +n04160036 +n04160261 +n04160372 +n04160586 +n04160847 +n04161010 +n04161358 +n04161981 +n04162433 +n04162706 +n04163530 +n04164002 +n04164199 +n04164406 +n04164757 +n04164868 +n04165409 +n04165675 +n04165945 +n04166111 +n04166281 +n04166436 +n04167346 +n04167489 +n04167661 +n04168084 +n04168199 +n04168472 +n04168541 +n04168840 +n04169437 +n04169597 +n04170037 +n04170384 +n04170515 +n04170694 +n04170933 +n04171208 +n04171459 +n04171629 +n04171831 +n04172107 +n04172230 +n04172342 +n04172512 +n04172607 +n04172776 +n04172904 +n04173046 +n04173172 +n04173511 +n04173907 +n04174026 +n04174101 +n04174234 +n04174500 +n04174705 +n04175039 +n04175147 +n04175574 +n04176068 +n04176190 +n04176295 +n04176528 +n04177041 +n04177329 +n04177545 +n04177654 +n04177755 +n04177820 +n04177931 +n04178190 +n04178329 +n04178668 +n04179126 +n04179712 +n04179824 +n04179913 +n04180063 +n04180229 +n04180888 +n04181083 +n04181228 +n04181561 +n04181718 +n04182152 +n04182322 +n04183217 +n04183329 +n04183957 +n04184095 +n04184316 +n04184435 +n04184600 +n04184880 +n04185071 +n04185529 +n04185804 +n04185946 +n04186051 +n04186268 +n04186455 +n04186624 +n04186848 +n04187061 +n04187233 +n04187547 +n04187751 +n04187885 +n04187970 +n04188064 +n04188179 +n04189092 +n04189282 +n04189651 +n04189816 +n04190052 +n04190376 +n04190464 +n04190747 +n04190997 +n04191150 +n04191595 +n04191943 +n04192238 +n04192361 +n04192521 +n04192698 +n04192858 +n04193179 +n04193377 +n04193742 +n04193883 +n04194009 +n04194127 +n04194289 +n04196080 +n04196502 +n04196803 +n04196925 +n04197110 +n04197391 +n04197781 +n04197878 +n04198015 +n04198233 +n04198355 +n04198453 +n04198562 +n04198722 +n04198797 +n04199027 +n04200000 +n04200258 +n04200537 +n04200800 +n04200908 +n04201064 +n04201297 +n04201733 +n04202142 +n04202282 +n04202417 +n04203356 +n04204081 +n04204238 +n04204347 +n04204755 +n04205062 +n04205318 +n04205505 +n04205613 +n04206070 +n04206225 +n04206356 +n04206570 +n04206790 +n04207151 +n04207343 +n04207596 +n04207763 +n04207903 +n04208065 +n04208210 +n04208427 +n04208582 +n04208760 +n04208936 +n04209133 +n04209239 +n04209509 +n04209613 +n04209811 +n04210012 +n04210120 +n04210288 +n04210390 +n04210591 +n04210858 +n04211001 +n04211219 +n04211356 +n04211528 +n04211857 +n04211970 +n04212165 +n04212282 +n04212467 +n04212810 +n04213105 +n04213264 +n04213353 +n04213530 +n04214046 +n04214282 +n04214413 +n04214649 +n04215153 +n04215402 +n04215588 +n04215800 +n04215910 +n04216634 +n04216860 +n04216963 +n04217387 +n04217546 +n04217718 +n04217882 +n04218564 +n04218921 +n04219185 +n04219424 +n04219580 +n04220250 +n04220805 +n04221076 +n04221673 +n04221823 +n04222210 +n04222307 +n04222470 +n04222723 +n04222847 +n04223066 +n04223170 +n04223299 +n04224395 +n04224543 +n04224842 +n04225031 +n04225222 +n04225729 +n04225987 +n04226322 +n04226464 +n04226537 +n04226826 +n04226962 +n04227050 
+n04227144 +n04227519 +n04227787 +n04227900 +n04228054 +n04228215 +n04228422 +n04228581 +n04228693 +n04229007 +n04229107 +n04229480 +n04229620 +n04229737 +n04229816 +n04229959 +n04230387 +n04230487 +n04230603 +n04230707 +n04230808 +n04231272 +n04231693 +n04231905 +n04232153 +n04232312 +n04232437 +n04232800 +n04233027 +n04233124 +n04233295 +n04233715 +n04233832 +n04234160 +n04234260 +n04234455 +n04234670 +n04234763 +n04234887 +n04235291 +n04235646 +n04235771 +n04235860 +n04236001 +n04236377 +n04236702 +n04236809 +n04236935 +n04237174 +n04237287 +n04237423 +n04238128 +n04238321 +n04238617 +n04238763 +n04238953 +n04239074 +n04239218 +n04239333 +n04239436 +n04239639 +n04239786 +n04239900 +n04240434 +n04240752 +n04240867 +n04241042 +n04241249 +n04241394 +n04241573 +n04242084 +n04242315 +n04242408 +n04242587 +n04242704 +n04243003 +n04243142 +n04243251 +n04243546 +n04243941 +n04244379 +n04244847 +n04244997 +n04245218 +n04245412 +n04245508 +n04245847 +n04246060 +n04246271 +n04246459 +n04246731 +n04246855 +n04247011 +n04247440 +n04247544 +n04247630 +n04247736 +n04247876 +n04248209 +n04248396 +n04248507 +n04248851 +n04249415 +n04249582 +n04249882 +n04250224 +n04250473 +n04250599 +n04250692 +n04250850 +n04251144 +n04251701 +n04251791 +n04252077 +n04252225 +n04252331 +n04252560 +n04252653 +n04253057 +n04253168 +n04253304 +n04253931 +n04254009 +n04254120 +n04254450 +n04254680 +n04254777 +n04255163 +n04255346 +n04255499 +n04255586 +n04255670 +n04255768 +n04255899 +n04256318 +n04256520 +n04256758 +n04256891 +n04257223 +n04257684 +n04257790 +n04257986 +n04258138 +n04258333 +n04258438 +n04258618 +n04258732 +n04258859 +n04259202 +n04259468 +n04259630 +n04260192 +n04260364 +n04260589 +n04261116 +n04261281 +n04261369 +n04261506 +n04261638 +n04261767 +n04261868 +n04262161 +n04262530 +n04262678 +n04262869 +n04263257 +n04263336 +n04263502 +n04263760 +n04263950 +n04264134 +n04264233 +n04264361 +n04264485 +n04264628 +n04264765 +n04264914 +n04265275 +n04265428 +n04265904 +n04266014 +n04266162 +n04266375 +n04266486 +n04266849 +n04266968 +n04267091 +n04267165 +n04267246 +n04267435 +n04267577 +n04267985 +n04268142 +n04268275 +n04268418 +n04268565 +n04268799 +n04269086 +n04269270 +n04269502 +n04269668 +n04269822 +n04269944 +n04270147 +n04270371 +n04270576 +n04270891 +n04271148 +n04271531 +n04271793 +n04271891 +n04272054 +n04272389 +n04272782 +n04272928 +n04273064 +n04273285 +n04273569 +n04273659 +n04273796 +n04273972 +n04274686 +n04274985 +n04275093 +n04275175 +n04275283 +n04275548 +n04275661 +n04275904 +n04277352 +n04277493 +n04277669 +n04277826 +n04278247 +n04278353 +n04278447 +n04278605 +n04278932 +n04279063 +n04279172 +n04279353 +n04279462 +n04279858 +n04279987 +n04280259 +n04280373 +n04280487 +n04280845 +n04280970 +n04281260 +n04281375 +n04281571 +n04281998 +n04282231 +n04282494 +n04282872 +n04282992 +n04283096 +n04283255 +n04283378 +n04283585 +n04283784 +n04283905 +n04284002 +n04284341 +n04284438 +n04284572 +n04284869 +n04285008 +n04285146 +n04285622 +n04285803 +n04285965 +n04286128 +n04286575 +n04286960 +n04287351 +n04287451 +n04287747 +n04287898 +n04287986 +n04288165 +n04288272 +n04288533 +n04288673 +n04289027 +n04289195 +n04289449 +n04289576 +n04289690 +n04289827 +n04290079 +n04290259 +n04290507 +n04290615 +n04290762 +n04291069 +n04291242 +n04291759 +n04291992 +n04292080 +n04292221 +n04292414 +n04292572 +n04292921 +n04293119 +n04293258 +n04293744 +n04294212 +n04294426 +n04294614 +n04294879 +n04295081 +n04295353 +n04295571 +n04295777 +n04295881 +n04296562 +n04297098 +n04297750 +n04297847 +n04298053 +n04298661 
+n04298765 +n04299215 +n04299370 +n04299963 +n04300358 +n04300509 +n04300643 +n04301000 +n04301242 +n04301474 +n04301760 +n04302200 +n04302863 +n04302988 +n04303095 +n04303258 +n04303357 +n04303497 +n04304215 +n04304375 +n04304680 +n04305016 +n04305210 +n04305323 +n04305471 +n04305572 +n04305947 +n04306080 +n04306592 +n04306847 +n04307419 +n04307767 +n04307878 +n04307986 +n04308084 +n04308273 +n04308397 +n04308583 +n04308807 +n04308915 +n04309049 +n04309348 +n04309548 +n04309833 +n04310018 +n04310157 +n04310507 +n04310604 +n04310721 +n04310904 +n04311004 +n04311174 +n04311595 +n04312020 +n04312154 +n04312432 +n04312654 +n04312756 +n04312916 +n04313220 +n04313503 +n04313628 +n04314107 +n04314216 +n04314522 +n04314632 +n04314914 +n04315342 +n04315713 +n04315828 +n04315948 +n04316498 +n04316815 +n04316924 +n04317063 +n04317175 +n04317325 +n04317420 +n04317833 +n04317976 +n04318131 +n04318787 +n04318892 +n04318982 +n04319545 +n04319774 +n04319937 +n04320405 +n04320598 +n04320871 +n04320973 +n04321121 +n04321453 +n04322026 +n04322531 +n04322692 +n04322801 +n04323519 +n04323819 +n04324120 +n04324297 +n04324387 +n04324515 +n04325041 +n04325208 +n04325704 +n04325804 +n04325968 +n04326547 +n04326676 +n04326799 +n04326896 +n04327204 +n04327544 +n04327682 +n04328054 +n04328186 +n04328329 +n04328580 +n04328703 +n04328946 +n04329477 +n04329681 +n04329834 +n04329958 +n04330109 +n04330189 +n04330267 +n04330340 +n04330669 +n04330746 +n04330896 +n04330998 +n04331277 +n04331443 +n04331639 +n04331765 +n04331892 +n04332074 +n04332243 +n04332580 +n04332987 +n04333129 +n04333869 +n04334105 +n04334365 +n04334504 +n04334599 +n04335209 +n04335435 +n04335693 +n04335886 +n04336792 +n04337157 +n04337287 +n04337503 +n04337650 +n04338517 +n04338963 +n04339062 +n04339191 +n04339638 +n04339879 +n04340019 +n04340521 +n04340750 +n04340935 +n04341133 +n04341288 +n04341414 +n04341686 +n04343511 +n04343630 +n04343740 +n04344003 +n04344734 +n04344873 +n04345028 +n04345201 +n04345787 +n04346003 +n04346157 +n04346328 +n04346428 +n04346511 +n04346679 +n04346855 +n04347119 +n04347519 +n04347754 +n04348070 +n04348184 +n04348359 +n04348988 +n04349189 +n04349306 +n04349401 +n04349913 +n04350104 +n04350235 +n04350458 +n04350581 +n04350688 +n04350769 +n04350905 +n04351550 +n04351699 +n04353573 +n04354026 +n04354182 +n04354387 +n04354487 +n04354589 +n04355115 +n04355267 +n04355338 +n04355511 +n04355684 +n04355821 +n04355933 +n04356056 +n04356595 +n04356772 +n04356925 +n04357121 +n04357314 +n04357531 +n04357930 +n04358117 +n04358256 +n04358491 +n04358707 +n04358874 +n04359034 +n04359124 +n04359217 +n04359335 +n04359500 +n04359589 +n04360501 +n04360798 +n04360914 +n04361095 +n04361260 +n04361937 +n04362624 +n04362821 +n04362972 +n04363082 +n04363210 +n04363412 +n04363671 +n04363777 +n04363874 +n04363991 +n04364160 +n04364397 +n04364545 +n04364827 +n04364994 +n04365112 +n04365229 +n04365328 +n04365484 +n04365751 +n04366033 +n04366116 +n04366367 +n04366832 +n04367011 +n04367371 +n04367480 +n04367746 +n04367950 +n04368109 +n04368235 +n04368365 +n04368496 +n04368695 +n04368840 +n04369025 +n04369282 +n04369485 +n04369618 +n04370048 +n04370288 +n04370456 +n04370600 +n04370774 +n04370955 +n04371050 +n04371430 +n04371563 +n04371774 +n04371979 +n04372370 +n04373089 +n04373428 +n04373563 +n04373704 +n04373795 +n04373894 +n04374315 +n04374521 +n04374735 +n04374907 +n04375080 +n04375241 +n04375405 +n04375615 +n04375775 +n04375926 +n04376400 +n04376876 +n04377057 +n04378489 +n04378651 +n04378956 +n04379096 +n04379243 +n04379964 +n04380255 +n04380346 
+n04380533 +n04380916 +n04381073 +n04381450 +n04381587 +n04381724 +n04381860 +n04381994 +n04382334 +n04382438 +n04382537 +n04382695 +n04382880 +n04383015 +n04383130 +n04383301 +n04383839 +n04383923 +n04384593 +n04384910 +n04385079 +n04385157 +n04385536 +n04385799 +n04386051 +n04386456 +n04386664 +n04386792 +n04387095 +n04387201 +n04387261 +n04387400 +n04387531 +n04387706 +n04387932 +n04388040 +n04388162 +n04388473 +n04388574 +n04388743 +n04389033 +n04389430 +n04389521 +n04389718 +n04389854 +n04389999 +n04390483 +n04390577 +n04390873 +n04390977 +n04391445 +n04391838 +n04392113 +n04392526 +n04392764 +n04392985 +n04393095 +n04393301 +n04393549 +n04393808 +n04393913 +n04394031 +n04394261 +n04394421 +n04394630 +n04395024 +n04395106 +n04395332 +n04395651 +n04395875 +n04396226 +n04396335 +n04396650 +n04396808 +n04396902 +n04397027 +n04397168 +n04397261 +n04397452 +n04397645 +n04397768 +n04397860 +n04398044 +n04398497 +n04398688 +n04398834 +n04398951 +n04399046 +n04399158 +n04399537 +n04399846 +n04400109 +n04400289 +n04400499 +n04400737 +n04400899 +n04401088 +n04401578 +n04401680 +n04401828 +n04401949 +n04402057 +n04402342 +n04402449 +n04402580 +n04402746 +n04402984 +n04403413 +n04403524 +n04403638 +n04403925 +n04404072 +n04404200 +n04404412 +n04404817 +n04404997 +n04405540 +n04405762 +n04405907 +n04406239 +n04406552 +n04406687 +n04406817 +n04407257 +n04407435 +n04407686 +n04408871 +n04409011 +n04409128 +n04409279 +n04409384 +n04409515 +n04409625 +n04409806 +n04409911 +n04410086 +n04410365 +n04410485 +n04410565 +n04410663 +n04410760 +n04410886 +n04411019 +n04411264 +n04411835 +n04411966 +n04412097 +n04412300 +n04412416 +n04413151 +n04413419 +n04413969 +n04414101 +n04414199 +n04414319 +n04414476 +n04414675 +n04414909 +n04415257 +n04415663 +n04415815 +n04416005 +n04416901 +n04417086 +n04417180 +n04417361 +n04417672 +n04417809 +n04418357 +n04418644 +n04419073 +n04419642 +n04419868 +n04420024 +n04420720 +n04421083 +n04421258 +n04421417 +n04421582 +n04421740 +n04421872 +n04422409 +n04422566 +n04422727 +n04422875 +n04423552 +n04423687 +n04423845 +n04424692 +n04425804 +n04425977 +n04426184 +n04426316 +n04426427 +n04427216 +n04427473 +n04427559 +n04427715 +n04427857 +n04428008 +n04428191 +n04428382 +n04428634 +n04429038 +n04429376 +n04430475 +n04430605 +n04430896 +n04431025 +n04431436 +n04431648 +n04431745 +n04431925 +n04432043 +n04432203 +n04432662 +n04432785 +n04433377 +n04433585 +n04434207 +n04434531 +n04434932 +n04435180 +n04435552 +n04435653 +n04435759 +n04435870 +n04436012 +n04436185 +n04436329 +n04436401 +n04436542 +n04436832 +n04436992 +n04437276 +n04437380 +n04437670 +n04437953 +n04438304 +n04438507 +n04438643 +n04438897 +n04439505 +n04439585 +n04439712 +n04440597 +n04440963 +n04441093 +n04441528 +n04441662 +n04441790 +n04442312 +n04442441 +n04442582 +n04442741 +n04443164 +n04443257 +n04443433 +n04443766 +n04444121 +n04444218 +n04444749 +n04444953 +n04445040 +n04445154 +n04445327 +n04445610 +n04445782 +n04445952 +n04446162 +n04446276 +n04446844 +n04447028 +n04447156 +n04447276 +n04447443 +n04447861 +n04448070 +n04448185 +n04448361 +n04449290 +n04449449 +n04449550 +n04449700 +n04449966 +n04450133 +n04450243 +n04450465 +n04450640 +n04450749 +n04450994 +n04451139 +n04451318 +n04451636 +n04451818 +n04452528 +n04452615 +n04452757 +n04452848 +n04453037 +n04453156 +n04453390 +n04453666 +n04453910 +n04454654 +n04454792 +n04454908 +n04455048 +n04455250 +n04455579 +n04455652 +n04456011 +n04456115 +n04456472 +n04456734 +n04457157 +n04457326 +n04457474 +n04457638 +n04457767 +n04457910 +n04458201 +n04458633 
+n04458843 +n04459018 +n04459122 +n04459243 +n04459362 +n04459610 +n04459773 +n04459909 +n04460130 +n04461437 +n04461570 +n04461696 +n04461879 +n04462011 +n04462240 +n04462576 +n04463679 +n04464125 +n04464615 +n04464852 +n04465050 +n04465203 +n04465358 +n04465501 +n04465666 +n04466871 +n04467099 +n04467307 +n04467506 +n04467665 +n04467899 +n04468005 +n04469003 +n04469251 +n04469514 +n04469684 +n04469813 +n04470741 +n04471148 +n04471315 +n04471632 +n04471912 +n04472243 +n04472563 +n04472726 +n04472961 +n04473108 +n04473275 +n04473884 +n04474035 +n04474187 +n04474466 +n04475309 +n04475411 +n04475496 +n04475631 +n04475749 +n04475900 +n04476116 +n04476259 +n04476526 +n04476831 +n04476972 +n04477219 +n04477387 +n04477548 +n04477725 +n04478066 +n04478383 +n04478512 +n04478657 +n04479046 +n04479287 +n04479405 +n04479526 +n04479694 +n04479823 +n04479939 +n04480033 +n04480141 +n04480303 +n04480527 +n04480853 +n04480995 +n04481524 +n04481642 +n04482177 +n04482297 +n04482393 +n04482975 +n04483073 +n04483307 +n04483925 +n04484024 +n04484432 +n04485082 +n04485423 +n04485586 +n04485750 +n04485884 +n04486054 +n04486213 +n04486322 +n04486616 +n04486934 +n04487081 +n04487394 +n04487724 +n04487894 +n04488202 +n04488427 +n04488530 +n04488742 +n04488857 +n04489008 +n04489695 +n04489817 +n04490091 +n04491312 +n04491388 +n04491638 +n04491769 +n04491934 +n04492060 +n04492157 +n04492375 +n04492749 +n04493109 +n04493259 +n04493381 +n04494204 +n04495051 +n04495183 +n04495310 +n04495450 +n04495555 +n04495698 +n04495843 +n04496614 +n04496726 +n04496872 +n04497249 +n04497442 +n04497570 +n04497801 +n04498275 +n04498389 +n04498523 +n04498873 +n04499062 +n04499300 +n04499446 +n04499554 +n04499810 +n04500060 +n04500390 +n04501127 +n04501281 +n04501370 +n04501550 +n04501837 +n04501947 +n04502059 +n04502197 +n04502502 +n04502670 +n04502851 +n04502989 +n04503073 +n04503155 +n04503269 +n04503413 +n04503499 +n04503593 +n04503705 +n04504038 +n04504141 +n04504770 +n04505036 +n04505345 +n04505470 +n04505888 +n04506289 +n04506402 +n04506506 +n04506688 +n04506895 +n04506994 +n04507155 +n04507326 +n04507453 +n04507689 +n04508163 +n04508489 +n04508949 +n04509171 +n04509260 +n04509417 +n04509592 +n04510706 +n04511002 +n04513827 +n04513998 +n04514095 +n04514241 +n04514648 +n04515003 +n04515444 +n04515729 +n04515890 +n04516116 +n04516214 +n04516354 +n04516672 +n04517211 +n04517408 +n04517823 +n04517999 +n04518132 +n04518343 +n04518643 +n04518764 +n04519153 +n04519536 +n04519728 +n04519887 +n04520170 +n04520382 +n04520784 +n04520962 +n04521571 +n04521863 +n04521987 +n04522168 +n04523525 +n04523831 +n04524142 +n04524313 +n04524594 +n04524716 +n04524941 +n04525038 +n04525191 +n04525305 +n04525417 +n04525584 +n04525821 +n04526520 +n04526800 +n04526964 +n04527648 +n04528079 +n04528968 +n04529108 +n04529681 +n04529962 +n04530283 +n04530456 +n04530566 +n04531098 +n04531873 +n04532022 +n04532106 +n04532398 +n04532504 +n04532670 +n04532831 +n04533042 +n04533199 +n04533499 +n04533594 +n04533700 +n04533802 +n04533946 +n04534127 +n04534359 +n04534520 +n04534895 +n04535252 +n04535370 +n04535524 +n04536153 +n04536335 +n04536465 +n04536595 +n04536765 +n04536866 +n04537436 +n04538249 +n04538403 +n04538552 +n04538878 +n04539053 +n04539203 +n04539407 +n04539794 +n04540053 +n04540255 +n04540397 +n04540761 +n04541136 +n04541320 +n04541662 +n04541777 +n04541987 +n04542095 +n04542329 +n04542474 +n04542595 +n04542715 +n04542858 +n04542943 +n04543158 +n04543509 +n04543636 +n04543772 +n04543924 +n04543996 +n04544325 +n04544450 +n04545305 +n04545471 +n04545748 
+n04545858 +n04545984 +n04546081 +n04546194 +n04546340 +n04546595 +n04546855 +n04547592 +n04548280 +n04548362 +n04549028 +n04549122 +n04549629 +n04549721 +n04549919 +n04550184 +n04550676 +n04551055 +n04551833 +n04552097 +n04552348 +n04552551 +n04552696 +n04553389 +n04553561 +n04553703 +n04554211 +n04554406 +n04554684 +n04554871 +n04554998 +n04555291 +n04555400 +n04555600 +n04555700 +n04555897 +n04556408 +n04556533 +n04556664 +n04556948 +n04557308 +n04557522 +n04557648 +n04557751 +n04558059 +n04558199 +n04558478 +n04558804 +n04559023 +n04559166 +n04559451 +n04559620 +n04559730 +n04559910 +n04559994 +n04560113 +n04560292 +n04560502 +n04560619 +n04560804 +n04560882 +n04561010 +n04561287 +n04561422 +n04561734 +n04561857 +n04561965 +n04562122 +n04562262 +n04562496 +n04562935 +n04563020 +n04563204 +n04563413 +n04563560 +n04563790 +n04564278 +n04564581 +n04565039 +n04565375 +n04566257 +n04566561 +n04566756 +n04567098 +n04567593 +n04567746 +n04568069 +n04568557 +n04568713 +n04568841 +n04569063 +n04569520 +n04569822 +n04570118 +n04570214 +n04570416 +n04570532 +n04570815 +n04570958 +n04571292 +n04571566 +n04571686 +n04571800 +n04571958 +n04572121 +n04572235 +n04572935 +n04573045 +n04573281 +n04573379 +n04573513 +n04573625 +n04573832 +n04573937 +n04574067 +n04574348 +n04574471 +n04574606 +n04574999 +n04575723 +n04575824 +n04576002 +n04576211 +n04576971 +n04577139 +n04577293 +n04577426 +n04577567 +n04577769 +n04578112 +n04578329 +n04578559 +n04578708 +n04578801 +n04578934 +n04579056 +n04579145 +n04579230 +n04579432 +n04579667 +n04579986 +n04580493 +n04581102 +n04581595 +n04581829 +n04582205 +n04582349 +n04582771 +n04582869 +n04583022 +n04583212 +n04583620 +n04583888 +n04583967 +n04584056 +n04584207 +n04584373 +n04585128 +n04585318 +n04585456 +n04585626 +n04585745 +n04585980 +n04586072 +n04586581 +n04586932 +n04587327 +n04587404 +n04587559 +n04587648 +n04588739 +n04589190 +n04589325 +n04589434 +n04589593 +n04589890 +n04590021 +n04590129 +n04590263 +n04590553 +n04590746 +n04590933 +n04591056 +n04591157 +n04591249 +n04591359 +n04591517 +n04591631 +n04591713 +n04591887 +n04592005 +n04592099 +n04592356 +n04592465 +n04592596 +n04592741 +n04593077 +n04593185 +n04593376 +n04593524 +n04593629 +n04593866 +n04594114 +n04594218 +n04594489 +n04594742 +n04594828 +n04594919 +n04595028 +n04595285 +n04595501 +n04595611 +n04595762 +n04595855 +n04596116 +n04596492 +n04596742 +n04596852 +n04597066 +n04597309 +n04597400 +n04597804 +n04597913 +n04598136 +n04598318 +n04598416 +n04598582 +n04598965 +n04599124 +n04599235 +n04600312 +n04600486 +n04600912 +n04601041 +n04601159 +n04601938 +n04602762 +n04602840 +n04602956 +n04603399 +n04603729 +n04603872 +n04604276 +n04604644 +n04604806 +n04605057 +n04605163 +n04605321 +n04605446 +n04605572 +n04605726 +n04606251 +n04606574 +n04607035 +n04607242 +n04607640 +n04607759 +n04607869 +n04607982 +n04608329 +n04608435 +n04608567 +n04608809 +n04608923 +n04609531 +n04609651 +n04609811 +n04610013 +n04610176 +n04610274 +n04610503 +n04610676 +n04611351 +n04611795 +n04611916 +n04612026 +n04612159 +n04612257 +n04612373 +n04612504 +n04612840 +n04613015 +n04613158 +n04613696 +n04613939 +n04614505 +n04614655 +n04614844 +n04615149 +n04615226 +n04615644 +n04682018 +n04950713 +n04950952 +n04951071 +n04951186 +n04951373 +n04951716 +n04951875 +n04953296 +n04953678 +n04955160 +n04957356 +n04957589 +n04958634 +n04958865 +n04959061 +n04959230 +n04959672 +n04960277 +n04960582 +n04961062 +n04961331 +n04961691 +n04962062 +n04962240 +n04963111 +n04963307 +n04963588 +n04963740 +n04964001 +n04964799 +n04964878 
+n04965179 +n04965451 +n04965661 +n04966543 +n04966941 +n04967191 +n04967561 +n04967674 +n04967801 +n04967882 +n04968056 +n04968139 +n04968749 +n04968895 +n04969242 +n04969540 +n04969798 +n04969952 +n04970059 +n04970312 +n04970398 +n04970470 +n04970631 +n04970916 +n04971211 +n04971313 +n04972350 +n04972451 +n04972801 +n04973020 +n04973291 +n04973386 +n04973585 +n04973669 +n04973816 +n04974145 +n04974340 +n04974859 +n04975739 +n04976319 +n04976952 +n04977412 +n04978561 +n04979002 +n04979307 +n04981658 +n05102764 +n05218119 +n05233741 +n05235879 +n05238282 +n05239437 +n05241218 +n05241485 +n05241662 +n05242070 +n05242239 +n05242928 +n05244421 +n05244755 +n05244934 +n05245192 +n05257476 +n05257967 +n05258051 +n05258627 +n05259914 +n05260127 +n05260240 +n05261310 +n05262422 +n05262534 +n05262698 +n05263183 +n05263316 +n05263448 +n05265736 +n05266096 +n05266879 +n05278922 +n05279953 +n05282652 +n05285623 +n05302499 +n05314075 +n05399034 +n05399243 +n05399356 +n05418717 +n05427346 +n05442594 +n05447757 +n05448704 +n05448827 +n05449196 +n05449661 +n05449959 +n05450617 +n05451099 +n05451384 +n05453412 +n05453657 +n05453815 +n05454833 +n05454978 +n05455113 +n05458173 +n05458576 +n05459101 +n05459457 +n05459769 +n05460759 +n05464534 +n05467054 +n05467758 +n05468098 +n05468739 +n05469664 +n05469861 +n05475397 +n05482922 +n05486510 +n05491154 +n05526957 +n05538625 +n05539947 +n05541509 +n05542893 +n05545879 +n05571341 +n05578095 +n05581932 +n05584746 +n05586759 +n05604434 +n05716342 +n06008896 +n06209940 +n06254669 +n06255081 +n06255613 +n06259898 +n06262567 +n06262943 +n06263202 +n06263369 +n06263609 +n06263762 +n06263895 +n06266417 +n06266633 +n06266710 +n06266878 +n06266973 +n06267145 +n06267564 +n06267655 +n06267758 +n06267893 +n06267991 +n06271778 +n06272290 +n06272612 +n06272803 +n06273207 +n06273294 +n06273414 +n06273555 +n06273743 +n06273890 +n06273986 +n06274092 +n06274292 +n06274546 +n06274760 +n06274921 +n06275095 +n06275353 +n06275471 +n06276501 +n06276697 +n06276902 +n06277025 +n06277135 +n06277280 +n06278338 +n06278475 +n06281040 +n06281175 +n06340977 +n06359193 +n06359467 +n06359657 +n06415688 +n06417096 +n06418693 +n06419354 +n06423496 +n06470073 +n06591815 +n06592078 +n06592281 +n06592421 +n06595351 +n06596179 +n06596364 +n06596474 +n06596607 +n06596727 +n06596845 +n06613686 +n06614901 +n06616216 +n06618653 +n06625062 +n06785654 +n06793231 +n06794110 +n06874185 +n06883725 +n06892775 +n06998748 +n07005523 +n07248320 +n07273802 +n07461050 +n07556406 +n07556637 +n07556872 +n07556970 +n07557165 +n07557434 +n07560193 +n07560331 +n07560422 +n07560542 +n07560652 +n07560903 +n07561112 +n07561590 +n07561848 +n07562017 +n07562172 +n07562379 +n07562495 +n07562651 +n07562881 +n07562984 +n07563207 +n07563366 +n07563642 +n07563800 +n07564008 +n07564101 +n07564292 +n07564515 +n07564629 +n07564796 +n07564971 +n07565083 +n07565161 +n07565259 +n07565608 +n07565725 +n07565945 +n07566092 +n07566231 +n07566340 +n07566863 +n07567039 +n07567139 +n07567390 +n07567611 +n07567707 +n07567980 +n07568095 +n07568241 +n07568389 +n07568502 +n07568625 +n07568818 +n07568991 +n07569106 +n07569423 +n07569543 +n07569644 +n07569873 +n07570021 +n07570530 +n07570720 +n07572353 +n07572616 +n07572858 +n07572957 +n07573103 +n07573347 +n07573453 +n07573563 +n07573696 +n07574176 +n07574426 +n07574504 +n07574602 +n07574780 +n07574923 +n07575076 +n07575226 +n07575392 +n07575510 +n07575726 +n07575984 +n07576182 +n07576438 +n07576577 +n07576781 +n07576969 +n07577144 +n07577374 +n07577538 +n07577657 +n07577772 +n07577918 +n07578093 
+n07579575 +n07579688 +n07579787 +n07579917 +n07580053 +n07580253 +n07580359 +n07580470 +n07580592 +n07581249 +n07581346 +n07581607 +n07581775 +n07581931 +n07582027 +n07582152 +n07582277 +n07582441 +n07582609 +n07582811 +n07582892 +n07582970 +n07583066 +n07583197 +n07583865 +n07583978 +n07584110 +n07584228 +n07584332 +n07584423 +n07584593 +n07584859 +n07584938 +n07585015 +n07585107 +n07585208 +n07585474 +n07585557 +n07585644 +n07585758 +n07585906 +n07585997 +n07586099 +n07586179 +n07586318 +n07586485 +n07586604 +n07586718 +n07586894 +n07587023 +n07587111 +n07587206 +n07587331 +n07587441 +n07587618 +n07587700 +n07587819 +n07587962 +n07588111 +n07588193 +n07588299 +n07588419 +n07588574 +n07588688 +n07588817 +n07588947 +n07589458 +n07589543 +n07589724 +n07589872 +n07589967 +n07590068 +n07590177 +n07590320 +n07590502 +n07590611 +n07590752 +n07590841 +n07590974 +n07591049 +n07591162 +n07591236 +n07591330 +n07591473 +n07591586 +n07591813 +n07591961 +n07592094 +n07592317 +n07592400 +n07592481 +n07592656 +n07592768 +n07592922 +n07593004 +n07593107 +n07593199 +n07593471 +n07593774 +n07593972 +n07594066 +n07594155 +n07594250 +n07594737 +n07594840 +n07595051 +n07595180 +n07595368 +n07595649 +n07595751 +n07595914 +n07596046 +n07596160 +n07596362 +n07596452 +n07596566 +n07596684 +n07596967 +n07597145 +n07597263 +n07597365 +n07598256 +n07598529 +n07598622 +n07598734 +n07598928 +n07599068 +n07599161 +n07599242 +n07599383 +n07599468 +n07599554 +n07599649 +n07599783 +n07599911 +n07599998 +n07600177 +n07600285 +n07600394 +n07600506 +n07600696 +n07600895 +n07601025 +n07601175 +n07601290 +n07601407 +n07601572 +n07601686 +n07601809 +n07602650 +n07604956 +n07605040 +n07605198 +n07605282 +n07605380 +n07605474 +n07605597 +n07605693 +n07605804 +n07605944 +n07606058 +n07606191 +n07606278 +n07606419 +n07606538 +n07606669 +n07606764 +n07606933 +n07607027 +n07607138 +n07607361 +n07607492 +n07607605 +n07607707 +n07607832 +n07607967 +n07608098 +n07608245 +n07608339 +n07608429 +n07608533 +n07608641 +n07608721 +n07608866 +n07608980 +n07609083 +n07609215 +n07609316 +n07609407 +n07609549 +n07609632 +n07609728 +n07609840 +n07610295 +n07610502 +n07610620 +n07610746 +n07610890 +n07611046 +n07611148 +n07611267 +n07611358 +n07611733 +n07611839 +n07611991 +n07612137 +n07612273 +n07612367 +n07612530 +n07612632 +n07612996 +n07613158 +n07613266 +n07613480 +n07613671 +n07613815 +n07614103 +n07614198 +n07614348 +n07614500 +n07614730 +n07614825 +n07615052 +n07615190 +n07615289 +n07615460 +n07615569 +n07615671 +n07615774 +n07615954 +n07616046 +n07616174 +n07616265 +n07616386 +n07616487 +n07616590 +n07616748 +n07616906 +n07617051 +n07617188 +n07617344 +n07617447 +n07617526 +n07617611 +n07617708 +n07617839 +n07617932 +n07618029 +n07618119 +n07618281 +n07618432 +n07618587 +n07618684 +n07618871 +n07619004 +n07619208 +n07619301 +n07619409 +n07619508 +n07619881 +n07620047 +n07620145 +n07620327 +n07620597 +n07620689 +n07621264 +n07621497 +n07621618 +n07623136 +n07624466 +n07624666 +n07624757 +n07624924 +n07625061 +n07625324 +n07627931 +n07628068 +n07628181 +n07631926 +n07639069 +n07641928 +n07642361 +n07642471 +n07642742 +n07642833 +n07642933 +n07643026 +n07643200 +n07643306 +n07643474 +n07643577 +n07643679 +n07643764 +n07643891 +n07643981 +n07644244 +n07648913 +n07648997 +n07650792 +n07650903 +n07651025 +n07654148 +n07654298 +n07655067 +n07655263 +n07663899 +n07665438 +n07666176 +n07672914 +n07678586 +n07678729 +n07678953 +n07679034 +n07679140 +n07679356 +n07680168 +n07680313 +n07680416 +n07680517 +n07680655 +n07680761 +n07680932 +n07681264 
+n07681355 +n07681450 +n07681691 +n07681805 +n07681926 +n07682197 +n07682316 +n07682477 +n07682624 +n07682808 +n07682952 +n07683039 +n07683138 +n07683265 +n07683360 +n07683490 +n07683617 +n07683786 +n07684084 +n07684164 +n07684289 +n07684422 +n07684517 +n07684600 +n07684938 +n07685031 +n07685118 +n07685218 +n07685303 +n07685399 +n07685546 +n07685730 +n07685918 +n07686021 +n07686202 +n07686299 +n07686461 +n07686634 +n07686720 +n07686873 +n07687053 +n07687211 +n07687381 +n07687469 +n07687626 +n07687789 +n07688021 +n07688130 +n07688265 +n07688412 +n07688624 +n07688757 +n07688898 +n07689003 +n07689217 +n07689313 +n07689490 +n07689624 +n07689757 +n07689842 +n07690019 +n07690152 +n07690273 +n07690431 +n07690511 +n07690585 +n07690739 +n07690892 +n07691091 +n07691237 +n07691539 +n07691650 +n07691758 +n07691863 +n07691954 +n07692114 +n07692248 +n07692405 +n07692517 +n07692614 +n07692887 +n07693048 +n07693223 +n07693439 +n07693590 +n07693725 +n07693889 +n07693972 +n07694169 +n07694403 +n07694516 +n07694659 +n07694839 +n07695187 +n07695284 +n07695410 +n07695504 +n07695652 +n07695742 +n07695878 +n07695965 +n07696403 +n07696527 +n07696625 +n07696728 +n07696839 +n07696977 +n07697100 +n07697313 +n07697408 +n07697537 +n07697699 +n07697825 +n07698250 +n07698401 +n07698543 +n07698672 +n07698782 +n07700003 +n07703889 +n07704054 +n07704205 +n07704305 +n07705931 +n07707451 +n07708124 +n07708398 +n07708512 +n07708685 +n07708798 +n07709046 +n07709172 +n07709333 +n07709701 +n07709881 +n07710007 +n07710283 +n07710616 +n07710952 +n07711080 +n07711232 +n07711371 +n07711569 +n07711683 +n07711799 +n07711907 +n07712063 +n07712267 +n07712382 +n07712559 +n07712748 +n07712856 +n07712959 +n07713074 +n07713267 +n07713395 +n07713763 +n07713895 +n07714078 +n07714188 +n07714287 +n07714448 +n07714571 +n07714802 +n07714895 +n07714990 +n07715103 +n07715221 +n07715407 +n07715561 +n07715721 +n07716034 +n07716203 +n07716358 +n07716504 +n07716649 +n07716750 +n07716906 +n07717070 +n07717410 +n07717556 +n07717714 +n07717858 +n07718068 +n07718195 +n07718329 +n07718472 +n07718671 +n07718747 +n07718920 +n07719058 +n07719213 +n07719330 +n07719437 +n07719616 +n07719756 +n07719839 +n07719980 +n07720084 +n07720185 +n07720277 +n07720442 +n07720615 +n07720875 +n07721018 +n07721118 +n07721195 +n07721325 +n07721456 +n07721678 +n07721833 +n07721942 +n07722052 +n07722217 +n07722390 +n07722485 +n07722666 +n07722763 +n07722888 +n07723039 +n07723177 +n07723330 +n07723559 +n07723753 +n07723968 +n07724078 +n07724173 +n07724269 +n07724492 +n07724654 +n07724819 +n07724943 +n07725158 +n07725255 +n07725376 +n07725531 +n07725663 +n07725789 +n07725888 +n07726009 +n07726095 +n07726230 +n07726386 +n07726525 +n07726672 +n07726796 +n07727048 +n07727140 +n07727252 +n07727377 +n07727458 +n07727578 +n07727741 +n07727868 +n07728053 +n07728181 +n07728284 +n07728391 +n07728585 +n07728708 +n07728804 +n07729000 +n07729142 +n07729225 +n07729384 +n07729485 +n07729828 +n07729926 +n07730033 +n07730207 +n07730320 +n07730406 +n07730562 +n07730708 +n07730855 +n07731006 +n07731122 +n07731284 +n07731436 +n07731587 +n07731767 +n07731952 +n07732168 +n07732302 +n07732433 +n07732525 +n07732636 +n07732747 +n07732904 +n07733005 +n07733124 +n07733217 +n07733394 +n07733567 +n07733712 +n07733847 +n07734017 +n07734183 +n07734292 +n07734417 +n07734555 +n07734744 +n07734879 +n07735052 +n07735179 +n07735294 +n07735404 +n07735510 +n07735687 +n07735803 +n07735981 +n07736087 +n07736256 +n07736371 +n07736527 +n07736692 +n07736813 +n07736971 +n07737081 +n07737594 +n07737745 +n07738105 +n07738224 
+n07739035 +n07739125 +n07739344 +n07739506 +n07739923 +n07740033 +n07740115 +n07740220 +n07740342 +n07740461 +n07740597 +n07740744 +n07740855 +n07740954 +n07741138 +n07741235 +n07741357 +n07741461 +n07741623 +n07741706 +n07741804 +n07741888 +n07742012 +n07742224 +n07742313 +n07742415 +n07742513 +n07742605 +n07742704 +n07743224 +n07743384 +n07743544 +n07743723 +n07743902 +n07744057 +n07744246 +n07744430 +n07744559 +n07744682 +n07744811 +n07745046 +n07745197 +n07745357 +n07745466 +n07745661 +n07745940 +n07746038 +n07746186 +n07746334 +n07746551 +n07746749 +n07746910 +n07747055 +n07747607 +n07747811 +n07747951 +n07748157 +n07748276 +n07748416 +n07748574 +n07748753 +n07748912 +n07749095 +n07749192 +n07749312 +n07749446 +n07749582 +n07749731 +n07749870 +n07749969 +n07750146 +n07750299 +n07750449 +n07750586 +n07750736 +n07750872 +n07751004 +n07751148 +n07751280 +n07751451 +n07751737 +n07751858 +n07751977 +n07752109 +n07752264 +n07752377 +n07752514 +n07752602 +n07752664 +n07752782 +n07752874 +n07752966 +n07753113 +n07753275 +n07753448 +n07753592 +n07753743 +n07753980 +n07754155 +n07754279 +n07754451 +n07754684 +n07754894 +n07755089 +n07755262 +n07755411 +n07755619 +n07755707 +n07755929 +n07756096 +n07756325 +n07756499 +n07756641 +n07756838 +n07756951 +n07757132 +n07757312 +n07757511 +n07757602 +n07757753 +n07757874 +n07757990 +n07758125 +n07758260 +n07758407 +n07758582 +n07758680 +n07758950 +n07759194 +n07759324 +n07759424 +n07759576 +n07759691 +n07759816 +n07760070 +n07760153 +n07760297 +n07760395 +n07760501 +n07760673 +n07760755 +n07760859 +n07761141 +n07761309 +n07761611 +n07761777 +n07761954 +n07762114 +n07762244 +n07762373 +n07762534 +n07762740 +n07762913 +n07763107 +n07763290 +n07763483 +n07763629 +n07763792 +n07763987 +n07764155 +n07764315 +n07764486 +n07764630 +n07764847 +n07765073 +n07765208 +n07765361 +n07765517 +n07765612 +n07765728 +n07765862 +n07765999 +n07766173 +n07766409 +n07766530 +n07766723 +n07766891 +n07767002 +n07767171 +n07767344 +n07767549 +n07767709 +n07767847 +n07768068 +n07768139 +n07768230 +n07768318 +n07768423 +n07768590 +n07768694 +n07768858 +n07769102 +n07769306 +n07769465 +n07769584 +n07769731 +n07769886 +n07770034 +n07770180 +n07770439 +n07770571 +n07770763 +n07770869 +n07771082 +n07771212 +n07771405 +n07771539 +n07771731 +n07771891 +n07772026 +n07772147 +n07772274 +n07772413 +n07772788 +n07772935 +n07773428 +n07774182 +n07774295 +n07774479 +n07774596 +n07774719 +n07774842 +n07775050 +n07775197 +n07783827 +n07785487 +n07800091 +n07800487 +n07800636 +n07800740 +n07801007 +n07801091 +n07801342 +n07801508 +n07801709 +n07801779 +n07801892 +n07802026 +n07802152 +n07802246 +n07802417 +n07802767 +n07802863 +n07802963 +n07803093 +n07803213 +n07803310 +n07803408 +n07803545 +n07803779 +n07803895 +n07803992 +n07804152 +n07804323 +n07804543 +n07804657 +n07804771 +n07804900 +n07805006 +n07805254 +n07805389 +n07805478 +n07805594 +n07805731 +n07805966 +n07806043 +n07806120 +n07806221 +n07806633 +n07806774 +n07806879 +n07807002 +n07807171 +n07807317 +n07807472 +n07807594 +n07807710 +n07807834 +n07807922 +n07808022 +n07808166 +n07808268 +n07808352 +n07808479 +n07808587 +n07808675 +n07808806 +n07808904 +n07809096 +n07809368 +n07810531 +n07810907 +n07811416 +n07812046 +n07812184 +n07812662 +n07812790 +n07812913 +n07813107 +n07813324 +n07813495 +n07813579 +n07813717 +n07813833 +n07814007 +n07814203 +n07814390 +n07814487 +n07814634 +n07814790 +n07814925 +n07815163 +n07815294 +n07815424 +n07815588 +n07815839 +n07815956 +n07816052 +n07816164 +n07816296 +n07816398 +n07816575 +n07816726 
+n07816839 +n07817024 +n07817160 +n07817315 +n07817465 +n07817599 +n07817758 +n07817871 +n07818029 +n07818133 +n07818277 +n07818422 +n07818572 +n07818689 +n07818825 +n07818995 +n07819166 +n07819303 +n07819480 +n07819682 +n07819769 +n07819896 +n07820036 +n07820145 +n07820297 +n07820497 +n07820683 +n07820814 +n07820960 +n07821107 +n07821260 +n07821404 +n07821610 +n07821758 +n07821919 +n07822053 +n07822197 +n07822323 +n07822518 +n07822687 +n07822845 +n07823105 +n07823280 +n07823369 +n07823460 +n07823591 +n07823698 +n07823814 +n07823951 +n07824191 +n07824268 +n07824383 +n07824502 +n07824702 +n07824863 +n07824988 +n07825194 +n07825399 +n07825496 +n07825597 +n07825717 +n07825850 +n07825972 +n07826091 +n07826250 +n07826340 +n07826453 +n07826544 +n07826653 +n07826930 +n07827130 +n07827284 +n07827410 +n07827554 +n07827750 +n07827896 +n07828041 +n07828156 +n07828275 +n07828378 +n07828642 +n07828987 +n07829248 +n07829331 +n07829412 +n07830493 +n07830593 +n07830690 +n07830841 +n07830986 +n07831146 +n07831267 +n07831450 +n07831663 +n07831821 +n07831955 +n07832099 +n07832202 +n07832307 +n07832416 +n07832592 +n07832741 +n07832902 +n07833333 +n07833535 +n07833672 +n07833816 +n07833951 +n07834065 +n07834160 +n07834286 +n07834507 +n07834618 +n07834774 +n07834872 +n07835051 +n07835173 +n07835331 +n07835457 +n07835547 +n07835701 +n07835823 +n07835921 +n07836077 +n07836269 +n07836456 +n07836600 +n07836731 +n07836838 +n07837002 +n07837110 +n07837234 +n07837362 +n07837545 +n07837630 +n07837755 +n07837912 +n07838073 +n07838233 +n07838441 +n07838551 +n07838659 +n07838811 +n07838905 +n07839055 +n07839172 +n07839312 +n07839478 +n07839593 +n07839730 +n07839864 +n07840027 +n07840124 +n07840219 +n07840304 +n07840395 +n07840520 +n07840672 +n07840804 +n07841037 +n07841345 +n07841495 +n07841639 +n07841800 +n07841907 +n07842044 +n07842130 +n07842202 +n07842308 +n07842433 +n07842605 +n07842753 +n07842972 +n07843117 +n07843220 +n07843348 +n07843464 +n07843636 +n07843775 +n07844042 +n07844604 +n07844786 +n07844867 +n07845087 +n07845166 +n07845335 +n07845421 +n07845495 +n07845571 +n07845702 +n07845775 +n07845863 +n07846014 +n07846143 +n07846274 +n07846359 +n07846471 +n07846557 +n07846688 +n07846802 +n07846938 +n07847047 +n07847198 +n07847453 +n07847585 +n07847706 +n07847827 +n07847917 +n07848093 +n07848196 +n07848338 +n07848771 +n07848936 +n07849026 +n07849186 +n07849336 +n07849506 +n07849619 +n07849733 +n07849912 +n07850083 +n07850219 +n07850329 +n07851054 +n07851298 +n07851443 +n07851554 +n07851641 +n07851767 +n07851926 +n07852045 +n07852229 +n07852302 +n07852376 +n07852452 +n07852532 +n07852614 +n07852712 +n07852833 +n07852919 +n07853125 +n07853232 +n07853345 +n07853445 +n07853560 +n07853648 +n07853762 +n07853852 +n07853946 +n07854066 +n07854184 +n07854266 +n07854348 +n07854455 +n07854614 +n07854707 +n07854813 +n07854982 +n07855105 +n07855188 +n07855317 +n07855413 +n07855510 +n07855603 +n07855721 +n07855812 +n07855907 +n07856045 +n07856186 +n07856270 +n07856756 +n07856895 +n07856992 +n07857076 +n07857170 +n07857356 +n07857598 +n07857731 +n07857959 +n07858114 +n07858197 +n07858336 +n07858484 +n07858595 +n07858841 +n07858978 +n07859142 +n07859284 +n07859583 +n07859796 +n07859951 +n07860103 +n07860208 +n07860331 +n07860447 +n07860548 +n07860629 +n07860805 +n07860988 +n07861158 +n07861247 +n07861334 +n07861557 +n07861681 +n07861813 +n07861983 +n07862095 +n07862244 +n07862348 +n07862461 +n07862611 +n07862770 +n07862946 +n07863107 +n07863229 +n07863374 +n07863547 +n07863644 +n07863802 +n07863935 +n07864065 +n07864198 +n07864317 
+n07864475 +n07864638 +n07864756 +n07864934 +n07865105 +n07865196 +n07865484 +n07865575 +n07865700 +n07865788 +n07866015 +n07866151 +n07866277 +n07866409 +n07866571 +n07866723 +n07866868 +n07867021 +n07867164 +n07867324 +n07867421 +n07867616 +n07867751 +n07867883 +n07868045 +n07868200 +n07868340 +n07868508 +n07868684 +n07868830 +n07868955 +n07869111 +n07869291 +n07869391 +n07869522 +n07869611 +n07869775 +n07869937 +n07870069 +n07870167 +n07870313 +n07870478 +n07870620 +n07870734 +n07870894 +n07871065 +n07871234 +n07871335 +n07871436 +n07871588 +n07871720 +n07871810 +n07872593 +n07872748 +n07873057 +n07873198 +n07873348 +n07873464 +n07873679 +n07873807 +n07874063 +n07874159 +n07874259 +n07874343 +n07874441 +n07874531 +n07874674 +n07874780 +n07874995 +n07875086 +n07875152 +n07875267 +n07875436 +n07875560 +n07875693 +n07875835 +n07875926 +n07876026 +n07876189 +n07876281 +n07876460 +n07876550 +n07876651 +n07876775 +n07876893 +n07877187 +n07877299 +n07877675 +n07877849 +n07877961 +n07878145 +n07878283 +n07878479 +n07878647 +n07878785 +n07878926 +n07879072 +n07879174 +n07879350 +n07879450 +n07879560 +n07879659 +n07879821 +n07879953 +n07880080 +n07880213 +n07880325 +n07880458 +n07880751 +n07880880 +n07880968 +n07881117 +n07881205 +n07881404 +n07881525 +n07881625 +n07881800 +n07882420 +n07882497 +n07882886 +n07883031 +n07883156 +n07883251 +n07883384 +n07883510 +n07883661 +n07884567 +n07885705 +n07886057 +n07886176 +n07886317 +n07886463 +n07886572 +n07886849 +n07887099 +n07887192 +n07887304 +n07887461 +n07887634 +n07887967 +n07888058 +n07888229 +n07888378 +n07888465 +n07888816 +n07888909 +n07889193 +n07889274 +n07889510 +n07889814 +n07889990 +n07890068 +n07890226 +n07890352 +n07890540 +n07890617 +n07890750 +n07890890 +n07890970 +n07891095 +n07891189 +n07891309 +n07891433 +n07891726 +n07892418 +n07892512 +n07892813 +n07893253 +n07893425 +n07893528 +n07893642 +n07893792 +n07893891 +n07894102 +n07894298 +n07894451 +n07894551 +n07894703 +n07894799 +n07894965 +n07895100 +n07895237 +n07895435 +n07895595 +n07895710 +n07895839 +n07895962 +n07896060 +n07896165 +n07896287 +n07896422 +n07896560 +n07896661 +n07896765 +n07896893 +n07896994 +n07897116 +n07897200 +n07897438 +n07897600 +n07897750 +n07897865 +n07897975 +n07898117 +n07898247 +n07898333 +n07898443 +n07898617 +n07898745 +n07898895 +n07899003 +n07899108 +n07899292 +n07899434 +n07899533 +n07899660 +n07899769 +n07899899 +n07899976 +n07900225 +n07900406 +n07900616 +n07900734 +n07900825 +n07900958 +n07901355 +n07901457 +n07901587 +n07902121 +n07902336 +n07902443 +n07902520 +n07902698 +n07902799 +n07902937 +n07903101 +n07903208 +n07903543 +n07903643 +n07903731 +n07903841 +n07903962 +n07904072 +n07904293 +n07904395 +n07904637 +n07904760 +n07904865 +n07904934 +n07905038 +n07905296 +n07905386 +n07905474 +n07905618 +n07905770 +n07905979 +n07906111 +n07906284 +n07906572 +n07906718 +n07906877 +n07907037 +n07907161 +n07907342 +n07907429 +n07907548 +n07907831 +n07907943 +n07908411 +n07908567 +n07908647 +n07908812 +n07908923 +n07909129 +n07909231 +n07909362 +n07909504 +n07909593 +n07909714 +n07909811 +n07909954 +n07910048 +n07910152 +n07910245 +n07910379 +n07910538 +n07910656 +n07910799 +n07910970 +n07911061 +n07911249 +n07911371 +n07911677 +n07912093 +n07912211 +n07913180 +n07913300 +n07913393 +n07913537 +n07913644 +n07913774 +n07913882 +n07914006 +n07914128 +n07914271 +n07914413 +n07914586 +n07914686 +n07914777 +n07914887 +n07914995 +n07915094 +n07915213 +n07915366 +n07915491 +n07915618 +n07915800 +n07915918 +n07916041 +n07916183 +n07916319 +n07916437 +n07916582 
+n07917133 +n07917272 +n07917392 +n07917507 +n07917618 +n07917791 +n07917874 +n07917951 +n07918028 +n07918193 +n07918309 +n07918706 +n07918879 +n07919165 +n07919310 +n07919441 +n07919572 +n07919665 +n07919787 +n07919894 +n07920052 +n07920222 +n07920349 +n07920540 +n07920663 +n07920872 +n07920989 +n07921090 +n07921239 +n07921360 +n07921455 +n07921615 +n07921834 +n07921948 +n07922041 +n07922147 +n07922512 +n07922607 +n07922764 +n07922955 +n07923748 +n07924033 +n07924276 +n07924366 +n07924443 +n07924560 +n07924655 +n07924747 +n07924834 +n07924955 +n07925116 +n07925229 +n07925327 +n07925423 +n07925500 +n07925608 +n07925708 +n07925808 +n07925966 +n07926250 +n07926346 +n07926442 +n07926540 +n07926785 +n07926920 +n07927070 +n07927197 +n07927512 +n07927716 +n07927836 +n07927931 +n07928163 +n07928264 +n07928367 +n07928488 +n07928578 +n07928696 +n07928790 +n07928887 +n07928998 +n07929172 +n07929351 +n07929519 +n07929940 +n07930062 +n07930205 +n07930315 +n07930433 +n07930554 +n07930864 +n07931001 +n07931096 +n07931280 +n07931452 +n07931612 +n07931733 +n07931870 +n07932039 +n07932323 +n07932454 +n07932614 +n07932762 +n07932841 +n07933154 +n07933274 +n07933530 +n07933652 +n07933799 +n07933891 +n07934032 +n07934152 +n07934282 +n07934373 +n07934530 +n07934678 +n07934800 +n07934908 +n07935043 +n07935152 +n07935288 +n07935379 +n07935504 +n07935737 +n07935878 +n07936015 +n07936093 +n07936263 +n07936459 +n07936548 +n07936745 +n07936979 +n07937069 +n07937344 +n07937461 +n07937621 +n07938007 +n07938149 +n07938313 +n07938594 +n07942152 +n07951464 +n07954211 +n07977870 +n08079613 +n08182379 +n08238463 +n08242223 +n08249459 +n08253141 +n08256735 +n08376250 +n08385989 +n08492354 +n08492461 +n08494231 +n08495908 +n08496334 +n08500819 +n08500989 +n08501887 +n08505018 +n08506347 +n08511017 +n08517010 +n08517676 +n08518171 +n08519299 +n08521623 +n08523340 +n08524735 +n08539072 +n08539276 +n08540532 +n08547468 +n08547544 +n08551296 +n08554440 +n08555333 +n08555710 +n08558770 +n08558963 +n08559155 +n08560295 +n08569482 +n08571275 +n08571642 +n08571898 +n08573674 +n08573842 +n08578517 +n08579266 +n08579352 +n08580944 +n08583292 +n08583455 +n08583554 +n08583682 +n08584914 +n08586978 +n08589670 +n08596076 +n08597579 +n08598301 +n08598568 +n08599174 +n08599292 +n08611339 +n08611421 +n08613733 +n08614632 +n08616050 +n08618831 +n08619112 +n08623676 +n08628141 +n08633683 +n08640531 +n08640739 +n08640962 +n08643267 +n08644045 +n08645104 +n08645212 +n08645318 +n08647264 +n08648917 +n08649711 +n08651104 +n08652376 +n08658309 +n08658918 +n08659242 +n08659331 +n08659446 +n08659861 +n08661878 +n08662427 +n08663051 +n08663703 +n08663860 +n08673039 +n08674344 +n08676253 +n08677424 +n08677801 +n08678783 +n08679167 +n08679269 +n08679562 +n08685188 +n08782627 +n08896327 +n09032191 +n09186592 +n09189157 +n09191635 +n09193551 +n09193705 +n09194227 +n09199101 +n09201998 +n09203827 +n09205509 +n09206896 +n09206985 +n09208496 +n09209025 +n09210862 +n09213434 +n09213565 +n09214060 +n09214269 +n09214916 +n09215023 +n09215437 +n09217230 +n09218315 +n09218494 +n09218641 +n09219233 +n09223487 +n09224725 +n09226869 +n09228055 +n09229709 +n09230041 +n09230202 +n09231117 +n09233446 +n09233603 +n09238926 +n09239302 +n09242389 +n09245515 +n09246464 +n09247410 +n09248153 +n09248399 +n09249034 +n09249155 +n09251407 +n09255070 +n09256479 +n09257843 +n09259025 +n09259219 +n09260907 +n09262690 +n09263912 +n09264803 +n09265620 +n09266604 +n09267854 +n09268007 +n09269341 +n09269472 +n09269882 +n09270160 +n09270657 +n09270735 +n09274152 +n09274305 +n09279986 
+n09281252 +n09282208 +n09283193 +n09283405 +n09283514 +n09283767 +n09283866 +n09287415 +n09287968 +n09288635 +n09289331 +n09289596 +n09290350 +n09290444 +n09294877 +n09295210 +n09295946 +n09300306 +n09300905 +n09302616 +n09303008 +n09303528 +n09304750 +n09305031 +n09305898 +n09308572 +n09308743 +n09309046 +n09309168 +n09309292 +n09310616 +n09315159 +n09319604 +n09325824 +n09326662 +n09327077 +n09327538 +n09330378 +n09331251 +n09332890 +n09335693 +n09335809 +n09336555 +n09337048 +n09337253 +n09338013 +n09339810 +n09344198 +n09344324 +n09344724 +n09348460 +n09349648 +n09351905 +n09352849 +n09353815 +n09354511 +n09357346 +n09357447 +n09359803 +n09361517 +n09362316 +n09362945 +n09366017 +n09366317 +n09375606 +n09376198 +n09376526 +n09376786 +n09381242 +n09382099 +n09384106 +n09389867 +n09391386 +n09391644 +n09391774 +n09392402 +n09393524 +n09393605 +n09396465 +n09396608 +n09398076 +n09398677 +n09399592 +n09400584 +n09400987 +n09402944 +n09403086 +n09403211 +n09403427 +n09403734 +n09405078 +n09405787 +n09406793 +n09409512 +n09409752 +n09410224 +n09411189 +n09411295 +n09415584 +n09415671 +n09416076 +n09416890 +n09421031 +n09421799 +n09421951 +n09422190 +n09422631 +n09425019 +n09425344 +n09428293 +n09428628 +n09429630 +n09432283 +n09432990 +n09433312 +n09433442 +n09433839 +n09435739 +n09436444 +n09436708 +n09437454 +n09438844 +n09438940 +n09439032 +n09439213 +n09442595 +n09443281 +n09443641 +n09444783 +n09445008 +n09445289 +n09447666 +n09448690 +n09450163 +n09451237 +n09452291 +n09452395 +n09452760 +n09453008 +n09454153 +n09454412 +n09454744 +n09456207 +n09457979 +n09458269 +n09459979 +n09460046 +n09461069 +n09462600 +n09463226 +n09464486 +n09466678 +n09467696 +n09468604 +n09470027 +n09470222 +n09472413 +n09472597 +n09474010 +n09474412 +n09474765 +n09475044 +n09475179 +n09475925 +n09476123 +n09478210 +n09480959 +n09481120 +n09493983 +n09495962 +n09505153 +n09537660 +n09556121 +n09605110 +n09606009 +n09606527 +n09607630 +n09607782 +n09607903 +n09608709 +n09610255 +n09610405 +n09611722 +n09612700 +n09613118 +n09613191 +n09613690 +n09615336 +n09616573 +n09616922 +n09617161 +n09617435 +n09617577 +n09617696 +n09618760 +n09618880 +n09618957 +n09619168 +n09619452 +n09620078 +n09620794 +n09621232 +n09622049 +n09622302 +n09624168 +n09624559 +n09624899 +n09625401 +n09626238 +n09627807 +n09627906 +n09629065 +n09629246 +n09629752 +n09631129 +n09632274 +n09632518 +n09633969 +n09635534 +n09635635 +n09635973 +n09636339 +n09637339 +n09638454 +n09638875 +n09639382 +n09639919 +n09640327 +n09640715 +n09641002 +n09641578 +n09643799 +n09644152 +n09644657 +n09648743 +n09648911 +n09649067 +n09650729 +n09650839 +n09650989 +n09651123 +n09651968 +n09652149 +n09653144 +n09653438 +n09654079 +n09654518 +n09654898 +n09655213 +n09655466 +n09656077 +n09657206 +n09657748 +n09658254 +n09658398 +n09658815 +n09658921 +n09659039 +n09659188 +n09660010 +n09660240 +n09661873 +n09662038 +n09662661 +n09662951 +n09663248 +n09663786 +n09663999 +n09664556 +n09664908 +n09665367 +n09665545 +n09666349 +n09666476 +n09666883 +n09667358 +n09668199 +n09668437 +n09668562 +n09668988 +n09669631 +n09670280 +n09670521 +n09670909 +n09671089 +n09672590 +n09672725 +n09672840 +n09673091 +n09674412 +n09674786 +n09675045 +n09675673 +n09675799 +n09675922 +n09676021 +n09676247 +n09676884 +n09677427 +n09678747 +n09679028 +n09679170 +n09679925 +n09680908 +n09681107 +n09681234 +n09681973 +n09683180 +n09683757 +n09683924 +n09684082 +n09684901 +n09685233 +n09685806 +n09686262 +n09686401 +n09688233 +n09688804 +n09689435 +n09689958 +n09690083 +n09690208 +n09690496 
+n09690621 +n09690864 +n09691604 +n09691729 +n09691858 +n09692125 +n09692915 +n09693244 +n09693982 +n09694664 +n09694771 +n09695019 +n09695132 +n09695514 +n09695620 +n09695979 +n09696456 +n09696585 +n09696763 +n09697401 +n09697986 +n09698644 +n09699020 +n09699642 +n09700125 +n09700964 +n09701148 +n09701833 +n09702134 +n09702673 +n09703101 +n09703344 +n09703485 +n09703708 +n09703809 +n09703932 +n09704057 +n09704157 +n09704283 +n09705003 +n09705124 +n09705671 +n09705784 +n09706029 +n09706255 +n09707061 +n09707289 +n09707735 +n09708750 +n09708889 +n09709531 +n09709673 +n09710041 +n09710164 +n09710886 +n09711132 +n09711435 +n09712324 +n09712448 +n09712696 +n09712967 +n09713108 +n09714120 +n09714694 +n09715165 +n09715303 +n09715427 +n09716047 +n09716933 +n09717233 +n09718217 +n09718811 +n09718936 +n09719309 +n09719794 +n09720033 +n09720256 +n09720595 +n09720702 +n09720842 +n09721244 +n09721444 +n09722064 +n09722658 +n09722817 +n09723067 +n09723819 +n09723944 +n09724234 +n09724533 +n09724656 +n09724785 +n09725000 +n09725229 +n09725546 +n09725653 +n09725772 +n09725935 +n09726621 +n09726811 +n09727440 +n09727826 +n09728137 +n09728285 +n09729062 +n09729156 +n09730077 +n09730204 +n09730824 +n09731343 +n09731436 +n09731571 +n09732170 +n09733459 +n09733793 +n09734185 +n09734450 +n09734535 +n09734639 +n09735258 +n09735654 +n09736485 +n09736798 +n09736945 +n09737050 +n09737161 +n09737453 +n09738121 +n09738400 +n09740724 +n09741074 +n09741331 +n09741722 +n09741816 +n09741904 +n09741999 +n09742101 +n09742315 +n09742927 +n09743487 +n09743601 +n09743792 +n09744161 +n09744346 +n09744462 +n09744679 +n09744834 +n09745229 +n09745324 +n09745834 +n09745933 +n09746936 +n09747191 +n09747495 +n09748101 +n09748408 +n09748648 +n09748889 +n09749386 +n09750282 +n09750641 +n09750770 +n09750891 +n09751076 +n09751496 +n09751622 +n09751895 +n09752023 +n09752519 +n09753348 +n09753792 +n09754152 +n09754217 +n09754633 +n09754907 +n09755086 +n09755241 +n09755555 +n09755788 +n09755893 +n09756049 +n09756195 +n09756961 +n09757449 +n09758173 +n09758885 +n09759501 +n09760290 +n09760609 +n09760913 +n09761068 +n09761753 +n09762011 +n09762385 +n09763272 +n09763784 +n09764201 +n09764598 +n09764732 +n09764900 +n09765118 +n09765278 +n09767197 +n09769076 +n09769525 +n09769929 +n09770179 +n09770359 +n09771435 +n09772330 +n09772746 +n09772930 +n09773962 +n09774167 +n09774783 +n09775907 +n09776346 +n09776642 +n09776807 +n09777870 +n09778266 +n09778537 +n09778783 +n09778927 +n09779124 +n09779280 +n09779461 +n09779790 +n09780395 +n09780828 +n09780984 +n09781398 +n09781504 +n09781650 +n09782167 +n09782397 +n09782855 +n09783537 +n09783776 +n09783884 +n09784043 +n09784160 +n09784564 +n09785236 +n09785659 +n09785891 +n09786115 +n09787534 +n09787765 +n09788073 +n09788237 +n09789150 +n09789566 +n09789898 +n09790047 +n09790482 +n09791014 +n09791419 +n09791816 +n09792125 +n09792555 +n09792969 +n09793141 +n09793352 +n09793946 +n09794550 +n09794668 +n09795010 +n09795124 +n09795334 +n09796809 +n09796974 +n09797742 +n09797873 +n09797998 +n09798096 +n09800469 +n09800964 +n09801102 +n09801275 +n09801533 +n09802445 +n09802641 +n09802951 +n09804230 +n09805151 +n09805324 +n09805475 +n09806944 +n09807075 +n09808080 +n09808591 +n09809279 +n09809538 +n09809749 +n09809925 +n09810166 +n09811568 +n09811712 +n09811852 +n09813219 +n09814252 +n09814381 +n09814488 +n09814567 +n09814660 +n09815455 +n09815790 +n09816654 +n09816771 +n09817174 +n09817386 +n09818022 +n09819477 +n09820044 +n09820263 +n09821831 +n09822830 +n09823153 +n09823287 +n09823502 +n09823832 +n09824135 
+n09824609 +n09825096 +n09825750 +n09826204 +n09826605 +n09826821 +n09827246 +n09827363 +n09828216 +n09828403 +n09828988 +n09830194 +n09830400 +n09830629 +n09830759 +n09830926 +n09831962 +n09832456 +n09832633 +n09832978 +n09833111 +n09833275 +n09833441 +n09833536 +n09833751 +n09833997 +n09834258 +n09834378 +n09834699 +n09834885 +n09835017 +n09835153 +n09835230 +n09835348 +n09835506 +n09836160 +n09836343 +n09836519 +n09836786 +n09837459 +n09837720 +n09838295 +n09838370 +n09838621 +n09839702 +n09840217 +n09840435 +n09840520 +n09841188 +n09841515 +n09841696 +n09842047 +n09842288 +n09842395 +n09842528 +n09842823 +n09843443 +n09843602 +n09843716 +n09843824 +n09844457 +n09844898 +n09845401 +n09845849 +n09846142 +n09846469 +n09846586 +n09846755 +n09846894 +n09847267 +n09847344 +n09847543 +n09848110 +n09848489 +n09849167 +n09849990 +n09850760 +n09850974 +n09851165 +n09851575 +n09853541 +n09853645 +n09853881 +n09854218 +n09854421 +n09854915 +n09855433 +n09856401 +n09856671 +n09856827 +n09857007 +n09858165 +n09858299 +n09858733 +n09859152 +n09859285 +n09859684 +n09859975 +n09861287 +n09861599 +n09861863 +n09861946 +n09862183 +n09862621 +n09863031 +n09863339 +n09863749 +n09863936 +n09864632 +n09864968 +n09865068 +n09865162 +n09865398 +n09865672 +n09865744 +n09866115 +n09866354 +n09866559 +n09866661 +n09866817 +n09866922 +n09867069 +n09867154 +n09867311 +n09868270 +n09868782 +n09868899 +n09869317 +n09869447 +n09869578 +n09870096 +n09871095 +n09871229 +n09871681 +n09871867 +n09871952 +n09872066 +n09872557 +n09873348 +n09873473 +n09873769 +n09873899 +n09874428 +n09874725 +n09874862 +n09875025 +n09875979 +n09876701 +n09877288 +n09877587 +n09877750 +n09877951 +n09878921 +n09879552 +n09880189 +n09880741 +n09881265 +n09881358 +n09881895 +n09883047 +n09883452 +n09883807 +n09885059 +n09885866 +n09886403 +n09886540 +n09888635 +n09889065 +n09889170 +n09889691 +n09889941 +n09890192 +n09890749 +n09891730 +n09892262 +n09892513 +n09892693 +n09893191 +n09893344 +n09893502 +n09893600 +n09894143 +n09894445 +n09894654 +n09894909 +n09895222 +n09895480 +n09895561 +n09895701 +n09895902 +n09896170 +n09896311 +n09896401 +n09896685 +n09896826 +n09898020 +n09899289 +n09899671 +n09899782 +n09899929 +n09901337 +n09901502 +n09901642 +n09901786 +n09901921 +n09902128 +n09902353 +n09902731 +n09902851 +n09902954 +n09903153 +n09903501 +n09903639 +n09903936 +n09904208 +n09904837 +n09905050 +n09905185 +n09905530 +n09906293 +n09906449 +n09906704 +n09907804 +n09908769 +n09909660 +n09909929 +n09910222 +n09910374 +n09910556 +n09910840 +n09911226 +n09912431 +n09912681 +n09912907 +n09912995 +n09913329 +n09913455 +n09913593 +n09915434 +n09915651 +n09916348 +n09917214 +n09917345 +n09917481 +n09917593 +n09918248 +n09918554 +n09918867 +n09919061 +n09919200 +n09919451 +n09919899 +n09920106 +n09920283 +n09920901 +n09921034 +n09923003 +n09923186 +n09923418 +n09923561 +n09923673 +n09923996 +n09924106 +n09924195 +n09924313 +n09924437 +n09924996 +n09927089 +n09927451 +n09928136 +n09928451 +n09928845 +n09929202 +n09929298 +n09929577 +n09930257 +n09930628 +n09930876 +n09931165 +n09931418 +n09931640 +n09932098 +n09932336 +n09932508 +n09932788 +n09933020 +n09933098 +n09933842 +n09933972 +n09934337 +n09934488 +n09934774 +n09935107 +n09935434 +n09936825 +n09936892 +n09937056 +n09937688 +n09937802 +n09937903 +n09938080 +n09938449 +n09938991 +n09940725 +n09940818 +n09941089 +n09941571 +n09941787 +n09941964 +n09942697 +n09942970 +n09943239 +n09943811 +n09944022 +n09944160 +n09944430 +n09945021 +n09945223 +n09945319 +n09945603 +n09945745 +n09946814 +n09947127 
+n09950457 +n09950728 +n09951070 +n09951274 +n09951524 +n09951616 +n09952163 +n09953052 +n09953350 +n09953615 +n09954355 +n09954639 +n09955406 +n09955944 +n09956578 +n09957523 +n09958133 +n09958292 +n09958447 +n09958569 +n09959142 +n09959658 +n09960688 +n09961198 +n09961331 +n09961469 +n09961605 +n09961739 +n09962966 +n09964202 +n09964411 +n09965515 +n09965787 +n09966470 +n09966554 +n09967063 +n09967406 +n09967555 +n09967816 +n09967967 +n09968259 +n09968652 +n09968741 +n09968845 +n09970088 +n09970192 +n09970402 +n09970822 +n09971273 +n09971385 +n09971839 +n09972010 +n09972458 +n09972587 +n09974648 +n09975425 +n09976024 +n09976283 +n09976429 +n09976728 +n09976917 +n09978442 +n09979321 +n09979913 +n09980458 +n09980805 +n09980985 +n09981092 +n09981278 +n09981540 +n09981939 +n09982152 +n09982525 +n09983314 +n09983572 +n09983889 +n09984960 +n09985470 +n09985809 +n09985978 +n09986450 +n09986700 +n09986904 +n09987045 +n09987161 +n09987239 +n09988063 +n09988311 +n09988493 +n09988703 +n09989502 +n09990415 +n09990690 +n09990777 +n09991740 +n09991867 +n09992538 +n09992837 +n09993252 +n09993651 +n09994400 +n09994673 +n09994808 +n09994878 +n09995829 +n09996039 +n09996304 +n09996481 +n09997622 +n09998788 +n09999135 +n10000294 +n10000459 +n10000787 +n10001217 +n10001481 +n10001764 +n10002257 +n10002760 +n10003476 +n10004718 +n10005006 +n10005934 +n10006177 +n10006748 +n10007684 +n10007809 +n10007995 +n10008123 +n10008254 +n10009162 +n10009276 +n10009484 +n10009671 +n10010062 +n10010243 +n10010632 +n10010767 +n10010864 +n10011360 +n10011486 +n10012484 +n10013811 +n10015215 +n10015485 +n10015792 +n10015897 +n10017272 +n10017422 +n10018747 +n10018861 +n10019072 +n10019187 +n10019406 +n10020366 +n10020533 +n10020670 +n10020807 +n10020890 +n10022908 +n10023264 +n10023506 +n10023656 +n10024025 +n10024362 +n10024937 +n10025060 +n10025295 +n10025391 +n10025635 +n10026976 +n10027246 +n10027590 +n10028402 +n10028541 +n10029068 +n10030277 +n10032987 +n10033412 +n10033572 +n10033663 +n10033888 +n10034201 +n10034614 +n10035952 +n10036266 +n10036444 +n10036692 +n10036929 +n10037080 +n10037385 +n10037588 +n10037922 +n10038119 +n10038409 +n10038620 +n10039271 +n10039946 +n10040240 +n10040698 +n10040945 +n10041373 +n10041887 +n10042690 +n10042845 +n10043024 +n10043491 +n10043643 +n10044682 +n10044879 +n10047199 +n10047459 +n10048117 +n10048367 +n10048612 +n10048836 +n10049363 +n10050043 +n10050880 +n10051026 +n10051761 +n10051861 +n10051975 +n10052694 +n10053439 +n10053808 +n10054657 +n10055297 +n10055410 +n10055566 +n10055730 +n10055847 +n10056103 +n10056611 +n10056719 +n10057271 +n10058411 +n10058962 +n10059067 +n10060075 +n10060175 +n10060352 +n10061043 +n10061195 +n10061431 +n10061882 +n10062042 +n10062176 +n10062275 +n10062492 +n10062594 +n10062716 +n10062905 +n10062996 +n10063635 +n10063919 +n10064831 +n10064977 +n10065758 +n10066206 +n10066314 +n10067011 +n10067305 +n10067600 +n10067968 +n10068234 +n10068425 +n10069296 +n10069981 +n10070108 +n10070377 +n10070449 +n10070563 +n10070711 +n10071332 +n10071557 +n10072054 +n10074249 +n10074578 +n10074735 +n10074841 +n10075299 +n10075693 +n10076224 +n10076483 +n10076604 +n10076957 +n10077106 +n10077593 +n10077879 +n10078131 +n10078719 +n10078806 +n10079399 +n10079893 +n10080117 +n10080508 +n10080869 +n10081204 +n10081842 +n10082043 +n10082299 +n10082423 +n10082562 +n10082687 +n10082997 +n10083677 +n10083823 +n10084043 +n10084295 +n10085101 +n10085869 +n10086383 +n10086744 +n10087434 +n10087736 +n10088200 +n10090745 +n10091349 +n10091450 +n10091564 +n10091651 +n10091861 
+n10091997 +n10092488 +n10092643 +n10092794 +n10092978 +n10093167 +n10093475 +n10093818 +n10094320 +n10094584 +n10094782 +n10095265 +n10095420 +n10095769 +n10095869 +n10096126 +n10096508 +n10097262 +n10097477 +n10097590 +n10097842 +n10097995 +n10098245 +n10098388 +n10098517 +n10098624 +n10098710 +n10098862 +n10099002 +n10099375 +n10101308 +n10101634 +n10101981 +n10102800 +n10103155 +n10103228 +n10103921 +n10104064 +n10104487 +n10104756 +n10104888 +n10105085 +n10105733 +n10105906 +n10106387 +n10106509 +n10106995 +n10107173 +n10107303 +n10108018 +n10108089 +n10108464 +n10108832 +n10109443 +n10109662 +n10109826 +n10110093 +n10110731 +n10110893 +n10111358 +n10111779 +n10111903 +n10112129 +n10113249 +n10113583 +n10113869 +n10114476 +n10114550 +n10114662 +n10115430 +n10115946 +n10116370 +n10116478 +n10116702 +n10117017 +n10117267 +n10117415 +n10117739 +n10117851 +n10118301 +n10118743 +n10118844 +n10119609 +n10120330 +n10120671 +n10121026 +n10121246 +n10121714 +n10121800 +n10122300 +n10122531 +n10123122 +n10123844 +n10126177 +n10126424 +n10126708 +n10127186 +n10127689 +n10128519 +n10128748 +n10129338 +n10129825 +n10130686 +n10130877 +n10131151 +n10131268 +n10131590 +n10131815 +n10132035 +n10132502 +n10134178 +n10134396 +n10134760 +n10134982 +n10135129 +n10135197 +n10135297 +n10136615 +n10136959 +n10137825 +n10138369 +n10138472 +n10139077 +n10139651 +n10140051 +n10140597 +n10140683 +n10140783 +n10140929 +n10141364 +n10141732 +n10142166 +n10142391 +n10142537 +n10142747 +n10142946 +n10143172 +n10143595 +n10143725 +n10144338 +n10145239 +n10145340 +n10145480 +n10145590 +n10145774 +n10145902 +n10146002 +n10146104 +n10146416 +n10146816 +n10146927 +n10147121 +n10147262 +n10147710 +n10147935 +n10148035 +n10148305 +n10148825 +n10149436 +n10149867 +n10150071 +n10150794 +n10150940 +n10151133 +n10151261 +n10151367 +n10151570 +n10151760 +n10152306 +n10152616 +n10152763 +n10153155 +n10153414 +n10153594 +n10153865 +n10154013 +n10154186 +n10154601 +n10155222 +n10155600 +n10155849 +n10156629 +n10156831 +n10157016 +n10157128 +n10157271 +n10158506 +n10159045 +n10159289 +n10159533 +n10160188 +n10160280 +n10160412 +n10161622 +n10162016 +n10162194 +n10162354 +n10164025 +n10164233 +n10164492 +n10165448 +n10166189 +n10166394 +n10167152 +n10167361 +n10167565 +n10167838 +n10168012 +n10168183 +n10168584 +n10168837 +n10169147 +n10169241 +n10169419 +n10169796 +n10170060 +n10170681 +n10170866 +n10171219 +n10171456 +n10171567 +n10172080 +n10173410 +n10173579 +n10173665 +n10173771 +n10174253 +n10174330 +n10174445 +n10174589 +n10174695 +n10174971 +n10175248 +n10175725 +n10176913 +n10177150 +n10178077 +n10178216 +n10179069 +n10180580 +n10180791 +n10180923 +n10181445 +n10181547 +n10181799 +n10181878 +n10182190 +n10182402 +n10183347 +n10183931 +n10184505 +n10185148 +n10185483 +n10185793 +n10186068 +n10186143 +n10186216 +n10186350 +n10186686 +n10186774 +n10187130 +n10187491 +n10187990 +n10188715 +n10188856 +n10188957 +n10189278 +n10189597 +n10190122 +n10190516 +n10191001 +n10191388 +n10191613 +n10192839 +n10193650 +n10194231 +n10194775 +n10195056 +n10195155 +n10195261 +n10195593 +n10196404 +n10196725 +n10197392 +n10198437 +n10198832 +n10199251 +n10200246 +n10200781 +n10202225 +n10202624 +n10202763 +n10203949 +n10204177 +n10204833 +n10205231 +n10205344 +n10205457 +n10205714 +n10206173 +n10206506 +n10206629 +n10207077 +n10207169 +n10208189 +n10208847 +n10208950 +n10209082 +n10209731 +n10210137 +n10210512 +n10210648 +n10210911 +n10211036 +n10211666 +n10211830 +n10212231 +n10212501 +n10212780 +n10213034 +n10213429 +n10214062 +n10214390 
+n10215623 +n10216106 +n10216403 +n10217208 +n10218043 +n10218164 +n10218292 +n10219240 +n10219453 +n10219879 +n10220080 +n10220924 +n10221312 +n10221520 +n10222170 +n10222259 +n10222497 +n10222716 +n10223069 +n10223177 +n10223606 +n10224578 +n10225219 +n10225931 +n10226413 +n10227166 +n10227266 +n10227393 +n10227490 +n10227698 +n10227793 +n10227985 +n10228278 +n10228468 +n10228592 +n10228712 +n10229883 +n10230216 +n10233248 +n10235024 +n10235269 +n10235385 +n10236304 +n10236521 +n10236842 +n10237069 +n10237196 +n10237464 +n10237556 +n10237676 +n10237799 +n10238272 +n10238375 +n10239928 +n10240082 +n10240235 +n10240417 +n10240821 +n10241024 +n10241300 +n10242328 +n10243137 +n10243273 +n10243483 +n10243664 +n10243872 +n10244108 +n10244359 +n10244913 +n10245029 +n10245341 +n10245507 +n10245639 +n10245863 +n10246317 +n10246395 +n10246703 +n10247358 +n10247880 +n10248008 +n10248198 +n10248377 +n10249191 +n10249270 +n10249459 +n10249869 +n10249950 +n10250712 +n10251329 +n10251612 +n10252075 +n10252222 +n10252354 +n10252547 +n10253122 +n10253296 +n10253479 +n10253611 +n10253703 +n10255459 +n10257221 +n10258602 +n10258786 +n10259348 +n10259780 +n10259997 +n10260473 +n10260706 +n10260800 +n10261211 +n10261511 +n10261624 +n10261862 +n10262343 +n10262445 +n10262561 +n10262655 +n10262880 +n10263146 +n10263411 +n10263790 +n10265281 +n10265801 +n10265891 +n10266016 +n10266328 +n10266848 +n10267166 +n10267311 +n10267865 +n10268629 +n10269199 +n10269289 +n10271677 +n10272782 +n10272913 +n10273064 +n10274173 +n10274318 +n10274815 +n10275249 +n10275395 +n10275848 +n10276045 +n10276477 +n10276942 +n10277027 +n10277638 +n10277815 +n10277912 +n10278456 +n10279018 +n10279778 +n10280034 +n10280130 +n10280598 +n10280674 +n10281546 +n10281770 +n10281896 +n10282482 +n10282672 +n10283170 +n10283366 +n10283546 +n10284064 +n10284871 +n10284965 +n10286282 +n10286539 +n10286749 +n10288964 +n10289039 +n10289176 +n10289462 +n10289766 +n10290422 +n10290541 +n10290813 +n10290919 +n10291110 +n10291469 +n10291822 +n10291942 +n10292316 +n10293332 +n10293590 +n10293861 +n10294020 +n10294139 +n10295371 +n10295479 +n10296176 +n10296444 +n10297234 +n10297367 +n10297531 +n10297841 +n10298202 +n10298271 +n10298647 +n10298912 +n10299125 +n10299250 +n10299700 +n10299875 +n10300041 +n10300154 +n10300303 +n10300500 +n10300654 +n10300829 +n10302576 +n10302700 +n10302905 +n10303037 +n10303814 +n10304086 +n10304650 +n10304914 +n10305635 +n10305802 +n10306004 +n10306279 +n10306496 +n10306595 +n10306890 +n10307114 +n10308066 +n10308168 +n10308275 +n10308504 +n10308653 +n10308732 +n10310783 +n10311506 +n10311661 +n10312287 +n10312491 +n10312600 +n10313000 +n10313239 +n10313441 +n10313724 +n10314054 +n10314182 +n10314517 +n10314836 +n10315217 +n10315456 +n10315561 +n10315730 +n10316360 +n10316527 +n10316862 +n10317007 +n10317500 +n10317963 +n10318293 +n10318607 +n10318686 +n10319313 +n10320484 +n10320863 +n10321126 +n10321340 +n10321632 +n10321882 +n10322238 +n10323634 +n10323752 +n10323999 +n10324560 +n10325549 +n10325774 +n10326776 +n10327143 +n10327987 +n10328123 +n10328328 +n10328437 +n10328696 +n10328941 +n10329035 +n10330593 +n10330931 +n10331098 +n10331167 +n10331258 +n10331347 +n10331841 +n10332110 +n10332385 +n10332861 +n10332953 +n10333044 +n10333165 +n10333317 +n10333439 +n10333601 +n10333838 +n10334009 +n10334461 +n10334782 +n10335246 +n10335801 +n10335931 +n10336411 +n10336904 +n10337488 +n10338231 +n10338391 +n10339179 +n10339251 +n10339717 +n10340312 +n10341243 +n10341343 +n10341446 +n10341573 +n10341955 +n10342180 +n10342367 
+n10342543 +n10342893 +n10342992 +n10343088 +n10343355 +n10343449 +n10343554 +n10343869 +n10344121 +n10344203 +n10344319 +n10344656 +n10344774 +n10345015 +n10345100 +n10345302 +n10345422 +n10345659 +n10346015 +n10347204 +n10347446 +n10348526 +n10349243 +n10349750 +n10349836 +n10350220 +n10350774 +n10351064 +n10353016 +n10353355 +n10353928 +n10354265 +n10354754 +n10355142 +n10355306 +n10355449 +n10355688 +n10355806 +n10356450 +n10356877 +n10357012 +n10357613 +n10357737 +n10358032 +n10358124 +n10358575 +n10359117 +n10359422 +n10359546 +n10359659 +n10360366 +n10360747 +n10361060 +n10361194 +n10361296 +n10361525 +n10362003 +n10362319 +n10362557 +n10363445 +n10363573 +n10364198 +n10364502 +n10365514 +n10366145 +n10366276 +n10366966 +n10368291 +n10368528 +n10368624 +n10368711 +n10368798 +n10369095 +n10369317 +n10369417 +n10369528 +n10369699 +n10369955 +n10370381 +n10370955 +n10371052 +n10371221 +n10371330 +n10371450 +n10373390 +n10373525 +n10374541 +n10374849 +n10374943 +n10375052 +n10375314 +n10375402 +n10376523 +n10376890 +n10377021 +n10377185 +n10377291 +n10377542 +n10377633 +n10378026 +n10378113 +n10378780 +n10379376 +n10380126 +n10380499 +n10380672 +n10381804 +n10381981 +n10382157 +n10382302 +n10382480 +n10382710 +n10382825 +n10383094 +n10383237 +n10383505 +n10383816 +n10384214 +n10384392 +n10384496 +n10385566 +n10386196 +n10386754 +n10386874 +n10386984 +n10387196 +n10387324 +n10387836 +n10389865 +n10389976 +n10390600 +n10390698 +n10390807 +n10391416 +n10393909 +n10394434 +n10394786 +n10395073 +n10395209 +n10395390 +n10395828 +n10396106 +n10396337 +n10396727 +n10396908 +n10397001 +n10397142 +n10397392 +n10399130 +n10400003 +n10400108 +n10400205 +n10400437 +n10400618 +n10400998 +n10401204 +n10401331 +n10401639 +n10402709 +n10402824 +n10403633 +n10403876 +n10404426 +n10404998 +n10405540 +n10405694 +n10406266 +n10406391 +n10406765 +n10407310 +n10407954 +n10408809 +n10409459 +n10409752 +n10410246 +n10410996 +n10411356 +n10411551 +n10411867 +n10414239 +n10414768 +n10414865 +n10415037 +n10416567 +n10417288 +n10417424 +n10417551 +n10417682 +n10417843 +n10417969 +n10418101 +n10418735 +n10419047 +n10419472 +n10419630 +n10419785 +n10420031 +n10420277 +n10420507 +n10420649 +n10421016 +n10421470 +n10421956 +n10422405 +n10425946 +n10426454 +n10426630 +n10427223 +n10427359 +n10427764 +n10428004 +n10431122 +n10431625 +n10432189 +n10432441 +n10432875 +n10432957 +n10433077 +n10433452 +n10433610 +n10433737 +n10435169 +n10435251 +n10435716 +n10435988 +n10436334 +n10437014 +n10437137 +n10437262 +n10437698 +n10438172 +n10438619 +n10438842 +n10439373 +n10439523 +n10439727 +n10439851 +n10441037 +n10441124 +n10441694 +n10441962 +n10442093 +n10442232 +n10442417 +n10442573 +n10443032 +n10443659 +n10443830 +n10444194 +n10448322 +n10448455 +n10449664 +n10450038 +n10450161 +n10450303 +n10451450 +n10451590 +n10451858 +n10453184 +n10455619 +n10456070 +n10456138 +n10456696 +n10457214 +n10457444 +n10457903 +n10458111 +n10458356 +n10458596 +n10459882 +n10460033 +n10461060 +n10462588 +n10462751 +n10462860 +n10464052 +n10464542 +n10464711 +n10464870 +n10465002 +n10465451 +n10465831 +n10466198 +n10466564 +n10466918 +n10467179 +n10467395 +n10468750 +n10469611 +n10469874 +n10470779 +n10471640 +n10471732 +n10471859 +n10472129 +n10472447 +n10473453 +n10473562 +n10473789 +n10473917 +n10474064 +n10474343 +n10474446 +n10474645 +n10475835 +n10475940 +n10476467 +n10477713 +n10477955 +n10478118 +n10478293 +n10478462 +n10478827 +n10478960 +n10479135 +n10479328 +n10481167 +n10481268 +n10482054 +n10482220 +n10482587 +n10482921 +n10483138 
+n10483395 +n10483799 +n10483890 +n10484858 +n10485298 +n10485883 +n10486166 +n10486236 +n10486561 +n10487182 +n10487363 +n10487592 +n10488016 +n10488309 +n10488656 +n10489426 +n10490421 +n10491998 +n10492086 +n10492727 +n10493199 +n10493419 +n10493685 +n10493835 +n10493922 +n10494195 +n10494373 +n10495167 +n10495421 +n10495555 +n10495756 +n10496393 +n10496489 +n10497135 +n10497534 +n10497645 +n10498046 +n10498699 +n10498816 +n10498986 +n10499110 +n10499232 +n10499355 +n10499631 +n10499857 +n10500217 +n10500419 +n10500603 +n10500824 +n10500942 +n10501453 +n10501635 +n10502046 +n10502329 +n10502950 +n10503818 +n10504090 +n10504206 +n10505347 +n10505613 +n10505732 +n10505942 +n10506336 +n10506544 +n10506915 +n10507070 +n10507380 +n10507482 +n10507565 +n10507692 +n10508141 +n10508379 +n10508710 +n10509063 +n10509161 +n10509810 +n10510245 +n10510974 +n10511771 +n10512201 +n10512372 +n10512708 +n10512859 +n10513509 +n10513823 +n10513938 +n10514051 +n10514121 +n10514255 +n10514429 +n10514784 +n10515863 +n10516527 +n10517137 +n10517283 +n10518349 +n10519126 +n10519494 +n10519984 +n10520286 +n10520544 +n10520964 +n10521100 +n10521662 +n10521853 +n10522035 +n10522324 +n10522759 +n10523341 +n10524076 +n10524223 +n10524869 +n10525134 +n10525436 +n10525617 +n10525878 +n10526534 +n10527147 +n10527334 +n10528023 +n10528148 +n10528493 +n10529231 +n10530150 +n10530383 +n10530571 +n10530959 +n10531109 +n10531445 +n10531838 +n10533874 +n10533983 +n10536134 +n10536274 +n10536416 +n10537708 +n10537906 +n10538629 +n10538733 +n10538853 +n10539015 +n10539160 +n10539278 +n10540114 +n10540252 +n10540656 +n10541833 +n10542608 +n10542761 +n10542888 +n10543161 +n10543937 +n10544232 +n10544748 +n10545792 +n10546428 +n10546633 +n10548419 +n10548537 +n10548681 +n10549510 +n10550252 +n10550369 +n10550468 +n10551576 +n10552393 +n10553140 +n10553235 +n10554024 +n10554141 +n10554846 +n10555059 +n10555430 +n10556033 +n10556518 +n10556704 +n10556825 +n10557246 +n10557854 +n10559009 +n10559288 +n10559508 +n10559683 +n10559996 +n10560106 +n10560637 +n10561222 +n10561320 +n10561736 +n10562135 +n10562283 +n10562509 +n10562968 +n10563314 +n10563403 +n10563711 +n10564098 +n10565502 +n10565667 +n10566072 +n10567613 +n10567722 +n10567848 +n10568200 +n10568358 +n10568443 +n10568608 +n10568915 +n10569011 +n10569179 +n10570019 +n10570704 +n10571907 +n10572706 +n10572889 +n10573957 +n10574311 +n10574538 +n10574840 +n10575463 +n10575594 +n10575787 +n10576223 +n10576316 +n10576676 +n10576818 +n10576962 +n10577182 +n10577284 +n10577710 +n10577820 +n10578021 +n10578162 +n10578471 +n10578656 +n10579062 +n10579549 +n10580030 +n10580437 +n10580535 +n10581648 +n10581890 +n10582604 +n10582746 +n10583387 +n10583790 +n10585077 +n10585217 +n10585628 +n10586166 +n10586265 +n10586444 +n10586903 +n10586998 +n10588074 +n10588357 +n10588724 +n10588965 +n10589666 +n10590146 +n10590239 +n10590452 +n10590903 +n10591072 +n10591811 +n10592049 +n10592811 +n10593521 +n10594147 +n10594523 +n10594857 +n10595164 +n10595647 +n10596517 +n10596899 +n10597505 +n10597745 +n10597889 +n10598013 +n10598181 +n10598459 +n10598904 +n10599215 +n10599806 +n10601234 +n10601362 +n10602119 +n10602470 +n10602985 +n10603528 +n10603851 +n10604275 +n10604380 +n10604634 +n10604880 +n10604979 +n10605253 +n10605737 +n10607291 +n10607478 +n10609092 +n10609198 +n10610465 +n10610850 +n10611267 +n10611613 +n10612210 +n10612373 +n10612518 +n10613996 +n10614507 +n10614629 +n10615179 +n10615334 +n10616578 +n10617024 +n10617193 +n10617397 +n10618234 +n10618342 +n10618465 +n10618685 +n10618848 
+n10619492 +n10619642 +n10619888 +n10620212 +n10620586 +n10620758 +n10621294 +n10621400 +n10621514 +n10622053 +n10624074 +n10624310 +n10624437 +n10624540 +n10625860 +n10626630 +n10627252 +n10628097 +n10628644 +n10629329 +n10629647 +n10629939 +n10630093 +n10630188 +n10631131 +n10631309 +n10631654 +n10632576 +n10633298 +n10633450 +n10634464 +n10634849 +n10634990 +n10635788 +n10636488 +n10637483 +n10638922 +n10639238 +n10639359 +n10639637 +n10639817 +n10641223 +n10642596 +n10642705 +n10643095 +n10643837 +n10643937 +n10644598 +n10645017 +n10645223 +n10646032 +n10646140 +n10646433 +n10646641 +n10646780 +n10646942 +n10647745 +n10648237 +n10648696 +n10649197 +n10649308 +n10650162 +n10652605 +n10652703 +n10654015 +n10654211 +n10654321 +n10654827 +n10654932 +n10655169 +n10655442 +n10655594 +n10655730 +n10655986 +n10656120 +n10656223 +n10656969 +n10657306 +n10657556 +n10657835 +n10658304 +n10659042 +n10659762 +n10660128 +n10660621 +n10660883 +n10661002 +n10661216 +n10661563 +n10661732 +n10663315 +n10663549 +n10665302 +n10665587 +n10665698 +n10666752 +n10667477 +n10667709 +n10667863 +n10668450 +n10668666 +n10669991 +n10671042 +n10671613 +n10671736 +n10671898 +n10672371 +n10672540 +n10672662 +n10673296 +n10673776 +n10674130 +n10674713 +n10675010 +n10675142 +n10675609 +n10676018 +n10676434 +n10676569 +n10678937 +n10679174 +n10679503 +n10679610 +n10679723 +n10680609 +n10680796 +n10681194 +n10681557 +n10682713 +n10682953 +n10683675 +n10684146 +n10684630 +n10684827 +n10685398 +n10686073 +n10686517 +n10686694 +n10686885 +n10688356 +n10688811 +n10689306 +n10690268 +n10690421 +n10690648 +n10691318 +n10691937 +n10692090 +n10692482 +n10692883 +n10693235 +n10693334 +n10693824 +n10694258 +n10694939 +n10695450 +n10696101 +n10696508 +n10697135 +n10697282 +n10698368 +n10699558 +n10699752 +n10699981 +n10700105 +n10700201 +n10700640 +n10700963 +n10701180 +n10701644 +n10701962 +n10702167 +n10702615 +n10703221 +n10703336 +n10703480 +n10703692 +n10704238 +n10704712 +n10704886 +n10705448 +n10705615 +n10706812 +n10707134 +n10707233 +n10707707 +n10708292 +n10708454 +n10709529 +n10710171 +n10710259 +n10710778 +n10710913 +n10711483 +n10711766 +n10712229 +n10712374 +n10712474 +n10712690 +n10712835 +n10713254 +n10713686 +n10713843 +n10714195 +n10715030 +n10715347 +n10715789 +n10716576 +n10716864 +n10717055 +n10717196 +n10717337 +n10718131 +n10718349 +n10718509 +n10718665 +n10718952 +n10719036 +n10719132 +n10719267 +n10719807 +n10720197 +n10720453 +n10720964 +n10721124 +n10721321 +n10721612 +n10721708 +n10721819 +n10722029 +n10722575 +n10722965 +n10723230 +n10723597 +n10724132 +n10724372 +n10724570 +n10725280 +n10726031 +n10726786 +n10727016 +n10727171 +n10727458 +n10728117 +n10728233 +n10728624 +n10728998 +n10729330 +n10730542 +n10730728 +n10731013 +n10731732 +n10732010 +n10732521 +n10732854 +n10732967 +n10733820 +n10734394 +n10734741 +n10734891 +n10734963 +n10735173 +n10735298 +n10735984 +n10737103 +n10737264 +n10738111 +n10738215 +n10738670 +n10738871 +n10739135 +n10739297 +n10739391 +n10740594 +n10740732 +n10740868 +n10741152 +n10741367 +n10741493 +n10742005 +n10742111 +n10742546 +n10742997 +n10743124 +n10743356 +n10744078 +n10744164 +n10745006 +n10745770 +n10746931 +n10747119 +n10747424 +n10747548 +n10747965 +n10748142 +n10748506 +n10748620 +n10749928 +n10750031 +n10750188 +n10750640 +n10751026 +n10751152 +n10751265 +n10751710 +n10752480 +n10753061 +n10753182 +n10753339 +n10753442 +n10753989 +n10754189 +n10754281 +n10754449 +n10755080 +n10755164 +n10755394 +n10755648 +n10756061 +n10756148 +n10756261 +n10756641 +n10756837 
+n10757050 +n10757492 +n10758337 +n10758445 +n10758949 +n10759151 +n10759331 +n10759982 +n10760199 +n10760622 +n10760951 +n10761190 +n10761326 +n10761519 +n10762212 +n10762480 +n10763075 +n10763245 +n10763383 +n10763620 +n10764465 +n10764622 +n10764719 +n10765305 +n10765587 +n10765679 +n10765885 +n10766260 +n10768148 +n10768272 +n10768903 +n10769084 +n10769188 +n10769321 +n10769459 +n10771066 +n10772092 +n10772580 +n10772937 +n10773665 +n10773800 +n10774329 +n10774756 +n10775003 +n10775128 +n10776052 +n10776339 +n10776887 +n10777299 +n10778044 +n10778148 +n10778711 +n10778999 +n10779610 +n10779897 +n10779995 +n10780284 +n10780632 +n10781236 +n10781817 +n10782362 +n10782471 +n10782791 +n10782940 +n10783240 +n10783539 +n10783646 +n10783734 +n10784113 +n10784544 +n10784922 +n10785480 +n10787470 +n10788852 +n10789415 +n10789709 +n10791115 +n10791221 +n10791820 +n10791890 +n10792335 +n10792506 +n10792856 +n10793570 +n10793799 +n10794014 +n10801561 +n10801802 +n10802507 +n10802621 +n10802953 +n10803031 +n10803282 +n10803978 +n10804287 +n10804636 +n10804732 +n10805501 +n10806113 +n10994097 +n11100798 +n11196627 +n11242849 +n11318824 +n11346873 +n11448153 +n11487732 +n11508382 +n11511327 +n11524451 +n11530008 +n11531193 +n11531334 +n11532682 +n11533212 +n11533999 +n11536567 +n11536673 +n11537327 +n11539289 +n11542137 +n11542640 +n11544015 +n11545350 +n11545524 +n11545714 +n11547562 +n11547855 +n11548728 +n11548870 +n11549009 +n11549245 +n11549779 +n11549895 +n11552133 +n11552386 +n11552594 +n11552806 +n11552976 +n11553240 +n11553522 +n11596108 +n11597657 +n11598287 +n11598686 +n11598886 +n11599324 +n11600372 +n11601177 +n11601333 +n11601918 +n11602091 +n11602478 +n11602873 +n11603246 +n11603462 +n11603835 +n11604046 +n11608250 +n11609475 +n11609684 +n11609862 +n11610047 +n11610215 +n11610437 +n11610602 +n11610823 +n11611087 +n11611233 +n11611356 +n11611561 +n11611758 +n11612018 +n11612235 +n11612349 +n11612575 +n11612923 +n11613219 +n11613459 +n11613692 +n11613867 +n11614039 +n11614250 +n11614420 +n11614713 +n11615026 +n11615259 +n11615387 +n11615607 +n11615812 +n11615967 +n11616260 +n11616486 +n11616662 +n11616852 +n11617090 +n11617272 +n11617631 +n11617878 +n11618079 +n11618290 +n11618525 +n11618861 +n11619227 +n11619455 +n11619687 +n11619845 +n11620016 +n11620389 +n11620673 +n11621029 +n11621281 +n11621547 +n11621727 +n11621950 +n11622184 +n11622368 +n11622591 +n11622771 +n11623105 +n11623815 +n11623967 +n11624192 +n11624531 +n11625003 +n11625223 +n11625391 +n11625632 +n11625804 +n11626010 +n11626152 +n11626409 +n11626585 +n11626826 +n11627168 +n11627512 +n11627714 +n11627908 +n11628087 +n11628456 +n11628793 +n11629047 +n11629354 +n11630017 +n11630489 +n11631159 +n11631405 +n11631619 +n11631854 +n11631985 +n11632167 +n11632376 +n11632619 +n11632929 +n11633284 +n11634736 +n11635152 +n11635433 +n11635830 +n11636204 +n11636835 +n11639084 +n11639306 +n11639445 +n11640132 +n11643835 +n11644046 +n11644226 +n11644462 +n11644872 +n11645163 +n11645590 +n11645914 +n11646167 +n11646344 +n11646517 +n11646694 +n11646955 +n11647306 +n11647703 +n11647868 +n11648039 +n11648268 +n11648776 +n11649150 +n11649359 +n11649878 +n11650160 +n11650307 +n11650430 +n11650558 +n11650759 +n11652039 +n11652217 +n11652376 +n11652578 +n11652753 +n11652966 +n11653126 +n11653570 +n11653904 +n11654293 +n11654438 +n11654984 +n11655152 +n11655592 +n11655974 +n11656123 +n11656549 +n11656771 +n11657585 +n11658331 +n11658544 +n11658709 +n11659248 +n11659627 +n11660300 +n11661372 +n11661909 +n11662128 +n11662371 +n11662585 +n11662937 
+n11663263 +n11664418 +n11665372 +n11666854 +n11668117 +n11669786 +n11669921 +n11672269 +n11672400 +n11674019 +n11674332 +n11675025 +n11675404 +n11675738 +n11676500 +n11676743 +n11676850 +n11677485 +n11677902 +n11678010 +n11678299 +n11678377 +n11679378 +n11680457 +n11680596 +n11682659 +n11683216 +n11683838 +n11684264 +n11684499 +n11684654 +n11685091 +n11685621 +n11686195 +n11686652 +n11686780 +n11686912 +n11687071 +n11687432 +n11687789 +n11687964 +n11688069 +n11688378 +n11689197 +n11689367 +n11689483 +n11689678 +n11689815 +n11689957 +n11690088 +n11690254 +n11690455 +n11691046 +n11691857 +n11692265 +n11692792 +n11693981 +n11694300 +n11694469 +n11694664 +n11694866 +n11695085 +n11695285 +n11695599 +n11695974 +n11696450 +n11696935 +n11697560 +n11697802 +n11698042 +n11698245 +n11699442 +n11699751 +n11700058 +n11700279 +n11700864 +n11701066 +n11701302 +n11702713 +n11703669 +n11704093 +n11704620 +n11704791 +n11705171 +n11705387 +n11705573 +n11705776 +n11706325 +n11706761 +n11706942 +n11707229 +n11707827 +n11708658 +n11708857 +n11709045 +n11709205 +n11709674 +n11710136 +n11710393 +n11710658 +n11710827 +n11710987 +n11711289 +n11711537 +n11711764 +n11711971 +n11712282 +n11713164 +n11713370 +n11713763 +n11714382 +n11715430 +n11715678 +n11716698 +n11717399 +n11717577 +n11718296 +n11718681 +n11719286 +n11720353 +n11720643 +n11720891 +n11721337 +n11721642 +n11722036 +n11722342 +n11722466 +n11722621 +n11722982 +n11723227 +n11723452 +n11723770 +n11723986 +n11724109 +n11724660 +n11725015 +n11725311 +n11725480 +n11725623 +n11725821 +n11725973 +n11726145 +n11726269 +n11726433 +n11726707 +n11727091 +n11727358 +n11727540 +n11727738 +n11728099 +n11728769 +n11728945 +n11729142 +n11729478 +n11729860 +n11730015 +n11730458 +n11730602 +n11730750 +n11730933 +n11731157 +n11731659 +n11732052 +n11732567 +n11733054 +n11733312 +n11733548 +n11734493 +n11734698 +n11735053 +n11735570 +n11735977 +n11736362 +n11736694 +n11736851 +n11737009 +n11737125 +n11737534 +n11738547 +n11738997 +n11739365 +n11739978 +n11740414 +n11741175 +n11741350 +n11741575 +n11741797 +n11742310 +n11742878 +n11744011 +n11744108 +n11744471 +n11745817 +n11746600 +n11747468 +n11748002 +n11748811 +n11749112 +n11749603 +n11750173 +n11750508 +n11750989 +n11751765 +n11751974 +n11752578 +n11752798 +n11752937 +n11753143 +n11753355 +n11753562 +n11753700 +n11754893 +n11756092 +n11756329 +n11756669 +n11756870 +n11757017 +n11757190 +n11757653 +n11757851 +n11758122 +n11758276 +n11758483 +n11758799 +n11759224 +n11759404 +n11759609 +n11759853 +n11760785 +n11761202 +n11761650 +n11761836 +n11762018 +n11762433 +n11762927 +n11763142 +n11763625 +n11763874 +n11764478 +n11764814 +n11765568 +n11766046 +n11766189 +n11766432 +n11767354 +n11767877 +n11768816 +n11769176 +n11769621 +n11769803 +n11770256 +n11771147 +n11771539 +n11771746 +n11771924 +n11772408 +n11772879 +n11773408 +n11773628 +n11773987 +n11774513 +n11774972 +n11775340 +n11775626 +n11776234 +n11777080 +n11778092 +n11778257 +n11779300 +n11780148 +n11780424 +n11781176 +n11782036 +n11782266 +n11782761 +n11782878 +n11783162 +n11783920 +n11784126 +n11784497 +n11785276 +n11785668 +n11785875 +n11786131 +n11786539 +n11786843 +n11787190 +n11788039 +n11788727 +n11789066 +n11789438 +n11789589 +n11789962 +n11790089 +n11790788 +n11790936 +n11791341 +n11791569 +n11792029 +n11792341 +n11792742 +n11793403 +n11793779 +n11794024 +n11794139 +n11794519 +n11795049 +n11795216 +n11795580 +n11796005 +n11796188 +n11797321 +n11797508 +n11797981 +n11798270 +n11798496 +n11798688 +n11798978 +n11799331 +n11799732 +n11800236 +n11800565 +n11801392 
+n11801665 +n11801891 +n11802410 +n11802586 +n11802800 +n11802995 +n11805255 +n11805544 +n11805956 +n11806219 +n11806369 +n11806521 +n11806679 +n11806814 +n11807108 +n11807525 +n11807696 +n11807979 +n11808299 +n11808468 +n11808721 +n11808932 +n11809094 +n11809271 +n11809437 +n11809594 +n11809754 +n11810030 +n11810358 +n11811059 +n11811473 +n11811706 +n11811921 +n11812094 +n11812910 +n11813077 +n11814584 +n11814996 +n11815491 +n11815721 +n11815918 +n11816121 +n11816336 +n11816649 +n11816829 +n11817160 +n11817501 +n11817914 +n11818069 +n11818636 +n11819509 +n11819912 +n11820965 +n11821184 +n11822300 +n11823043 +n11823305 +n11823436 +n11823756 +n11824146 +n11824344 +n11824747 +n11825351 +n11825749 +n11826198 +n11826569 +n11827541 +n11828577 +n11828973 +n11829205 +n11829672 +n11829922 +n11830045 +n11830252 +n11830400 +n11830714 +n11830906 +n11831100 +n11831297 +n11831521 +n11832214 +n11832480 +n11832671 +n11832899 +n11833373 +n11833749 +n11834272 +n11834654 +n11834890 +n11835251 +n11836327 +n11836722 +n11837204 +n11837351 +n11837562 +n11837743 +n11837970 +n11838413 +n11838916 +n11839460 +n11839568 +n11839823 +n11840067 +n11840246 +n11840476 +n11840764 +n11841247 +n11843441 +n11844371 +n11844892 +n11845557 +n11845793 +n11845913 +n11846312 +n11846425 +n11846765 +n11847169 +n11848479 +n11848867 +n11849271 +n11849467 +n11849871 +n11849983 +n11850521 +n11850918 +n11851258 +n11851578 +n11851839 +n11852028 +n11852148 +n11852531 +n11853079 +n11853356 +n11853813 +n11854479 +n11855274 +n11855435 +n11855553 +n11855842 +n11856573 +n11857696 +n11857875 +n11858077 +n11858703 +n11858814 +n11859275 +n11859472 +n11859737 +n11860208 +n11860555 +n11861238 +n11861487 +n11861641 +n11861853 +n11862835 +n11863467 +n11863877 +n11865071 +n11865276 +n11865429 +n11865574 +n11865874 +n11866248 +n11866706 +n11867311 +n11868814 +n11869351 +n11869689 +n11870044 +n11870418 +n11870747 +n11871059 +n11871496 +n11871748 +n11872146 +n11872324 +n11872658 +n11873182 +n11873612 +n11874081 +n11874423 +n11874878 +n11875523 +n11875691 +n11875938 +n11876204 +n11876432 +n11876634 +n11876803 +n11877193 +n11877283 +n11877473 +n11877646 +n11877860 +n11878101 +n11878283 +n11878633 +n11879054 +n11879722 +n11879895 +n11881189 +n11882074 +n11882237 +n11882426 +n11882636 +n11882821 +n11882972 +n11883328 +n11883628 +n11883945 +n11884384 +n11884967 +n11885856 +n11887119 +n11887310 +n11887476 +n11887750 +n11888061 +n11888424 +n11888800 +n11889205 +n11889619 +n11890022 +n11890150 +n11890884 +n11891175 +n11892029 +n11892181 +n11892637 +n11892817 +n11893640 +n11893916 +n11894327 +n11894558 +n11894770 +n11895092 +n11895472 +n11895714 +n11896141 +n11896722 +n11897116 +n11897466 +n11898639 +n11898775 +n11899223 +n11899762 +n11899921 +n11900569 +n11901294 +n11901452 +n11901597 +n11901759 +n11901977 +n11902200 +n11902389 +n11902709 +n11902982 +n11903333 +n11903671 +n11904109 +n11904274 +n11905392 +n11905749 +n11906127 +n11906514 +n11906917 +n11907100 +n11907405 +n11907689 +n11908549 +n11908846 +n11909864 +n11910271 +n11910460 +n11910666 +n11915214 +n11915658 +n11915899 +n11916467 +n11916696 +n11917407 +n11917835 +n11918286 +n11918473 +n11918808 +n11919447 +n11919761 +n11919975 +n11920133 +n11920498 +n11920663 +n11920998 +n11921395 +n11921792 +n11922661 +n11922755 +n11922839 +n11922926 +n11923174 +n11923397 +n11923637 +n11924014 +n11924445 +n11924849 +n11925303 +n11925450 +n11925898 +n11926365 +n11926833 +n11926976 +n11927215 +n11927740 +n11928352 +n11928858 +n11929743 +n11930038 +n11930203 +n11930353 +n11930571 +n11930788 +n11930994 +n11931135 +n11931540 
+n11931918 +n11932745 +n11932927 +n11933099 +n11933257 +n11933387 +n11933546 +n11933728 +n11933903 +n11934041 +n11934239 +n11934463 +n11934616 +n11934807 +n11935027 +n11935187 +n11935330 +n11935469 +n11935627 +n11935715 +n11935794 +n11935877 +n11935953 +n11936027 +n11936113 +n11936199 +n11936287 +n11936369 +n11936448 +n11936539 +n11936624 +n11936707 +n11936782 +n11936864 +n11936946 +n11937023 +n11937102 +n11937195 +n11937278 +n11937360 +n11937446 +n11937692 +n11938556 +n11939180 +n11939491 +n11939699 +n11940006 +n11940349 +n11940599 +n11940750 +n11941094 +n11941478 +n11941924 +n11942659 +n11943133 +n11943407 +n11943660 +n11943992 +n11944196 +n11944751 +n11944954 +n11945367 +n11945514 +n11945783 +n11946051 +n11946313 +n11946727 +n11946918 +n11947251 +n11947629 +n11947802 +n11948044 +n11948264 +n11948469 +n11948864 +n11949015 +n11949402 +n11949857 +n11950345 +n11950686 +n11950877 +n11951052 +n11951511 +n11951820 +n11952346 +n11952541 +n11953038 +n11953339 +n11953610 +n11953884 +n11954161 +n11954345 +n11954484 +n11954642 +n11954798 +n11955040 +n11955153 +n11955532 +n11955896 +n11956348 +n11956850 +n11957317 +n11957514 +n11957678 +n11958080 +n11958499 +n11958888 +n11959259 +n11959632 +n11959862 +n11960245 +n11960673 +n11961100 +n11961446 +n11961871 +n11962272 +n11962667 +n11962994 +n11963572 +n11963932 +n11964446 +n11964848 +n11965218 +n11965627 +n11965962 +n11966083 +n11966215 +n11966385 +n11966617 +n11966896 +n11967142 +n11967315 +n11967744 +n11967878 +n11968519 +n11968704 +n11968931 +n11969166 +n11969607 +n11969806 +n11970101 +n11970298 +n11970586 +n11971248 +n11971406 +n11971783 +n11971927 +n11972291 +n11972759 +n11972959 +n11973341 +n11973634 +n11973749 +n11974373 +n11974557 +n11974888 +n11975254 +n11976170 +n11976314 +n11976511 +n11976933 +n11977303 +n11977660 +n11977887 +n11978233 +n11978551 +n11978713 +n11978961 +n11979187 +n11979354 +n11979527 +n11979715 +n11979964 +n11980318 +n11980682 +n11981192 +n11981475 +n11982115 +n11982545 +n11982939 +n11983375 +n11983606 +n11984144 +n11984542 +n11985053 +n11985321 +n11985739 +n11985903 +n11986511 +n11986729 +n11987126 +n11987349 +n11987511 +n11988132 +n11988596 +n11988893 +n11989087 +n11989393 +n11989869 +n11990167 +n11990313 +n11990627 +n11990920 +n11991263 +n11991549 +n11991777 +n11992479 +n11992806 +n11993203 +n11993444 +n11993675 +n11994150 +n11995092 +n11995396 +n11996251 +n11996677 +n11997032 +n11997160 +n11997969 +n11998492 +n11998888 +n11999278 +n11999656 +n12000191 +n12001294 +n12001707 +n12001924 +n12002428 +n12002651 +n12002826 +n12003167 +n12003696 +n12004120 +n12004547 +n12004987 +n12005656 +n12006306 +n12006766 +n12006930 +n12007196 +n12007406 +n12007766 +n12008252 +n12008487 +n12008749 +n12009047 +n12009420 +n12009792 +n12010628 +n12010815 +n12011370 +n12011620 +n12012111 +n12012253 +n12012510 +n12013035 +n12013511 +n12013701 +n12014085 +n12014355 +n12014923 +n12015221 +n12015525 +n12015959 +n12016434 +n12016567 +n12016777 +n12016914 +n12017127 +n12017326 +n12017511 +n12017664 +n12017853 +n12018014 +n12018100 +n12018188 +n12018271 +n12018363 +n12018447 +n12018530 +n12018760 +n12019035 +n12019827 +n12020184 +n12020507 +n12020736 +n12020941 +n12022054 +n12022382 +n12022821 +n12023108 +n12023407 +n12023726 +n12024176 +n12024445 +n12024690 +n12024805 +n12025220 +n12026018 +n12026476 +n12026981 +n12027222 +n12027658 +n12028424 +n12029039 +n12029635 +n12030092 +n12030654 +n12030908 +n12031139 +n12031388 +n12031547 +n12031927 +n12032429 +n12032686 +n12033139 +n12033504 +n12033709 +n12034141 +n12034384 +n12034594 +n12035631 +n12035907 
+n12036067 +n12036226 +n12036939 +n12037499 +n12037691 +n12038038 +n12038208 +n12038406 +n12038585 +n12038760 +n12038898 +n12039317 +n12041446 +n12043444 +n12043673 +n12043836 +n12044041 +n12044467 +n12044784 +n12045157 +n12045514 +n12045860 +n12046028 +n12046428 +n12046815 +n12047345 +n12047884 +n12048056 +n12048399 +n12048928 +n12049282 +n12049562 +n12050533 +n12050959 +n12051103 +n12051514 +n12051792 +n12052267 +n12052447 +n12052787 +n12053405 +n12053690 +n12053962 +n12054195 +n12055073 +n12055516 +n12056099 +n12056217 +n12056601 +n12056758 +n12056990 +n12057211 +n12057447 +n12057660 +n12057895 +n12058192 +n12058630 +n12058822 +n12059314 +n12059625 +n12060546 +n12061104 +n12061380 +n12061614 +n12062105 +n12062468 +n12062626 +n12062781 +n12063211 +n12063639 +n12064389 +n12064591 +n12065316 +n12065649 +n12065777 +n12066018 +n12066261 +n12066451 +n12066630 +n12066821 +n12067029 +n12067193 +n12067433 +n12067672 +n12067817 +n12068138 +n12068432 +n12068615 +n12069009 +n12069217 +n12069679 +n12070016 +n12070381 +n12070583 +n12070712 +n12071259 +n12071477 +n12071744 +n12072210 +n12072722 +n12073217 +n12073554 +n12073991 +n12074408 +n12074867 +n12075010 +n12075151 +n12075299 +n12075830 +n12076223 +n12076577 +n12076852 +n12077244 +n12077944 +n12078172 +n12078451 +n12078747 +n12079120 +n12079523 +n12079963 +n12080395 +n12080588 +n12080820 +n12081215 +n12081649 +n12082131 +n12083113 +n12083591 +n12083847 +n12084158 +n12084400 +n12084555 +n12084890 +n12085267 +n12085664 +n12086012 +n12086192 +n12086539 +n12086778 +n12087961 +n12088223 +n12088327 +n12088495 +n12088909 +n12089320 +n12089496 +n12089846 +n12090890 +n12091213 +n12091377 +n12091550 +n12091697 +n12091953 +n12092262 +n12092417 +n12092629 +n12092930 +n12093329 +n12093600 +n12093885 +n12094244 +n12094401 +n12094612 +n12095020 +n12095281 +n12095412 +n12095543 +n12095647 +n12095934 +n12096089 +n12096395 +n12096563 +n12096674 +n12097396 +n12097556 +n12098403 +n12098524 +n12098827 +n12099342 +n12100187 +n12101870 +n12102133 +n12103680 +n12103894 +n12104104 +n12104238 +n12104501 +n12104734 +n12105125 +n12105353 +n12105828 +n12105981 +n12106134 +n12106323 +n12107002 +n12107191 +n12107710 +n12107970 +n12108432 +n12108613 +n12108871 +n12109365 +n12109827 +n12110085 +n12110236 +n12110352 +n12110475 +n12110778 +n12111238 +n12111627 +n12112008 +n12112337 +n12112609 +n12112918 +n12113195 +n12113323 +n12113657 +n12114010 +n12114590 +n12115180 +n12116058 +n12116429 +n12116734 +n12117017 +n12117235 +n12117326 +n12117695 +n12117912 +n12118414 +n12118661 +n12119099 +n12119238 +n12119390 +n12119539 +n12119717 +n12120347 +n12120578 +n12121033 +n12121187 +n12121610 +n12122442 +n12122725 +n12122918 +n12123648 +n12123741 +n12124172 +n12124627 +n12124818 +n12125001 +n12125183 +n12125584 +n12126084 +n12126360 +n12126736 +n12127460 +n12127575 +n12127768 +n12128071 +n12128306 +n12128490 +n12129134 +n12129738 +n12129986 +n12130549 +n12131405 +n12131550 +n12132092 +n12132956 +n12133151 +n12133462 +n12133682 +n12134025 +n12134486 +n12134695 +n12134836 +n12135049 +n12135576 +n12135729 +n12135898 +n12136392 +n12136581 +n12136720 +n12137120 +n12137569 +n12137791 +n12137954 +n12138110 +n12138248 +n12138444 +n12138578 +n12139196 +n12139575 +n12139793 +n12139921 +n12140511 +n12140759 +n12140903 +n12141167 +n12141385 +n12141495 +n12142085 +n12142357 +n12142450 +n12143065 +n12143215 +n12143405 +n12143676 +n12144313 +n12144580 +n12144987 +n12145148 +n12145477 +n12146311 +n12146488 +n12146654 +n12147226 +n12147835 +n12148757 +n12150722 +n12150969 +n12151170 +n12151615 +n12152031 
+n12152251 +n12152532 +n12152722 +n12153033 +n12153224 +n12153580 +n12153741 +n12153914 +n12154114 +n12154773 +n12155009 +n12155583 +n12155773 +n12156679 +n12156819 +n12157056 +n12157179 +n12157769 +n12158031 +n12158443 +n12158798 +n12159055 +n12159388 +n12159555 +n12159804 +n12159942 +n12160125 +n12160303 +n12160490 +n12160857 +n12161056 +n12161285 +n12161577 +n12161744 +n12161969 +n12162181 +n12162425 +n12162758 +n12163035 +n12163279 +n12164363 +n12164656 +n12164881 +n12165170 +n12165384 +n12165758 +n12166128 +n12166424 +n12166793 +n12166929 +n12167075 +n12167436 +n12167602 +n12168565 +n12169099 +n12170585 +n12171098 +n12171316 +n12171966 +n12172364 +n12172481 +n12172906 +n12173069 +n12173664 +n12173912 +n12174311 +n12174521 +n12174926 +n12175181 +n12175370 +n12175598 +n12176453 +n12176709 +n12176953 +n12177129 +n12177455 +n12178129 +n12178780 +n12178896 +n12179122 +n12179632 +n12180168 +n12180456 +n12180885 +n12181352 +n12181612 +n12182049 +n12182276 +n12183026 +n12183452 +n12183816 +n12184095 +n12184468 +n12184912 +n12185254 +n12185859 +n12186352 +n12186554 +n12186839 +n12187247 +n12187663 +n12187891 +n12188289 +n12188635 +n12189429 +n12189779 +n12189987 +n12190410 +n12190869 +n12191240 +n12192132 +n12192877 +n12193334 +n12193665 +n12194147 +n12194613 +n12195391 +n12195533 +n12195734 +n12196129 +n12196336 +n12196527 +n12196694 +n12196954 +n12197359 +n12197601 +n12198286 +n12198793 +n12199266 +n12199399 +n12199790 +n12199982 +n12200143 +n12200504 +n12200905 +n12201331 +n12201580 +n12201938 +n12202936 +n12203529 +n12203699 +n12203896 +n12204032 +n12204175 +n12204730 +n12205460 +n12205694 +n12214789 +n12215022 +n12215210 +n12215579 +n12215824 +n12216215 +n12216628 +n12216968 +n12217453 +n12217851 +n12218274 +n12218490 +n12218868 +n12219668 +n12220019 +n12220496 +n12220829 +n12221191 +n12221368 +n12221522 +n12221801 +n12222090 +n12222493 +n12222900 +n12223160 +n12223569 +n12223764 +n12224978 +n12225222 +n12225349 +n12225563 +n12226932 +n12227658 +n12227909 +n12228229 +n12228387 +n12228689 +n12228886 +n12229111 +n12229651 +n12229887 +n12230540 +n12230794 +n12231192 +n12231709 +n12232114 +n12232280 +n12232851 +n12233249 +n12234318 +n12234669 +n12235051 +n12235479 +n12236160 +n12236546 +n12236768 +n12236977 +n12237152 +n12237486 +n12237641 +n12237855 +n12238756 +n12238913 +n12239240 +n12239647 +n12239880 +n12240150 +n12240477 +n12240965 +n12241192 +n12241426 +n12241880 +n12242123 +n12242409 +n12242850 +n12243109 +n12243693 +n12244153 +n12244458 +n12244650 +n12244819 +n12245319 +n12245695 +n12245885 +n12246037 +n12246232 +n12246773 +n12246941 +n12247202 +n12247407 +n12247963 +n12248141 +n12248359 +n12248574 +n12248780 +n12248941 +n12249122 +n12249294 +n12249542 +n12251001 +n12251278 +n12251740 +n12252168 +n12252383 +n12252866 +n12253229 +n12253487 +n12253664 +n12253835 +n12254168 +n12255225 +n12256112 +n12256325 +n12256522 +n12256708 +n12256920 +n12257570 +n12257725 +n12258101 +n12258885 +n12259316 +n12260799 +n12261359 +n12261571 +n12261808 +n12262018 +n12262185 +n12262553 +n12263038 +n12263204 +n12263410 +n12263588 +n12263738 +n12263987 +n12264512 +n12264786 +n12265083 +n12265394 +n12265600 +n12266217 +n12266528 +n12266644 +n12266796 +n12266984 +n12267133 +n12267265 +n12267411 +n12267534 +n12267677 +n12267931 +n12268246 +n12269241 +n12269406 +n12269652 +n12270027 +n12270278 +n12270460 +n12270741 +n12270946 +n12271187 +n12271451 +n12271643 +n12271933 +n12272239 +n12272432 +n12272735 +n12272883 +n12273114 +n12273344 +n12273515 +n12273768 +n12273939 +n12274151 +n12274358 +n12274630 +n12274863 
+n12275131 +n12275317 +n12275489 +n12275675 +n12275888 +n12276110 +n12276314 +n12276477 +n12276628 +n12276872 +n12277150 +n12277334 +n12277578 +n12277800 +n12278107 +n12278371 +n12278650 +n12278865 +n12279060 +n12279293 +n12279458 +n12279772 +n12280060 +n12280364 +n12281241 +n12281788 +n12281974 +n12282235 +n12282527 +n12282737 +n12282933 +n12283147 +n12283395 +n12283542 +n12283790 +n12284262 +n12284821 +n12285049 +n12285195 +n12285369 +n12285512 +n12285705 +n12285900 +n12286068 +n12286197 +n12286826 +n12286988 +n12287195 +n12287642 +n12287836 +n12288005 +n12288823 +n12289310 +n12289433 +n12289585 +n12290748 +n12290975 +n12291143 +n12291459 +n12291671 +n12291959 +n12292463 +n12292877 +n12293723 +n12294124 +n12294331 +n12294542 +n12294723 +n12294871 +n12295033 +n12295237 +n12295429 +n12295796 +n12296045 +n12296432 +n12296735 +n12296929 +n12297110 +n12297280 +n12297507 +n12297846 +n12298165 +n12299640 +n12300840 +n12301180 +n12301445 +n12301613 +n12301766 +n12302071 +n12302248 +n12302565 +n12303083 +n12303462 +n12304115 +n12304286 +n12304420 +n12304703 +n12304899 +n12305089 +n12305293 +n12305475 +n12305654 +n12305819 +n12305986 +n12306089 +n12306270 +n12306717 +n12306938 +n12307076 +n12307240 +n12307756 +n12308112 +n12308447 +n12308907 +n12309277 +n12309630 +n12310021 +n12310349 +n12310638 +n12311045 +n12311224 +n12311413 +n12311579 +n12312110 +n12312728 +n12315060 +n12315245 +n12315598 +n12315999 +n12316444 +n12316572 +n12317296 +n12318378 +n12318782 +n12318965 +n12319204 +n12319414 +n12320010 +n12320414 +n12320627 +n12320806 +n12321077 +n12321395 +n12321669 +n12321873 +n12322099 +n12322501 +n12322699 +n12323665 +n12324056 +n12324222 +n12324388 +n12324558 +n12324906 +n12325234 +n12325787 +n12327022 +n12327528 +n12327846 +n12328398 +n12328567 +n12328801 +n12329260 +n12329473 +n12330239 +n12330469 +n12330587 +n12330891 +n12331066 +n12331263 +n12331655 +n12331788 +n12332030 +n12332218 +n12332555 +n12333053 +n12333530 +n12333771 +n12333961 +n12334153 +n12334293 +n12334891 +n12335483 +n12335664 +n12335800 +n12335937 +n12336092 +n12336224 +n12336333 +n12336586 +n12336727 +n12336973 +n12337131 +n12337246 +n12337391 +n12337617 +n12337800 +n12337922 +n12338034 +n12338146 +n12338258 +n12338454 +n12338655 +n12338796 +n12338979 +n12339526 +n12339831 +n12340383 +n12340581 +n12340755 +n12341542 +n12341931 +n12342299 +n12342498 +n12342852 +n12343480 +n12343753 +n12344283 +n12344483 +n12344700 +n12344837 +n12345280 +n12345899 +n12346578 +n12346813 +n12346986 +n12347158 +n12349315 +n12349711 +n12350032 +n12350758 +n12351091 +n12351790 +n12352287 +n12352639 +n12352844 +n12352990 +n12353203 +n12353431 +n12353754 +n12355760 +n12356023 +n12356395 +n12356960 +n12357485 +n12357968 +n12358293 +n12360108 +n12360534 +n12360684 +n12360817 +n12360958 +n12361135 +n12361560 +n12361754 +n12361946 +n12362274 +n12362514 +n12362668 +n12363301 +n12363768 +n12364604 +n12364940 +n12365158 +n12365285 +n12365462 +n12365900 +n12366053 +n12366186 +n12366313 +n12366675 +n12366870 +n12367611 +n12368028 +n12368257 +n12368451 +n12369066 +n12369309 +n12369476 +n12369665 +n12369845 +n12370174 +n12370549 +n12371202 +n12371439 +n12371704 +n12372233 +n12373100 +n12373739 +n12374418 +n12374705 +n12374862 +n12375769 +n12377198 +n12377494 +n12378249 +n12378753 +n12378963 +n12379531 +n12380761 +n12381511 +n12382233 +n12382875 +n12383737 +n12383894 +n12384037 +n12384227 +n12384375 +n12384569 +n12384680 +n12384839 +n12385429 +n12385566 +n12385830 +n12386945 +n12387103 +n12387633 +n12387839 +n12388143 +n12388293 +n12388858 +n12388989 +n12389130 
+n12389501 +n12389727 +n12389932 +n12390099 +n12390314 +n12392070 +n12392549 +n12392765 +n12393269 +n12394118 +n12394328 +n12394638 +n12395068 +n12395289 +n12395463 +n12395906 +n12396091 +n12396924 +n12397431 +n12399132 +n12399384 +n12399534 +n12399656 +n12399899 +n12400489 +n12400720 +n12400924 +n12401335 +n12401684 +n12401893 +n12402051 +n12402348 +n12402596 +n12402840 +n12403075 +n12403276 +n12403513 +n12403994 +n12404729 +n12405714 +n12406304 +n12406488 +n12406715 +n12406902 +n12407079 +n12407222 +n12407396 +n12407545 +n12407715 +n12407890 +n12408077 +n12408280 +n12408466 +n12408717 +n12408873 +n12409231 +n12409470 +n12409651 +n12409840 +n12411461 +n12412355 +n12412606 +n12412987 +n12413165 +n12413301 +n12413419 +n12413642 +n12413880 +n12414035 +n12414159 +n12414329 +n12414449 +n12414818 +n12414932 +n12415595 +n12416073 +n12416423 +n12416703 +n12417836 +n12418221 +n12418507 +n12419037 +n12419878 +n12420124 +n12420535 +n12420722 +n12421137 +n12421467 +n12421683 +n12421917 +n12422129 +n12422559 +n12425281 +n12426623 +n12426749 +n12427184 +n12427391 +n12427566 +n12427757 +n12427946 +n12428076 +n12428242 +n12428412 +n12428747 +n12429352 +n12430198 +n12430471 +n12430675 +n12431434 +n12432069 +n12432356 +n12432574 +n12432707 +n12433081 +n12433178 +n12433769 +n12433952 +n12434106 +n12434483 +n12434634 +n12434775 +n12434985 +n12435152 +n12435486 +n12435649 +n12435777 +n12435965 +n12436090 +n12436907 +n12437513 +n12437769 +n12437930 +n12439154 +n12439830 +n12441183 +n12441390 +n12441552 +n12441958 +n12442548 +n12443323 +n12443736 +n12444095 +n12444898 +n12446200 +n12446519 +n12446737 +n12446908 +n12447121 +n12447346 +n12447581 +n12447891 +n12448136 +n12448361 +n12448700 +n12449296 +n12449526 +n12449784 +n12449934 +n12450344 +n12450607 +n12450840 +n12451070 +n12451240 +n12451399 +n12451566 +n12451915 +n12452256 +n12452480 +n12452673 +n12452836 +n12453018 +n12453186 +n12453714 +n12453857 +n12454159 +n12454436 +n12454556 +n12454705 +n12454793 +n12454949 +n12455950 +n12457091 +n12458550 +n12458713 +n12458874 +n12459629 +n12460146 +n12460697 +n12460957 +n12461109 +n12461466 +n12461673 +n12462032 +n12462221 +n12462582 +n12462805 +n12463134 +n12463743 +n12463975 +n12464128 +n12464476 +n12464649 +n12465557 +n12466727 +n12467018 +n12467197 +n12467433 +n12467592 +n12468545 +n12468719 +n12469517 +n12470092 +n12470512 +n12470907 +n12472024 +n12473608 +n12473840 +n12474167 +n12474418 +n12475035 +n12475242 +n12475774 +n12476510 +n12477163 +n12477401 +n12477583 +n12477747 +n12477983 +n12478768 +n12479537 +n12480456 +n12480895 +n12481150 +n12481289 +n12481458 +n12482437 +n12482668 +n12482893 +n12483282 +n12483427 +n12483625 +n12483841 +n12484244 +n12484784 +n12485653 +n12485981 +n12486574 +n12487058 +n12488454 +n12488709 +n12489046 +n12489676 +n12489815 +n12490490 +n12491017 +n12491435 +n12491826 +n12492106 +n12492460 +n12492682 +n12492900 +n12493208 +n12493426 +n12493868 +n12494794 +n12495146 +n12495670 +n12495895 +n12496427 +n12496949 +n12497669 +n12498055 +n12498457 +n12499163 +n12499757 +n12499979 +n12500309 +n12500518 +n12500751 +n12501202 +n12504570 +n12504783 +n12505253 +n12506181 +n12506341 +n12506991 +n12507379 +n12507823 +n12508309 +n12508618 +n12508762 +n12509109 +n12509476 +n12509665 +n12509821 +n12509993 +n12510343 +n12510774 +n12511488 +n12511856 +n12512095 +n12512294 +n12512674 +n12513172 +n12513613 +n12513933 +n12514138 +n12514592 +n12514992 +n12515393 +n12515711 +n12515925 +n12516165 +n12516584 +n12516828 +n12517077 +n12517445 +n12517642 +n12518013 +n12518481 +n12519089 +n12519563 +n12520406 
+n12521186 +n12521394 +n12522188 +n12522678 +n12522894 +n12523141 +n12523475 +n12523850 +n12524188 +n12525168 +n12525513 +n12525753 +n12526178 +n12526516 +n12526754 +n12527081 +n12527738 +n12528109 +n12528382 +n12528549 +n12528768 +n12528974 +n12529220 +n12529500 +n12529905 +n12530629 +n12530818 +n12531328 +n12531727 +n12532564 +n12532886 +n12533190 +n12533437 +n12534208 +n12534625 +n12534862 +n12536291 +n12537253 +n12537569 +n12538209 +n12539074 +n12539306 +n12539832 +n12540250 +n12540647 +n12540966 +n12541157 +n12541403 +n12542043 +n12542240 +n12543186 +n12543455 +n12543639 +n12543826 +n12544240 +n12544539 +n12545232 +n12545635 +n12545865 +n12546183 +n12546420 +n12546617 +n12546962 +n12547215 +n12547503 +n12548280 +n12548564 +n12548804 +n12549005 +n12549192 +n12549420 +n12549799 +n12550210 +n12550408 +n12551173 +n12551457 +n12552309 +n12552893 +n12553742 +n12554029 +n12554526 +n12554729 +n12554911 +n12555255 +n12555859 +n12556656 +n12557064 +n12557438 +n12557556 +n12557681 +n12558230 +n12558425 +n12558680 +n12559044 +n12559518 +n12560282 +n12560621 +n12560775 +n12561169 +n12561309 +n12561594 +n12562141 +n12562577 +n12562785 +n12563045 +n12563702 +n12564083 +n12564613 +n12565102 +n12565912 +n12566331 +n12566954 +n12567950 +n12568186 +n12568649 +n12569037 +n12569616 +n12569851 +n12570394 +n12570703 +n12570972 +n12571781 +n12572546 +n12572759 +n12572858 +n12573256 +n12573474 +n12573647 +n12573911 +n12574320 +n12574470 +n12574866 +n12575322 +n12575812 +n12576323 +n12576451 +n12576695 +n12577362 +n12577895 +n12578255 +n12578626 +n12578916 +n12579038 +n12579404 +n12579822 +n12580012 +n12580654 +n12580786 +n12580896 +n12581110 +n12582231 +n12582665 +n12582846 +n12583126 +n12583401 +n12583681 +n12583855 +n12584191 +n12584365 +n12584715 +n12585137 +n12585373 +n12585629 +n12586298 +n12586499 +n12586725 +n12586989 +n12587132 +n12587487 +n12587803 +n12588320 +n12588780 +n12589142 +n12589458 +n12589687 +n12589841 +n12590232 +n12590499 +n12590600 +n12590715 +n12591017 +n12591351 +n12591702 +n12592058 +n12592544 +n12592839 +n12593122 +n12593341 +n12593994 +n12594324 +n12594989 +n12595699 +n12595964 +n12596148 +n12596345 +n12596709 +n12596849 +n12597134 +n12597466 +n12597798 +n12598027 +n12599185 +n12599435 +n12599661 +n12599874 +n12600095 +n12600267 +n12601494 +n12601805 +n12602262 +n12602434 +n12602612 +n12602980 +n12603273 +n12603449 +n12603672 +n12604228 +n12604460 +n12604639 +n12604845 +n12605683 +n12606438 +n12606545 +n12607456 +n12609379 +n12610328 +n12610740 +n12611640 +n12612170 +n12612811 +n12613706 +n12614096 +n12614477 +n12614625 +n12615232 +n12615710 +n12616248 +n12616630 +n12616996 +n12617559 +n12618146 +n12618727 +n12620196 +n12620546 +n12620969 +n12621410 +n12621619 +n12621945 +n12622297 +n12622875 +n12623077 +n12623211 +n12623818 +n12624381 +n12624568 +n12625003 +n12625383 +n12625670 +n12625823 +n12626674 +n12626878 +n12627119 +n12627347 +n12627526 +n12628356 +n12628705 +n12628986 +n12629305 +n12629666 +n12630763 +n12630999 +n12631331 +n12631637 +n12631932 +n12632335 +n12632733 +n12633061 +n12633638 +n12633994 +n12634211 +n12634429 +n12634734 +n12634986 +n12635151 +n12635359 +n12635532 +n12635744 +n12635955 +n12636224 +n12636885 +n12637123 +n12637485 +n12638218 +n12638556 +n12638753 +n12638964 +n12639168 +n12639376 +n12639584 +n12639736 +n12639910 +n12640081 +n12640284 +n12640435 +n12640607 +n12640839 +n12641007 +n12641180 +n12641413 +n12641931 +n12642090 +n12642200 +n12642435 +n12642600 +n12642964 +n12643113 +n12643313 +n12643473 +n12643688 +n12643877 +n12644283 +n12644902 +n12645174 
+n12645530 +n12646072 +n12646197 +n12646397 +n12646605 +n12646740 +n12646950 +n12647231 +n12647376 +n12647560 +n12647787 +n12647893 +n12648045 +n12648196 +n12648424 +n12648693 +n12648888 +n12649065 +n12649317 +n12649539 +n12649866 +n12650038 +n12650229 +n12650379 +n12650556 +n12650805 +n12650915 +n12651229 +n12651611 +n12651821 +n12653218 +n12653436 +n12653633 +n12654227 +n12654857 +n12655062 +n12655245 +n12655351 +n12655498 +n12655605 +n12655726 +n12655869 +n12656369 +n12656528 +n12656685 +n12656909 +n12657082 +n12657755 +n12658118 +n12658308 +n12658481 +n12658603 +n12658715 +n12658846 +n12659064 +n12659356 +n12659539 +n12660601 +n12661045 +n12661227 +n12661538 +n12662074 +n12662379 +n12662772 +n12663023 +n12663254 +n12663359 +n12663804 +n12664005 +n12664187 +n12664469 +n12664710 +n12665048 +n12665271 +n12665659 +n12665857 +n12666050 +n12666159 +n12666369 +n12666965 +n12667406 +n12667582 +n12667964 +n12668131 +n12669803 +n12670334 +n12670758 +n12670962 +n12671651 +n12672289 +n12673588 +n12674120 +n12674685 +n12674895 +n12675299 +n12675515 +n12675876 +n12676134 +n12676370 +n12676534 +n12676703 +n12677120 +n12677331 +n12677612 +n12677841 +n12678794 +n12679023 +n12679432 +n12679593 +n12679876 +n12680402 +n12680652 +n12680864 +n12681376 +n12681579 +n12681893 +n12682411 +n12682668 +n12682882 +n12683096 +n12683407 +n12683571 +n12683791 +n12684379 +n12685431 +n12685831 +n12686077 +n12686274 +n12686496 +n12686676 +n12686877 +n12687044 +n12687462 +n12687698 +n12687957 +n12688187 +n12688372 +n12688716 +n12689305 +n12690653 +n12691428 +n12691661 +n12692024 +n12692160 +n12692521 +n12692714 +n12693244 +n12693352 +n12693865 +n12694486 +n12695144 +n12695975 +n12696492 +n12696830 +n12697152 +n12697514 +n12698027 +n12698435 +n12698598 +n12698774 +n12699031 +n12699301 +n12699922 +n12700088 +n12700357 +n12702124 +n12703190 +n12703383 +n12703557 +n12703716 +n12703856 +n12704041 +n12704343 +n12704513 +n12705013 +n12705220 +n12705458 +n12705698 +n12705978 +n12706410 +n12707199 +n12707781 +n12708293 +n12708654 +n12708941 +n12709103 +n12709349 +n12709688 +n12709901 +n12710295 +n12710415 +n12710577 +n12710693 +n12710917 +n12711182 +n12711398 +n12711596 +n12711817 +n12711984 +n12712320 +n12712626 +n12713063 +n12713358 +n12713521 +n12713866 +n12714254 +n12714755 +n12714949 +n12715195 +n12715914 +n12716400 +n12716594 +n12717072 +n12717224 +n12717644 +n12718074 +n12718483 +n12718995 +n12719684 +n12719944 +n12720200 +n12720354 +n12721122 +n12721477 +n12722071 +n12723062 +n12723610 +n12724942 +n12725521 +n12725738 +n12725940 +n12726159 +n12726357 +n12726528 +n12726670 +n12726902 +n12727101 +n12727301 +n12727518 +n12727729 +n12727960 +n12728164 +n12728322 +n12728508 +n12728656 +n12728864 +n12729023 +n12729164 +n12729315 +n12729521 +n12729729 +n12729950 +n12730143 +n12730370 +n12730544 +n12730776 +n12731029 +n12731401 +n12731835 +n12732009 +n12732252 +n12732491 +n12732605 +n12732756 +n12732966 +n12733218 +n12733428 +n12733647 +n12733870 +n12734070 +n12734215 +n12735160 +n12736603 +n12736999 +n12737383 +n12737898 +n12738259 +n12739332 +n12739966 +n12740967 +n12741222 +n12741586 +n12741792 +n12742290 +n12742741 +n12742878 +n12743009 +n12743352 +n12743823 +n12743976 +n12744142 +n12744387 +n12744850 +n12745386 +n12745564 +n12746884 +n12747120 +n12748248 +n12749049 +n12749456 +n12749679 +n12749852 +n12750076 +n12750767 +n12751172 +n12751675 +n12752205 +n12753007 +n12753245 +n12753573 +n12753762 +n12754003 +n12754174 +n12754311 +n12754468 +n12754648 +n12754781 +n12754981 +n12755225 +n12755387 +n12755559 +n12755727 +n12755876 
+n12756457 +n12757115 +n12757303 +n12757458 +n12757668 +n12757816 +n12757930 +n12758014 +n12758099 +n12758176 +n12758250 +n12758325 +n12758399 +n12758471 +n12758555 +n12759273 +n12759668 +n12760539 +n12760875 +n12761284 +n12761702 +n12761905 +n12762049 +n12762405 +n12762896 +n12763529 +n12764008 +n12764202 +n12764507 +n12764978 +n12765115 +n12765402 +n12765846 +n12766043 +n12766595 +n12766869 +n12767208 +n12767423 +n12767648 +n12768369 +n12768682 +n12768809 +n12768933 +n12769065 +n12769219 +n12769318 +n12770529 +n12770892 +n12771085 +n12771192 +n12771390 +n12771597 +n12771890 +n12772753 +n12772908 +n12773142 +n12773651 +n12773917 +n12774299 +n12774641 +n12775070 +n12775393 +n12775717 +n12775919 +n12776558 +n12776774 +n12777436 +n12777680 +n12777778 +n12777892 +n12778398 +n12778605 +n12779603 +n12779851 +n12780325 +n12780563 +n12781940 +n12782530 +n12782915 +n12783316 +n12783730 +n12784371 +n12784889 +n12785724 +n12785889 +n12786273 +n12786464 +n12786836 +n12787364 +n12788854 +n12789054 +n12789554 +n12789977 +n12790430 +n12791064 +n12791329 +n12793015 +n12793284 +n12793494 +n12793695 +n12793886 +n12794135 +n12794367 +n12794568 +n12794985 +n12795209 +n12795352 +n12795555 +n12796022 +n12796385 +n12796849 +n12797368 +n12797860 +n12798284 +n12798910 +n12799269 +n12799776 +n12800049 +n12800586 +n12801072 +n12801520 +n12801781 +n12801966 +n12803226 +n12803754 +n12803958 +n12804352 +n12805146 +n12805561 +n12805762 +n12806015 +n12806732 +n12807251 +n12807409 +n12807624 +n12807773 +n12808007 +n12809868 +n12810007 +n12810151 +n12810595 +n12811027 +n12811713 +n12812235 +n12812478 +n12812801 +n12813189 +n12814643 +n12814857 +n12814960 +n12815198 +n12815668 +n12815838 +n12816508 +n12816942 +n12817464 +n12817694 +n12817855 +n12818004 +n12818346 +n12818601 +n12818966 +n12819141 +n12819354 +n12819728 +n12820113 +n12820669 +n12820853 +n12821505 +n12821895 +n12822115 +n12822466 +n12822769 +n12822955 +n12823717 +n12823859 +n12824053 +n12824289 +n12824735 +n12825497 +n12826143 +n12827270 +n12827537 +n12827907 +n12828220 +n12828379 +n12828520 +n12828791 +n12828977 +n12829582 +n12829975 +n12830222 +n12830568 +n12831141 +n12831535 +n12831932 +n12832315 +n12832538 +n12832822 +n12833149 +n12833985 +n12834190 +n12834798 +n12834938 +n12835331 +n12835766 +n12836212 +n12836337 +n12836508 +n12836862 +n12837052 +n12837259 +n12837466 +n12837803 +n12839574 +n12839979 +n12840168 +n12840362 +n12840502 +n12840749 +n12841007 +n12841193 +n12841354 +n12842302 +n12842519 +n12842642 +n12842887 +n12843144 +n12843316 +n12843557 +n12843970 +n12844409 +n12844939 +n12845187 +n12845413 +n12845908 +n12846335 +n12846690 +n12847008 +n12847374 +n12847927 +n12848499 +n12849061 +n12849279 +n12849416 +n12849952 +n12850168 +n12850336 +n12850906 +n12851094 +n12851469 +n12851860 +n12852234 +n12852428 +n12852570 +n12853080 +n12853287 +n12853482 +n12854048 +n12854193 +n12854600 +n12855365 +n12855494 +n12855710 +n12855886 +n12856091 +n12856287 +n12856479 +n12856680 +n12857204 +n12857779 +n12858150 +n12858397 +n12858618 +n12858871 +n12858987 +n12859153 +n12859272 +n12859679 +n12859986 +n12860365 +n12860978 +n12861345 +n12861541 +n12861892 +n12862512 +n12862828 +n12863234 +n12863624 +n12864160 +n12865037 +n12865562 +n12865708 +n12865824 +n12866002 +n12866162 +n12866333 +n12866459 +n12866635 +n12866968 +n12867184 +n12867449 +n12867826 +n12868019 +n12868880 +n12869061 +n12869478 +n12869668 +n12870048 +n12870225 +n12870535 +n12870682 +n12870891 +n12871272 +n12871696 +n12871859 +n12872458 +n12872914 +n12873341 +n12873984 +n12875269 +n12875697 +n12875861 
+n12876899 +n12877244 +n12877493 +n12877637 +n12877838 +n12878169 +n12878325 +n12878784 +n12879068 +n12879527 +n12879963 +n12880244 +n12880462 +n12880638 +n12880799 +n12881105 +n12881913 +n12882158 +n12882779 +n12882945 +n12883265 +n12883628 +n12884100 +n12884260 +n12885045 +n12885265 +n12885510 +n12885754 +n12886185 +n12886402 +n12886600 +n12886831 +n12887293 +n12887532 +n12887713 +n12888016 +n12888234 +n12888457 +n12889219 +n12889412 +n12889579 +n12889713 +n12890265 +n12890490 +n12890685 +n12890928 +n12891093 +n12891305 +n12891469 +n12891643 +n12891824 +n12892013 +n12893463 +n12893993 +n12895298 +n12895811 +n12896615 +n12897118 +n12897788 +n12897999 +n12898342 +n12898774 +n12899166 +n12899537 +n12899752 +n12899971 +n12900783 +n12901724 +n12902466 +n12902662 +n12903014 +n12903367 +n12903503 +n12903964 +n12904314 +n12904562 +n12904938 +n12905135 +n12905412 +n12906214 +n12906498 +n12906771 +n12907057 +n12907671 +n12907857 +n12908093 +n12908645 +n12908854 +n12909421 +n12909614 +n12909759 +n12909917 +n12911079 +n12911264 +n12911440 +n12911673 +n12911914 +n12912274 +n12912670 +n12912801 +n12913144 +n12913524 +n12913791 +n12914923 +n12915140 +n12915568 +n12915811 +n12916179 +n12916511 +n12917901 +n12918609 +n12918810 +n12918991 +n12919195 +n12919403 +n12919646 +n12919847 +n12920043 +n12920204 +n12920521 +n12920719 +n12920955 +n12921315 +n12921499 +n12921660 +n12921868 +n12922119 +n12922458 +n12922763 +n12923108 +n12923257 +n12924623 +n12925179 +n12925583 +n12926039 +n12926480 +n12926689 +n12927013 +n12927194 +n12927494 +n12927758 +n12928071 +n12928307 +n12928491 +n12928819 +n12929403 +n12929600 +n12930778 +n12930951 +n12931231 +n12931542 +n12931906 +n12932173 +n12932365 +n12932706 +n12932966 +n12933274 +n12934036 +n12934174 +n12934479 +n12934685 +n12934985 +n12935166 +n12935609 +n12936155 +n12936826 +n12937130 +n12938081 +n12938193 +n12938445 +n12938667 +n12939104 +n12939282 +n12939479 +n12939874 +n12940226 +n12940609 +n12941220 +n12941536 +n12941717 +n12942025 +n12942395 +n12942572 +n12942729 +n12943049 +n12943443 +n12943912 +n12944095 +n12945177 +n12945366 +n12945549 +n12946849 +n12947313 +n12947544 +n12947756 +n12947895 +n12948053 +n12948251 +n12948495 +n12949160 +n12949361 +n12950126 +n12950314 +n12950796 +n12951146 +n12951835 +n12952165 +n12952469 +n12952590 +n12952717 +n12953206 +n12953484 +n12953712 +n12954353 +n12954799 +n12955414 +n12955840 +n12956170 +n12956367 +n12956588 +n12956922 +n12957608 +n12957803 +n12957924 +n12958261 +n12958615 +n12959074 +n12959538 +n12960378 +n12960552 +n12960863 +n12961242 +n12961393 +n12961536 +n12961879 +n12963628 +n12964920 +n12965626 +n12965951 +n12966804 +n12966945 +n12968136 +n12968309 +n12969131 +n12969425 +n12969670 +n12969927 +n12970193 +n12970293 +n12970733 +n12971400 +n12971804 +n12972136 +n12973443 +n12973791 +n12973937 +n12974987 +n12975804 +n12976198 +n12976554 +n12978076 +n12979316 +n12979829 +n12980080 +n12980840 +n12981086 +n12981301 +n12981443 +n12981954 +n12982468 +n12982590 +n12982915 +n12983048 +n12983654 +n12983873 +n12983961 +n12984267 +n12984489 +n12984595 +n12985420 +n12985773 +n12985857 +n12986227 +n12987056 +n12987423 +n12987535 +n12988158 +n12988341 +n12988572 +n12989007 +n12989938 +n12990597 +n12991184 +n12991837 +n12992177 +n12992868 +n12994892 +n12995601 +n12997654 +n12997919 +n12998815 +n13000891 +n13001041 +n13001206 +n13001366 +n13001529 +n13001930 +n13002209 +n13002750 +n13002925 +n13003061 +n13003254 +n13003522 +n13003712 +n13004423 +n13004640 +n13004826 +n13004992 +n13005329 +n13005984 +n13006171 +n13006631 +n13006894 
+n13007034 +n13007417 +n13007629 +n13008157 +n13008315 +n13008485 +n13008689 +n13008839 +n13009085 +n13009244 +n13009429 +n13009656 +n13010694 +n13010951 +n13011221 +n13011595 +n13012253 +n13012469 +n13012973 +n13013534 +n13013764 +n13013965 +n13014097 +n13014265 +n13014409 +n13014581 +n13014741 +n13014879 +n13015509 +n13015688 +n13016076 +n13016289 +n13017102 +n13017240 +n13017439 +n13017610 +n13017789 +n13017979 +n13018088 +n13018232 +n13018407 +n13018906 +n13019496 +n13019643 +n13019835 +n13020191 +n13020481 +n13020964 +n13021166 +n13021332 +n13021543 +n13021689 +n13021867 +n13022210 +n13022709 +n13022903 +n13023134 +n13024012 +n13024500 +n13024653 +n13025647 +n13025854 +n13026015 +n13027557 +n13027879 +n13028611 +n13028937 +n13029122 +n13029326 +n13029610 +n13029760 +n13030337 +n13030616 +n13030852 +n13031193 +n13031323 +n13031474 +n13032115 +n13032381 +n13032618 +n13032923 +n13033134 +n13033396 +n13033577 +n13033879 +n13034062 +n13034555 +n13034788 +n13035241 +n13035389 +n13035707 +n13035925 +n13036116 +n13036312 +n13036804 +n13037406 +n13037585 +n13037805 +n13038068 +n13038376 +n13038577 +n13038744 +n13039349 +n13040303 +n13040629 +n13040796 +n13041312 +n13041943 +n13042134 +n13042316 +n13042982 +n13043926 +n13044375 +n13044778 +n13045210 +n13045594 +n13045975 +n13046130 +n13046669 +n13047862 +n13048447 +n13049953 +n13050397 +n13050705 +n13050940 +n13051346 +n13052014 +n13052248 +n13052670 +n13052931 +n13053608 +n13054073 +n13054560 +n13055423 +n13055577 +n13055792 +n13055949 +n13056135 +n13056349 +n13056607 +n13056799 +n13057054 +n13057242 +n13057422 +n13057639 +n13058037 +n13058272 +n13058608 +n13059298 +n13059657 +n13060017 +n13060190 +n13061172 +n13061348 +n13061471 +n13061704 +n13062421 +n13063269 +n13063514 +n13064111 +n13064457 +n13065089 +n13065514 +n13066129 +n13066448 +n13066979 +n13067191 +n13067330 +n13067532 +n13067672 +n13068255 +n13068434 +n13068735 +n13068917 +n13069224 +n13069773 +n13070308 +n13070875 +n13071371 +n13071553 +n13071815 +n13072031 +n13072209 +n13072350 +n13072528 +n13072706 +n13072863 +n13073055 +n13073703 +n13074619 +n13074814 +n13075020 +n13075272 +n13075441 +n13075684 +n13075847 +n13076041 +n13076405 +n13076643 +n13076831 +n13077033 +n13077295 +n13078021 +n13079073 +n13079419 +n13079567 +n13080306 +n13080866 +n13081229 +n13081999 +n13082568 +n13083023 +n13083461 +n13084184 +n13084834 +n13085113 +n13085747 +n13090018 +n13090871 +n13091620 +n13091774 +n13091982 +n13092078 +n13092240 +n13092385 +n13092987 +n13093275 +n13093629 +n13094145 +n13094273 +n13095013 +n13096779 +n13098515 +n13098962 +n13099833 +n13099999 +n13100156 +n13100677 +n13102648 +n13102775 +n13103023 +n13103660 +n13103750 +n13103877 +n13104059 +n13107694 +n13107807 +n13107891 +n13108131 +n13108323 +n13108481 +n13108545 +n13108662 +n13108841 +n13109733 +n13110915 +n13111174 +n13111340 +n13111504 +n13111881 +n13112035 +n13112201 +n13118330 +n13118707 +n13119870 +n13120211 +n13120958 +n13121104 +n13121349 +n13122364 +n13123309 +n13123431 +n13123841 +n13124358 +n13124654 +n13125117 +n13126050 +n13126856 +n13127001 +n13127303 +n13127666 +n13127843 +n13128278 +n13128582 +n13128976 +n13129078 +n13130014 +n13130161 +n13130726 +n13131028 +n13131618 +n13132034 +n13132156 +n13132338 +n13132486 +n13132656 +n13132756 +n13132940 +n13133140 +n13133233 +n13133316 +n13133613 +n13133932 +n13134302 +n13134531 +n13134844 +n13134947 +n13135692 +n13135832 +n13136316 +n13136556 +n13136781 +n13137010 +n13137225 +n13137409 +n13137672 +n13137951 +n13138155 +n13138308 +n13138658 +n13138842 +n13139055 +n13139321 
+n13139482 +n13139647 +n13139837 +n13140049 +n13140367 +n13141141 +n13141415 +n13141564 +n13141797 +n13141972 +n13142182 +n13142504 +n13142907 +n13143285 +n13143758 +n13144084 +n13145040 +n13145250 +n13145444 +n13146403 +n13146583 +n13146928 +n13147153 +n13147270 +n13147386 +n13147532 +n13147689 +n13147918 +n13148208 +n13148384 +n13149296 +n13149970 +n13150378 +n13150592 +n13150894 +n13151082 +n13152339 +n13154388 +n13154494 +n13154841 +n13155095 +n13155305 +n13155611 +n13156986 +n13157137 +n13157346 +n13157481 +n13157684 +n13157971 +n13158167 +n13158512 +n13158605 +n13158714 +n13158815 +n13159357 +n13159691 +n13159890 +n13160116 +n13160254 +n13160365 +n13160604 +n13160831 +n13160938 +n13161151 +n13161254 +n13161904 +n13163553 +n13163649 +n13163991 +n13164501 +n13170840 +n13171210 +n13171797 +n13172923 +n13173132 +n13173259 +n13173488 +n13173697 +n13173882 +n13174354 +n13174670 +n13174823 +n13175682 +n13176363 +n13176714 +n13177048 +n13177529 +n13177768 +n13177884 +n13178284 +n13178707 +n13179056 +n13179804 +n13180534 +n13180875 +n13181055 +n13181244 +n13181406 +n13181811 +n13182164 +n13182338 +n13182799 +n13182937 +n13183056 +n13183489 +n13184394 +n13185269 +n13185658 +n13186388 +n13186546 +n13187367 +n13188096 +n13188268 +n13188462 +n13188767 +n13190060 +n13190747 +n13191148 +n13191620 +n13191884 +n13192625 +n13193143 +n13193269 +n13193466 +n13193642 +n13193856 +n13194036 +n13194212 +n13194572 +n13194758 +n13194918 +n13195341 +n13195761 +n13196003 +n13196234 +n13196369 +n13196738 +n13197274 +n13197507 +n13198054 +n13198482 +n13198914 +n13199717 +n13199970 +n13200193 +n13200542 +n13200651 +n13200986 +n13201423 +n13201566 +n13201969 +n13202125 +n13202355 +n13202602 +n13205058 +n13205249 +n13206178 +n13206817 +n13207094 +n13207335 +n13207572 +n13207736 +n13207923 +n13208302 +n13208705 +n13208965 +n13209129 +n13209270 +n13209460 +n13209808 +n13210350 +n13210597 +n13211020 +n13211790 +n13212025 +n13212175 +n13212379 +n13212559 +n13213066 +n13213397 +n13213577 +n13214217 +n13214340 +n13214485 +n13215258 +n13215586 +n13217005 +n13219422 +n13219833 +n13219976 +n13220122 +n13220355 +n13220525 +n13220663 +n13221529 +n13222877 +n13222985 +n13223090 +n13223588 +n13223710 +n13223843 +n13224673 +n13224922 +n13225244 +n13225365 +n13225617 +n13226320 +n13226871 +n13228017 +n13228536 +n13229543 +n13229951 +n13230190 +n13230662 +n13230843 +n13231078 +n13231678 +n13231919 +n13232106 +n13232363 +n13232779 +n13233727 +n13234114 +n13234519 +n13234678 +n13234857 +n13235011 +n13235159 +n13235319 +n13235503 +n13235766 +n13236100 +n13237188 +n13237508 +n13238375 +n13238654 +n13238988 +n13239177 +n13239736 +n13239921 +n13240362 +n13252672 +n13354021 +n13555775 +n13579829 +n13650447 +n13653902 +n13862407 +n13862552 +n13862780 +n13863020 +n13863186 +n13863473 +n13863771 +n13864035 +n13864153 +n13864965 +n13865298 +n13865483 +n13865904 +n13866144 +n13866626 +n13866827 +n13867005 +n13867492 +n13868248 +n13868371 +n13868515 +n13868944 +n13869045 +n13869547 +n13869788 +n13869896 +n13871717 +n13872592 +n13872822 +n13873361 +n13873502 +n13873917 +n13874073 +n13874558 +n13875392 +n13875571 +n13875884 +n13876561 +n13877547 +n13877667 +n13878306 +n13879049 +n13879320 +n13879816 +n13880199 +n13880415 +n13880551 +n13880704 +n13880994 +n13881512 +n13881644 +n13882201 +n13882276 +n13882487 +n13882563 +n13882639 +n13882713 +n13882961 +n13883603 +n13883763 +n13884261 +n13884384 +n13884930 +n13885011 +n13886260 +n13888491 +n13889066 +n13889331 +n13891547 +n13891937 +n13893786 +n13894154 +n13894434 +n13895262 +n13896100 +n13896217 
+n13897198 +n13897528 +n13897996 +n13898207 +n13898315 +n13898645 +n13899735 +n13900287 +n13900422 +n13901211 +n13901321 +n13901423 +n13901490 +n13901858 +n13902048 +n13902336 +n13902793 +n13903079 +n13905121 +n13905275 +n13905792 +n13906484 +n13906669 +n13906767 +n13906936 +n13907272 +n13908201 +n13908580 +n13911045 +n13912260 +n13912540 +n13914141 +n13914265 +n13914608 +n13915023 +n13915113 +n13915209 +n13915305 +n13915999 +n13916363 +n13916721 +n13917690 +n13917785 +n13918274 +n13918387 +n13918717 +n13919547 +n13919919 +n13926786 +n14131950 +n14175579 +n14564779 +n14582716 +n14583400 +n14585392 +n14592309 +n14603798 +n14633206 +n14685296 +n14696793 +n14698884 +n14714645 +n14720833 +n14765422 +n14785065 +n14786943 +n14804958 +n14810561 +n14820180 +n14821852 +n14844693 +n14853210 +n14858292 +n14867545 +n14891255 +n14899328 +n14900184 +n14900342 +n14908027 +n14909584 +n14914945 +n14915184 +n14919819 +n14938389 +n14941787 +n14942411 +n14973585 +n14974264 +n14975598 +n14976759 +n14976871 +n14977188 +n14977504 +n14992287 +n14993378 +n15005577 +n15006012 +n15019030 +n15048888 +n15060326 +n15060688 +n15062057 +n15067877 +n15075141 +n15086247 +n15089258 +n15089472 +n15089645 +n15089803 +n15090065 +n15090238 +n15090742 +n15091129 +n15091304 +n15091473 +n15091669 +n15091846 +n15092059 +n15092227 +n15092409 +n15092650 +n15092751 +n15092942 +n15093049 +n15093137 +n15093298 +n15102359 +n15102455 +n15102894 diff --git a/PyTorch/contrib/cv/classification/convmixer/results/imagenet_a_indices.txt b/PyTorch/contrib/cv/classification/convmixer/results/imagenet_a_indices.txt new file mode 100644 index 0000000000..8e373bc707 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/imagenet_a_indices.txt @@ -0,0 +1,200 @@ +6 +11 +13 +15 +17 +22 +23 +27 +30 +37 +39 +42 +47 +50 +57 +70 +71 +76 +79 +89 +90 +94 +96 +97 +99 +105 +107 +108 +110 +113 +124 +125 +130 +132 +143 +144 +150 +151 +207 +234 +235 +254 +277 +283 +287 +291 +295 +298 +301 +306 +307 +308 +309 +310 +311 +313 +314 +315 +317 +319 +323 +324 +326 +327 +330 +334 +335 +336 +347 +361 +363 +372 +378 +386 +397 +400 +401 +402 +404 +407 +411 +416 +417 +420 +425 +428 +430 +437 +438 +445 +456 +457 +461 +462 +470 +472 +483 +486 +488 +492 +496 +514 +516 +528 +530 +539 +542 +543 +549 +552 +557 +561 +562 +569 +572 +573 +575 +579 +589 +606 +607 +609 +614 +626 +627 +640 +641 +642 +643 +658 +668 +677 +682 +684 +687 +701 +704 +719 +736 +746 +749 +752 +758 +763 +765 +768 +773 +774 +776 +779 +780 +786 +792 +797 +802 +803 +804 +813 +815 +820 +823 +831 +833 +835 +839 +845 +847 +850 +859 +862 +870 +879 +880 +888 +890 +897 +900 +907 +913 +924 +932 +933 +934 +937 +943 +945 +947 +951 +954 +956 +957 +959 +971 +972 +980 +981 +984 +986 +987 +988 diff --git a/PyTorch/contrib/cv/classification/convmixer/results/imagenet_a_synsets.txt b/PyTorch/contrib/cv/classification/convmixer/results/imagenet_a_synsets.txt new file mode 100644 index 0000000000..6eeaaf66f3 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/imagenet_a_synsets.txt @@ -0,0 +1,200 @@ +n01498041 +n01531178 +n01534433 +n01558993 +n01580077 +n01614925 +n01616318 +n01631663 +n01641577 +n01669191 +n01677366 +n01687978 +n01694178 +n01698640 +n01735189 +n01770081 +n01770393 +n01774750 +n01784675 +n01819313 +n01820546 +n01833805 +n01843383 +n01847000 +n01855672 +n01882714 +n01910747 +n01914609 +n01924916 +n01944390 +n01985128 +n01986214 +n02007558 +n02009912 +n02037110 +n02051845 +n02077923 +n02085620 +n02099601 +n02106550 +n02106662 +n02110958 +n02119022 +n02123394 +n02127052 +n02129165 
+n02133161 +n02137549 +n02165456 +n02174001 +n02177972 +n02190166 +n02206856 +n02219486 +n02226429 +n02231487 +n02233338 +n02236044 +n02259212 +n02268443 +n02279972 +n02280649 +n02281787 +n02317335 +n02325366 +n02346627 +n02356798 +n02361337 +n02410509 +n02445715 +n02454379 +n02486410 +n02492035 +n02504458 +n02655020 +n02669723 +n02672831 +n02676566 +n02690373 +n02701002 +n02730930 +n02777292 +n02782093 +n02787622 +n02793495 +n02797295 +n02802426 +n02814860 +n02815834 +n02837789 +n02879718 +n02883205 +n02895154 +n02906734 +n02948072 +n02951358 +n02980441 +n02992211 +n02999410 +n03014705 +n03026506 +n03124043 +n03125729 +n03187595 +n03196217 +n03223299 +n03250847 +n03255030 +n03291819 +n03325584 +n03355925 +n03384352 +n03388043 +n03417042 +n03443371 +n03444034 +n03445924 +n03452741 +n03483316 +n03584829 +n03590841 +n03594945 +n03617480 +n03666591 +n03670208 +n03717622 +n03720891 +n03721384 +n03724870 +n03775071 +n03788195 +n03804744 +n03837869 +n03840681 +n03854065 +n03888257 +n03891332 +n03935335 +n03982430 +n04019541 +n04033901 +n04039381 +n04067472 +n04086273 +n04099969 +n04118538 +n04131690 +n04133789 +n04141076 +n04146614 +n04147183 +n04179913 +n04208210 +n04235860 +n04252077 +n04252225 +n04254120 +n04270147 +n04275548 +n04310018 +n04317175 +n04344873 +n04347754 +n04355338 +n04366367 +n04376876 +n04389033 +n04399382 +n04442312 +n04456115 +n04482393 +n04507155 +n04509417 +n04532670 +n04540053 +n04554684 +n04562935 +n04591713 +n04606251 +n07583066 +n07695742 +n07697313 +n07697537 +n07714990 +n07718472 +n07720875 +n07734744 +n07749582 +n07753592 +n07760859 +n07768694 +n07831146 +n09229709 +n09246464 +n09472597 +n09835506 +n11879895 +n12057211 +n12144580 +n12267677 diff --git a/PyTorch/contrib/cv/classification/convmixer/results/imagenet_r_indices.txt b/PyTorch/contrib/cv/classification/convmixer/results/imagenet_r_indices.txt new file mode 100644 index 0000000000..e4ff6ffbbb --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/imagenet_r_indices.txt @@ -0,0 +1,200 @@ +1 +2 +4 +6 +8 +9 +11 +13 +22 +23 +26 +29 +31 +39 +47 +63 +71 +76 +79 +84 +90 +94 +96 +97 +99 +100 +105 +107 +113 +122 +125 +130 +132 +144 +145 +147 +148 +150 +151 +155 +160 +161 +162 +163 +171 +172 +178 +187 +195 +199 +203 +207 +208 +219 +231 +232 +234 +235 +242 +245 +247 +250 +251 +254 +259 +260 +263 +265 +267 +269 +276 +277 +281 +288 +289 +291 +292 +293 +296 +299 +301 +308 +309 +310 +311 +314 +315 +319 +323 +327 +330 +334 +335 +337 +338 +340 +341 +344 +347 +353 +355 +361 +362 +365 +366 +367 +368 +372 +388 +390 +393 +397 +401 +407 +413 +414 +425 +428 +430 +435 +437 +441 +447 +448 +457 +462 +463 +469 +470 +471 +472 +476 +483 +487 +515 +546 +555 +558 +570 +579 +583 +587 +593 +594 +596 +609 +613 +617 +621 +629 +637 +657 +658 +701 +717 +724 +763 +768 +774 +776 +779 +780 +787 +805 +812 +815 +820 +824 +833 +847 +852 +866 +875 +883 +889 +895 +907 +928 +931 +932 +933 +934 +936 +937 +943 +945 +947 +948 +949 +951 +953 +954 +957 +963 +965 +967 +980 +981 +983 +988 diff --git a/PyTorch/contrib/cv/classification/convmixer/results/imagenet_r_synsets.txt b/PyTorch/contrib/cv/classification/convmixer/results/imagenet_r_synsets.txt new file mode 100644 index 0000000000..33723baf48 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/imagenet_r_synsets.txt @@ -0,0 +1,200 @@ +n01443537 +n01484850 +n01494475 +n01498041 +n01514859 +n01518878 +n01531178 +n01534433 +n01614925 +n01616318 +n01630670 +n01632777 +n01644373 +n01677366 +n01694178 +n01748264 +n01770393 +n01774750 +n01784675 +n01806143 +n01820546 
+n01833805 +n01843383 +n01847000 +n01855672 +n01860187 +n01882714 +n01910747 +n01944390 +n01983481 +n01986214 +n02007558 +n02009912 +n02051845 +n02056570 +n02066245 +n02071294 +n02077923 +n02085620 +n02086240 +n02088094 +n02088238 +n02088364 +n02088466 +n02091032 +n02091134 +n02092339 +n02094433 +n02096585 +n02097298 +n02098286 +n02099601 +n02099712 +n02102318 +n02106030 +n02106166 +n02106550 +n02106662 +n02108089 +n02108915 +n02109525 +n02110185 +n02110341 +n02110958 +n02112018 +n02112137 +n02113023 +n02113624 +n02113799 +n02114367 +n02117135 +n02119022 +n02123045 +n02128385 +n02128757 +n02129165 +n02129604 +n02130308 +n02134084 +n02138441 +n02165456 +n02190166 +n02206856 +n02219486 +n02226429 +n02233338 +n02236044 +n02268443 +n02279972 +n02317335 +n02325366 +n02346627 +n02356798 +n02363005 +n02364673 +n02391049 +n02395406 +n02398521 +n02410509 +n02423022 +n02437616 +n02445715 +n02447366 +n02480495 +n02480855 +n02481823 +n02483362 +n02486410 +n02510455 +n02526121 +n02607072 +n02655020 +n02672831 +n02701002 +n02749479 +n02769748 +n02793495 +n02797295 +n02802426 +n02808440 +n02814860 +n02823750 +n02841315 +n02843684 +n02883205 +n02906734 +n02909870 +n02939185 +n02948072 +n02950826 +n02951358 +n02966193 +n02980441 +n02992529 +n03124170 +n03272010 +n03345487 +n03372029 +n03424325 +n03452741 +n03467068 +n03481172 +n03494278 +n03495258 +n03498962 +n03594945 +n03602883 +n03630383 +n03649909 +n03676483 +n03710193 +n03773504 +n03775071 +n03888257 +n03930630 +n03947888 +n04086273 +n04118538 +n04133789 +n04141076 +n04146614 +n04147183 +n04192698 +n04254680 +n04266014 +n04275548 +n04310018 +n04325704 +n04347754 +n04389033 +n04409515 +n04465501 +n04487394 +n04522168 +n04536866 +n04552348 +n04591713 +n07614500 +n07693725 +n07695742 +n07697313 +n07697537 +n07714571 +n07714990 +n07718472 +n07720875 +n07734744 +n07742313 +n07745940 +n07749582 +n07753275 +n07753592 +n07768694 +n07873807 +n07880968 +n07920052 +n09472597 +n09835506 +n10565667 +n12267677 diff --git a/PyTorch/contrib/cv/classification/convmixer/results/imagenet_real_labels.json b/PyTorch/contrib/cv/classification/convmixer/results/imagenet_real_labels.json new file mode 100644 index 0000000000..2818c1f2f6 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/imagenet_real_labels.json @@ -0,0 +1 @@ +[[], [970, 795], [230, 231], [809], [516, 850], [57], [334], [700], [674], [332], [109], [286], [370], [757], [595], [147], [327, 108], [21, 22], [478], [517], [334], [], [948], [727], [23], [619, 526, 846], [270], [167], [64, 55], [858], [324], [573], [150], [981], [586], [887], [], [398], [], [74], [516], [756], [129], [198], [256], [725], [565], [162, 167], [717, 581], [390, 467], [92], [29], [844], [591], [358], [468], [], [994], [872], [588], [608, 474], [183], [107], [40, 46], [842], [390], [101], [887], [870], [903, 841], [], [149], [21], [476], [80], [424], [159], [275], [175], [461], [970], [160], [788], [58], [479, 817], [498], [374], [28], [487], [50], [270], [383], [366], [484, 724], [373], [705], [330], [142], [949], [348, 349], [473], [159], [872], [878], [201], [906], [70], [889, 486], [632], [608, 774, 630, 636], [122], [720], [227], [], [162], [959], [638], [], [655, 851, 598], [645], [718], [483], [852], [397], [312, 988, 311], [457, 834], [352], [82], [934], [283], [802], [742], [276], [234, 236], [751], [342], [526, 528, 784], [328], [], [251], [163], [328], [771], [726], [977], [], [265], [], [590], [977, 978], [681, 810, 620, 508], [637], [39], [115], [937], [274], [277], [763], [905, 789], [646], [], [894], 
[647], [504], [937], [687], [781], [666], [583], [158], [825], [212], [659], [257, 222], [436], [199], [140], [248], [339], [230], [361], [909, 910, 926], [935], [638, 639], [654, 785], [289], [867], [], [103], [584], [243], [703], [449, 975], [771], [118], [396], [934], [16], [548], [993], [704], [841, 457], [233], [401, 593, 819], [827], [376], [146], [606], [922], [431], [284], [889], [475], [977, 978], [475], [984], [16], [77], [610, 453], [254], [636], [662], [473], [207], [25], [427, 463], [215], [230, 173], [35], [741], [125], [518, 652, 663, 465], [289], [425], [973], [], [167], [121], [445], [702], [], [366], [678], [764], [125], [349], [13], [179], [522], [], [989], [], [647, 438], [660], [801, 836, 837, 983], [533], [487], [27], [644], [750, 721], [865, 850], [1], [176], [694], [488, 664, 695, 508], [798], [809], [652, 413], [], [], [821], [421], [361], [920], [761], [27], [464], [92], [182], [897], [612], [610, 918], [283], [881], [906], [728], [426], [554], [], [531], [869], [730], [0], [866], [738, 580], [547], [43], [64], [69], [176], [329], [544, 926], [288, 290], [991], [591], [346], [1], [607], [934], [784, 828], [572], [], [888], [654], [546, 402], [390], [702], [24], [102], [949, 953, 954, 923], [810, 508], [361], [280], [65], [777], [359], [234], [21], [7], [525], [737, 886, 760, 894], [938], [254], [616, 733], [707], [463], [60], [], [531, 487, 623, 893], [380], [982], [305], [355], [503], [], [495], [472], [293], [816], [195], [738, 905], [475], [481], [431], [260], [130], [627], [977, 978], [622], [696], [300], [37], [133], [637], [867], [465], [592], [741], [908, 404, 895], [91], [109], [426], [694], [546], [208], [488, 649], [786], [959], [], [834, 906], [879, 568], [649], [228], [621], [630, 703], [107], [818, 598], [420], [], [133], [185], [471], [230], [974], [74], [76], [852], [383], [267], [], [359], [484], [510], [33], [177], [935], [310], [987, 998], [270], [598], [199], [998], [836, 837, 608], [14], [97], [856], [398], [319], [549, 681, 620], [92], [765], [840, 728, 412], [769, 945], [160], [265, 266], [638, 639], [846], [722], [183], [674], [468], [], [748, 636], [867], [636], [], [912], [721], [16], [199], [170], [], [946], [350], [557], [361], [361], [594], [861], [208], [606], [734], [767], [746], [788], [346], [153], [739], [414], [915], [], [152], [943], [849], [], [100], [546], [657], [764], [141], [39], [993], [758], [190], [888], [18], [], [341], [875], [359], [388], [894], [437], [987, 998], [517], [372], [286], [754, 662], [713], [915], [964], [146], [529], [416], [376], [147], [902], [26], [398], [175], [270], [335], [899, 559, 532, 505, 762, 923], [540], [607], [495], [257, 222], [801], [576, 879, 982, 472], [301], [166], [56], [868, 967, 968, 659], [], [], [567], [277], [], [651], [377], [684], [832], [39], [219], [863], [868], [794], [80], [983], [269, 347], [238], [781], [223], [521, 926], [830], [260], [491], [896], [220], [680], [48], [542], [], [820], [148], [113, 114], [99], [143], [691, 570], [796], [986], [346], [367], [939], [875], [625], [481, 482, 848], [464], [812], [705], [], [466], [781], [499], [617, 338], [679, 488], [858], [795], [437], [11], [625], [965], [874], [949, 954], [600, 517], [86], [133], [149], [865], [480, 582, 760, 886], [325], [499], [834], [506, 421], [298], [900], [905], [202], [740], [258], [762], [297, 295], [132], [240, 238], [833], [471], [386], [898], [162], [288, 290], [450], [850], [232], [273], [954], [965], [611], [643], [147], [290], [866, 977], [186], [156], [776, 683], [775], [987, 998], [333], 
[325], [572], [927], [744, 657], [777, 623], [833], [551], [301], [716], [485], [102], [791], [959], [404], [987, 998], [415], [455], [242, 852], [], [517], [16], [320], [632], [568], [], [216], [332], [769, 726], [923, 959], [861, 605], [134], [677], [288], [10], [919, 733], [852], [], [104], [712], [388], [261], [609, 479], [673, 681, 620, 526, 664, 508], [], [579], [450], [628], [217], [810, 878], [763], [208], [126], [442, 497], [864], [232], [776], [942], [336], [978], [681, 620], [512, 587], [78], [668], [699], [746], [46, 39], [968, 809, 618, 828], [330], [615], [], [62], [116], [127], [955], [306], [425], [190], [370], [187], [971], [897, 411], [396], [744, 657], [840, 463], [718], [116], [836, 837], [994], [419], [764], [214], [285], [641], [951], [882], [13], [829], [453], [216], [665], [521], [268], [468], [418], [728], [], [449], [194], [362], [928, 963, 948, 923], [924], [249], [524, 461], [992], [571], [283], [608], [129], [486], [859], [498], [21], [467], [591], [924], [556], [97], [898], [586], [10], [202], [67], [649], [141], [603], [727], [101], [995], [278], [964], [238, 240], [423, 424], [489, 634], [533], [424, 423], [451], [555], [732], [514], [803], [300], [551], [753], [411], [315], [963], [], [389], [559, 578, 601], [673, 742, 526, 527, 662, 664, 508], [839], [299], [578, 689], [112], [960], [632], [867], [], [61], [427], [367], [926], [465, 597, 413], [34], [773], [654], [131], [874], [281, 282], [891], [956], [201], [267], [], [200], [673, 508], [424, 423], [907], [57], [27], [906, 578, 834, 459], [7], [322, 946], [934], [663], [423, 424], [687], [836, 837], [958], [645], [119], [306], [930], [124], [694], [777, 524, 461], [205], [137], [849], [681, 620, 526, 508], [380], [586], [916], [478], [182], [874], [715], [487], [], [19], [161, 162, 785], [915], [730], [678, 487, 830], [822], [], [699], [689, 819, 578], [673], [], [], [624], [679], [887], [581], [665], [903], [746, 622], [585, 440], [800], [899], [669], [81], [746], [866], [935], [668], [295], [893], [265], [628], [987, 923], [367], [294], [727], [12], [435, 876], [192, 186], [589], [70], [129], [454], [17], [946], [204], [181], [163], [80], [940], [587], [21], [198], [25], [932], [339], [480], [465, 413], [883], [453, 619, 818], [807], [287], [], [614], [814], [591, 689, 601], [919], [508], [479], [452], [155], [41], [163], [606], [8, 7], [], [515, 808, 693], [858], [506], [23], [976, 447], [801, 397, 983], [856, 595], [753], [5], [186], [667], [305], [46], [303], [], [927], [91], [34], [675, 654], [406], [65], [76], [517], [806], [330, 331], [], [130], [103], [56], [], [78], [31], [372], [225, 235], [431], [159], [187], [930], [888], [96], [836, 837, 655, 879, 444], [994], [872, 622, 759], [302], [566], [33], [619], [694], [406], [20], [18], [371], [320], [780], [997], [730], [613], [105], [810, 878], [311], [883], [367], [243], [], [], [515, 39, 47], [412], [921], [332], [514, 464], [276], [629], [917], [77], [643], [556], [998], [328], [723], [161], [250], [1], [919], [392], [264], [652, 847, 465, 408, 413], [488, 633], [968, 495, 504], [188], [884], [335], [795], [241, 238], [842], [71], [862], [254], [27], [409], [444], [433], [324], [322], [688], [579], [562], [917, 335], [803], [863], [44], [719], [16], [384], [328], [348], [194], [678], [593], [9], [], [25], [913, 983], [260, 667], [104], [72, 815], [223], [268], [283], [784, 477], [53], [615, 465], [100], [543], [133], [159], [439], [151], [355], [392], [577], [72], [383], [619, 846], [145], [109], [988], [824], [293], [], [821], [484], [608, 
806, 966, 572], [259], [344], [132], [128], [154], [210], [508], [638, 639], [138, 83], [256, 233, 252], [376], [720], [464], [960, 968, 504], [999], [455], [613], [314], [993], [17], [759], [843], [591, 721], [330], [681, 810, 620, 531], [432], [778], [489, 372], [468], [489], [375], [263], [], [418], [377], [878], [283], [838, 631], [442], [382], [641], [628], [592], [59], [223], [587], [724], [207], [228], [8], [962], [575], [988], [402, 889], [551], [990], [141], [120], [207], [118], [946], [828, 463], [786], [166, 167], [256], [986], [28], [283], [636, 834, 671], [720], [411], [80], [678, 211], [29], [606], [636, 748], [156], [91], [734], [569], [458], [84], [230], [274], [707], [75], [965], [260], [978], [709], [372], [717], [763, 764], [96], [958], [884], [327], [140], [88], [156], [137, 98, 99], [559, 836, 837, 842], [669], [492], [771], [653], [484, 871, 913], [], [787], [827], [644], [393], [386], [654], [137], [715], [906], [724], [633, 477, 823], [516], [64], [850], [321], [611], [392], [509], [207], [903, 655, 638], [397], [582, 949], [188], [652, 465, 830], [750], [259], [294], [450], [511], [477], [255], [814], [781], [177], [654], [806, 911], [680], [769], [830], [273], [24], [463, 977, 978], [321], [480], [331], [21], [556], [481], [420], [195], [216], [215], [152], [333], [646], [152], [635], [128], [993], [351], [928], [267], [830], [], [335], [319], [786], [816], [334], [509], [444], [155], [902], [526, 527, 664], [483, 581, 479, 817, 511], [346], [482], [173], [438], [], [], [374], [548], [552], [619, 607], [411, 478], [451], [277], [715, 652], [], [855], [694], [709], [611], [168], [113], [782, 851], [974], [147], [69], [546, 650, 402, 818, 819], [11], [543], [629], [127], [652, 465, 764, 413], [349], [975, 628], [922, 412], [484], [78], [204], [399], [192, 186], [543], [89], [423], [323], [764], [970], [829], [645], [542], [809, 925], [195], [732], [474], [741], [820], [238], [643], [977, 978], [234], [844], [717], [925], [57], [806, 911], [444], [], [245], [], [923, 868], [791], [401], [896, 804], [773], [977], [875], [637], [442], [652, 847], [873], [472], [977, 978, 608, 502], [926], [102], [810, 878], [784], [], [355], [643], [279], [92], [523], [50], [510], [765], [681, 620, 526, 664, 281, 508], [870], [748], [253], [749], [], [452, 911], [824, 775], [261], [562], [911], [289], [950], [456], [449], [117], [97], [101], [291], [346], [809], [997], [168], [896, 861], [714], [126], [593], [8], [432], [72], [158], [958], [662], [945], [47], [919], [427], [809, 762], [185], [685], [122, 124], [660], [449, 536], [434, 533], [178], [356], [128], [819, 517], [157], [404], [23], [939, 582, 943], [204, 155], [756], [797], [916], [254], [9], [471], [577, 904], [255], [882], [654], [261, 174], [923, 931], [950], [360], [246], [872], [578, 982], [675], [418], [556], [216, 220], [928, 923, 960], [402], [911], [601], [179], [975, 638, 639], [303], [709, 526, 470, 767], [778], [664, 553, 697, 851], [178], [500], [], [557], [745], [611], [401], [571], [621], [206], [89], [394], [481], [627], [333], [701], [644], [364], [450], [979], [203], [872], [795], [265, 267], [118], [705], [565], [519], [641], [75], [], [590], [749], [374], [986], [76], [83], [14], [945], [683], [770], [74], [211], [429], [269], [], [505], [150], [344], [858], [45], [959], [884], [333], [953], [86], [204], [62], [928, 960], [257], [178], [178], [274], [], [552, 37], [147], [919, 920, 555, 733], [566], [74], [248], [399], [281], [768], [296], [327], [502], [721], [310], [944], [377], [825], [404], [], [17], 
[356], [860], [750], [926], [345], [957], [488, 830], [843], [430], [656, 919], [871], [424, 610], [141, 142], [653], [930], [977], [744], [673, 681, 620, 526, 527, 782, 664, 508], [840], [471], [], [863], [122], [851, 981, 664], [803], [544], [365], [326], [80], [166], [304], [398], [821], [456], [738, 428, 580], [149], [505], [366, 367, 369], [872], [173], [944], [220], [780], [492], [437], [888], [185], [12], [33], [763, 764], [740], [522], [917], [921, 638, 639], [86], [193, 187, 852], [], [300], [741], [262], [839], [307], [673, 681, 620, 526, 632, 508], [859], [49], [658], [966], [], [215], [64], [867], [370], [690], [68], [403], [433], [313], [138], [868, 813], [968, 504], [966, 907, 572], [], [587], [862], [67], [328], [390], [81], [968, 187], [15], [872], [519], [494], [405], [786], [423], [593], [917, 454], [65], [149], [558, 541, 542], [], [868, 945, 923], [894], [454, 921], [651], [943], [559], [], [72], [921, 763], [567], [861], [687], [40, 47], [257], [766], [169], [578, 982], [889, 486], [87], [448], [654], [789], [790], [185], [798], [35], [275], [636], [783], [353], [81], [960], [139, 140], [586], [44], [254], [603], [533], [37], [489], [159], [30], [963], [551], [906], [374], [816], [951], [671], [724], [671, 535], [37], [219], [669], [532, 762], [482, 754], [42, 26], [898], [], [330, 331], [951], [810, 878], [874], [481], [641], [], [472], [92], [559, 846, 818], [890], [659, 828], [840], [684], [235], [559], [264], [673, 526, 527, 782, 664, 508], [979], [518], [840], [548], [956], [221], [548], [], [167], [157], [], [547], [470], [665], [251], [53], [897], [350], [], [607], [], [264], [], [209], [343], [681, 620], [], [786], [127], [323], [861], [836, 837], [], [361], [581, 479, 656], [715, 652, 439], [43], [872, 917], [968], [114], [27], [536], [740], [417], [100], [692], [902, 488], [], [779], [307], [482], [31], [327], [896], [299], [994], [122, 124], [387], [114], [390], [327], [90], [478], [16], [320], [654], [711], [486], [518], [], [219, 220], [816], [78], [494], [255], [308], [204, 243, 155], [], [78], [977], [263, 185], [401], [603, 653], [779], [556], [690], [399], [265, 181], [304], [167], [950], [152], [438, 647], [227], [157], [588, 790], [599], [924], [], [475], [877], [763], [809, 925], [358, 359], [785], [927], [434], [812], [642], [867], [884, 406], [785], [139], [779], [39], [786], [771], [466], [], [894], [170], [867], [492], [905, 831], [175], [], [631], [778], [25, 28], [884], [116], [], [860], [467], [965], [923, 960], [370], [171], [936], [602], [781], [], [142], [605], [894], [700, 999], [], [306], [546], [550], [761], [621], [595], [515, 583], [320], [939, 813, 909, 910, 567], [179], [840], [], [769, 798], [215], [954], [385, 101], [223], [], [729], [759], [559], [87], [116], [236], [554], [911, 636], [661, 479], [168, 211], [828], [520, 529, 516, 431], [719], [978, 437], [100], [538], [], [697], [21], [240, 241], [312], [634], [515], [309], [685], [783], [61], [998, 987], [886], [111], [427], [314], [350], [719], [71], [286], [588], [616], [132], [670], [], [], [877], [558], [591], [251], [788], [232], [908, 895], [471], [754], [959], [767], [8], [690], [496], [], [407], [767], [647], [715], [629], [13], [407], [268], [842], [738], [943], [320], [810, 878], [195, 202], [922], [262], [185], [184], [], [197, 199], [502], [40], [941], [106], [900], [6], [949, 954], [247], [30], [47], [505], [460], [40, 46], [921, 604], [216], [473], [590], [872, 759], [315], [], [39], [], [404], [765], [608, 678, 841], [155, 204], [124], [181], [386], [113], [575], 
[689], [557, 624, 436], [230], [499], [818], [726], [932, 415], [217], [727], [737, 455, 760, 886], [230, 231], [541], [732], [686], [395], [547, 565], [623], [732], [344], [670], [506], [650, 818, 819], [], [675], [120], [970, 979, 858], [74], [292], [831, 721, 745], [483, 460, 975], [529, 831], [212], [961], [715], [751, 479], [583], [], [706, 762], [893], [865], [749], [134], [131], [248, 249, 537], [], [], [314], [540], [565], [661], [382], [235], [750, 721, 697], [], [], [], [406], [768], [562], [452], [196, 198], [899, 968, 504, 505], [10], [171], [500], [716], [318], [357], [330, 331], [106], [22], [577], [573], [481, 482], [910, 438], [283], [542], [457], [897], [502], [72], [305], [541, 542], [915], [633], [755], [991], [333], [571], [524], [51], [16], [479], [932], [894], [644], [822, 542], [515, 467, 728], [175], [126], [483, 698], [402], [270], [352], [792], [248, 250], [828], [772], [], [340], [14], [285], [351], [77], [529], [356], [46, 47], [505], [162], [868], [859], [672], [959], [369], [832], [907, 440], [674], [783], [673, 526, 527, 664, 508], [146], [785], [883], [628], [871], [632], [586], [219], [951], [946], [93], [64], [877], [980], [497], [296], [61, 62], [673, 650, 664, 526, 527, 632, 508], [896], [489, 981], [677], [], [758], [653], [487], [507], [496], [], [417], [668], [471], [628], [847], [658], [90], [987], [135], [308], [], [], [724], [64, 55], [299], [810, 878], [730], [575], [835], [394], [0, 758], [988], [376], [300], [612], [546], [137], [412], [874], [277], [398], [392], [156], [581], [124], [992], [65], [552, 903], [781], [121], [447], [662], [845], [449], [847], [34], [792], [754], [148], [996], [23], [692], [141], [513], [89], [796], [636], [673, 681, 620, 664, 526, 527, 508], [190], [84], [], [952], [683], [], [610], [414], [958], [838], [974], [954], [], [532, 799], [], [10], [129], [682, 708], [184, 232], [613], [585], [], [614], [547], [332], [683, 889], [437], [637], [809], [741], [854], [5], [154], [594], [569], [538], [499], [867], [153], [727], [251], [956], [583], [442], [400, 667], [962, 923], [187], [640], [607], [320, 319], [933, 923], [449], [24], [679, 488], [104], [62], [37], [879], [241], [578, 982], [745], [842, 977, 978], [738, 580], [], [650, 819, 854], [1], [133], [123], [424, 423], [614], [162, 167], [229], [610], [534], [524], [840, 911], [932], [559], [560, 981], [333], [565], [821], [904], [269], [222], [114, 947], [], [91], [846], [139], [537], [252], [], [652, 413], [928, 927], [354], [556], [345, 690], [722], [601], [803], [241], [682], [300], [490], [721, 831], [386], [250], [5], [651, 659, 411, 813], [], [742, 713], [156], [981], [570], [608, 610, 841, 894], [662], [598], [217, 852, 239], [43], [], [212], [218], [763], [106], [839, 873], [238], [220], [744, 657], [301], [777], [356], [625], [98], [], [138], [545], [199], [574], [217], [614], [243], [200, 155], [247], [185], [984], [539], [211], [684], [173], [92, 95], [654], [174], [297], [246], [], [775], [799], [370], [808], [956], [500], [2], [358], [801], [686], [773], [936, 939, 940], [605], [749], [779], [618], [993], [805], [924], [589], [145], [215], [], [938, 37], [752], [12], [481], [906, 834], [769], [401], [918], [836, 837, 951], [], [275], [799], [369], [515, 40, 42], [504], [137], [761], [8, 765], [166], [677], [767], [430], [469, 616], [400, 834, 667], [325], [927], [390], [802], [84], [], [736], [745], [23], [92], [712], [630], [410], [474], [221], [793], [83], [309], [], [165], [843], [579, 881], [397], [222], [104], [426], [488, 479, 695], [195], [886], 
[401, 881], [265], [466], [194, 185], [949], [331], [551], [315], [221], [172], [550], [629], [806, 658], [889], [897], [769], [832], [10], [608, 774], [525], [111], [593], [968, 504], [704], [868], [347], [569], [], [596], [738], [763], [59], [953], [506], [585], [922], [22], [807], [676], [279], [363], [14], [378], [], [60], [608], [357], [872], [612], [154], [740, 587, 783, 477], [877], [265], [230, 220], [612], [785], [690], [433, 728], [423], [89], [275], [975], [653], [584], [292], [330], [580], [284], [976], [374], [669], [70], [6], [251], [443], [340], [263], [208], [59], [483], [140], [535], [947, 997], [140], [241], [707], [755], [977, 978], [121], [452], [571], [508], [677], [357], [122], [163], [150], [60], [979], [418], [701], [428], [], [24], [37], [977, 638, 639], [37], [122], [243], [766], [399, 786], [588], [652], [457, 158, 151], [102], [405], [29], [395], [467, 596], [449, 718, 975], [672], [752], [366, 367], [612], [825], [], [908, 404], [], [18], [996], [430], [82], [968], [834], [558], [370], [977, 978, 879], [700], [261], [513], [464], [185], [218], [274], [432], [831, 765, 799], [652], [768], [758], [289], [201], [209], [105, 106], [378], [560], [435], [632], [270], [309], [733], [57], [959], [106], [22], [289], [197], [861], [996], [192, 187], [632], [296], [382], [681, 810, 620, 508], [775], [320], [350], [704], [972, 977], [498], [442], [596], [525], [748], [514, 819, 638, 639], [578, 903, 689, 601], [536], [965], [], [470, 921], [899], [526, 527, 782, 664, 508], [880], [699], [292], [113], [820], [763], [925], [807], [681, 620, 526], [994], [247], [865], [423, 424, 762], [803], [760], [243], [590], [181], [489], [865, 850], [929, 443], [727], [415], [55], [690], [738, 703], [385], [527], [617], [117], [544], [200], [784, 507], [850], [141], [754], [255], [496], [847], [435], [515, 596], [291], [415], [808], [707], [956], [62], [289], [], [238], [316], [], [398], [383], [997], [393, 108], [17], [166], [617], [139], [284], [433], [628], [617, 823], [700], [80], [701], [773], [793], [11], [347], [785], [678], [], [806, 850], [538], [920], [851], [965], [147], [57], [545], [19], [836, 837, 975], [999, 153, 700], [119], [910], [962], [183], [73, 74, 815], [517], [579], [140], [346], [], [627], [863], [814], [665], [65], [529], [466], [155], [655], [540], [712], [726], [247], [894], [239], [34], [820], [], [528], [678], [189], [117], [695], [380], [293], [413], [114], [389, 391], [784], [332], [], [68], [690, 346], [313], [855], [842, 731], [10], [830], [472, 718], [456], [723], [668], [461], [558], [143], [745], [523, 975, 978], [221], [946], [123], [931], [654], [27], [406], [510], [261], [746], [981], [2], [933], [], [508], [754], [342], [456], [704], [905, 750], [545], [363], [113], [205], [187, 636], [945], [370], [647, 600], [155], [790], [589], [532], [191], [28], [532], [67], [456], [506], [143], [245], [433], [417], [35, 37], [203], [803], [273], [322, 769], [919], [340], [873], [87], [754], [816], [241, 238], [], [843], [844], [643], [563], [842, 978], [421], [87], [406], [946], [979], [806], [877], [619, 846], [417], [782], [105], [465], [624], [830], [352], [746], [33], [552], [863], [855], [688], [597], [281], [185], [15], [907], [221], [625], [506], [563], [], [37], [605], [679], [944], [58], [402], [80], [961], [763], [828], [577], [93], [315], [300], [81], [178], [1], [841], [729], [923], [66, 68], [409], [256], [76], [818], [369], [128], [391], [567], [591], [365], [387], [766], [], [], [], [458], [172], [769, 767], [457, 541], [20], [883], [356], 
[545], [405], [716], [212], [730], [635], [277], [256], [25], [868], [728], [540], [386], [864], [971], [53], [351], [], [320], [707], [691, 570], [978], [870, 903], [630], [62], [989], [128], [515, 775, 564, 669], [362], [311], [822, 542], [325], [642], [695], [473], [761], [365], [715], [], [559], [950], [999], [158, 151], [704], [647], [627], [227], [3], [676, 236], [349], [936, 923], [715], [993], [60], [17], [703], [290], [33], [225], [968], [323], [377], [462], [108], [51], [], [592], [870], [421], [126], [564], [801], [959], [270], [928, 923, 960], [578, 982], [651, 732], [577], [129], [958], [841], [350], [78], [259], [559, 721], [0, 391], [958], [877], [474], [650], [141], [755], [283], [907, 440], [2, 3], [744, 657], [104], [929], [991], [94], [772], [879], [628], [105], [975, 634], [619, 846], [828], [988], [936], [591], [], [738], [280], [156], [240], [708, 460, 975], [80], [546], [936], [196], [694], [340], [], [386, 101], [672, 792], [], [135], [191], [611], [472, 693], [624, 453, 454], [221], [894, 638, 639], [72], [535, 479], [824], [], [298], [286], [758, 472], [85], [555], [794], [961], [958], [936], [990], [180, 243], [71], [608, 514], [], [224], [868], [454], [611], [309], [539], [317], [393], [610, 443], [338], [520], [430], [276], [841], [93], [749], [281, 282], [892], [294], [457], [945], [923], [458], [817, 479], [555], [624], [855], [135], [110], [510], [], [783], [451], [866], [419], [206], [492], [265, 266], [94], [713], [720], [457, 459], [89], [], [47], [], [435], [908, 404], [119], [762, 766], [890], [215], [719], [844], [717], [358], [687], [643], [970], [394], [730], [618, 813, 659, 567], [914], [639], [475], [473], [18], [492], [396], [64], [679], [105], [], [450], [834], [49], [538, 668], [821], [697], [794], [781], [656], [748], [968], [624, 454], [909, 567], [], [915], [689, 601], [126], [13], [724], [379], [], [673, 508], [734], [80], [763], [43], [352], [], [697], [535], [532, 923, 868], [894], [504], [884], [], [688], [297, 295], [154], [984, 475], [629], [238], [268], [653], [681, 620], [63], [302], [914], [439], [144], [643, 819], [150], [1], [], [228], [144], [211], [57], [820], [28], [387], [], [37], [690], [567], [351], [785], [787, 524], [343], [488, 600], [135], [24], [369], [528], [271], [520], [585], [683], [225, 235], [912], [44], [265], [], [335], [74], [2], [793], [868], [573], [374], [590], [26], [911, 533], [306], [443], [387], [603], [602], [231], [877], [202], [652], [978, 510], [907], [337], [612, 670], [2], [169], [607], [681, 810, 620], [135], [518, 920, 671], [917], [761], [847], [362], [27, 455], [707], [647, 968], [524, 461], [479], [724], [], [351], [756], [342], [253, 703], [351], [873], [176], [956], [673, 681, 620, 526, 527, 664, 508], [724], [633], [199], [613], [479], [], [777], [], [419, 605], [320], [939, 567, 926], [669], [256], [223], [605], [880], [593], [469], [337], [630], [839], [752, 852], [846], [528], [105], [630], [514, 515], [125], [742], [94], [776], [], [512], [738], [968], [270], [455], [182], [58], [181], [674], [96], [118], [37], [453], [148], [203], [770], [894, 799], [11, 14], [101], [715, 671], [970], [601], [495], [786], [57], [33, 973], [990], [400], [716], [788], [337], [812, 908], [739], [292], [878], [9], [61], [361], [605], [218], [344], [232], [844], [832], [246], [596], [120], [950, 953], [896, 999, 648, 861], [975], [853], [921], [348, 349], [537], [866], [], [836, 837, 518, 898, 671], [39], [76], [], [449, 975, 472], [862], [138], [719], [], [262], [981, 429], [930], [22], [], [913], [617, 
823], [821], [150], [825], [369], [474], [922], [], [343], [], [312, 937], [823], [951], [676, 235], [862], [92], [346], [28], [497], [549], [72], [195], [212, 251], [37], [112], [648], [107], [623], [139], [929], [170], [99], [475], [713], [], [264], [813], [432], [916], [475], [526, 664], [976], [44], [749, 526], [204], [121], [622, 759, 414], [194], [685], [283], [362], [555], [474], [17], [587], [368], [460, 718], [247], [885], [109], [737], [865], [783], [739], [462], [548], [136], [999, 876, 435], [579, 402], [351], [274], [641], [982], [426], [], [538], [354], [], [890], [954], [229], [824, 911], [728, 861], [932], [626], [681, 620], [107], [646], [69], [702], [987, 998], [607], [478], [], [792], [908], [898], [304], [73], [401], [], [923, 122], [268, 151], [593], [], [373], [503], [302], [402], [481, 482], [750, 721], [], [374], [446], [492], [755], [277], [91], [], [], [], [372], [531], [479], [763], [359], [595], [642], [654, 603], [499], [467], [346], [650], [804, 631], [111], [], [487], [693, 472], [929], [134], [661], [629], [117], [689], [743], [479, 817, 511], [447], [232], [321], [351], [621], [494], [200], [470], [316], [786], [981, 429], [258], [485], [960, 923], [144], [], [395], [506], [789], [325], [297], [384], [], [453], [614], [349], [645], [608], [673, 526, 527, 916, 664, 508], [807], [818], [830], [370], [754], [762, 923], [], [903], [213], [581], [6], [], [802], [579, 881], [242], [696], [683], [939], [139], [475], [749], [685], [219], [702], [661], [834, 906, 630], [495], [816], [945], [327], [386], [374], [298], [377], [979], [625], [888], [227], [176], [677, 587, 784], [11], [907, 953], [276], [105], [546], [650, 834, 906], [578], [739], [298], [14], [4], [526], [47], [281, 897], [791], [984], [839], [842, 625], [986], [411], [582], [52], [43], [778], [306], [820], [150], [], [822, 542], [78], [887], [], [], [948], [829], [259], [199], [804], [580, 315], [513], [84], [821], [814], [929], [928, 960], [957], [921], [608], [744, 657], [172], [392], [820], [937], [610, 114], [331], [51], [113], [7], [7], [60], [836, 837], [592], [396], [650], [687], [873], [431], [159], [182], [430], [837, 465, 597], [950], [566], [31], [440], [234], [726], [113], [980], [517], [221], [572], [598], [376], [913], [843], [531], [803], [125], [685], [255], [801, 983, 327], [417], [666], [891], [530], [57], [520], [166], [745], [450], [], [385, 386], [716], [650, 819], [564], [355], [353], [308], [883, 725], [775], [131], [939], [83], [116, 126], [], [955], [18], [628], [480], [841, 823], [594], [405], [162], [95], [605, 748], [430], [338], [86], [305], [648, 794], [403], [], [939, 943], [497], [236], [413], [350], [854], [93], [49, 50], [719], [187], [262], [405], [62], [955], [608, 679], [260], [331], [265], [938], [370], [497], [26], [581, 407], [660], [119], [67], [89], [683], [779], [801], [263], [368], [862], [], [486, 776, 683], [143], [689, 501], [], [541], [916], [530], [40], [], [634, 561], [177], [208], [425, 825], [836, 638, 639], [], [547], [198], [671, 518, 615], [110], [232], [1], [558], [911, 796], [126], [807], [619], [616, 618], [526], [88], [759, 474], [249], [33], [386], [30], [900], [898, 680], [472, 693], [370], [123], [608, 897, 651, 567], [], [482], [546, 631], [197], [566], [515], [721], [18], [774], [962, 923], [272], [361], [494], [55], [], [783], [946], [430], [293], [195], [100], [120], [619, 409, 442, 892], [578, 689, 982, 601], [751], [764], [336], [441], [448], [558], [55], [283, 435], [561], [291], [189], [465], [182], [179], [574], [257], [896], 
[487], [696, 738], [], [534], [856], [346], [], [416], [], [911], [391], [762], [258], [535], [230], [983], [378], [402], [310], [780], [11], [923, 891], [369, 379], [672], [329], [39, 44, 47], [147], [88], [582, 953, 954], [879, 638, 639], [687], [102], [97], [631], [917, 921], [969], [221], [85], [], [999, 435, 794], [246], [159], [76], [], [962, 923, 935], [155], [794], [951], [481, 482], [146], [31], [737, 519], [], [872], [563], [252], [300], [665], [317], [195, 245], [], [517], [126], [125], [29], [17], [230, 231], [25, 28], [453, 553], [48], [976], [767], [572], [201], [752], [352], [230], [671], [312], [110], [316], [711], [13], [670], [788], [551], [770], [903], [90], [771], [656, 468], [181], [464], [579, 881], [492, 588], [619, 846], [24], [957], [500], [582, 692], [817], [890], [], [986], [769], [785], [989], [13], [38], [165], [548], [457], [923, 934], [565], [263], [492], [858], [607], [744, 657], [417], [483], [466, 799], [795], [698], [747], [851], [161], [614], [627], [971, 542], [478], [896], [], [174], [934], [], [929], [332], [487, 457], [962, 923], [7], [], [245], [956], [286], [], [115], [], [481, 482], [609], [547], [356], [994], [637], [636, 748], [604], [448], [152], [163], [107], [], [97], [353], [898, 585, 631], [404], [946], [853], [292], [140], [860], [734], [739], [243], [964], [673, 526, 508], [519], [899, 968, 725, 504, 572], [341], [277], [759, 635], [707], [661], [286], [863], [401], [79], [299], [826], [274, 277], [31], [604], [88], [697], [755], [], [538], [], [305], [440, 441], [327], [867], [], [756, 412], [275], [521, 926], [138], [791], [365], [305], [976, 977, 978], [10, 858], [478], [489], [50], [896], [324], [86], [386], [789, 614], [699], [133], [408], [565], [], [611], [77], [998, 941, 987], [769], [248, 537, 250], [543], [255], [458], [573], [], [338], [944], [697, 819, 854], [797], [491], [714, 402], [578, 627, 982], [16], [382], [531], [58], [933], [269], [653], [960, 813], [200], [791, 922], [549], [522], [155], [632], [110], [453, 894], [182], [513], [396], [243], [], [368], [440, 455], [181], [438], [759], [], [331], [259], [728], [807], [596], [634], [517], [], [599], [157], [159], [324], [581, 479], [179], [115], [645], [816], [155], [604], [673, 526, 527, 664, 508], [29], [], [550], [972], [284], [403], [874], [315], [637], [393], [421], [], [459, 845], [221], [963], [558], [258, 222], [400], [40, 44], [991], [444, 670], [147], [50], [763], [502, 638, 639], [], [72], [648], [137], [119], [548, 851], [610], [475], [487], [72], [], [559], [858], [935], [731], [455], [], [973], [395], [786], [263], [734], [], [308], [728, 412], [256], [289], [79], [349], [461], [591], [72, 815], [], [408], [54], [634], [601, 578], [70], [671], [166], [439], [869, 841, 523], [72], [923], [327], [651], [856], [427, 756], [989], [834, 982, 906], [], [551], [389, 983], [559], [75], [75], [902], [830], [102], [369], [921], [551], [], [70], [366], [769], [107], [517], [314], [272], [434], [238, 239], [607], [58], [8, 7], [832], [220], [182], [975, 980, 703], [978], [869, 617], [378], [748], [248], [431], [866], [960], [659], [468, 919], [953, 954], [930], [873], [40, 46], [518], [466], [833], [408], [923], [937], [], [508], [314], [734], [696], [956], [615], [320], [946], [196], [914], [123], [903], [34], [56], [213], [512], [208], [99], [372], [205], [677], [786], [], [788], [772], [94], [99], [783, 677, 463], [], [555], [770, 836, 837, 733, 862, 610], [882], [937, 938], [592], [11], [], [13], [736, 515], [418, 767], [197], [914], [524], [233], [882], [], [], 
[315], [83], [788, 502], [124], [979], [320], [169], [491], [861], [784], [893], [517, 540], [340], [501], [52], [514], [72], [366], [961], [224, 208], [702], [275], [], [267], [421], [54], [963], [275], [756], [316], [945], [606], [198], [177], [928], [612], [497, 663], [587, 784], [], [375], [75], [709], [678, 638, 523, 818, 819], [360], [442], [330, 331], [566], [150], [], [], [], [8, 7], [557, 663, 442], [706, 421, 970], [458], [51], [2, 3], [702], [659, 923], [553], [8], [17], [17], [880], [734, 407], [273], [933], [953], [891], [464], [237], [860], [669], [857], [2], [448, 858], [869], [69, 110], [350], [273], [73], [], [727], [506], [306], [91], [], [181], [926], [17], [24], [545], [957], [845], [104], [513], [53, 759], [777], [847], [187], [378], [696], [940], [200], [155], [409], [229], [123], [850], [723], [578], [247], [847], [956], [51], [890], [907], [], [646], [182], [12], [958], [980], [172, 173], [999, 499, 700], [844], [21], [811], [830], [512], [531], [236], [581, 479, 511], [542], [565], [169], [], [453], [900], [], [740], [235], [192], [371], [475], [121], [126], [50], [161], [240], [421, 976, 978], [422], [172], [803], [], [673, 526, 527, 782, 664, 508], [230], [479], [978, 515], [841, 501], [545], [758], [4], [123], [329], [503], [966], [281], [209], [401], [687], [483], [30], [949], [873, 839], [974], [576], [514], [988], [173], [], [164], [991], [882], [609], [756], [], [], [936], [113, 125], [453], [], [471], [67], [242], [257, 850], [141], [571], [321], [631], [586], [902], [793], [378], [608, 421, 869], [520, 669], [748], [121], [84, 7], [396], [670], [453], [797], [917], [746], [608, 972], [421, 506], [997], [910], [257], [802], [688], [210], [905, 859], [337], [753], [519], [750], [625], [476], [651, 527, 664], [683], [962, 923], [800], [802], [874], [853], [625], [585], [93], [928], [407], [955], [807], [54], [], [402, 836, 837], [494], [87], [756], [], [244], [522], [347], [692], [886], [182], [864, 867], [884], [985], [914, 484, 780], [635], [304, 302], [18], [], [610], [440], [808, 968, 504], [850], [698, 483], [469, 926], [], [518, 568, 570], [575], [928, 762, 923, 927], [106], [977, 638, 639], [770], [519], [542, 559, 541], [456, 733], [], [617], [235], [200], [280], [842, 879, 977, 978], [191], [740], [553, 750, 831, 894], [576], [983], [962, 923], [217], [199], [7], [828], [656], [217], [23], [47], [439], [152, 155], [400, 667], [18], [197], [631], [442, 494], [235], [377], [], [966, 572], [777], [528], [238], [997, 947], [183], [133], [861], [394], [425], [389], [13], [294], [243], [69], [850], [56], [143], [360], [547], [462], [], [552], [611], [322], [572], [494], [197], [833], [708], [548, 851, 632], [918], [124], [459], [149], [361], [520], [458], [270], [186, 193], [667], [675, 850, 757], [453], [833], [716], [190], [], [30], [949, 954], [211], [834, 517, 906, 630, 671], [374], [], [670, 518], [450], [914], [39], [261], [], [463], [], [100], [488, 679], [995], [760], [230, 231], [110], [], [251], [], [814], [490, 600], [38], [683], [994], [553], [673, 508], [277], [839], [564, 669], [920], [483], [551, 629], [757], [217], [877], [60], [785], [533], [1], [401], [214], [853], [126], [295], [318], [892], [719], [462], [124], [240], [516], [535], [149], [521], [152], [393], [562], [195], [962, 933, 923], [419], [1], [103], [423], [824], [582], [780], [370], [228], [581], [456], [], [984], [], [997, 947], [114], [837, 841], [333], [490, 524, 461, 787], [889], [858], [93], [6, 983], [], [656], [986], [991], [812], [608, 465, 597], [857], [311], [652], 
[610], [445], [246], [231], [673, 664, 526, 527, 632, 508], [221], [], [149], [304], [], [], [285], [354], [966], [78], [], [31], [500], [617], [665], [946], [604], [130], [246], [464], [237], [339], [50], [809, 923], [859], [], [], [581], [550], [], [898, 585], [201], [701], [274], [12], [153], [12], [], [345], [], [368], [225], [], [9], [41], [527, 782, 916, 664], [932], [981], [776], [363], [239], [694], [], [232], [905], [669, 564], [850], [195], [179], [328], [849], [167], [539], [173], [166], [829], [680], [145], [37], [268], [523], [394], [718, 975], [779], [567], [377], [670], [965], [139], [472, 693], [355], [538], [167], [841, 501], [212], [788, 795], [918], [897], [610], [718, 888], [726], [158], [145], [868], [], [361], [654], [327], [869], [417], [305], [350], [578, 689], [879], [401], [241], [937], [600], [284], [537], [172], [494], [408, 414, 465, 608], [695], [696], [525], [805], [961, 909], [627], [949], [647], [35, 37], [911, 658, 824, 568], [944, 946], [923], [346], [457, 834], [349], [79], [], [612], [104], [104], [596, 284], [835], [614], [568], [322], [301], [265], [758], [866], [829], [358], [977, 978], [906], [24], [571], [334], [785], [694], [299], [], [654], [722], [511, 479], [272], [271], [409], [515], [6], [927], [337], [708, 557, 538], [997], [673, 664, 526, 527, 782, 632, 508], [895], [353], [], [385, 101], [236], [174], [214], [642], [932], [440], [904], [903], [766], [975], [11], [283], [416], [792], [36], [35, 37], [544, 521, 910, 926], [598], [578], [281], [990], [110], [391], [859], [], [959], [693], [688], [588], [497], [753], [350], [44], [529], [760], [945], [303], [985], [51], [], [111], [412], [708], [179], [52], [581], [852], [734], [884], [608, 610, 836, 837, 557], [625], [711], [960], [936, 938], [807, 637], [226], [], [276], [195], [863], [457], [88], [], [760], [180], [593, 650], [543], [654], [939, 943], [698], [956], [594], [841, 911], [], [694], [496], [544], [198], [693], [956], [243], [102], [118], [783], [248, 250], [189], [5], [479], [507], [438], [973], [168], [434], [814, 913], [214], [349], [817], [726], [821], [585], [9], [908, 895], [333], [334], [], [580], [201], [386], [985, 716], [195, 159], [430], [546, 776, 650, 819, 632], [207], [261], [209], [895], [358], [321], [681, 620, 951], [333], [711], [286], [445], [293], [880, 430], [], [818], [996], [327], [573], [526], [843], [713], [847], [179], [268], [248, 250], [337], [177], [968], [688], [652], [962], [383], [220], [815], [810, 878], [146], [39], [455], [52], [141], [463], [828], [981], [787], [497], [620], [786], [615], [240, 238], [893], [30], [486], [825], [418], [649], [64, 55], [779], [48], [621], [159], [570], [43], [539], [], [], [], [945], [392], [606], [208, 250], [538], [949], [91], [207], [985], [951], [580], [79], [259], [645], [826], [581, 751, 817, 479], [640], [47], [453, 454, 624], [896, 435], [725], [], [384], [121], [], [234, 214], [894], [991], [315], [374], [], [], [614], [569], [497], [605], [339], [], [378], [82], [], [576], [610], [905, 532, 441, 572, 834, 966], [416], [780], [129], [], [386], [573], [628], [853], [982], [786], [672, 970], [908], [325], [331], [380], [551], [487], [], [859], [882, 613], [125], [245], [379], [561], [840], [867], [437], [52], [646], [536], [382], [647], [323], [], [175], [874], [578, 903, 689, 885], [535], [937], [462], [433], [189], [654], [592], [357], [94], [341, 703], [468, 919, 920], [377], [148], [362], [14], [326], [319], [659], [857], [681, 620, 508], [849], [841, 608, 636], [289], [785], [779], [870], [], [302], 
[], [373], [29], [486], [201], [239], [735], [954], [143], [563], [48], [807], [430], [571], [345, 690], [690], [129], [399], [393], [181], [391], [907, 478], [400], [647], [544], [], [871], [697], [263], [774], [916], [708], [509], [135], [812], [385], [214], [285], [76], [261], [390], [590], [595], [397], [936], [168], [525], [], [502], [914], [449], [750], [471], [528], [19], [966], [879], [38, 44], [121], [837, 441], [801], [893], [], [793], [104], [873], [144, 977], [313], [267], [204], [868, 923], [488], [236], [334], [26], [427], [621], [715, 764], [692], [104], [627], [578, 903, 601], [595], [921], [785], [204], [759], [721], [214], [330], [564], [565], [59], [], [489], [515], [23], [191], [125], [629], [578], [514, 788], [580], [171], [444], [681, 620, 508], [215], [289], [742], [175], [821], [826], [93], [241], [], [719], [62], [11], [995], [497], [77], [162], [], [494, 442], [357, 337], [497], [977], [990], [363], [506], [264, 263], [103], [609], [671], [548, 905, 851, 831, 598], [306], [], [48], [445], [875], [387], [731], [361], [], [132], [82], [923, 924], [257], [945], [843], [819], [481], [147], [292], [968, 651, 504], [971, 815], [76, 568], [245], [82], [870], [671], [], [46], [463], [845], [944], [12], [602], [483], [4], [182], [958], [900], [301], [335], [734], [515], [428], [789], [481, 482], [902], [234, 852], [417], [802], [655, 500], [363], [354], [64, 55], [281], [796], [150], [668], [618, 284], [144], [159], [194], [345], [414], [482], [296], [446], [814], [516, 601], [425, 730], [534, 729], [729], [868, 923], [813], [475], [129], [740], [573], [437], [760], [792], [873], [644, 470], [279], [907], [434], [229], [610, 655], [795], [185], [521], [232], [672, 570], [165], [902, 769, 726], [649, 979], [400, 667], [788], [812], [715], [237], [747], [608, 728, 824, 630, 414], [39, 48], [424, 423], [590, 487], [291], [771], [15], [485], [836], [297], [38], [967], [329], [138], [739], [518, 652, 691, 570], [867], [608, 459], [804], [665], [515, 906], [9], [919], [111], [], [923], [5], [421], [533, 539], [674], [828], [836, 837], [963, 567], [399], [545], [892], [381], [597, 763], [473], [540], [248, 250], [80], [612], [806], [31], [965], [823], [446], [346], [921], [817], [195], [479], [725], [518], [467], [634], [968], [921], [141], [284], [546, 650, 402], [923], [454], [296], [171], [865], [276], [132], [970], [970, 980], [443], [653], [66, 68], [874], [259], [300], [445], [580], [738], [], [889], [904], [252], [], [897], [8, 7], [168, 205], [960], [235, 242], [510], [839], [752], [958], [436], [543], [797], [442, 538], [537], [7], [491], [160], [659, 438, 647], [872, 759, 622, 732, 414], [402, 703], [876], [465], [453, 454], [800], [757], [626], [912, 716], [880], [720], [880], [248, 249], [801], [452], [265], [379], [841, 447], [108, 991], [862], [394], [715], [812], [32], [452, 433], [678], [8], [79], [747], [316], [48], [], [232, 760], [648, 720], [588], [74], [563], [970, 518, 671], [972, 858], [566], [996], [596], [335], [476], [83], [513], [868], [], [968, 849, 725, 504], [892], [309, 984], [496], [673, 742, 664, 526, 527, 508], [632], [40, 46], [], [366], [570], [11], [691], [], [427], [850], [446], [434], [48], [], [20], [796], [467], [528], [363], [871], [112], [222], [872], [434], [561], [238], [962, 692], [713], [41], [338], [847], [943], [15], [774], [610], [148], [497, 442], [84], [836, 837, 678], [130], [885], [288], [340], [844], [654], [213], [974], [849], [419], [669], [35], [280], [323], [142], [157], [64], [553], [931], [379], [357], [466], [646], 
[967], [377], [256], [283], [289], [486], [], [722], [850], [], [92], [], [330], [378], [538], [151], [], [122], [6], [443], [670], [672, 471], [946], [38], [224], [752], [444], [420], [906], [372], [755], [480], [490], [613], [259], [578, 639], [646], [793], [235], [], [737], [777], [584], [553], [822], [388], [585], [862], [523], [745], [582, 941, 728], [82], [56], [195, 697], [801, 570], [787], [885], [740], [122], [618], [389], [153, 204], [563], [190], [610, 487], [967], [822, 542], [399], [565], [649], [696], [298, 299], [964, 951], [207], [26], [206], [635], [739], [523], [512], [385], [487], [500], [675], [38], [777], [934], [777], [624, 283, 453, 454], [805], [960, 967, 968], [859, 868, 521, 651], [337], [621], [169], [515, 680], [863], [886], [626], [406], [630], [45], [365], [512], [333], [415], [909, 926, 968, 504], [495], [886], [91], [105], [273], [421], [829], [153], [417], [510], [519], [979], [941], [569], [992], [263], [948], [819], [936], [], [84], [750], [139], [716], [395], [677], [949], [], [968, 504], [509], [378], [423], [305], [451], [59], [968, 504], [206], [32], [506, 421], [24], [140], [804], [715], [581, 656, 436, 479], [312, 311], [938, 923, 935], [684], [316], [94], [988], [42], [908, 404, 895], [908], [963], [252], [322], [900, 540, 812], [], [426], [180], [821], [], [502], [739], [261, 174], [650], [494], [581, 479, 817], [35], [90], [591], [432], [613], [626], [102], [489], [411], [168], [627], [834, 458], [903, 558], [], [372], [900], [49], [206], [766], [53], [783], [265], [71], [812], [136], [589], [61], [], [64, 59], [69], [710], [129], [], [496], [15], [911, 474], [659], [120], [], [432], [428], [140], [801], [217], [669], [994], [330, 332], [999, 281, 700], [958], [12], [608], [487, 402], [548, 851], [334], [750, 721], [241], [624, 453, 454], [392], [736], [301], [441, 572], [386, 101], [], [581, 436, 479], [810, 878], [568], [106], [482], [850], [28], [770], [1], [843], [655], [94], [464, 597], [741], [693], [468], [660], [917], [329], [], [654], [871], [390], [342], [], [572, 966], [950], [120], [146], [302], [], [519], [], [], [197], [505], [155], [825], [188, 189], [96], [237], [726], [325], [229], [507], [457, 834], [93], [], [260], [930], [510], [346], [983], [395], [317], [289], [554], [34], [713, 742], [992], [162], [211, 159], [401], [88], [559], [760], [484], [636], [309], [14], [78], [725, 901], [378], [431], [267], [223], [423, 424, 589], [973], [681, 810, 620], [618, 469], [], [167], [383], [117], [], [302], [479, 436], [389], [663], [346], [323], [822], [126], [432], [524], [994], [968], [], [355], [562], [420, 683, 875], [789], [847], [60], [842, 638], [720], [724, 536], [373], [398], [780], [673, 620, 664, 526, 527, 846, 632, 508], [540], [616], [104], [873], [417], [436], [277, 278], [668], [945], [184, 191], [682, 708], [225], [], [546], [674], [146], [580], [903], [665], [821], [682], [216], [684, 784], [571], [621], [287], [120], [774], [849], [223], [498], [608], [193, 194, 187], [982], [1], [771], [882], [469], [], [388], [344], [377], [610], [816], [621], [940, 463], [435], [515], [603], [402, 559, 836], [450], [], [800], [628], [865], [610], [], [15], [762], [775], [539], [531], [185], [579], [482], [398], [419], [976], [650], [771], [491], [910], [69], [207], [939], [], [100], [134], [506, 421], [249], [525], [171], [999, 861], [287], [497, 884], [], [249, 250], [600], [765], [609], [216], [788, 831], [210], [781], [923, 550, 967, 968, 762], [781], [198], [673], [235], [684], [429], [828], [86], [869], [215], [209], [435, 
151], [397], [430], [791], [187], [436], [849], [603, 764], [144], [591], [808], [793], [909, 827, 926], [272], [], [80], [313], [923], [251], [53], [430], [119], [562, 825], [499], [919, 733], [359], [57], [820], [131], [330], [507], [781], [975, 703], [286], [761], [231], [841, 885, 630, 636], [128], [713], [780, 724], [604], [], [307], [880], [955], [910, 729, 828], [338], [928], [], [494], [340], [822, 577], [500], [859], [202], [975, 562], [633], [856], [210], [834, 836, 837, 650, 906, 819], [], [658], [366], [634], [160], [134], [277, 278], [155], [570], [102], [27], [421], [50], [401], [785], [906], [288], [487], [966, 572], [671], [788], [759], [377], [690], [816], [655], [72], [748], [592], [241], [893], [560], [18], [246], [901], [270], [782, 664, 830], [414], [819], [196, 198], [122], [839], [622, 759], [456], [278], [724], [333], [664, 971], [610, 841], [498], [965], [409], [241, 238], [136], [114], [453, 553, 894], [8], [869], [932], [587], [519, 950], [354], [648], [], [489, 22], [903], [442], [987, 998], [44], [795], [265], [933], [911], [748], [23], [396], [795], [1], [802], [], [], [479], [81], [525], [836, 837, 841, 978, 501], [626], [356], [610], [470], [666], [846], [91], [137], [], [529], [569], [993], [452], [616], [940], [293], [351], [604], [244], [551], [47], [354], [481], [800], [455, 440], [711], [23], [5], [700, 999], [148], [536], [886], [368], [246], [468], [672], [879], [171], [541, 62], [714], [28], [169], [993], [17], [442, 497, 858], [839], [679, 721], [160], [845], [251], [898], [423], [480], [581, 468], [500], [396], [883, 572], [431], [956], [361], [53], [817], [49], [729], [522], [], [939], [338], [391], [965], [625], [884, 406], [774], [546, 776, 158], [839, 718], [458], [213], [48], [950], [478], [431, 697], [34], [352], [703], [931], [830], [968, 504], [], [938], [320], [195], [121], [774, 977, 978], [437], [563], [26], [362], [16], [328], [841], [673, 526, 527, 782, 664, 508], [469], [13], [463], [14], [922], [231], [26], [921, 445], [], [22], [996], [222], [440, 737, 455], [232], [133], [607], [293], [117], [343], [476], [291], [565], [521], [825], [724], [295], [219, 220], [364], [258], [], [483], [], [710], [474, 911], [538], [64, 55], [], [539], [573], [603], [], [393], [923], [934], [922], [469], [871], [], [402], [474, 799], [616], [544], [50], [], [414], [595, 866], [825], [], [131], [515], [351], [297], [976], [577], [764], [903], [699], [335], [229], [666], [444], [168], [560], [847], [], [286], [], [6], [64], [218], [747], [669], [287], [825], [], [370], [957], [662], [875], [963], [165], [260], [646], [778], [197], [753], [996], [930], [453, 742, 681, 620], [677], [518], [63], [346], [517], [610], [672, 797], [276], [721], [383], [571], [787], [735], [75], [834, 681, 906, 526], [345, 346, 730], [54], [443], [597], [652], [770], [212], [116], [368], [388], [87], [690], [368], [854], [117], [], [105], [457, 834], [93], [], [342], [96], [834], [406], [17], [798], [866], [930, 415], [471], [574], [83], [698], [799], [24], [208], [459, 445], [946], [981], [887], [732], [687], [68], [966, 572], [999], [478], [263], [417], [244], [128], [974], [580], [515], [2], [893], [532], [56], [169], [714], [617, 691, 570], [366], [141], [38, 45], [309], [731, 861], [957], [845], [], [732], [411], [668], [850], [747], [565], [989], [508], [322], [547], [50], [752], [455], [806, 630], [103], [752, 852], [483], [845], [56], [427], [10], [881], [426], [300], [864], [184, 191], [316], [158], [557, 718], [253], [550], [260], [638, 639], [250], [842, 814, 977, 
978, 693, 445, 639], [366], [], [388], [237, 158], [252], [743], [391], [816], [76], [399], [897, 285], [441], [], [6], [98], [289], [653, 493], [914], [696], [863], [701], [100], [825], [977, 978], [965], [384], [70], [605], [937, 962, 935], [979], [22], [669], [899], [64, 59], [640], [345, 347], [354], [491], [29], [141], [963], [27], [563], [250], [155], [236], [793], [969], [739], [780, 914], [125], [813, 567], [920, 779], [429, 463], [303], [665, 518], [12], [673, 810, 527, 664, 508], [370], [429], [793], [809], [16], [276], [], [679], [748], [323], [204], [201], [784], [286], [138], [303], [192, 185], [774], [], [669], [288], [489], [659], [588], [912], [735], [611], [99], [], [938], [358, 359], [218], [807], [907], [550], [36, 37], [834, 655], [904], [919], [699], [840], [698], [578, 819], [592], [767], [518, 670], [117], [258], [], [592], [256], [], [666], [265, 267], [833], [602], [474], [541], [614, 894], [760], [241], [507], [557, 22], [854], [418], [260], [673, 892, 681, 620, 526, 508], [269], [203], [277], [491], [48], [738, 999, 905, 700], [455], [239], [642], [236], [178], [403], [], [25], [546, 819], [834, 906], [776], [755], [816], [338], [778], [89], [], [560], [], [665], [939, 943], [914], [29], [683], [130], [0], [], [57], [335], [190], [971], [294], [175], [955], [524, 461], [583], [], [346], [157], [134], [], [112], [987, 998, 809, 923, 925], [90], [], [39], [345, 690], [678], [175], [749], [149], [813, 910, 926], [965], [101], [491], [122], [954], [434, 797], [311], [679], [597], [], [362], [786], [767], [27], [751], [724], [409], [694], [724, 536], [904], [197], [692, 790, 509], [], [901], [554], [928, 930, 923], [865], [65], [789], [958], [427], [927], [950], [274], [379, 381], [656], [320], [132], [855], [10], [41], [938], [553], [557], [897, 651, 760], [839], [811], [692, 760, 700], [616, 830], [593, 650], [610], [366], [885], [835], [291], [543], [448], [435], [486], [679], [750], [919], [734], [], [534], [964], [82], [], [287], [294], [714], [784], [991], [103], [925], [226], [63], [214], [578], [872, 681, 620, 622, 759, 414], [738], [135], [434], [610], [907, 440], [703], [112], [], [772], [606], [137], [162, 167], [744, 657], [], [277, 278], [321], [763], [104], [466], [303], [238], [726], [358], [216], [112], [], [32], [70], [594], [392], [159], [12], [206], [238, 216], [536], [791], [190], [674], [223], [610, 402], [44, 26], [539], [479], [81], [194, 203], [247, 215], [880], [17], [794], [], [], [420], [896], [], [409], [42], [114, 947], [433, 460, 975, 977], [710], [989], [745], [907, 440], [261], [303], [657], [518], [565], [614], [847], [607], [866], [371], [676, 199], [726], [478], [490], [700, 999], [978], [836, 837, 619], [305], [], [768], [648, 631], [265], [399], [523], [], [896, 648], [], [410], [913], [211], [512], [522], [336, 337], [681, 620, 632], [159], [307], [], [944], [808, 638, 639], [731], [796], [20], [392], [571], [576], [518, 830], [873], [789], [928], [311], [24], [858], [974], [422], [241], [729], [569], [494], [684], [387], [746], [45], [95], [582], [819, 854], [380], [521], [252], [504], [], [439, 541, 542], [174], [630], [937], [349], [542], [577], [465], [239], [378], [568], [218], [969], [610, 898], [844], [975, 703], [479, 817], [999, 434], [103], [865, 850], [210], [244], [881], [127], [426], [728, 790], [763], [903], [280], [744, 884], [301], [931], [822], [127], [256], [48], [418, 629], [352], [736], [343], [733], [405], [685], [638, 639], [808], [207], [722], [992], [985], [850], [506], [31], [], [739], [601], [344], [190], 
[876, 435], [810, 878], [673, 419], [850], [460, 975, 536], [874], [103], [852], [750, 242, 831], [176], [992], [895], [785], [281], [994], [378], [622], [374], [140], [414], [952], [678], [51], [321], [898], [586], [858], [602], [843], [440, 441], [285], [], [553, 493], [699], [109], [945], [948], [746], [293], [217], [223], [474], [42], [955], [332], [424, 423], [], [913], [678], [972], [131], [34], [850], [857], [619, 750, 846, 721], [769], [], [28], [742], [766], [836, 638, 639], [238], [744, 657], [233], [], [762, 923, 959], [], [135], [645], [964, 923], [559], [609], [78], [894], [800], [803], [636], [469], [167], [196], [247], [711], [275], [659, 959, 762, 923], [138], [730], [695], [992], [88], [], [407], [41, 44], [988], [239], [932], [152], [678], [156], [615], [601], [295], [925], [735], [639], [683], [822], [732], [], [665], [651], [859], [619, 846], [500, 825], [736], [388], [346], [183], [943], [152], [443], [479], [52], [150], [174], [911], [828], [281, 282, 539], [56], [595], [49], [699], [589], [817, 573], [80], [538], [130], [], [315], [917], [766], [498], [678], [617, 823, 153], [619, 846, 750, 721], [154], [930], [96], [289], [737, 455], [874], [308], [884], [898, 455, 680, 711, 968, 473, 826], [], [578], [795], [218], [693, 472], [375], [311], [137], [755], [566], [], [107], [606], [540], [774], [510], [911, 824], [392], [], [233], [570], [462], [88], [893], [763], [926], [142], [877], [371], [673, 681], [479], [975, 977], [163], [196, 837, 198, 836], [265], [416], [377], [256], [148], [397], [571], [876, 435], [380], [561], [243], [834], [932], [150], [585], [688], [382], [0], [322], [388], [946], [75], [473], [458], [375], [660], [687], [882], [583], [967, 968], [527], [255], [], [604], [937, 942], [], [249], [680], [250], [243], [62], [791], [62], [154], [73], [596], [754], [47], [], [488, 841, 843], [37], [18], [288, 290], [], [244], [224], [237], [12], [], [624, 453], [443], [727], [], [384], [327], [472], [257], [944], [787], [889, 486], [977, 978, 445], [334], [], [157], [412], [], [892], [26], [40], [815], [603], [265], [977, 978], [16], [547], [352], [49], [339], [608, 610], [349], [742], [401], [495], [], [509], [814], [146], [604], [341], [602], [578], [702], [996], [107], [95], [736, 515], [577, 641], [116], [44, 26], [276], [279], [558], [386], [748, 600], [133], [242], [616], [379], [850], [349], [552], [635], [384], [292], [798], [457], [995], [], [429], [109], [814], [895], [80], [], [723], [335], [810, 878], [449], [245], [159], [907], [209], [933], [80], [762, 959], [690], [728], [184], [522], [109], [208], [551], [984], [982], [138], [], [891], [], [428, 792], [51], [416], [636], [750, 721], [100], [114], [109], [670], [727], [511], [754], [300], [724], [703], [636], [481, 485, 632], [], [189], [460, 437], [621], [513], [150], [755], [875], [351], [759], [301], [202], [198], [324], [144], [119, 120], [171], [971], [620], [656], [305], [907], [113], [865], [270], [345], [706], [980], [], [479], [416], [180], [93], [199], [105], [94], [677, 587], [185], [], [394], [352], [550], [908], [31], [147], [884, 406], [928, 850], [557], [528], [148], [4], [278], [474], [919, 733], [650], [465], [279], [512], [841], [439], [56], [349], [], [747], [271], [740], [916], [112, 506], [100], [449], [319], [375], [513, 579, 881], [542], [300], [220], [496], [866], [645], [107], [816], [506], [32], [472], [850], [330, 331], [598], [653], [360], [179], [172], [175], [984], [806], [970, 915], [579], [544, 926], [226], [401], [117], [372], [], [335], [951], [750, 721], 
[491], [856], [165, 234], [743, 905], [898, 585], [566, 439], [488, 843], [987, 998], [987, 998], [899], [132], [571], [778], [543], [88], [924], [767], [569], [55, 59], [113], [542], [704], [44], [884, 532, 762, 923, 572], [459], [750], [29], [152, 157], [61], [], [437], [863], [875], [164], [722], [785], [927], [], [751], [364], [864], [250], [700], [554], [830], [794], [365], [219], [], [650, 558], [], [237, 180], [], [773], [295], [413], [177], [914], [563], [569], [303], [921], [], [670], [140], [738, 957], [274], [785, 180], [26], [311], [8], [945, 939, 943], [450], [754], [228], [239], [566], [561], [486, 889], [237], [874], [362], [264, 263], [662], [], [977], [199], [254, 262], [289], [304], [839, 718], [248, 250], [804], [900], [364], [182], [284, 861], [421], [65], [445], [916], [26], [709], [955], [135], [630], [421], [919], [217], [5], [790], [237], [997], [686], [31], [460], [88], [738, 421], [296], [45], [470], [825], [], [323], [956], [570], [352], [442, 494], [366], [311], [749], [87], [479, 817, 511], [894], [868, 470, 923], [213], [981], [347], [533], [483], [724, 536], [76], [395], [903], [367], [], [293], [780], [909], [342], [955], [803], [768], [], [948], [414, 478], [701], [777, 623], [758], [367], [543, 422], [708], [514], [488, 695], [692, 917], [836, 837, 977, 978], [915], [966], [437], [207], [85], [341], [232], [654], [263], [779], [394], [476], [], [367], [643], [741], [883], [412], [327], [758], [291], [936], [739], [560], [778], [141], [153], [890], [207], [734], [846, 619], [896, 999], [979], [570], [903], [109], [868, 987], [], [93], [890], [], [489, 15], [140], [570], [512], [770], [74], [529], [233], [669], [281], [72, 815], [312], [410], [440], [363], [231], [110], [992], [786], [765], [], [578], [619, 858], [351], [619], [905, 495], [857], [518, 652, 691, 570], [253], [799], [129], [686], [916], [100], [26], [299], [617], [745], [214], [], [577], [967], [963], [64, 55], [538], [22], [296], [709], [454, 652], [604], [428], [482], [53], [696], [544], [819], [546, 650, 819, 822, 542], [463, 925], [382], [], [], [362], [8], [726], [625, 554], [771], [717, 733], [767], [356], [554], [293], [396], [684], [235], [552, 733], [932], [3], [679], [507], [203], [398], [943, 945], [470], [50], [662], [936], [114], [508], [574], [846], [125], [628], [637], [358], [56], [], [576], [906], [309, 410], [873], [388], [728], [119], [864], [], [911], [239], [938], [745], [580], [576], [90], [405], [695], [215], [], [600], [519], [868, 532, 762, 923, 572], [436], [465], [372], [423], [878], [853], [696], [93], [976], [53], [360], [955], [942], [679], [], [252], [959], [426], [680], [585], [749], [393], [283], [601], [70], [], [448], [247], [711], [854], [638, 639], [507], [32], [805], [957], [795], [807], [838, 631], [960], [489], [820], [489], [204], [241, 238], [802], [364], [871], [228], [790], [174], [690], [], [540], [466], [], [886], [66], [481, 482], [734], [371], [785], [279], [636], [518], [167], [582], [679], [13], [915], [552], [878], [552], [489, 273], [617, 823], [111], [582, 790], [505], [314], [711], [419], [267], [719], [786], [838], [298, 357], [89], [13], [68], [441], [2, 3], [514], [64], [332], [547], [185, 186], [], [73], [643], [386, 101], [], [752], [647], [], [470], [343], [302], [181], [493], [831], [394], [39, 47], [232], [949], [638, 639], [745], [485, 761], [834, 630, 637], [224, 852, 205], [26], [992], [186], [769, 587], [579, 881], [850, 854], [507], [814], [19], [632, 851, 548], [875], [168, 159], [317], [912], [732], [747], [464], [566], 
[235], [105], [593], [71], [575], [218], [254], [167], [110], [267], [87], [172], [49, 50], [966, 572], [870], [64], [], [418, 767], [579], [538], [388], [651], [465], [166], [325], [574], [681, 810, 620, 508], [543], [978], [76], [538], [806], [993], [], [964], [104], [204], [696], [370], [610], [949, 647], [208], [558, 699, 541], [894], [164], [763], [428], [485], [514], [220], [211], [98], [399, 501], [114], [749, 542], [32, 30], [780], [395], [333], [626], [488], [841, 523, 412], [433], [566], [82], [], [145], [465], [652, 764], [66, 68], [643], [968], [896, 725], [122], [515, 869, 763], [967], [27], [121], [200], [308], [113], [243], [874], [997], [173], [444], [91], [145], [744, 812, 657], [43], [555], [555], [372], [545], [617, 823, 487], [368], [266], [419], [901], [470], [610], [608, 748], [3], [], [], [910], [386, 101], [486], [336], [760], [130], [513, 776, 875], [976], [132], [541, 542], [], [309], [407], [500], [101], [174], [535], [228], [794], [299], [906], [762, 554], [24], [115], [299], [], [809, 532, 923, 925, 926], [78], [652], [], [666], [451], [391], [784], [243], [924], [655], [], [609], [191], [607], [636], [318], [908, 404], [338], [57], [480], [374], [923], [505], [671, 898, 535], [682, 562], [70], [814], [548], [514], [478], [353], [185], [48], [328, 109], [436], [], [250], [398], [374], [385], [293], [48], [509], [462], [423], [981], [932, 415], [920], [796], [346], [247, 159], [550], [146], [652], [2], [497], [693], [256], [996], [277], [241], [423, 424, 831], [663], [462], [261], [213], [769], [440], [], [640], [228], [8], [450], [101], [948, 950, 957], [48], [57], [608, 523], [405], [805], [369], [133], [467], [], [232], [675, 208], [], [870], [322], [872, 622, 759], [123], [623], [49], [977, 978], [220], [877], [778], [22], [165], [719], [372], [95], [123], [337], [889], [776], [115], [574], [217], [938], [973], [887], [168, 178], [291], [888], [469, 919], [515], [669], [576, 693, 954], [913], [866], [375], [9], [236], [24], [369], [952], [923, 809, 947], [122], [584], [397], [806, 559, 463, 610], [84], [818], [], [394], [778], [619, 846, 721, 831], [212], [754], [245], [654], [244], [250], [156], [562], [933], [202], [910, 567], [764, 413], [], [427, 756], [155], [197, 199], [334], [24], [118], [110], [397], [420], [244], [640], [933], [228], [659, 952], [582, 680, 791], [463], [92], [138], [692, 960, 582], [77], [939], [38], [842, 433], [381], [174], [431], [987, 938, 923], [440], [971], [560], [424], [92], [508], [839], [698], [558], [729, 495], [711], [669], [811], [84], [744, 657], [426], [662], [26], [960, 931, 415], [799], [964, 813], [651], [813], [874], [366], [], [183], [738], [878], [975], [916], [149], [923], [451], [944], [761], [836, 837, 610, 870], [], [249], [960], [168], [610], [834, 588], [996], [747], [738], [892], [145], [426], [987, 998], [205, 213], [546, 650, 819], [239], [781], [673, 664, 526, 527, 782, 632, 508], [127], [726], [117], [562], [653], [404], [476], [9], [288], [531], [172, 177], [513, 715, 439], [586], [364], [453], [133], [180], [899, 619, 849], [553, 728], [836, 837, 885], [474], [858], [692], [252], [161], [525], [737], [487], [686], [73, 74], [339], [979], [810, 878], [230, 231], [426], [687], [835], [187], [998], [546], [37], [593], [990], [367], [508], [526], [36], [217], [], [473], [242], [207], [963], [750, 721], [563], [281, 282], [], [678], [742], [208], [18], [218], [212], [728], [367], [74], [520], [890], [570], [692], [275], [971], [428], [408], [442], [274], [702], [131], [849, 505], [994], [400, 667], 
[216], [501], [453, 624], [729], [844], [397], [], [], [], [987], [459], [173], [513], [650], [609], [581, 656], [865], [647, 659], [544], [870], [137], [522], [681, 620], [867], [817], [300], [675], [205], [463], [223], [], [52], [830], [443], [431], [893], [512], [461], [402], [41], [257], [750, 846, 721], [69], [127], [700], [759], [608], [384], [937], [298], [492], [362], [14], [958], [705], [827], [613], [427], [783], [673, 742, 526, 527, 782, 664, 508], [376], [577], [569], [894], [384], [262], [556], [162], [394], [898], [439], [48], [998], [188], [700], [459], [933], [985], [828], [399], [396], [], [801], [411], [769], [198], [829], [493], [632], [751, 479], [211], [222], [163], [979], [594], [189], [557], [927], [], [783], [114], [49], [885], [490, 524, 461], [893], [872], [128], [488], [], [472, 693], [694], [150], [796], [22], [608, 873, 414], [867], [372], [711], [900], [204], [175], [606], [392], [283], [692], [821], [], [20], [255], [880], [105], [710], [], [737], [183], [], [399], [462], [963], [816], [783], [38], [20], [809, 925], [18], [489], [809, 659, 925], [619, 846, 818], [916], [257], [526, 539, 588, 738, 883], [815], [360], [128], [608, 515], [138], [415], [987, 998], [499], [216], [], [804], [878], [505], [242], [598], [56], [865], [166, 167], [544], [677], [370], [558, 889], [487], [28], [678], [713, 742], [324, 946], [73, 74], [747], [584], [96], [382], [578], [161], [291], [940], [129], [929], [749], [], [349], [454], [44, 634], [362], [473], [552], [798], [87], [953], [676, 597], [643], [47], [915], [968, 534, 504], [896], [435, 876], [879], [563], [871], [], [59], [682], [645, 735], [], [487], [737], [], [43], [210], [103], [584], [595], [834, 906], [274], [], [860], [], [], [977, 978], [928, 659, 949, 927], [574], [630], [901, 725], [555], [503], [397], [414], [717], [727], [503, 828], [631], [783], [795, 862], [457], [27], [447], [365], [342], [], [48], [216], [724], [840], [934], [111, 114], [255], [544, 909, 469, 926], [93], [245], [563], [347], [814], [278], [810, 878], [185, 193], [313], [465], [358], [752], [457], [204], [5], [601], [937], [818], [490], [632], [559], [67, 54], [987, 998], [787], [741, 885], [], [221, 206], [252], [52], [546], [666], [749], [196], [724], [963], [], [955], [321], [223], [63], [759], [442, 497, 409], [47], [42, 44], [412], [637], [974], [388], [328], [162], [292], [825], [692], [192], [], [519, 478], [375], [918], [147], [992], [29], [173], [61, 62], [709], [889], [685], [109], [321], [580], [754], [], [315], [159], [772], [693], [349], [607], [], [699], [118], [305], [126], [], [], [606], [769], [387], [220], [55], [], [516], [470], [75], [48], [947], [126], [361], [494], [392], [780, 914, 536], [277], [268], [635], [274], [397], [394], [586], [703], [458], [402], [], [143], [949, 923], [177], [892], [478], [500], [451], [820], [958], [15], [113], [532, 762, 923, 572], [5], [995], [530], [258], [974], [661], [731], [140], [975], [421, 825], [863], [180], [739], [709], [548], [821], [653, 535], [866], [555], [765], [448], [336], [147], [207], [], [229], [67], [720], [822, 541, 542], [750], [651], [], [703], [953], [652, 847, 471], [612], [481, 485, 592, 605], [18], [681, 810, 620, 508], [59], [113], [524, 461], [977, 978], [261], [819, 541, 542], [153], [403], [910], [541, 542], [316], [558, 917, 921], [845, 638], [509], [768], [477], [704], [72], [470], [41], [775, 842, 977, 978, 445], [236, 237], [55], [153], [], [184, 202, 191], [442], [], [20], [920], [328], [23], [77], [994], [654], [489], [547], [], [308], [], [472], 
[64], [991], [], [723], [649], [99, 100], [970, 795], [681, 620], [727], [785], [486], [106], [], [137], [102], [705, 466, 799], [647], [], [395], [2], [61], [845], [894], [647], [843], [823], [472, 693], [336], [127], [], [153, 204], [], [685], [149], [851, 532, 831], [929, 227], [781], [329], [987, 998], [387], [721], [119], [502], [802], [373], [523], [398], [896, 999, 281, 700], [372], [369], [675], [261], [944, 946], [725, 572], [785], [461], [229], [568], [130], [59], [367], [752, 852], [358], [456], [555], [320], [957], [716], [846], [9], [], [630], [149], [], [515, 836, 559], [839], [198], [], [103], [696], [774, 614, 879], [157], [841, 825], [895], [], [476], [415], [509], [], [552], [70], [856], [36], [294], [225], [649], [840], [184], [489], [210], [133], [196], [307], [896, 876, 435], [694], [136], [710], [336], [400, 857, 667], [271, 277], [228], [699], [], [536], [347], [216, 716, 220], [203], [822, 542], [275], [714], [828], [571], [137], [6], [654], [679, 459], [663], [187], [260], [464], [670], [72], [612], [985], [5, 6], [471], [], [206], [244], [524], [971], [659], [642], [598], [264], [714], [156], [420], [420, 650, 402, 818, 819, 889], [129], [223], [903, 501], [479, 511], [612], [713], [720], [452], [283], [120], [836, 453, 837], [521, 962], [748, 636], [919], [251], [972, 23], [481], [594], [579], [171], [859], [769, 767], [26], [625], [306], [913], [236], [679], [], [152], [611], [], [490], [476], [376], [840], [249], [953], [938], [872], [507], [202, 189], [947, 997], [464], [627], [326], [865], [388], [], [870], [777, 596, 597, 763], [971], [197, 183], [811], [], [181], [1], [51], [194], [566], [855], [805], [635], [452], [58], [716], [752], [264], [345], [143], [619, 846], [441], [39], [179], [193], [917, 921], [538], [231], [466], [169], [776], [64], [484], [258], [275], [977, 978], [706, 423, 532, 923], [173], [277], [361], [536], [718, 510], [587], [859], [430], [977, 978, 853], [506, 733], [337], [986], [351], [679], [533], [666], [337], [350], [50], [968, 504], [852], [837, 703, 921], [674], [215], [755], [311], [88], [15], [253], [553], [616], [790], [963], [717], [822], [23], [], [786], [], [403], [], [732], [725], [72, 815], [], [394], [0], [333], [339], [461], [145], [903], [500], [977, 978], [23], [561], [921], [607], [708], [291], [292], [682], [617], [278], [957], [206, 221], [668], [40], [293], [594], [655], [344], [475], [142], [160], [469], [108], [780], [641], [229], [96], [88], [411], [249, 537], [849], [773], [108], [740, 587, 784, 477], [600, 823], [554], [770, 898, 649], [280], [248, 249, 250], [288], [340], [809, 943, 499], [972], [626], [255], [479], [396], [592], [169], [912, 339], [57], [586], [665], [687], [533], [850], [243], [], [956], [702], [408], [], [622], [778], [657], [429], [138], [455], [927], [985], [242, 703], [204], [0], [78], [677], [87], [421], [567], [706, 879], [140], [141], [159], [754], [113, 125], [790], [453, 740], [355], [491], [524], [5], [290], [139], [708], [917], [], [698], [609], [73], [545], [399], [7], [166], [347], [970], [790], [253], [440], [86], [954], [444], [902], [652], [37], [71], [338], [661], [790], [802], [], [455], [12], [394], [871], [828], [531], [852, 187], [740], [681, 810, 620, 508], [152], [834, 457, 906], [659], [], [122], [785], [484, 871], [362], [647], [199], [770], [859], [937], [440], [421], [424], [984], [660], [443], [759], [353], [665], [701], [137], [61], [784], [391], [], [311], [890], [812], [580], [571], [385], [237], [307], [376], [], [670], [821], [352], [328], [913], [836, 
703, 796], [216], [908], [], [160, 177], [136], [650], [290], [604], [529], [581], [805], [595], [368], [462], [953], [456, 341], [395], [437], [567], [492], [972], [271], [158], [273, 274], [], [548, 598], [112], [798], [524], [412], [314], [995], [512], [261], [681, 620, 527, 664, 508], [176], [522], [521], [52, 60], [286], [344], [309], [763], [173], [209], [278], [752], [], [900], [721], [923], [992], [891], [339], [569], [0], [190], [168], [380], [401], [13], [426], [342], [276], [673, 681, 526, 527, 664, 508], [770], [500], [489, 507], [375], [855, 828], [409, 892], [198], [335], [682], [835], [606], [499], [916], [907, 499, 411], [701], [640], [797], [815], [19], [520], [36, 37], [956], [169], [0], [54], [365], [962], [878], [854], [568], [629], [300], [744, 657], [728, 545], [366], [572], [923], [132], [992], [650, 558], [0], [218], [], [953], [496], [20], [502], [154], [482, 605], [165], [343], [991], [434], [], [518], [977, 978], [965, 440], [892], [677], [24], [249], [818, 437], [288], [91], [6], [619, 818], [612], [688], [601], [819], [923, 868], [], [221], [514], [631], [957], [489, 695], [731, 861, 999], [753], [262], [744, 657], [753], [919], [156], [147], [683, 699], [730], [771], [464], [315], [121], [962, 935], [209], [715, 524, 461], [779], [970], [263], [556], [995], [606], [673, 526, 527, 664, 508], [140], [540], [390, 973], [4], [379], [76], [957], [267], [626], [992], [526, 720], [158], [640], [211, 159], [405], [383], [879, 412], [272], [], [213], [828], [252], [570], [223], [112], [602], [457], [37], [125], [398], [477], [201], [903, 689, 601], [799], [375], [57], [515, 413], [744, 657], [], [697, 610], [538, 668], [674], [69], [428], [131], [508], [278], [898], [205], [643], [], [37], [964], [7], [379], [588], [834, 532, 505], [755], [548, 553, 527], [986], [484], [5], [370], [696], [], [400, 667], [267], [3, 147, 149], [186], [], [890], [416], [681, 620, 916, 664], [], [651], [116], [396], [235], [17], [92], [404], [504], [270], [492, 519], [643], [162], [939, 582], [643, 903], [720], [175], [197, 233], [927], [987], [834, 906], [342], [660], [], [136], [869, 655, 630, 539], [239], [795], [74, 815], [701], [374], [], [87], [], [134], [82], [420], [464, 763], [758], [485, 685, 475, 511], [158], [645], [396], [465], [366], [438], [154], [437], [271], [948], [53], [335], [995], [83], [915], [245], [251, 246], [546], [], [198], [95], [488], [763], [911, 701], [283], [837, 978, 890], [979], [], [650], [831], [], [990], [251], [887, 501, 439], [269], [852], [540], [573, 479], [959], [487], [99], [376], [], [550], [763], [8], [25], [396], [263], [424], [455], [965], [320], [731], [172], [320], [302], [896, 804, 434], [557, 472], [906], [644], [485, 592], [640], [340], [195], [310], [181], [127], [386], [842, 977], [721], [], [195, 179], [145], [191], [375], [931, 950, 954, 923], [], [847], [92], [601], [566], [851], [705], [113], [897], [360], [615], [666], [526, 765], [230], [606], [849, 505, 859], [7], [162], [112], [176], [148], [467], [725, 872], [26], [103], [141, 976], [268], [25], [935], [836, 837, 630], [27], [438], [82], [625], [], [382], [281], [599], [570], [479], [41], [230], [742], [], [788], [62], [769, 398], [146], [515], [453, 606], [39, 119], [211], [520, 669], [583], [377], [481, 482], [191], [804], [497], [843], [229], [343], [], [550], [241, 238], [195], [170, 177], [419], [139], [79], [565], [], [355], [310], [564], [898, 414, 608], [], [741], [406], [816], [846], [349], [669], [112], [131], [824], [888, 705], [6], [673, 526, 527, 782, 664, 508], 
[776, 650], [721, 831], [], [134], [182], [689, 819, 578, 488, 885], [888], [449], [140, 142], [760], [628], [984], [396], [913], [915], [], [100], [801], [], [204], [746], [216], [532], [501], [456], [473], [716], [], [200], [588], [997], [690], [697, 443, 828], [904], [669], [621], [170], [706, 879, 401, 762], [247], [259], [929], [220], [516], [25, 28], [569], [91], [398], [810, 508], [229], [957], [], [], [554], [192], [700], [570], [492], [675, 478], [614], [654], [736], [869, 671], [652], [372], [948], [403, 536], [674], [805], [454, 921], [445], [363], [16], [], [928, 949, 927], [253], [88], [208], [561], [839], [629], [842, 445], [252], [], [36, 37], [], [571], [], [815], [528], [354], [615], [81], [980], [884], [371], [11], [380], [], [606], [713], [207], [544], [796], [102], [902], [99], [24], [387], [886], [773], [370], [836, 970, 414], [201], [455], [764, 413], [917], [84], [246], [72], [534, 729], [780, 724], [88], [178], [321], [911, 533], [426], [232], [632], [], [154], [133], [729], [630], [809], [89], [218, 156], [804], [701], [644], [775], [832], [238], [362], [797], [584], [359], [668], [977, 978, 445], [622, 759], [], [], [958], [268], [34], [62], [443], [618, 809, 659], [547], [133], [208], [597], [810, 664, 527, 782, 508], [346], [283, 750], [735], [760], [440, 574], [866], [], [549], [802], [44, 60], [146], [260], [115], [40], [313], [930], [769, 587], [474], [957], [734], [301], [469], [807], [281], [289], [143], [290], [368], [589], [814], [792], [722], [], [583], [], [265], [], [666], [970, 795], [621], [26], [223], [451], [196, 198], [707], [313], [619, 750, 846, 721], [305], [29], [382], [340], [180, 243], [739], [977, 638, 639], [546], [514], [352], [368], [347], [770], [425], [608, 610], [685], [514], [811], [], [], [582], [], [475], [952], [928], [784], [607], [336], [945], [96], [785], [297, 295], [545], [4], [64], [44], [18], [801, 397, 983], [609, 977, 978], [208], [252], [358], [423], [130], [704], [424], [143], [286], [107], [93], [], [350], [866], [100], [719], [], [137], [129], [958], [], [809, 762, 923, 926], [170, 177], [855], [543], [803], [626], [248], [717], [450], [895], [178], [971], [423, 424], [923, 960], [530], [384], [422], [24], [370], [400, 667], [324], [984], [654], [302], [690, 345], [640], [184], [722], [805], [860], [100], [971], [141, 142], [159], [], [273], [394], [826], [709], [784], [428], [198], [286], [969], [221], [851, 548], [120], [358], [645], [939, 940, 943, 950], [543], [132], [159], [537], [234], [602], [51], [], [639], [330], [261], [533], [], [500], [926], [191], [346], [173], [], [3], [280, 278], [393], [374], [386], [791], [143], [422], [237], [755], [127], [410], [913], [176], [], [509], [85, 86], [927], [903, 526, 528, 782], [891], [971], [], [112], [985], [156], [188], [332], [487], [326], [744, 657, 403], [933], [556], [134], [206], [786], [235], [685], [957], [965], [255], [862], [827], [25], [779], [637], [542], [896, 434, 861], [495], [112], [344], [750, 564], [201], [632], [417], [19], [822], [441], [758], [352], [681, 620, 508], [840, 463], [], [144], [105], [134], [867], [57], [], [548, 851, 598, 632], [985], [628], [37], [648], [405], [332], [938], [319], [320], [71], [944], [936], [48], [95], [568], [441], [810], [586], [396], [8], [156], [45], [685], [734], [775], [167], [203], [822], [39], [111], [288], [638, 639], [463], [140], [802], [604], [975], [769, 418, 709], [896], [375], [247], [860], [52], [561], [], [943], [744], [814], [411], [317], [516, 520, 431], [63], [510], [386], [], [313], [859], [576], 
[469], [251], [863], [592], [447], [], [420], [8], [697], [873], [798], [365], [], [117, 62], [726], [606], [560], [], [692, 567], [969], [267, 852], [177], [410], [781], [77], [580], [670], [234, 165], [974], [61], [310], [828], [], [749], [607], [831], [954], [153], [577], [17], [280], [386, 101], [618], [263], [247], [984], [575], [105], [166], [912], [634], [581, 479, 656], [203], [26], [226], [774, 470], [788], [90], [516], [548], [220], [], [556], [725], [328], [525], [667], [238, 240], [875], [892], [934], [17], [612], [474, 911], [57], [914], [419], [490], [], [997], [294], [909], [214], [778], [772], [922], [30], [136], [153], [195], [415], [315], [435], [178], [828], [383], [277, 278], [207], [821], [712], [408], [], [536], [681, 620, 603], [898], [0, 133], [868, 415], [135], [209], [12], [251], [113], [537], [], [857], [541], [351], [806], [], [614], [553], [45], [382], [790, 998], [510], [497, 663], [526, 782, 851], [206], [536], [786], [257, 222], [939, 945], [838], [547, 820], [470], [701], [576], [917], [14], [], [736], [464], [966, 907], [979], [688], [548], [562], [686], [], [553, 851], [628], [14], [487], [215], [145], [183], [227], [839], [408], [968, 504], [988], [870], [1], [627], [905], [922], [766], [529, 692], [271], [852], [546, 453], [237], [259], [629], [655], [998, 987, 575], [515], [229], [427, 756], [869, 433], [650, 851, 541], [205], [757], [940], [159], [37], [845], [97], [284], [773], [140, 142], [838], [10], [373], [49], [492, 786], [91], [177], [146], [614], [779], [], [976], [458], [275], [791], [883], [775], [11], [334], [303], [120], [935], [], [792], [441], [285], [387], [568], [162], [83], [132], [678], [305], [684], [127], [701], [421, 818, 506], [836, 837, 552], [327], [325], [623], [189], [712], [638, 639], [284], [697], [838, 680, 631], [958], [758], [355], [907, 692], [12], [896, 434], [113], [137], [939, 943], [803], [245], [], [886], [986], [808], [377], [581, 479], [13], [24], [758], [898], [417, 701], [750], [28], [434], [991], [66], [257], [], [543], [617], [937], [904, 969], [289], [419], [472], [609, 586, 652], [333, 335, 760], [88], [393], [941], [697], [759], [368], [417, 869, 501], [320], [834, 977, 978, 982], [438], [275], [877], [520], [208], [245], [], [514, 664, 655], [311], [987], [462], [418], [399], [794, 799], [249], [18], [957], [76], [400, 667], [986], [653], [80], [615], [1], [458], [690], [468], [], [], [591], [152], [409, 826], [563], [750, 721], [939, 943], [196], [58], [48], [203], [], [518], [922], [35], [714], [], [665], [754], [210], [687], [851, 548], [198], [210, 852], [890], [127], [559, 594], [689, 443], [832], [487], [444], [55], [430], [663], [425], [808, 977, 978, 445], [], [377], [387], [916], [533], [], [], [828], [611], [884, 538], [697], [758], [316], [197], [392], [557, 751, 733, 479], [425], [715], [769], [410], [94], [391], [638, 639], [581, 479, 661], [821], [267], [365], [587, 499], [476], [584], [939], [820], [923], [39], [187], [947], [62], [654, 656], [884], [338], [557, 562], [938], [345], [32], [9], [438], [801], [404], [894, 281, 285], [896], [809, 659, 762], [346], [51], [4, 394], [586], [861], [455], [433, 793], [947], [874], [307], [445], [267], [493, 526], [953], [328], [949], [], [141], [386], [234, 165], [955], [967, 968], [667], [], [583], [162, 167], [180], [944], [959], [], [729], [], [106], [51], [646], [670], [698], [586], [120], [], [980], [730], [159], [973], [383], [713], [635], [], [281], [734], [938], [528], [315], [911], [598], [747], [178], [480], [925], [946], [546, 402, 819], 
[629], [], [970], [174], [689], [431], [867], [34], [760, 827], [466], [530], [131], [972], [28], [511], [475], [305], [712], [512], [950], [], [], [623], [605], [253], [809, 909, 926], [267], [10], [555], [466], [668], [225], [700, 999], [610, 823], [801, 973, 983], [259], [825], [683], [116], [350], [209], [327], [510], [406], [844], [644], [743], [], [519], [281], [681, 620], [385], [85], [759], [747], [766], [196, 198], [224, 223], [198], [559], [188], [66, 68], [744, 657], [], [919], [761], [756], [244], [543], [703], [719], [628], [217], [529, 219], [210], [487], [393, 108], [190], [127], [443], [296], [940], [431], [296], [744, 655, 657], [387], [804], [], [790], [710], [785], [531], [984], [667], [766], [], [218], [560], [843], [49], [638, 639], [9], [553], [233], [890, 445], [15], [], [484], [497], [782, 664, 810], [154], [787], [314, 861], [621], [730], [321], [673, 681, 810, 620, 526, 664, 508], [760], [706], [494], [591], [906], [382], [182], [239], [792], [487], [514], [696], [632], [], [946], [803], [792], [450], [], [185], [489], [], [73, 77], [529, 728], [501, 887], [650], [155], [629], [888], [132, 211], [425], [98], [77], [852], [645], [637], [956], [687], [25], [446], [632], [579], [505], [697], [286], [751], [390], [49, 50], [907], [450], [349], [], [782, 664], [456], [406], [274], [489], [331], [476], [825], [756], [877], [926], [], [], [523], [143], [254], [587], [868, 415], [387], [452], [716], [592], [366], [371], [594], [864, 586], [831, 967, 968, 608, 504], [885], [247], [546, 650, 819], [19], [355], [994], [769, 709], [223], [467], [584], [411, 678, 868], [107], [58], [698], [532, 762, 923, 572], [974], [], [521], [399], [412], [384, 375], [841], [20], [654], [844], [271], [331], [522], [499, 600], [39, 44], [314], [22], [736], [715], [759], [840], [729], [905, 532], [177, 490], [841], [845, 470], [], [640], [850, 823], [160], [471], [35, 49], [906], [79], [717, 581, 479], [801], [430], [934], [40, 44], [992], [894], [873], [37], [711], [80], [543], [107], [], [721], [732, 759], [330], [597, 763], [319], [548, 556, 851], [573], [224], [194], [538], [686], [384], [160], [414], [721], [392], [874], [], [592], [645], [280], [948, 949, 923], [260], [868, 923], [228], [881], [982], [771], [748], [], [740], [307], [708], [489, 852], [], [210, 211], [515], [531], [], [824], [], [680], [215], [984], [572], [148], [967, 923], [654], [707], [253], [], [746], [684], [333], [584], [185], [747], [635], [411], [455], [893], [165], [581, 518, 479, 661], [963], [], [877], [768], [139], [410, 309, 599], [137], [], [681, 620, 526], [644], [], [283], [113], [320], [283], [10], [], [632], [385], [664, 526, 527, 632], [631], [850], [537], [451], [611], [268], [634], [96], [149], [318], [61], [281], [427], [642], [291], [604], [], [991], [437], [254], [212], [995], [488, 439], [946], [857], [750], [840], [644], [378], [778], [], [108], [150], [59], [554, 628], [369], [993], [61], [342], [343], [44], [977, 978], [833], [451], [721, 831], [258], [544, 813, 910, 926, 469, 827], [750, 915], [143], [72], [612], [181], [12, 957], [198], [379], [745], [749], [206], [145], [515, 204], [486, 889], [668], [533], [573], [678, 518], [608, 872], [121], [119], [582], [43], [322], [562], [], [216], [697], [315], [921], [618, 623, 499], [188], [], [277], [305], [997], [], [977, 978], [889], [], [768], [359], [48], [840, 463], [356], [136], [981, 429], [780], [904], [145], [372], [555], [531], [332], [868], [832], [814], [548], [616], [423], [134], [317], [168], [616], [653], [439], [825], [421], 
[828], [27], [883], [987, 998], [397], [793], [418, 709], [], [542], [102], [40], [103], [27], [743], [448], [414], [], [664], [119], [199], [420], [940], [255], [281], [323], [383], [483], [255], [550], [874], [], [36], [637], [193, 187], [696, 806], [721], [471], [819, 822], [868, 923, 118], [40], [488, 843], [919], [325], [825], [341], [184], [264], [737, 907, 760, 440], [309], [404], [55], [491], [8], [531], [575], [912], [324], [168], [134], [13], [992], [683], [307], [855], [706, 559, 976], [], [618], [407], [842, 433], [269], [724], [90], [161], [715], [780, 914], [474], [476], [143], [736], [238], [476], [759, 475], [558], [268], [849], [151, 158], [232], [866], [526], [801, 691, 983, 570], [], [57], [279], [197], [103], [248], [418], [360], [403], [385, 386], [361], [], [109], [89], [721], [358, 359], [706, 846, 789, 765], [816], [360], [391], [256], [470], [748], [952], [0], [], [387], [71], [92], [674], [707, 709, 528], [], [936], [240, 241], [49, 50], [905], [369], [854], [385], [438], [19], [520], [223], [588], [363], [780], [150], [374], [333], [204], [939], [553], [716, 853], [47], [821], [918], [504, 957], [781], [], [731], [], [978], [755], [641], [], [432], [673, 681, 620], [813], [581, 479], [25], [737, 455], [784], [368], [], [506], [753], [917, 921], [773], [71], [894], [], [610, 543], [331], [554], [991], [44, 26], [66], [260], [849], [135], [785], [732, 759], [869], [418], [32], [630], [796], [970], [421], [], [659], [454, 917], [170], [563], [802], [500], [95], [255], [23], [852], [982], [39], [397], [239], [225], [701], [], [903, 691], [759], [72, 815], [518, 671], [518, 671], [16], [948], [], [484], [711], [615], [779, 414], [984], [38], [197], [855], [162], [908], [117], [944], [487], [953], [949], [243, 254], [431], [333], [560], [364], [354], [748, 667], [171], [127], [626], [640], [959], [535], [446], [], [904], [999, 700], [776, 683, 889], [774, 655, 831, 502], [263], [132], [441], [802], [342], [865, 207], [323], [96], [652, 413], [615], [], [815], [140, 142], [159], [317], [107], [], [182], [961], [], [4], [702], [673, 453, 526, 527, 664], [178], [923], [308], [191], [747], [], [911, 735], [453], [86], [170], [133], [142], [7], [], [403], [135], [884], [944], [336], [945], [], [880], [927], [24], [520], [27], [407], [492], [393], [16], [531], [970], [209], [489], [365], [647, 659], [958], [64], [727], [116], [574], [8], [225], [161], [489, 919], [99], [440], [], [872, 622, 759], [755], [956], [896], [941], [33], [], [17], [294], [42], [449, 975], [794], [874], [683], [121], [706], [742], [911, 533], [264], [963], [752], [564], [734], [561], [280], [932], [227, 805], [906], [], [147], [721], [540], [965], [673, 526, 527, 846, 831, 664, 508], [156], [29], [425], [325], [359], [535], [834, 906], [527, 664, 508], [], [366], [], [109, 973], [838], [570], [433], [143], [92], [735], [239], [874], [957], [170], [254], [400, 667], [721], [229], [405], [766], [18], [330], [515, 752], [422], [956], [], [], [74, 815], [888], [908, 895], [265], [363], [770], [670], [323], [794], [39], [40], [20], [369], [944], [831], [368], [942], [121], [406], [525], [284], [731], [37], [66, 68], [392, 973], [738, 427], [652, 413], [317], [918], [286], [822], [704], [860], [42], [808], [288, 290], [390], [577], [453, 799], [697], [638, 639], [868, 964], [783], [44], [], [773], [89], [151], [158, 263], [], [957], [902], [888], [732], [825], [472], [323], [490], [708, 682], [974], [530], [647], [258], [818], [], [224], [418], [528, 707], [396], [896], [667], [429], [298], [574], [540], 
[180], [137], [119], [167], [588], [920], [324], [291, 292], [459], [365], [235], [987, 998], [612], [147], [363], [103], [631], [339], [318], [476], [384], [603], [95], [805], [832], [818, 729], [683, 579, 731], [534], [670], [153], [726], [688], [433], [359], [310], [627], [659, 949, 923], [513, 776], [80], [583], [], [277], [874], [423], [789], [811], [79], [436, 581, 479], [350], [969, 504], [392], [69], [821], [625], [393], [74, 815], [], [411], [833], [144], [108], [644], [291], [14], [602], [653], [901], [26], [115], [612], [618, 666], [800], [], [609], [28], [441], [859], [449], [25], [806], [752], [30], [896], [], [867], [738], [310], [429], [748], [552, 716], [195], [601], [952], [691], [73], [641], [410], [192], [547], [210], [578, 982], [634], [467], [83], [451], [44], [210, 164], [692], [566], [737, 582], [710], [395], [966, 948, 923, 572], [104], [], [232], [211], [570], [232], [40], [862], [79], [345, 690], [213], [749], [812], [828], [358], [], [76], [291], [], [654], [351], [826], [245], [162], [425], [72], [550], [825], [32, 31], [971], [295], [493], [151], [], [327], [872, 759], [286], [834, 906], [342], [833], [292], [352], [284], [563], [607], [433, 793, 638, 639], [629], [513, 543], [306], [889], [222], [513], [223], [152], [273], [157], [443, 841], [532], [155], [363], [353], [235], [868], [406], [453, 454], [906], [781], [712, 719], [205], [273], [35], [797], [153], [554], [748], [320], [321], [908, 895], [810, 878], [524, 461], [], [], [97], [766], [324], [880], [5], [318], [999, 247], [558], [682, 708, 562], [81], [485], [371], [], [685], [167], [221], [827], [429], [591], [723], [168], [841], [771], [715], [249], [426], [776], [], [913], [209, 703], [77], [487], [], [280], [410], [453, 589], [869], [604], [702], [748], [993], [755, 733], [317], [444], [82], [440], [763], [876, 435, 282], [909], [646], [650, 402], [399], [488], [], [802], [732], [839, 718, 821], [572], [832], [], [], [581], [696], [13], [494], [434], [946], [301], [389], [863], [127], [735], [], [82], [534], [73], [522, 281], [791], [341], [329], [514], [614], [107], [79], [645], [964], [911, 735], [483, 975], [922], [716, 309, 599], [518], [155], [779], [888], [909, 987], [223], [98], [839], [480], [360], [164], [900], [703], [944], [418], [715], [102], [532, 411, 931, 933], [449], [841], [317], [313], [757], [336], [341], [], [94], [200, 155], [690], [], [], [306], [], [989], [348], [435], [376], [], [652, 764, 413, 734], [642], [883], [597], [780], [322], [649], [], [1], [13], [475], [], [130], [709], [529, 631], [242], [120], [681], [652, 413], [179], [447], [48], [673, 681, 526, 527, 664, 508], [888], [789], [584], [], [896, 414, 487], [528], [447], [762], [963], [893], [818, 610], [460], [774], [], [84], [891], [942], [673, 681, 526, 782, 664, 508], [240], [769], [884], [777], [491, 477], [285], [869], [732], [475], [918, 281], [892], [684], [800], [577], [641], [470], [424], [453], [602], [301], [294], [755], [731], [759], [61, 62], [794], [769, 302], [422], [708], [450], [350], [124], [326], [982], [479, 660, 511], [662, 719], [574], [887, 501], [152], [579], [627], [578, 689], [235], [538, 668], [548, 851, 632], [418, 845], [0], [596], [403], [34], [831], [376], [561], [359], [733], [890], [87], [202], [397], [67, 68], [205], [777], [992], [631], [295], [868], [582], [684], [205], [], [], [639], [432, 683], [462], [928, 960], [716], [419], [80], [432], [189], [577], [888, 821], [796], [792], [637], [606], [], [524, 461], [], [849, 883], [], [709], [265], [857], [213], [460], [162, 164], 
[377], [], [588, 790], [942, 952], [406], [994], [417], [798], [719], [258], [557, 914], [], [611], [773], [13], [752], [263], [621], [432], [471], [932], [706], [625], [213], [916], [813, 567], [597, 763], [747], [533], [632], [156], [862], [829], [971], [557], [289], [638, 639], [413], [], [400], [906], [234], [759], [37], [595], [99], [676], [843], [475], [116], [638, 639], [780], [518, 958, 671], [910], [812], [543], [489, 207], [616], [310, 301], [956], [692, 968], [938], [317], [950], [960, 868, 845, 927, 415], [63], [823], [403], [993], [329], [484], [486], [704], [113], [344], [953], [44], [552], [84], [80], [316], [987, 998], [918], [489, 315], [749], [459], [], [954], [17], [638], [613], [352], [612], [323], [397], [905, 846, 725], [241], [557, 497], [873], [125], [492], [911], [333, 151], [883], [527, 916, 664], [173], [798], [30], [320], [935], [625], [219], [39], [198], [652], [666], [387], [157], [919], [960, 967, 968, 504, 923], [560], [102], [138], [365], [923], [888], [208], [253], [650], [284], [783, 784], [556], [150], [0, 389, 758], [22], [395], [763], [199], [], [122], [740], [704], [537], [146], [], [429], [90], [772], [443], [196, 198], [531], [], [195], [796], [984], [], [696], [709], [543], [235], [375], [481], [690], [639], [593], [337], [544, 469], [957], [878], [141], [825], [941], [949], [857], [655, 721, 831], [410], [66, 68], [253], [624], [85], [116], [583], [248, 249], [707], [578], [45], [273], [32], [948], [922], [398], [167], [645], [224], [392], [664, 526, 782], [938, 923], [395], [544], [963], [903], [248, 249], [537], [382], [31], [828], [274], [270], [971], [367], [797], [267], [737], [843, 977, 978], [357], [6], [723], [975], [172], [404], [421], [452], [588, 790], [38], [315], [324], [987], [105], [770], [679], [], [66, 68, 54], [888], [553], [476], [910, 948], [673], [836, 837, 906], [497], [361], [860], [893], [440], [759], [218], [744, 657], [513, 875], [153], [928, 960, 923], [], [309], [724], [], [352], [156], [197, 199], [714], [56], [209], [110], [45], [815], [738], [91], [248, 537], [137], [599], [673, 453, 553, 526, 527, 664], [634], [947], [90], [324], [274], [824], [222], [542], [984], [748, 636], [545], [871], [], [444], [216, 219, 214], [581, 511], [3], [61], [150], [52], [108], [564], [681, 620, 284], [341], [570], [749], [133], [281, 282], [340], [752, 852], [48], [291], [723], [865], [346], [712], [621], [579, 881], [432, 566, 683], [174], [844], [291], [66], [266], [574], [], [69], [382], [649], [992], [774, 655], [416], [199], [42], [903, 786], [652, 764, 413], [616], [658, 533], [631], [949, 990], [106], [672, 797], [654], [133], [851, 548, 453], [281], [], [930], [256], [104], [746], [531], [382], [528], [403], [457, 834], [955], [976, 972, 437], [424], [849], [707], [349], [155], [184], [430], [782, 916, 664, 508], [586], [288], [460], [273], [133], [472], [289], [869, 454, 824], [334], [601, 578], [417, 971], [352], [81], [288], [873], [413], [334], [418], [171], [474], [113], [425], [137], [783], [422], [291], [603], [475], [673, 526, 527, 664], [525], [31], [71], [248, 250], [], [508], [57], [841], [137], [138], [252], [677], [578, 846, 689, 601], [956], [27], [], [466], [954], [576], [252], [724], [], [795], [544], [953], [954], [923, 470], [933], [123], [984], [286], [992], [733], [193], [810, 508], [893], [321], [703], [519], [443], [687], [482], [967], [252], [190], [756], [475], [461], [135], [755], [636], [299], [416], [112], [966, 883, 572], [172], [873], [], [800], [175, 184], [498], [98], [914], [656], [689], [172], 
[572], [479, 661], [657], [366], [949], [340], [107], [322], [721], [421], [0], [571], [532, 762], [899, 725], [295], [95], [486], [443], [147], [553], [939], [606], [188], [184], [479], [139], [909, 827, 926], [652, 465], [69], [641], [145], [218], [898], [320], [194], [8, 7], [778], [148], [529], [806], [636], [], [882], [645], [931], [], [643], [737, 455], [333], [388], [694], [455, 440], [836, 837], [4], [801], [], [201], [814], [281], [542], [42], [132], [645], [], [323], [30], [677], [454], [836, 837, 869, 636], [933], [419], [110], [969], [208], [545], [375], [467], [], [455], [899, 868], [550], [437], [723], [735], [224], [56], [724], [336, 337], [917], [953], [468], [183], [], [88], [622], [25], [899, 505], [543], [896], [484], [972], [966, 907, 572], [592], [882], [540], [936, 939, 943, 945], [], [436], [96], [632, 605], [297], [250], [182], [433, 445], [930], [173], [472, 693], [265], [187], [466], [578], [702], [23], [497, 442, 858], [256], [129], [337], [383], [954], [935, 910, 659], [40, 46], [726], [905, 526, 664, 831, 846, 721, 851, 750, 894], [792], [439], [955], [316], [996], [853], [985], [], [574], [], [934], [496, 245], [907, 532, 762], [128], [327], [588, 606], [752], [503], [172], [949], [177], [738, 580], [448], [729], [759], [15], [949, 950, 923, 957], [873], [833], [133], [434, 435], [248], [796, 911], [852], [], [688], [361], [], [370, 375], [517, 718], [968, 969], [444, 518], [575], [82], [], [], [112], [472], [], [476], [], [312], [191], [447], [486], [320], [417, 575], [676, 263], [806], [358, 359], [521], [429], [987, 335], [217], [238], [724], [875], [612], [453, 881], [199], [915], [], [330, 331], [950], [529], [870], [164], [864], [822, 541, 542], [931], [194], [73], [225], [448], [471], [379], [718, 839], [121], [729, 534], [476], [601], [259], [43], [903], [349], [], [], [374], [413], [572], [388], [], [949, 950], [716], [698], [67], [35], [241], [884], [298], [213], [655], [], [251, 212], [240, 241], [141], [244], [370], [944], [515, 238, 596], [34], [460, 975], [769], [277], [807], [912], [273], [957], [988], [710, 767], [696], [942, 658], [984], [39], [535], [988], [400, 667], [656, 475], [321], [27], [875], [497], [64], [530, 479], [414], [559], [888], [223], [314], [912, 425], [655], [107], [622, 784], [119], [673], [139], [], [], [850], [930, 934], [874, 757], [136], [112], [], [651], [193, 187], [603], [382], [780], [776, 819], [904], [], [283], [920], [270], [846], [637], [889], [976], [579, 881], [609], [961], [508], [436], [582], [802], [449, 718], [515, 552, 834], [10], [], [], [223], [288, 290], [47], [43], [], [866], [435], [797], [307], [995], [192], [70], [37], [367], [965], [702], [276], [194], [343], [19], [654], [496], [241, 238], [269], [515], [646], [], [143], [302], [111], [740, 783], [557], [629], [488, 679], [476], [464], [26], [464], [388], [234], [279], [685], [841, 728], [362], [174], [862], [720], [88], [763], [451], [512], [434], [585, 589], [16], [968, 659], [848], [908, 895], [787], [167], [591], [927, 928], [], [4], [1], [196], [455], [215], [545], [620, 499], [603], [451], [], [], [532], [737], [338], [296], [300], [73], [692, 772], [815], [106], [235], [660], [240], [343], [886], [265], [320], [172], [], [438], [886], [688], [546, 650, 402, 819], [884], [829], [610], [969, 508], [555], [697], [759, 872], [426], [610, 841, 697], [592], [277], [457], [235], [569], [614], [481], [215, 216], [752], [229], [204], [908], [572], [952], [249], [703], [832, 979], [364], [14], [383], [971], [821], [572, 415], [405], [615], [826], 
[634], [218], [42], [533], [645, 733], [206], [654], [207], [420], [489], [705], [287], [775], [235], [995], [121], [630], [558], [709], [697], [832], [898, 455], [813], [856, 958], [453], [], [454], [340], [281], [101], [148], [278], [908], [387], [570], [389], [], [412], [115], [305], [830], [836, 837], [802], [60], [915], [747], [872], [282], [154], [], [296], [295], [], [608, 882], [964], [288], [582, 791], [150], [402, 546], [446], [229], [87], [779], [651], [581], [752], [421], [871], [381], [633], [507], [646], [769], [438], [506], [401], [613], [832], [147], [573], [608, 824], [999], [987], [923], [487], [881], [236], [276], [], [40], [228], [486], [452, 245], [987, 998], [398], [632], [688], [854], [268], [62], [428], [884, 406], [376], [203], [800], [297], [235, 172], [581, 479], [764], [405, 839], [583], [798], [669], [759], [315, 462], [331], [402], [393], [616], [], [603], [], [28], [492], [522], [49], [713], [800], [973], [381], [962, 923], [307], [109], [], [759], [975, 703], [267], [962, 935], [94], [], [808], [449], [813], [248, 250], [500], [548], [128], [211], [21], [637], [], [478], [870], [993], [69], [715], [493], [925], [263], [861], [292], [485, 592], [789], [129], [86], [623], [566], [939], [868, 923, 572], [218], [57], [658], [870], [349], [393], [453, 463], [543], [554], [330], [220], [218], [855], [293], [665], [420], [788], [237, 158], [875], [673, 681, 526, 782, 664], [654, 757], [959, 762, 923], [448], [921], [499], [289], [884], [488], [], [198], [985], [777], [921], [], [250], [369], [], [], [443], [245], [608, 474], [685], [280], [255], [261], [673, 681, 620, 697], [], [413], [410, 309, 599], [750, 721, 414], [21], [338], [524, 461], [946], [353], [488, 695], [987, 567, 923], [154], [27], [382], [769], [583], [584], [726], [659], [484], [103], [120], [756], [453, 624, 765], [475], [326], [441], [397], [345], [472], [141], [385, 386], [836, 837, 678, 977, 978], [158], [873], [651], [968], [883], [], [31], [486], [871], [221], [867], [496], [496], [934], [11], [632], [244], [21], [806], [512, 623], [53], [619, 846], [756], [842, 445], [523], [87], [748], [463], [20], [443], [797, 282], [463], [962, 935], [335, 703], [85], [687], [679], [434], [783], [957], [887], [221], [9, 876, 435], [987, 998], [292], [359], [672], [431], [199], [409], [], [739], [907, 572, 966], [225], [], [321], [211], [346], [656], [942], [322], [129], [725], [581, 479, 817], [518, 570], [218], [429], [968], [244], [373], [22], [878], [421, 428, 834, 869, 501], [453, 454, 624], [38], [11], [28], [48], [837, 806], [836, 837], [649], [808, 978], [13], [254], [261], [375], [403], [972], [148], [383], [692, 478], [942], [248], [679], [240], [792], [608, 799], [560], [568], [492], [874], [721], [234, 236], [579], [36], [808, 822], [769, 35], [518], [227], [976], [940], [628], [], [578], [264], [77], [619, 846], [916], [589], [305], [595], [686], [868], [], [867], [892], [9], [326], [181], [851], [836, 837, 608], [819, 608], [740], [331], [416], [], [538, 668], [427], [149], [117], [923], [840, 462], [662], [383], [517, 488, 600], [515], [644], [748], [98], [435], [832], [912], [899, 725], [519], [933, 923], [853, 762], [305], [330], [], [32], [263, 264], [638, 639], [688], [270], [], [], [177], [515], [325], [], [805], [554], [12], [275], [275], [185], [631], [547], [513, 683], [666], [809, 967, 968], [219, 836, 837], [119], [84], [113], [578], [162], [727], [354], [607], [721, 831], [322], [256], [79], [871], [294], [560], [46, 47], [21], [35], [315], [659, 809], [59], [211], [169], [54], 
[834], [145], [780], [810, 878], [986], [682], [10], [617, 845], [], [281], [342], [], [587, 677], [246], [938], [784], [353], [54], [242], [381], [613], [303], [119], [199], [362], [911, 796], [924], [533], [921], [208, 211], [441], [21], [794], [94], [717], [571], [421], [619], [545, 846], [326], [829], [179, 180], [916], [723], [754], [579, 881], [234], [197], [978, 445], [953], [748], [517, 554, 625, 536, 510], [999], [513], [654], [12], [129], [650, 401, 402, 818, 819, 632], [833], [92], [], [], [191], [484], [344], [654, 656, 479], [235], [659], [536, 403], [402], [804, 631], [959], [849], [], [246], [665], [650, 593], [672], [248], [662], [], [294], [], [3], [975, 693, 472], [676], [261], [507], [223], [699], [422], [65], [932], [283, 478], [836, 837, 869, 650, 818, 819], [645], [992], [502, 539], [949], [], [258], [409, 437], [113], [13], [336], [265], [170], [179], [139], [836, 837], [579], [13], [1], [794], [143], [784], [609], [856], [937], [691], [89], [748], [896], [252], [120], [428, 954], [557, 701], [333], [517, 536], [640, 562], [677], [299], [40, 44], [896], [313], [792], [683, 432], [192], [], [546], [380], [518], [419, 617, 823], [661, 479], [], [610, 602], [721], [345], [], [721, 831, 281], [770, 703], [300], [354], [272], [890], [813], [264], [325], [738], [859], [747], [], [221], [100], [], [647], [735], [830], [821, 839], [348], [], [652, 764], [203], [68], [715], [723], [545], [414, 893], [955], [121], [952], [38, 44], [232], [275], [711], [], [513], [], [132], [], [589], [939, 943, 945], [525], [981], [565], [52], [655], [128, 127], [454, 919], [78], [836, 837], [689, 601], [139], [237], [913], [504], [8], [806, 655], [], [94], [207], [996, 109], [650, 402, 819], [580], [641], [673], [72], [118], [983], [524, 652, 465], [612], [254], [1], [790], [401], [941], [591], [3], [474], [570], [634], [554], [574], [488, 975], [357], [], [], [930], [431], [184, 189, 191], [81], [612], [950], [879], [957], [877], [992], [650, 402, 819], [386, 101], [630], [161], [983], [254], [194], [6], [293], [960], [822, 541], [874], [140], [393], [994], [434], [510], [590], [341], [968, 725, 504], [515, 610, 714, 402], [905, 532, 831, 799], [254], [836, 837, 445], [37], [281], [930, 868, 441, 762, 923], [36], [992], [915], [512], [615], [653, 728], [459], [852, 219], [], [281], [966, 503], [407], [532, 789], [812], [103], [910], [610], [940, 942], [485, 553, 632], [575], [450], [], [274], [66], [269, 272], [364], [740], [22], [237, 158], [717], [494], [416, 602], [951, 122], [307], [399], [998], [968, 849], [256, 205], [489, 93], [271], [257, 222], [0], [809], [801], [605], [695], [916], [760], [434, 756, 793], [757], [63], [483], [185], [201], [528], [61], [905], [529], [385], [136], [482], [448], [], [36], [90], [275], [800], [764], [602], [], [504], [], [762, 559], [245], [650, 558, 819], [353, 351], [53], [221], [17], [], [940, 941, 942], [193], [544], [699], [], [626], [372], [220], [225], [], [441], [527, 782, 664], [530], [88], [834, 906], [826], [62], [67, 68], [478], [978], [582], [673, 664, 527], [], [427], [899], [714, 402], [107], [884, 406], [412], [676], [283], [810, 878], [800], [281], [122], [390], [988], [918], [], [99], [325], [546, 650, 402, 819], [74], [287], [169], [160], [771], [53], [610], [169], [479, 751, 879, 817], [525], [440], [141], [150], [136], [708], [], [823], [239], [123], [740], [170], [166], [982], [58], [106], [89], [728, 858], [685], [581], [661], [569], [250], [564], [713], [401], [301], [533], [775], [478], [752, 852], [785], [423, 424, 585, 589, 
526, 782, 851, 664], [769, 515], [], [866, 595], [695], [206, 221], [513, 875], [106], [476], [4], [251], [939], [921], [437], [774], [359], [248], [880], [242], [748], [63], [837, 639], [174], [251], [265], [89], [469], [547, 565], [420], [988], [763, 597], [254], [42], [857], [54], [181], [967, 504], [107], [240, 239], [705], [154], [524], [993], [696], [905], [45], [640], [247], [835], [661], [551, 629], [700], [778], [118], [910], [488, 778, 600], [417], [690, 984, 345], [128], [530], [677, 783], [25], [876, 589, 207, 435], [146], [11], [271, 277], [593], [791], [673], [589], [794], [660], [518, 830], [270], [399], [], [862], [566], [832], [78], [282], [412, 335], [705], [474], [106], [557], [311], [569], [234], [215], [788], [133], [252, 262], [825], [], [513], [677], [777], [985], [204], [532, 572], [955], [29], [896, 804], [781], [367], [724], [13], [738, 580], [], [794], [], [487, 761], [314], [716], [541, 542], [699], [20], [], [389], [569], [923, 965], [608, 770], [554], [166], [225], [244], [62], [], [478], [463], [732], [595], [211], [584], [943], [30], [917], [726], [838], [808], [932, 478], [855], [541], [], [], [781, 557], [271], [803], [656], [445], [], [336], [210], [53], [609, 479], [319], [521], [415], [244], [11], [119], [233], [], [485, 754, 632], [776, 819], [462], [134], [419], [22], [309], [27], [511], [502], [681, 620, 526, 527, 782, 664, 508], [524, 461], [417], [610], [975], [951], [755], [510], [192, 463], [53], [603], [84], [161], [877], [971], [855], [343], [297], [168], [318], [214], [881], [453, 454, 624], [892], [717], [497], [320], [695], [104], [406], [991], [363], [825], [], [16], [], [791], [833], [155], [818], [515, 870], [684], [757], [367], [413], [], [194], [327], [306], [886], [752, 852], [954], [993], [382], [807], [311, 312], [96], [53], [827], [330], [338], [865], [694], [588], [25], [483, 979, 825], [848], [807], [525], [195], [22], [136], [774], [816], [231], [866], [567, 827], [], [7], [982], [343], [933], [958], [700], [921], [143], [709], [157], [680, 805], [223], [574], [133], [932], [721, 831], [], [852], [13], [155], [407], [535], [629], [750], [67], [245], [68], [220, 213], [521, 813, 909, 910, 567, 926], [625], [71], [809, 762, 923, 926], [673, 810, 508], [], [342], [217], [71], [785], [325], [990], [114], [589], [118], [277], [304], [738], [866], [572], [994, 116, 126], [13], [654], [], [529], [973], [696], [252], [899], [268], [190], [911], [544], [256], [426], [488, 826], [805], [624], [612], [], [142], [148], [720], [974], [748, 636], [376], [628], [92], [501], [866, 595], [110], [409, 892], [328], [341], [417, 616], [896, 861], [155], [711], [424], [939, 940, 941, 942, 943], [526], [411], [8, 84], [63], [485], [582], [547], [827], [928, 659], [321], [962, 923], [252], [488], [751], [22], [749], [723], [397], [21], [695], [609, 660], [803], [966], [640], [514], [252], [756], [489], [373], [500], [581, 479, 511], [923, 964], [430], [370], [971], [412], [917], [898], [283], [128], [302], [385], [], [655], [856, 958], [144], [653], [182], [988], [], [474], [433, 639], [905, 283], [583], [25], [333], [161], [348], [495], [836, 837, 906, 656, 785], [873], [405], [309, 599], [616, 843], [814], [645], [604], [223], [248, 250], [576], [102], [729], [275], [43], [64, 59], [523], [387], [991], [93], [246], [], [], [517], [453], [908, 812, 404], [835], [618], [107], [129], [575], [462], [765], [208], [311, 312], [960], [403, 536], [814], [376], [713], [991], [302], [329], [217], [], [40, 46], [199], [105], [753], [670], [482], [363], [516, 
520], [777], [484, 871], [479, 817], [208], [604], [230], [381], [474], [909], [984], [799], [441], [76], [669], [339], [441], [380], [924], [40], [825], [323], [950], [45], [800], [617, 720, 823], [234], [33], [39], [182], [832], [234], [287], [481, 626], [698], [431], [666], [806, 911, 658], [349], [9], [641], [57], [335], [253], [774], [865, 850], [473], [15], [506], [450], [372], [344], [832], [230, 231], [361], [783], [387], [92], [732], [936], [995], [], [625], [33], [892], [346], [712], [308], [175], [970, 795, 796], [75], [235], [132], [15], [287], [448], [302], [555], [118], [590, 605], [339], [339], [759], [498], [950, 951], [252], [433, 638, 639], [134], [535], [236], [740], [934], [701], [430], [970], [940], [555], [160], [505], [624, 453, 454], [337], [308, 309], [361], [418], [470], [977], [347], [750, 533], [708], [249], [643], [928, 960], [778], [373], [260], [432], [947], [865], [558], [307], [162], [109], [455, 760, 440], [440], [450], [], [423, 424, 585], [97], [79], [], [580], [469], [435], [545], [387], [673, 526, 527, 782, 664, 508], [404], [737, 898], [467], [], [709], [251], [660], [], [829], [518], [532, 470], [508], [357], [465], [42], [], [199, 588], [904], [393], [470], [546, 841], [212], [479], [489], [586, 652], [143], [797], [260], [858], [814], [674], [], [63], [106], [788], [905], [572], [424, 423], [695], [628], [62], [], [2], [38], [], [711, 721], [232], [64], [769], [794], [608, 610, 559], [833], [190], [98], [898], [862], [456], [798], [319], [892], [228], [108], [706], [402], [208], [233], [810, 508], [], [], [705], [828], [744, 657], [378], [75], [795], [254], [916], [690, 958, 345], [852], [553], [369], [232, 264], [793, 794], [269], [649], [532], [534], [431], [874], [114], [392], [562], [453], [432], [797], [756], [903, 585], [573], [722], [748], [553], [243], [750, 721], [499], [297], [897, 971], [645], [275], [666], [], [780], [773], [567], [286], [347], [77], [581, 479, 656], [250], [847], [910], [106], [937], [261], [355], [625], [149], [656], [505], [959], [808], [712], [996], [493], [421], [915], [264, 263], [], [487], [869], [867, 675], [615, 890], [467], [739], [833], [594], [618], [226], [313], [219], [399], [59, 64], [295], [683, 558, 432, 566], [371], [742], [242], [809, 532, 762, 923, 959], [424, 423], [520], [340], [], [205], [78], [712], [246], [327], [914], [605], [], [144], [788, 502], [879], [408, 575], [63], [136], [601], [447], [760], [486], [568], [178, 282], [], [963], [499], [560], [858], [779], [134], [572], [673, 526, 527, 664], [879, 689], [576], [803], [514, 515, 655], [993], [63], [894], [896, 495], [940, 942], [610], [964], [899], [869], [738], [453, 917, 921], [333], [163], [590], [976], [752], [619, 846, 721, 892, 831], [205], [806, 655, 630, 502], [918], [308], [], [776], [483], [570, 518], [354], [192], [407], [544], [443], [], [], [195], [973], [171], [580], [205], [344], [291], [568], [75], [734], [483], [298, 63], [712], [70], [704], [530], [417], [388], [602], [659], [422], [524], [20], [560], [529], [422], [544, 909, 762], [16], [559], [990], [101], [562], [313], [522], [658], [387], [932], [330], [610], [854], [914], [759], [], [373], [539], [439], [533], [252], [912], [261, 174], [248, 250], [689, 594, 601], [535], [431], [724], [656, 784], [100], [332], [416], [259], [268], [142], [962, 923], [982], [121], [961, 923], [592], [483], [180], [836, 837], [], [685], [408], [322], [695], [843], [], [], [195], [182], [133], [581, 479, 436, 535, 511], [420], [269], [30], [739], [811], [191], [], [352, 351], [5], 
[529], [928], [214], [214], [994], [153], [729], [], [936], [125], [37], [123], [550], [243], [541, 542], [849], [659], [125], [902], [936], [], [], [898, 918], [767], [], [971], [40, 46], [55], [130], [6], [879], [284], [214], [222], [402], [392], [], [215, 218], [237], [556], [380], [342], [757], [], [881, 486], [175], [330], [749], [38], [669], [993], [597], [48], [826], [923, 926], [52], [277], [479], [347], [966], [946], [544, 827], [691], [137, 146], [384], [663], [95], [197, 183], [185], [957], [784], [283], [535], [292], [238], [80], [466], [148], [705, 547], [673, 526, 527, 782, 664, 508], [883], [808], [300], [279], [432], [323], [53], [481], [836, 638, 639], [102], [821], [357], [393], [471], [447], [838], [451], [766], [950], [586, 977], [652], [724, 733], [705], [268], [897], [831], [804], [60, 62], [], [953], [740, 359], [926], [480], [993], [950], [867], [79], [486], [831, 282], [277], [], [255], [919], [799], [647], [168], [899, 901], [108], [228], [348], [805], [884], [934], [53], [426], [268], [994], [8], [849, 504, 505], [338], [110], [130], [354], [427], [711], [161], [156, 285], [505], [84], [839], [512], [884], [545], [118], [546], [715, 524, 787], [], [886], [514], [388], [41, 44], [91], [915], [916], [513, 650, 819], [563], [], [324], [909, 926], [152], [158], [170], [383], [831], [909, 849], [8], [375], [414], [], [119, 120], [69], [230, 231], [912, 716], [325], [59], [46], [268], [951], [666], [106], [], [685], [588], [992], [721], [798], [715], [458], [], [402], [95], [53], [560], [440, 441, 455], [], [374], [327], [128], [478], [513, 439], [746], [510], [526, 844, 742], [483], [280], [265], [932], [518], [499], [62], [203], [212], [318], [], [310], [291], [815], [695], [635], [70, 904], [485, 592], [803, 228], [293], [267], [917], [141], [52], [812], [351], [545], [24], [796], [485, 530], [480], [608], [530], [744, 657], [724], [498], [143], [570], [693, 472], [560], [194], [999, 281, 700], [783, 784], [676], [919], [727], [550], [573], [109, 973], [327], [], [787], [963], [425], [505], [368], [74, 815], [], [498, 854], [822], [258], [731], [861], [138], [626], [551], [312], [305], [372], [393], [321], [806, 831], [345], [185], [972], [269], [520, 669], [550], [379], [], [532], [818], [592], [697], [107], [21], [377], [445, 638, 639], [831], [472], [6], [], [852], [779], [472, 693], [224], [809, 659, 923], [732], [842, 638, 639], [155], [650], [303], [582, 519, 950], [731, 861], [34], [], [801, 445], [822], [155], [366], [815], [376], [593], [311], [55], [895], [750], [105], [839], [545], [626], [179], [423], [561], [596, 639], [636], [352], [152], [774], [371], [991], [844], [688], [840], [914], [850], [28], [640], [389], [137], [929], [204], [632], [245], [868, 923], [970, 795], [876], [762], [418, 487, 620], [996], [424], [803], [21], [409], [849], [158], [452, 911], [307], [331], [377], [651], [215], [658, 911], [867], [201, 254], [118], [914], [343], [894], [340], [925], [364], [], [279], [410], [424], [907], [146], [612], [669], [], [196], [674], [476], [4], [389, 391], [72], [927], [975], [157], [148], [], [476], [367], [970, 795], [494, 497, 442, 858], [658], [192], [332], [69], [497], [601], [2, 814], [978], [], [165], [673, 526, 527, 782, 664, 508], [807], [639], [199], [642], [340], [135], [446], [541], [363], [451], [309], [], [104], [487, 810, 590], [218], [492], [862], [905], [529, 977, 978], [333], [194], [], [650, 401, 402, 546, 559, 818, 819, 889], [545], [307], [609], [517], [205], [45], [477], [716], [36], [940], [17], [677], [244], [581, 479, 
511], [409, 892], [656], [791], [777], [147], [195], [219], [516], [546], [735], [954], [227], [359], [902], [216], [783], [471], [13], [161], [938], [427], [2], [66], [393], [10], [45], [783], [257], [520], [], [354], [479], [415], [771], [977, 978], [448], [502], [350], [741], [513], [361], [480, 886], [741, 884], [7], [329, 973], [161], [31], [936], [631], [738], [160], [403], [248, 250], [165], [979], [346], [847], [635, 767], [374], [739], [350], [763], [927], [813, 567], [700], [20], [292], [9], [960, 470, 923], [], [19], [], [318], [434], [803], [28], [879], [502], [554], [484], [630], [532, 923], [390], [123], [872], [678], [782, 664], [655], [851], [767], [479, 511], [519], [97], [144], [302], [231], [407], [602], [629], [96], [103], [805], [332], [865], [214], [384], [753], [895], [214], [951], [699], [255], [625], [421, 841], [292], [948], [731], [823], [728], [937], [118], [714], [551], [98], [903, 617], [87], [190], [878], [410], [611], [230, 231], [274], [513], [578, 834, 982], [234, 805], [154], [572], [983], [650, 541], [190], [379], [963], [], [794], [419], [445], [447], [408], [719], [900], [206], [260], [552], [859], [750], [928], [242], [602, 638, 639], [511, 479], [183], [462], [806], [962], [351], [756], [729], [416], [], [910], [778, 467], [570], [498], [427], [283, 284], [6], [65], [673, 526, 527, 782, 664, 508], [555], [175], [281], [236], [], [626], [508], [824], [535], [900], [673, 526, 527, 916, 664, 508], [980], [964], [910], [765], [920, 733], [141], [479], [466], [254], [411], [430], [404], [586, 437, 408], [956], [284], [121], [], [567, 411], [161], [953], [211], [116], [416], [13], [], [279], [866], [395], [203], [13], [466], [843], [254], [603], [572], [707], [507], [523, 830], [176], [388], [198], [709], [439], [258], [11], [367], [513], [189], [736], [573], [936], [969, 440, 572], [69], [836, 837, 842, 445], [846], [778], [107], [693], [495], [270], [942], [593], [742], [134], [336], [344], [572], [929, 912], [132], [341, 342], [416], [418], [361], [], [704, 656, 479], [891], [999, 692], [173], [269], [568], [858], [35, 37], [847], [314], [977, 978], [673, 526, 527, 782, 664, 508], [807], [747], [37], [706], [987], [612], [500], [867], [], [355], [444], [670], [873], [940, 941, 942], [927], [53], [567], [422], [650, 402], [900], [318], [778], [509], [726], [583], [404], [217], [124], [767], [383], [759], [720], [129], [], [408], [679], [968], [133], [], [], [], [766], [], [22], [425], [220], [897, 651, 760], [385], [346], [924], [62], [576], [454], [118], [832], [492, 493, 495], [379], [997], [192], [712], [842, 433, 638, 639], [697], [300], [720], [736], [400, 667], [965], [843], [779], [566], [682, 562], [403], [4], [], [291], [968], [5], [132], [405], [], [545], [843, 445], [6], [898], [140], [286], [559], [102], [], [172], [798], [78], [81], [70], [523, 869], [33], [], [194], [191], [973, 983], [891], [576], [977, 978], [], [525], [386], [76], [692], [905], [986], [587], [488, 679], [652, 413], [776], [328, 108], [658], [], [880], [842], [198], [915], [400], [457, 834], [], [328, 116], [588, 790], [649], [307], [570], [774], [303], [754], [274], [317], [192], [322], [435, 876], [183], [525], [770], [976], [743], [721], [535], [749], [444], [756], [473], [518], [67], [56], [95], [], [974], [780], [754, 605], [840], [583], [3], [], [784], [923, 964], [963], [908, 404], [411], [586], [456], [774], [79], [577], [61], [52], [991], [559], [582, 851], [700], [813], [111], [436], [483, 958], [967], [571], [917, 413], [522], [243], [992], [952], [145], [973], 
[798], [473], [749], [94], [], [47], [841, 918], [374], [152, 155], [680], [698, 538], [96], [417], [99], [738, 559], [912], [809, 923, 924], [499], [416], [616], [699], [332], [743], [233], [64], [489], [751, 468, 479], [701], [91], [964], [], [967, 968], [217], [452], [], [836, 837], [64], [357], [874], [236], [789], [187], [365], [195], [9], [778], [484], [28], [170], [], [753], [90], [684], [681, 620], [144], [106], [601], [141], [688], [46], [756], [195], [896], [148], [691], [309], [763], [307], [152], [], [5], [836, 837], [543], [], [732], [323], [219], [91], [879, 977, 978], [282], [154], [941], [351], [], [22], [503], [992], [122], [891], [74], [390], [43], [126], [304], [69], [71], [407], [195], [488, 600], [935], [56], [825], [975, 977, 979], [903], [271, 280], [182], [594], [23], [331], [879], [597], [987, 998], [199], [977, 978, 728], [300], [943], [834, 906], [], [792], [280], [811], [914], [545], [288], [179], [701], [411], [120], [448], [607], [506, 421], [687], [59], [74], [733], [767], [], [87], [278], [], [304], [], [174], [936], [408], [153], [245], [551], [156], [934], [606], [657], [791], [], [716], [142], [315], [], [409], [270], [434, 794], [57], [532], [979], [502], [774], [917], [616], [12], [39], [923], [594], [421], [77], [836, 837, 844], [494], [824, 474], [518, 665], [962, 923], [735], [148], [876, 435], [844], [158], [903], [763], [178], [439], [540], [992], [], [431], [94], [48], [909], [849], [233], [588, 790], [310], [354], [829], [11], [789], [712], [650, 819], [975, 671], [348], [889], [694], [892], [354, 349, 350], [880], [117], [901], [365], [842, 879, 977, 978], [224], [581, 479, 717], [587, 677], [679, 435, 578], [969], [856], [478], [168], [688], [], [274], [], [749], [984], [492], [128], [361], [453], [473], [292], [283], [100], [668], [644], [34], [11], [859], [416], [995], [945], [140], [366], [7], [345], [695], [24], [450], [699], [994], [675], [564], [731], [260], [658], [20], [184], [33], [460, 718, 150], [375], [360], [366], [810, 878], [735], [576], [116], [145], [670, 518], [405, 839], [309, 917, 599], [567, 827], [588], [712], [], [595], [988], [820], [451], [110], [490], [565], [442], [918], [200], [786], [261], [573], [521], [294], [448], [71], [386, 101], [548], [760], [585], [587, 784, 596, 477], [896, 804, 999, 794, 861], [12], [681, 620], [563], [185, 186], [595], [867], [474], [332], [215, 218], [], [661], [301], [500], [337], [997], [435, 876], [888], [43], [], [309], [567], [], [101], [85], [410], [758], [160], [896], [993], [939], [802, 518], [], [435], [332], [552], [76], [282, 797], [256], [834, 906], [692, 950], [658], [978, 824], [732], [709], [905, 589, 740], [875], [636], [406], [947], [896], [487, 681, 620, 916, 508], [334], [130], [513, 776, 683, 875], [0], [980], [411], [417], [871], [141], [], [558], [876, 435], [], [523], [210], [71], [59], [535], [726], [675, 580, 608, 889], [862], [490], [914], [858, 445], [603], [854, 406], [], [849], [638, 639], [610, 836, 837], [680, 750, 697], [349], [813], [689, 887], [545], [295], [589], [7], [656], [888], [], [194], [573], [164], [332], [420], [994, 114, 947], [939], [439], [729], [440], [66, 67], [356], [474], [], [696], [387], [842, 977, 978], [778], [261], [836, 837], [200], [867, 864], [677], [419], [990], [98], [739], [72], [359], [214], [977], [682], [836, 837], [], [407], [734], [224], [219], [], [52], [103], [716], [717], [916], [140], [912], [663], [911], [270], [335], [], [659, 923], [240], [759], [832], [975], [990], [427], [756, 792], [345], [799], [381], [287], 
[529, 823], [333], [681, 620, 526], [819], [617], [58], [239], [134], [666], [846, 750], [673, 664, 526, 527, 632, 508], [265], [418, 709], [297], [], [134], [349], [194], [538], [114], [458, 703], [755], [934, 692, 948], [559], [396], [57], [64, 55], [226], [977, 978], [208], [562], [306], [780], [135], [997], [481], [500], [406, 892], [237], [494], [738], [314], [424], [], [688], [139], [881], [217, 215], [564], [2], [256], [115], [302, 306], [225], [366], [578, 552, 689, 982], [547], [696], [31], [619, 846], [934], [619, 846], [308], [950, 954], [26], [411], [668], [400, 667], [], [545], [673, 526, 527, 782, 664, 508], [371], [227, 232], [350], [70], [283], [99], [365], [405], [12], [844], [107], [964], [360], [765], [596], [784], [418], [515, 230], [867, 569], [896, 804, 794, 861], [941], [], [922], [624], [761], [386], [641], [575], [693], [658, 760], [358], [615], [], [352], [714], [417], [111], [], [563], [297, 295], [417], [565], [674], [973], [967, 968, 504, 923], [561], [398], [449], [], [390], [281], [628], [531], [270], [533], [153], [608, 474], [807], [737], [528], [548], [742, 681, 620, 526], [11], [522], [277], [166], [893], [], [51], [4], [597], [422], [145], [232], [138], [884], [978, 611], [769], [625], [880], [392], [476], [658], [327], [524, 461], [557, 538, 698], [312], [358], [557], [904, 905], [431], [269, 272], [560], [299], [582, 948, 951], [784, 587, 477, 740], [606], [67], [970], [518], [809], [787], [24], [], [239], [888], [462], [431], [], [242], [], [708], [263], [261], [46], [617], [390], [921], [42], [881], [999], [447], [351], [418], [660, 436], [218], [349], [782, 664, 508], [70], [270], [728], [53], [593, 650], [103], [975, 693, 472], [94], [], [863], [770, 539], [989], [947], [5], [864], [121], [610], [215], [720], [511], [670], [915], [296], [778, 485], [183], [516, 905, 526, 493], [330], [788, 630], [741, 399], [4], [333], [733], [89], [536, 913, 724], [63], [706, 519, 428, 716], [283], [301], [617], [44], [899], [925], [907], [772], [987, 998], [959], [670], [435], [425], [354], [299], [257, 222], [248, 250], [581], [517], [750, 564, 669], [193], [729], [793], [800], [424], [41], [475], [659], [149], [659], [605], [277], [354], [701], [528], [281], [720], [349], [54], [710], [286], [4], [460], [418, 709, 767], [272], [281], [611], [236], [548, 493, 851], [423], [162, 676], [720], [], [520], [], [233], [867], [213], [827], [634], [489, 638, 639], [690, 345], [242], [494], [646], [672], [224], [85], [44], [553], [583], [103, 395], [344], [135], [770], [892], [251], [656, 879], [365], [578, 689], [605], [441], [772], [347], [900, 756], [746], [915], [390], [960], [269], [210], [489, 219], [959], [896], [625], [], [432], [236], [645], [670], [738], [911], [], [652, 465, 597, 413], [900], [296], [333], [468, 603], [239], [], [40, 46], [], [], [309], [576], [66, 68], [54], [277], [910], [551], [860], [581], [10], [829], [974], [414], [316], [520], [638], [281], [], [773], [343], [83], [], [], [], [670], [656], [122], [179], [506], [839, 405], [335], [964], [237], [543, 433, 445, 638, 639], [500], [529], [577], [497, 538], [690], [895], [70], [188], [791], [159], [757], [294], [297], [29], [911], [716, 757], [116], [292], [32], [234], [838, 720, 631], [991], [], [956], [758], [209], [656, 479], [481, 482], [615], [640], [889], [444], [941], [857], [452, 911], [514, 515], [28], [11], [519], [198], [725], [497], [], [996], [170], [935], [176], [10], [779], [35, 37], [728], [203], [962, 923], [119], [349], [384], [110], [805], [365], [75], [536], [938, 935], 
[496], [], [554], [721], [202], [125], [109], [854], [146], [232], [476], [498], [309], [504], [237], [686], [244], [145, 148], [236], [217], [], [878], [482, 548, 851, 598, 632], [354], [292], [744, 908], [213], [697], [877], [965], [863], [302], [642], [220], [923], [283], [], [722], [618], [661], [152], [64, 55], [851], [822], [525], [363], [186], [654, 757], [], [338], [511], [963], [608], [831], [926], [106], [655], [523], [294], [44], [94], [], [840], [638, 639], [936], [148], [69], [563], [97], [552], [37], [311], [], [583], [42, 44], [817], [589], [576], [318], [198], [400, 667], [979], [468, 479], [109], [897], [632], [357], [555], [372], [447], [762, 853], [976], [554], [162], [834, 652, 906], [225], [797], [708], [978], [624], [819, 541], [319], [], [275], [707], [184], [250], [681, 810, 620], [737], [939], [265, 267], [700], [570], [728], [13], [156], [907, 966], [952], [685], [422], [301], [131], [437], [865], [411], [259], [], [553], [433], [809], [208, 243], [299], [311], [744, 657], [267], [], [986], [34, 977], [548, 850, 851], [632], [64], [548], [110], [372], [828], [996], [611], [355, 489], [184], [64, 55], [273], [], [419], [], [193], [425], [562], [289], [359], [160], [513], [124], [937], [452], [610], [908, 895], [281], [965], [396], [973], [335], [387], [875], [642], [52], [486], [698], [], [361], [189], [901], [635], [238], [94], [25], [574], [639], [492], [], [822], [643], [], [505], [709], [157], [406], [194], [488], [610, 759, 794], [541, 542], [985], [105], [291], [744, 657], [832], [217], [989], [307], [147], [592], [170], [612], [108], [306], [314], [41, 46], [34], [324], [7], [31], [239], [753], [557, 733], [902], [324], [336], [720], [703], [378], [650], [652], [18], [578, 216], [763, 597], [827], [], [769], [673, 526, 527, 782, 664, 508], [683], [488, 616, 887], [681, 620], [234, 214], [267], [341, 342], [], [], [670, 655, 414], [298], [322], [681, 810, 620, 508], [939, 945], [576], [671], [], [806, 630], [27], [805], [149], [62], [518], [308], [615], [393, 973], [455], [422], [487], [379], [276], [17], [497], [217, 212], [4], [], [109], [779], [713], [841, 731], [16], [153], [988], [507], [40], [400, 667], [65], [679], [982], [428], [728], [522], [100], [358], [497], [594], [667], [], [42], [2, 3], [], [722], [247], [915], [26], [981], [79, 630], [298], [], [501], [614, 584], [], [79], [497], [733, 557], [589, 639], [553], [594], [821], [896, 910, 608], [670], [375], [524], [211], [983], [892], [172], [85], [318], [409, 892], [256], [405], [682], [517], [744, 652, 657, 471], [], [], [56], [992], [579], [917], [499], [195], [823], [966, 572], [67], [661], [], [205], [20], [632], [272], [582, 937, 938], [193], [596], [870, 825], [912, 348], [688], [285], [234, 236], [725], [944, 946], [184], [957], [453], [401], [320, 319], [657], [975], [139], [900], [948], [787], [756], [32, 26], [], [75], [460], [518], [501, 885], [564], [643], [635], [529], [77], [627], [378], [119], [858], [497], [575], [241, 238], [], [334], [976], [989], [774], [433], [617], [552], [248, 250], [961], [1], [884], [262], [438], [641], [686], [486], [239], [625], [533], [879], [193, 201], [423, 424], [421], [186], [208], [786], [968], [693], [140], [422], [713], [953], [623], [360], [958], [2], [263], [251], [169], [839], [72, 815], [672], [404], [169], [919], [215], [933], [550], [], [43], [162, 168], [136], [664], [244], [418], [396], [756], [604], [636], [28], [208], [942], [39, 43], [951], [19], [591, 850], [358, 359], [701], [512, 907, 950, 951, 954, 572], [111], [518], [17], [986], 
[554], [634], [20], [88], [882], [903], [128], [570], [421], [667], [210], [513], [], [122], [866], [177, 170], [663], [160], [378], [512, 473], [], [932], [149], [955], [], [548, 651, 831], [195], [765], [], [560], [], [199], [836, 837, 748], [578, 689, 885], [742], [51], [619, 818], [329], [853], [586], [], [41], [84], [129], [485, 592], [933], [926, 544], [309, 599], [987, 998], [243], [952], [662], [834, 906], [395], [996], [], [624, 453], [429], [298], [488, 858], [841, 823], [185], [745, 851, 598], [529], [525], [], [176], [608], [847], [429], [950], [385, 386], [816], [108], [326], [691], [977], [671], [219], [2], [], [166], [605], [52], [], [246], [243], [164], [362], [315], [584], [224], [], [542], [770, 841, 970], [679], [583], [528], [543], [742], [], [879], [664], [327], [301], [800], [209], [], [], [829], [608, 514, 610, 655], [119], [31], [316], [387], [487], [638, 639], [80], [950, 954], [348], [966, 720, 572], [171], [761], [531], [507], [255], [717, 479], [70], [797, 765], [], [212], [118], [187], [890], [781], [202], [123], [551], [273], [797], [448], [821], [769], [321], [463], [407], [144], [911], [44], [818], [554], [966, 907], [138], [427], [865, 610], [660, 799], [568], [529, 478], [951, 725], [27], [284], [332], [254], [281, 282], [422, 747], [521], [516, 520], [805, 261], [2, 3], [192], [5], [146], [406], [264, 263], [], [458], [854], [500], [608, 514, 515], [991], [778], [100], [293], [479], [996], [936], [340], [781], [765], [64, 55], [800], [453, 454, 624], [520], [287], [821, 839], [311, 312], [37], [376], [940], [535], [163], [182], [29], [768], [337], [], [973], [420], [], [596], [990], [536], [611], [396], [682], [932], [87], [], [801], [315], [743], [478, 722], [910], [929], [518, 414], [94], [92], [81], [47], [740], [593], [], [492], [164], [668], [332], [487], [596], [304], [244], [], [968], [155], [59, 916, 55], [330], [697], [904], [295], [29], [225], [746], [77], [238], [880], [100], [], [581], [521], [805], [67], [469], [172], [271], [937, 938], [370], [575], [495], [430], [75], [514], [557], [524], [563], [312, 311], [], [], [745], [374], [706], [621], [565], [428], [492], [644], [16], [269], [619], [273], [882], [334], [140, 142], [850, 282], [937], [770], [587, 784], [205], [983], [], [540], [284], [198, 199], [], [187], [399], [582, 948, 949, 950, 954], [215], [976], [], [783], [869], [539], [930, 582, 415], [39, 26], [337], [435], [361], [325], [677], [618, 926], [910], [57], [425], [912], [908], [578, 982, 571], [], [900], [371], [931], [940], [920], [505], [339], [], [581, 479, 717], [386, 101], [939], [280], [536, 628], [454, 655], [], [868, 951, 923], [892], [752, 852], [217], [952], [29], [448], [341], [211], [677, 587], [], [409, 892], [120], [186, 193], [62], [], [], [20, 13], [539], [744, 657], [413], [], [351], [], [11], [470], [326], [799], [849, 850], [567], [430], [301], [316], [222], [919], [969, 470, 923], [425], [182], [443], [301], [566], [299], [55], [299], [822], [842], [554], [575], [101], [994], [337], [309], [736, 762], [], [238], [518, 665], [313, 315], [875], [845], [816], [943, 953], [769], [393, 108], [83], [113], [557], [453], [242], [713], [], [133], [751, 979, 479], [211], [5], [100], [210], [567], [278], [333], [755], [765], [613, 810, 508], [942], [892], [740], [852], [181], [82], [], [310, 504], [956], [373], [49, 50], [635], [485, 754], [522], [], [458], [684], [571], [995], [], [571], [209], [755], [0], [226], [612], [540], [197, 198], [785], [572], [379], [], [833], [546, 650, 819], [626], [903], [806, 610], 
[282], [], [484], [943], [39], [801, 983], [888], [365], [926], [256], [897], [48], [718, 821], [220], [861], [433], [849], [854], [711, 631], [31], [682], [381], [81], [190], [442, 663], [218], [522], [926], [986], [185], [726], [362], [539], [638, 639], [581, 479], [863], [343], [697], [925], [565], [940], [618, 923], [641], [], [972, 825], [], [339], [992], [], [185], [914], [197], [717], [], [832], [76], [93], [], [718], [294], [844], [753], [], [668], [838], [232], [303], [176], [224], [125], [319], [64, 59], [75], [360], [204], [42], [913], [552], [909], [330], [471], [758], [156], [265, 267], [898], [857], [51], [145], [374], [928], [509], [12], [525], [894], [946], [], [840], [923], [804], [886, 440, 860], [661], [606], [789], [909, 987, 926], [841], [519], [176], [316], [177], [66, 68], [808, 515], [531], [388, 872], [243], [135], [684], [242, 159], [872], [606], [296, 427, 756], [678, 487, 854], [883], [904], [803], [520, 529], [581, 656, 479], [], [754], [749], [764], [372], [693], [549], [], [447], [143], [463], [25], [922], [160], [726], [992], [453, 454, 624, 402], [], [302], [765, 706], [812], [645], [140], [301], [159], [488], [307], [142], [449, 858, 733], [41], [836, 747], [272], [659], [177], [236], [664], [18], [772], [679], [654], [565], [549], [383], [728, 478], [970], [959], [735], [952], [15], [434], [687], [871], [217], [825], [358], [109], [495], [30], [853, 645], [805], [207], [165, 234], [894], [536], [215], [312], [392], [776], [610, 47], [505], [75], [393], [173], [720], [531], [], [487, 681, 590], [942], [129], [886], [284], [409], [298], [928], [724], [737], [604], [0], [0], [640], [232, 151], [410], [591], [680], [], [421], [717, 733, 479], [], [363], [210], [13], [219], [755], [263], [147], [287], [115], [491], [448], [780], [249, 250], [926], [], [761], [692], [303], [972], [836, 837, 958], [40, 46], [710], [293], [979], [173], [257], [681, 620], [749], [488], [288], [916], [941], [], [792], [154], [691], [], [], [640], [759], [611], [118], [], [63], [193, 235, 852], [871], [19], [400, 667], [896, 804, 999, 905, 861], [80], [433], [608, 414], [245], [880], [185], [292], [169], [85], [902], [], [567], [962], [649, 977, 978], [269], [427], [482], [382], [488, 723], [638], [505], [959], [364], [805], [497], [587, 596], [457, 834], [977, 150], [], [743], [145], [73, 77], [578, 689, 601], [168, 159], [830], [109], [766], [130], [763], [448], [993], [788], [491], [738, 944], [375], [435], [700, 999], [79], [146], [447], [269], [622], [420], [510], [578, 689], [283], [417], [673, 508], [186], [619, 846], [], [925], [467], [468], [180], [879, 912], [578, 601], [688], [102], [553], [483], [218, 156], [387], [196, 198], [487], [738, 428], [689], [323], [591], [], [9], [871], [749], [950, 951], [466], [615], [314], [615, 597], [609], [316], [488], [184], [128, 856], [669], [615], [249], [56, 472], [], [520], [189, 190], [822], [361], [537], [394], [417], [527], [242, 243], [], [385], [697], [158], [732], [172], [755], [], [132], [984], [550], [453, 454, 526], [910], [230], [771], [278], [31], [536], [586], [715], [909, 926], [97], [327], [122], [759], [157], [162], [], [732], [933], [649], [763], [788], [29], [598], [568], [422], [896, 804, 838, 585, 631], [822], [192, 193], [713], [586], [807], [75], [322], [120], [472], [737, 455], [588], [173, 958], [19], [349], [286], [701], [692], [194], [649], [769], [390, 395], [987, 935, 923], [47], [62], [570], [983], [130], [100], [519], [619, 846], [619, 846], [161], [768], [214], [254], [90], [234], [694], [311], [720], 
[], [780], [], [397], [], [349], [704], [628], [332], [337], [793], [757], [865, 850], [270], [], [989], [], [], [51], [49], [187], [254], [178], [], [245], [424], [13], [766], [584], [409, 892], [116], [17], [19], [613], [454], [751], [157], [994], [951], [111, 52], [997], [672], [77], [345], [581, 479], [30], [476], [587], [189], [550], [22], [0], [456], [200], [], [704], [49], [532, 923, 572], [], [313], [379], [420], [], [258], [28], [253], [606], [968, 504], [915], [950], [403], [535, 671], [378], [376], [565], [495], [], [414], [303], [546], [406, 887], [113], [105], [518], [164], [789, 539], [990], [938], [347], [740], [53], [172], [90], [59], [466], [906], [933], [53], [444], [140], [769, 709, 710, 767], [193], [230, 231], [561], [306], [], [614], [439, 764], [118], [808], [], [268], [577], [652, 413], [529], [367, 369], [], [492], [24], [681, 620], [137], [978], [627], [549], [136], [], [777], [182], [362], [329], [671], [1], [112], [883], [987], [703], [], [], [786], [536], [867], [104], [928], [235], [862], [828], [427], [929], [23], [958], [549], [43], [342], [971], [814], [140], [575], [552], [301], [676, 197], [430], [608, 977, 978], [303], [235], [544], [645], [807], [110], [114], [836, 976], [454], [419], [642], [581, 479, 817], [591], [79], [856], [177], [930, 844], [765], [496], [478], [231], [773], [97], [674], [991], [375], [102], [486, 650, 558, 819], [85], [109], [573], [78], [479], [401], [846], [268], [301], [892], [466], [], [497], [908], [577, 488], [308], [506], [497], [939, 943], [455], [977], [988], [89], [508], [554], [128], [30], [316], [12], [687], [423, 424], [], [553, 493], [19], [52], [76], [690], [872, 841], [553], [514], [548, 851], [374], [878], [896], [238], [45], [989], [763], [418, 720, 872, 759, 622], [18], [590], [684], [957], [673, 681, 526, 527, 782, 664, 508], [270, 279], [985], [895], [535], [129], [653], [932], [90], [331], [131], [346], [495], [495], [386, 101], [167, 212], [109, 828], [59], [293], [765], [217], [668], [653], [352], [118], [], [652, 413], [698], [568], [793], [932], [413, 670], [641], [822], [620, 508], [], [743], [202], [480], [981], [569], [61], [701], [417], [958], [535], [293], [], [753], [352], [609], [355], [553], [976], [292], [], [910], [509], [716, 637], [468], [858], [85], [511], [18], [692], [351], [382], [844], [939], [816], [], [704], [678], [342], [425], [194], [386], [153], [118], [799], [600], [452], [287], [630], [309], [613], [87], [647], [721], [578, 982, 703], [755], [475], [721], [19], [548], [869], [959], [57], [886], [453], [411], [302, 305], [923], [696, 463], [123], [109], [982], [818], [611], [152], [406], [745], [592], [950, 951], [442, 494], [593], [297, 295], [671], [42, 44], [994], [538], [556], [584], [92], [269], [938], [278], [64], [670], [364], [0], [], [844], [958], [813, 910, 954], [749], [881], [725], [743], [171, 172], [168], [372], [931, 790, 415], [908, 404], [251], [], [369], [58], [436, 479], [762, 532], [], [951], [30, 31], [715], [894], [867], [716], [], [440, 412], [513, 875, 822], [], [970, 795], [347], [937, 567], [427], [595], [915], [344], [679], [572, 966], [234], [288], [338], [654], [221, 206], [37], [986], [883], [312], [663], [387], [435], [294], [577], [], [649], [769], [837], [308], [570], [913], [779], [753], [955], [277], [363], [], [547, 820], [608, 597, 763], [850], [], [62], [287], [413], [], [155], [80], [908, 895], [407], [489, 781], [], [53], [435, 876], [460], [731], [558], [], [601], [186], [502], [140, 142], [535], [514], [489], [542], [87], [], [37], [319], 
[655], [339], [894], [579, 432, 819], [582], [173], [360, 337, 357], [340], [939, 943], [568], [932, 868], [865], [87], [916], [41], [387], [981], [818, 884], [849], [116], [352], [292], [147], [72], [536], [515, 764], [614, 966, 532, 762, 923, 572], [892], [715], [], [424], [327], [670], [673, 664, 526, 527, 508], [39, 46], [732], [383], [], [550], [320], [62], [], [617], [], [186], [963], [660], [96], [446], [393, 108], [3], [512], [709], [294], [], [295], [760], [561], [650, 479, 608, 609, 610], [839], [704], [117], [971], [188], [162], [30], [515], [547, 820], [439], [112], [521, 926], [797], [738], [129], [748], [], [821], [438], [], [939, 940], [355], [824], [629], [], [147], [472], [376], [782], [884], [639], [424], [981], [69], [701], [608, 824], [130], [30], [737, 920, 762], [526, 786], [666], [571], [132], [709, 696], [430], [758], [261], [428], [], [], [550], [], [875, 819], [644], [222], [221], [490], [101], [457, 617, 712, 633], [616], [311], [178], [430], [495], [995], [492], [], [512], [996], [537], [771], [894], [], [860], [709], [187], [264], [225], [483], [478], [933], [218], [915], [190], [754], [980], [], [405], [68], [557], [650], [496], [795], [779], [511], [138], [344], [748], [157], [], [184], [769, 418, 767], [240, 241, 238], [147], [893], [360], [391], [298], [806, 655], [156], [573], [], [410, 309, 599], [929], [240, 238], [619, 846], [617, 823], [625], [108, 991], [718], [626], [219], [691, 570, 958], [867], [512, 473], [638, 639], [439], [99], [926], [242, 243], [112], [397], [708, 682, 458, 439], [962, 659, 923], [719], [542], [853], [802], [107], [725], [132], [404], [420], [44], [373], [825], [583], [61], [475], [793], [920], [82], [67], [722], [168, 159], [298], [502], [861], [815], [311], [599], [111], [893], [908, 895], [371], [332], [557], [192], [346], [87], [25], [737], [534], [], [167], [937], [607], [156], [663], [169], [], [144], [899], [974], [684], [24], [575], [], [682], [286], [], [49, 50], [420], [635], [], [435], [806, 630], [16], [118], [352], [42], [14], [673, 674], [548], [755], [16], [145], [673, 742, 526, 527, 782, 664, 508], [979], [615], [404], [], [867], [259], [906], [800, 903, 552], [806], [15], [969], [807], [153], [625, 724], [852], [624], [12], [717], [261], [445], [203], [872, 759], [], [228], [711], [948], [825], [], [796], [861], [518, 842], [278, 280], [466], [327, 123], [363], [548, 851, 632], [588], [756], [579], [263], [577], [52], [722], [715], [554], [45], [110], [546, 714, 402], [922], [902], [608], [673, 968, 526, 504, 508], [], [255], [173], [986], [382], [568], [496], [87], [293], [468, 919], [608], [416], [372], [979], [376], [121], [815], [451], [768], [32, 30], [265], [715, 744], [114], [405], [652, 413], [704], [427], [229], [977, 775], [853], [809, 618, 659, 925], [750, 917, 697, 921], [171], [654], [951], [480], [], [973], [894], [354], [52], [341], [738], [793], [241], [96], [742], [677], [849], [396], [996], [572], [215], [295], [395], [679], [274], [245], [118], [816], [435, 631], [21], [892], [560], [], [144], [834, 906], [914], [533], [199], [576], [432], [71], [982], [186], [641], [165], [293], [391], [], [251], [902], [937, 939, 943, 950, 951, 954], [510], [290], [399, 728], [278], [587], [600], [397], [951], [248], [216], [625], [676], [], [840], [215], [900], [47], [167], [391], [698], [787], [302], [165], [604], [496], [290], [801], [715], [508], [516, 520], [39], [624, 453, 454], [903], [788], [373], [801, 329, 842], [679], [110], [430], [], [301], [289], [942], [705], [206], [810, 508], [985], [979], 
[246], [922], [820], [485, 754], [], [146], [269], [591, 434], [570], [], [49], [310], [455], [31], [658, 911], [198], [259], [943, 931, 933], [525], [438], [513], [691], [744, 657], [649, 487], [193], [535], [809, 909, 923, 926], [814], [635], [135], [953, 954], [465], [260, 232], [242], [685], [610, 836, 837], [516], [948], [373], [797], [], [61], [912], [897], [763], [], [191], [532], [931], [975], [162], [494], [644], [737], [629], [791], [801], [466], [532, 762], [716], [], [525], [339], [542], [521], [175], [339], [999, 159], [267], [326], [892], [880], [561], [131], [836, 837, 841, 610], [953], [218], [4], [581], [432], [470], [208], [4], [831], [668], [113], [107], [690], [579], [995], [106], [407], [425], [405], [538], [118], [368], [78], [434], [808, 642], [], [], [967], [331], [], [267], [234, 165], [199], [387], [444], [892], [883], [899], [41], [978], [104], [211], [51], [608, 630], [488], [648], [873], [199], [], [630], [127], [], [88], [363], [536], [888], [239], [802, 621], [483], [752], [532], [218], [564], [884], [655], [637], [38], [877], [877], [170], [611], [969, 659], [214], [320], [808], [692], [419], [591], [132], [167, 173], [434], [99, 100], [927], [95], [], [112], [449], [], [301], [74, 815, 309], [332], [508], [116], [20], [632, 851, 548], [81], [916], [15], [725], [194], [208], [77], [677], [355], [136], [779], [375], [298], [135], [212], [866], [410], [867], [190], [349], [507], [199], [140], [356], [222], [614], [615], [391], [964], [792], [353, 343], [851], [37], [831], [390], [980], [693], [93], [986], [471], [419], [], [371], [353], [238], [744], [], [192, 185], [729], [103], [768], [264, 171], [589], [994], [673, 664, 526, 527, 632, 761, 508], [286], [305], [733], [], [483], [237], [67], [], [379], [33], [7], [476], [378], [588], [746], [726], [234], [664, 851], [428], [116], [914], [759], [], [983], [172], [27], [410, 309, 599], [148], [285], [234, 177], [], [792, 834, 630], [89], [158], [752, 852], [741], [836, 837], [158], [378], [152], [669], [369, 381], [601], [231], [648, 720], [690], [608], [210], [344], [733], [610], [699], [512], [481], [], [340], [569], [], [], [731], [316], [44], [239], [455], [261, 230], [765], [], [945], [808], [], [662], [206, 221], [661], [650], [247], [810, 878], [606], [886], [208], [44], [], [133], [248], [679], [188], [], [587, 784, 477], [638, 639], [908, 404], [389], [503], [428], [303], [9], [994], [995], [162, 167], [501], [688], [974], [693], [923, 982, 762], [445], [563], [402, 546], [997, 947], [406], [144], [476], [354], [], [307], [518, 671], [65], [459], [831], [707], [15], [159], [129], [79], [207], [716], [483], [198], [171], [896, 804], [392], [223], [197], [961, 659], [258], [672, 797], [834, 457, 527, 664, 508], [410], [205], [775, 699], [486], [510], [806, 911, 496], [183], [524], [893], [829], [376], [11], [317], [976, 977, 978], [272], [529], [161], [727], [904], [474], [314], [780, 724], [935], [354], [863], [987, 998], [95], [], [948, 949], [], [836, 638, 639], [571], [49], [342], [178], [], [195], [292], [801], [515, 808], [191], [879], [235], [574], [593], [66], [505], [225], [], [907, 966], [625], [180], [466], [639], [380], [426], [945], [37], [161, 162], [103], [751], [611], [936], [759], [701], [943], [629], [714], [389], [224], [815], [601], [819], [655], [301], [408], [740], [831], [282], [984], [], [389], [564], [25], [960], [474], [688], [957], [97], [312], [443], [846], [941], [262], [492], [985], [414, 608], [507], [578, 495, 601], [275], [205], [588], [193, 187], [89], [224], [890], 
[497], [583], [239], [990], [367], [], [543], [480], [989], [520], [484], [249], [593], [349], [344], [897, 799], [968, 504], [901], [829], [508], [821], [364], [165], [871], [480], [212], [], [499], [617], [400, 667], [222], [338], [413], [], [290], [], [897], [], [397], [286], [721, 831], [952], [112], [582], [558, 541, 542], [], [483], [449], [], [980], [332], [136], [56], [716], [690, 345], [835], [768], [558, 432, 889], [141], [444], [270], [637], [749], [123], [572], [538, 727], [952], [600], [33], [419], [286], [186], [397], [797], [495], [997], [692, 623], [805], [2], [609], [793], [698], [991], [217], [259], [583], [273], [900], [500], [857], [461], [996], [7], [3, 147], [110], [752], [355], [757], [99], [646], [719], [378], [293], [773], [2, 3], [531], [896, 651, 827], [774, 608, 610], [288, 290], [716], [], [673, 526, 527, 782, 664, 508], [418], [803], [768], [348], [640], [365], [220], [402], [378], [], [948], [], [635], [291], [944], [730], [1], [308], [112], [165], [616], [254], [707], [532, 762, 572], [305], [209], [], [679], [733, 858], [], [965, 923], [800], [604], [104, 489], [441], [436], [465, 597, 734], [280], [164, 165], [480], [997], [402], [70], [767], [454], [171], [391], [282, 539], [474], [526, 527, 664, 508], [533], [595], [573], [511], [908], [176], [915], [197, 199], [530, 409], [], [810, 878], [783], [666], [538], [435], [850], [609], [71, 119], [], [], [671], [411], [535], [395], [231], [234], [249], [666], [888], [610], [997, 947], [314], [167], [557], [315], [473], [968, 504], [502], [3], [409, 892], [335], [859], [238], [581], [748], [450], [397], [737, 455, 440], [], [284], [727], [556, 827], [171], [480], [35], [384], [556], [940], [611], [447], [806], [463], [994], [594], [909, 567, 478], [999], [226], [35, 876], [73, 77], [127], [889], [69], [435], [237, 158], [466], [766], [308], [759], [994], [774, 655, 825], [698], [124], [538], [731], [484, 871], [30], [561], [441], [161], [832], [769], [898, 836, 837], [880, 518], [392], [51], [659, 923, 928, 945, 959], [280], [207], [429], [314], [566], [451], [547], [686], [972], [442], [473], [851, 633], [882], [235, 676], [157], [927], [972], [658, 824], [206], [960], [597], [], [620, 508], [460], [473], [718, 975, 437], [947], [615], [336], [815], [974], [707], [858], [849], [398], [780, 914], [363], [239], [908], [514, 788], [147], [25], [547], [697], [131], [600], [354], [165], [772], [572], [175], [399], [719], [338], [300], [655, 630], [968], [337, 943], [581, 479], [899], [815], [424], [330, 331], [48], [515, 420], [952], [288], [771], [341], [842], [562], [989], [], [730], [892], [324], [268], [974], [571], [550], [651, 412, 60, 868, 616], [770], [233], [758], [863], [618], [730], [842], [404], [264], [453], [272], [342], [294], [239], [114], [487], [824, 678], [608], [927], [969], [642], [], [542], [453], [880], [436], [355], [787], [128], [999, 700], [627], [581, 479, 817], [], [614], [873], [548], [543], [858], [465], [57], [29], [442, 858], [233], [988], [323], [255], [90], [630], [738], [170], [456], [7], [52], [868, 651, 659], [560], [685, 785], [], [383], [273], [339], [425], [609], [624], [968, 911, 849, 505], [74], [617], [966, 572], [317], [289], [610], [517, 600], [788], [989], [171], [11], [911, 658], [334], [187], [791], [458], [86], [], [333], [288], [949], [619, 846, 851], [641], [248], [733], [180], [667], [674], [639], [667], [230], [75], [479], [231], [747], [591], [157], [172], [410], [906], [677], [766], [], [420], [483], [], [26], [902], [113], [989], [270, 272], [597], [799], 
[86], [19], [456], [857], [396], [962, 923], [952], [500], [321], [526], [41], [679], [467], [334], [460], [573], [892], [607], [841], [470], [382], [918], [879], [133], [316], [581, 751, 468, 895, 479], [591], [545], [806, 459], [289], [784], [582], [130], [311], [214], [259], [932], [251], [358, 359], [], [470], [], [], [], [], [804, 503], [606], [32], [703], [612], [407], [305], [602], [681, 810, 620, 526, 508], [900], [], [339], [418], [433], [765], [], [618], [609], [932], [937], [535], [869], [981], [610], [122], [627], [], [118], [542], [175], [295], [692, 487], [56], [599], [793], [765], [23], [323], [551, 748, 629], [801, 570], [], [342], [69], [540], [259], [998], [], [797], [252], [568], [834], [], [96], [82], [486], [471], [320], [702], [921], [525], [], [690], [51], [113], [865], [919], [498], [], [325], [297], [606], [611], [496], [858], [136], [740, 756], [681, 620, 664, 526, 527, 632, 508], [548, 851], [652, 413], [886], [423], [857], [218, 156], [925], [], [353], [236], [216], [786], [488], [171, 172], [], [], [116], [666, 924], [649], [615], [686], [296], [242], [228], [668], [940], [891], [819], [279], [712], [459], [822], [777], [276], [702], [898], [884], [326], [472], [630], [932], [453], [130], [917], [], [555], [173], [973], [225], [931], [683, 594], [], [380], [192], [966], [138], [908], [53], [], [74], [144], [814], [516], [73], [845], [770, 608, 610], [298], [618], [104], [289], [850, 855], [484], [579], [57], [708, 887], [320], [929], [603], [109, 973], [5], [73], [668], [615, 652, 465, 413], [568], [649], [], [869], [105], [531], [135], [963], [366], [852], [468], [701], [740, 519], [985], [332], [524], [346], [336], [178], [2], [506], [300], [83], [251], [435, 151, 156], [853], [196], [434], [405], [911], [789], [251], [660, 557], [143], [306], [428], [], [619], [978, 638, 639], [156], [622], [387], [928, 960], [908], [508], [850], [436], [822], [298], [952], [408], [], [47], [573], [79], [168, 159], [633], [], [297, 295], [], [22], [], [], [512], [308], [433, 638, 639], [177], [32], [], [51], [105], [], [908], [189], [453, 454, 624], [816], [626], [975], [170], [825], [801, 838, 570], [749], [480], [510], [270], [476], [941], [900], [972, 437], [474], [170], [703], [330], [617, 823], [648], [910, 567], [953], [306], [104], [548, 453, 553, 851], [458], [309, 599], [273, 274], [341], [727], [149], [956], [477, 868, 623], [495], [792], [899], [674], [676], [677], [7], [], [72, 74], [90], [860], [677], [779], [750, 211], [868], [78], [189], [527], [253], [291], [385], [434], [687], [146, 147], [41], [548], [110], [757], [221], [692], [812, 908, 404], [834, 806, 630], [257, 222], [611], [831], [983], [281], [354], [650, 526], [355], [281], [33], [652, 465, 570, 413], [515], [385], [547], [614], [], [144], [169], [845], [915], [244], [], [40, 46], [662], [184], [958], [355], [304], [212], [63], [722], [819], [308], [882], [533], [467, 341], [659, 923], [250], [852], [], [979], [212], [939], [999, 905, 700], [610, 678], [226], [14], [99], [30], [751, 479], [453], [318], [830], [971, 502], [777, 524, 461, 596], [978, 445], [646], [911], [744, 657, 812], [257], [898], [275], [131], [547], [], [626], [335], [981], [410], [266], [343], [783], [434], [140, 142], [], [445], [557, 468, 733], [592], [738], [364], [508], [877], [448], [377], [233], [376], [627], [], [973], [997, 947], [575], [], [], [277], [351], [746], [836, 837, 605], [788], [284], [996], [542], [487], [550], [508], [69], [886], [528], [83], [583], [841], [673, 681, 620, 526, 527, 664, 508], [197], [540], 
[774, 977], [902], [863], [], [], [205], [], [881], [729], [463], [968, 504, 505], [271, 274], [191], [864], [], [264], [901], [], [762], [843], [853], [822, 541, 542], [], [214], [69], [264], [706], [418], [56], [53], [383], [504], [869, 445, 638], [461], [213], [], [709], [879], [554], [93], [333], [308], [958], [738], [479], [50], [861], [615], [833], [], [987, 998], [805], [870], [], [700], [611], [], [279], [492, 630], [487], [293], [460, 975, 437, 733], [685], [], [410], [854], [196, 198], [594], [656], [677, 587], [450], [858], [561], [773], [407], [691], [32], [490], [343], [769], [276], [144], [621], [452, 911, 658], [453, 885], [169], [308], [296], [407], [595], [453, 454, 921], [816], [476], [933], [576], [563], [369], [615], [842, 977, 978], [290], [440], [347], [206, 221], [785], [20], [919, 920], [488, 679, 714], [967, 968, 504], [814], [317], [681, 810, 620], [41], [190], [791], [431], [315], [766], [294], [942], [563], [788], [784, 923], [207], [113], [722], [111], [756], [475], [573], [520], [170, 177], [], [847], [929], [200, 155], [227], [674], [734], [52], [537, 248], [], [296], [738], [515], [760], [709], [928, 868, 923, 927], [253], [26], [611], [835], [], [305], [642], [188], [482], [], [852], [167], [352], [652], [379], [464], [649], [531], [446], [677], [887], [744, 657, 733], [], [330], [953], [589], [4], [831], [808], [616], [324], [457, 834, 906], [851], [838], [733], [], [155, 204], [794], [29], [709], [249], [364], [421], [583], [1], [820], [151], [341], [521], [296], [], [94], [572], [683], [536], [591], [532, 760], [383], [858], [7], [801, 983], [38, 44], [312, 314], [383], [79], [651], [323], [642, 542], [161], [494, 7], [70, 123], [556], [315], [990], [610, 750, 564, 697], [443, 411], [161], [19], [741], [586], [660], [263], [265], [400], [111], [610, 836, 837], [990], [976, 978], [709], [279], [295], [555], [158], [768, 610], [554], [408], [261], [211], [664], [502], [394], [439], [], [12], [893], [880], [338], [349], [656, 791], [79, 988], [574], [925], [604], [653], [966], [71], [], [721, 750], [], [], [265], [243], [89], [354], [], [], [260], [812], [298], [617], [427, 509], [792], [511], [365], [450], [503], [852], [851], [404], [757], [655], [756], [546, 650, 819, 542], [161], [118], [406], [42], [65], [484], [672], [825], [53], [914], [937], [756], [941], [769, 777], [498], [241, 238], [311], [90], [162], [534], [952], [185], [647], [393, 973], [141], [590], [433], [862], [394], [309], [987], [274], [616], [884, 406], [68], [617, 823], [324], [981, 429], [949, 951], [72], [973], [797], [920], [127], [363], [659], [], [132], [550], [705, 547], [46, 47], [50], [81, 82], [514], [239], [484, 871], [890], [932], [219], [284], [673, 664, 526, 527, 508], [48], [802], [68], [], [777], [954], [425], [775], [696], [450], [834, 906], [846], [544], [599, 951], [15], [835], [136], [205], [929], [931, 587, 792], [328], [829], [919], [984], [976], [453, 409], [396], [547], [683], [565], [260], [116], [187], [423], [697], [671], [54], [], [544], [308], [938], [190], [887, 406], [910], [649], [893], [367], [564], [327], [672], [441], [], [839], [313], [584], [203], [304], [560], [364], [948], [929], [309], [799], [565], [19], [630], [445], [607], [125], [746, 622], [634], [49], [362], [854], [840], [538], [869, 636], [817, 511, 479], [491], [118], [231], [519, 478], [230], [177], [141], [185], [791, 582], [80, 136], [286], [441], [], [517], [284], [421, 539], [83], [985, 324], [], [395], [21], [650, 822], [44, 26], [705, 489], [701], [351], [183], [771], [757], 
[679], [739], [992, 947], [565], [147], [270], [982], [21], [892], [], [745], [449], [776], [287], [163, 168], [965], [904, 981], [694], [777, 531, 587, 487], [835], [460], [604], [480], [72], [367], [260], [771], [20], [742], [814], [815], [476], [572], [67], [213], [824], [168], [163], [556], [761], [23], [90], [745], [619, 846], [80], [241], [96, 904], [709, 767], [532, 953, 762, 923], [471], [759], [407], [429], [419, 741], [390], [581], [908, 895], [834], [245], [162], [424, 423], [40], [283], [215], [446], [435], [126], [785], [997], [29], [183], [139], [428], [453, 526], [483], [909], [119, 120], [981], [574], [513], [], [154], [1], [248, 249, 250], [835], [], [557, 762, 733, 670], [280], [576], [310], [265, 266], [687], [122], [801, 973, 983], [676], [840], [567], [], [909], [350], [389], [142], [185], [296], [994], [652], [341], [169], [366], [579], [863], [185], [185], [119], [485], [796], [459, 445], [431], [625, 724, 540], [1], [164], [305, 302], [419], [407], [881], [931], [609], [216], [791], [185, 189], [977, 978], [], [500], [916], [218], [407], [778, 526], [631], [242], [489, 695], [882], [488, 671], [728], [982], [360], [177], [983], [354], [324], [463], [734], [513], [479, 661], [659], [899, 647], [702], [280], [492], [68], [655], [565], [410], [182], [560], [668], [207], [367], [549], [772], [], [674], [586], [132], [868, 966, 923], [472], [550], [882], [674], [687], [911, 824], [480, 707], [534], [525], [410, 599], [596], [145], [10], [548], [521], [223], [648], [814], [480], [643], [618, 813, 910], [872, 652, 413], [532], [401], [194], [518, 465, 597, 413], [849], [513], [10], [659], [34], [512], [96], [56], [513, 776, 875, 541], [520], [770, 788, 630, 502], [624], [84], [30], [330], [732], [466], [89], [866, 958], [116], [968, 504], [568, 765], [], [154], [449], [631], [996], [162], [884, 406], [642], [129], [970, 349], [814], [378], [560], [324], [510], [641], [581, 479], [351], [31], [556], [443], [537], [616], [898], [353], [79], [571], [902, 488], [964], [955], [], [418, 563], [945], [112], [], [730], [220], [384], [158], [610], [210], [966, 907, 572], [878], [125], [362], [119, 39], [722], [466], [286], [815], [150], [93], [898, 455], [368], [542], [363], [425], [703], [721], [583], [311], [232, 249], [866, 595], [], [243], [415], [73, 815], [902], [913], [33], [772, 488], [806], [368], [499], [54], [183], [480, 478], [864], [275], [], [593], [293], [666, 924], [850], [614, 696], [819, 854], [456], [495], [546], [560], [22], [217], [28], [616], [993], [974], [925], [218], [], [28], [69], [605], [832], [612], [512], [], [999], [62], [447], [994], [276], [489, 236], [812], [643], [921], [408], [], [], [292], [278], [286], [913], [957], [992, 528], [], [871], [249], [236], [417], [874], [38], [21], [505], [200, 204, 155], [115], [798], [230, 231], [895], [144], [288, 290], [455], [288], [488, 679], [102], [40], [587], [387], [315], [324], [375], [592], [64], [911, 658], [526], [218], [], [978, 638, 639], [539], [], [680, 697], [16], [317], [772], [675], [873], [86], [592], [47], [], [124], [619], [605], [942], [954, 950], [423, 424], [666], [475], [645], [863], [22], [442, 663, 858], [689, 601], [524], [321], [49], [528], [905], [742], [614, 697], [921], [533], [459], [894, 759], [521], [608], [104], [665], [915], [601], [135], [], [253], [356], [897, 851], [], [63], [791], [689], [24], [429], [136], [532], [373], [383], [80], [373], [874, 829], [638, 639], [748], [948, 950, 951], [177], [407], [379], [740], [349], [176], [353], [301], [626], [716], [236], [472], 
[310], [567], [661], [667], [650, 828], [467], [974], [51], [], [460, 974], [897], [153], [492], [386, 101], [221], [239], [556], [819, 854], [973], [251], [818, 920], [792], [409], [532, 831], [355], [708, 884], [205], [548, 526, 851, 532], [816], [470], [766], [881], [476], [579], [212], [910, 567], [950], [653], [282], [238, 240, 241], [62], [732], [668], [942], [999, 434, 861], [909], [55], [500], [217], [184], [969, 987], [240, 241], [914], [484], [32], [288], [290], [253], [63], [416], [999, 794], [261, 254], [336], [777], [312], [325], [], [245], [990], [231], [537], [774], [180], [582], [271, 277], [573], [455], [], [657], [50], [385], [], [15], [918], [118], [339], [816], [403], [549], [861], [820], [372], [230], [470], [670], [128], [569], [529], [317], [415], [], [553], [], [456], [], [986], [473], [730], [936], [237, 151], [388], [452], [120], [], [672], [260], [630], [685], [922], [931], [938], [], [103], [661], [94], [402], [577], [384], [613], [799], [768], [889], [748], [], [35], [680, 470], [704], [807], [], [], [499], [786], [28], [14], [468], [678], [396], [596], [83], [405], [574, 575], [551], [453], [957], [875], [666], [551], [305], [178], [926], [965], [235], [], [990], [967, 968], [464, 763, 597], [173], [654], [4], [819, 541, 542], [341], [660], [991], [145], [372], [58], [375], [119], [24], [388], [78], [959], [137], [434], [98], [676], [389], [209, 850], [84], [682], [707], [524, 461], [654, 656, 792], [236], [99], [365], [757], [954, 651], [210, 211], [256], [162], [895], [423], [216], [366], [201], [673, 742, 526, 527, 664, 508], [706], [211], [315], [426], [], [209, 805], [255], [654, 733], [], [866], [504], [645], [449, 976], [459, 655], [], [255], [681], [777], [321], [666], [401], [119], [801, 836, 842, 433, 638, 639], [548], [550], [261], [], [869, 652], [], [913], [596], [], [608, 610, 836, 837], [], [986], [845], [], [594], [608, 610, 903], [865], [54], [534], [297, 369], [391, 801, 983], [601], [746], [784], [996], [486], [673, 681, 810, 620, 508], [558], [620], [614], [82], [834, 650, 906], [609, 860], [903, 836, 837, 465, 501, 763], [534], [762], [300], [227], [483], [193], [441, 572], [343], [814], [8, 7], [650], [449, 975], [133], [819, 854], [863], [256], [65], [518], [683], [938], [449], [425], [921], [740], [186], [720], [681, 620], [393], [697, 589], [169], [886], [153], [712], [968, 504], [95], [205], [59], [673, 526, 527, 662, 664, 508], [137], [658], [5], [918], [719], [], [949, 923], [744, 657], [961], [862], [378], [694], [815], [505], [], [86], [268], [397], [375], [306], [742], [902], [778], [605], [252], [518], [196], [863], [581], [388], [232], [378], [947], [764, 413], [251], [475], [], [57], [50], [933], [321], [690], [329], [500], [854], [679], [393], [882], [595], [942], [144], [549], [976], [424, 423], [317], [], [825, 858, 958], [502], [740, 459], [309, 599], [632], [378], [311], [40, 44], [12], [647], [78], [260], [788], [464, 950, 954], [493], [644], [992], [160], [891], [399], [567], [836, 837], [604], [293], [836, 837], [223], [449], [289], [171], [742], [191, 189], [153], [467], [720], [353], [987], [907, 892], [643], [829], [924], [624, 453], [546], [374], [419], [980], [793], [640], [611], [350], [91], [588, 790], [488, 679], [867], [], [573], [809], [708], [378], [252], [130], [168, 211], [740], [824], [816], [382], [329], [987, 998], [42], [536, 517, 510], [149], [288, 290], [337], [334], [901], [521], [], [667], [518], [64], [100], [823], [], [310], [617], [197], [693], [548, 664, 526, 851], [], [547], [], [41, 44], 
[707, 528], [306], [262], [922], [], [32, 30], [331], [951], [428], [618, 659, 926], [479], [64, 55], [385], [448], [680], [882], [536], [832], [346], [82], [380], [981, 429], [791], [940], [920], [181], [258], [806, 630], [477], [721], [329], [509], [195], [455], [544], [], [222], [929], [516], [383], [43], [814], [472, 693], [652], [13], [528], [419], [300], [207], [417], [140], [581], [70], [746], [61], [579], [703], [88], [680], [778], [159], [330], [178], [809, 659, 923], [621], [265, 266], [], [710], [], [487, 681, 620, 281], [994], [144], [313], [382], [63], [], [524, 461], [38], [400, 667], [336], [943, 923], [869], [303], [486], [265], [479], [838], [967], [929], [579], [578, 689, 562, 601], [186], [878], [395], [801, 983], [352, 351], [541], [283], [235], [111], [842, 978], [898], [389], [144], [711], [65], [386], [947, 997], [382], [707, 484, 914], [468], [581, 734, 479], [643], [767], [546], [756], [607], [336], [755], [630], [619], [985], [578], [546, 650, 819], [277, 278], [929], [613], [592], [820], [313], [250], [604], [740], [319], [391], [366], [327], [45], [248], [560], [507], [908, 404], [859], [605], [55], [410], [522], [92], [195], [314], [], [909, 469], [902], [812], [259, 526], [726], [513], [962], [976, 150], [986], [349], [273], [], [965], [923], [683], [673, 681, 620, 526, 527, 664, 508], [269], [700], [468], [28], [], [679, 488], [601], [383], [347], [416], [762], [763], [518, 616], [44], [970], [116], [], [835], [808], [614], [6], [], [353], [351], [406], [382], [881], [643], [232, 267], [717], [863], [757], [], [326], [101, 386], [308], [548, 851, 598, 632], [346], [923, 959], [271], [771], [864], [561], [563], [682, 698], [487], [125], [543], [432], [543], [110], [968, 849], [890], [399], [524, 461], [381], [], [973], [165], [393], [648], [758], [271], [530], [804, 631], [], [542], [222], [922], [785], [109], [495, 532, 729], [599], [239], [304], [138], [], [266, 570], [137], [617], [], [949], [299], [579, 881], [327, 328, 112], [99], [503], [954], [780], [806], [683, 819], [310], [813], [962], [107], [488, 695], [990], [621], [770], [21], [66, 68], [361], [132], [83], [888], [912], [834, 755], [304], [400], [864], [296], [649], [83], [568, 869], [418], [532], [296], [393], [527, 664], [187], [564], [926], [394], [154], [453, 454], [730], [278], [879], [919], [], [354], [202], [1], [28], [802], [264], [553, 526], [989], [581], [984], [810, 508], [195], [163], [65, 973], [383], [315], [512], [293], [824], [629], [223], [673, 742, 681, 620, 526, 527, 664, 508], [420], [139], [46], [330], [325], [910], [832], [782, 851], [129], [237], [301], [500], [854], [180], [774], [955], [507], [898, 711], [486], [935], [524, 461], [226], [405], [554], [435, 876], [901], [532, 762, 923, 572], [800], [300], [82], [973], [18], [893], [584], [913], [902], [156], [296], [247], [798], [653], [755], [893], [405], [534, 729], [796], [611], [457], [327], [56], [700], [680], [889], [806], [322], [825], [412], [36], [33], [673, 619, 526, 527, 782, 846, 664, 508], [407], [994], [564], [907], [847], [406], [225], [324], [806], [393], [122], [501, 568], [640], [155], [708], [331], [312], [309, 599], [41], [174], [604], [707], [286], [972, 976], [760], [946], [930], [849], [558], [589], [594], [60], [817, 479], [342], [580], [651], [56], [175], [733], [665], [7], [445], [444], [385], [376], [721, 636, 831], [690, 471], [258], [843], [725], [575, 479], [172], [954], [577], [70], [373], [409], [198], [774, 655, 703], [770, 543], [512], [619, 846], [610], [783], [907, 883, 532], [245], 
[857], [30], [508], [545], [269], [967, 504], [232], [223], [640], [], [192, 185, 186], [275], [45], [463], [392], [209], [337], [947], [], [100], [221], [685], [458], [771], [914], [14], [878], [325], [737], [281], [308, 79], [380], [585], [601], [281], [896, 435, 794], [761], [23], [666], [642], [155], [375], [681, 810, 620, 508], [525], [82], [995], [973], [415], [155], [912], [], [809], [895], [781], [147], [16], [860], [830], [239], [82], [297], [297], [29], [916], [82], [487], [808], [739], [921], [495, 532, 725], [496, 765], [164], [514], [357], [174], [559], [820], [477], [661], [436], [834, 630], [448], [248], [27], [540], [523, 414], [175, 189], [1], [985], [128], [646], [235], [722], [553], [661], [801], [940], [90], [], [586], [356], [341], [981], [579, 401, 881], [318], [72, 815], [71], [661], [756], [310], [293], [354], [438], [181], [23], [828], [989], [578, 982], [467], [289], [595], [569], [788], [370], [44], [241], [660, 733], [741], [], [800], [669], [850], [973, 983], [673, 681, 620, 526, 664], [348], [448], [592], [890], [81], [845, 720, 692], [341], [507], [954], [367], [364], [158], [697], [35], [520, 516], [727], [243], [489, 270], [546, 650, 819, 542], [566], [75], [615], [140], [706, 765], [236], [472], [83], [987, 998], [533], [], [319], [658], [832], [111], [605], [543], [973, 801, 983], [124], [365], [320], [616], [526, 527, 782, 664, 673, 508], [], [736], [447], [621], [830], [979], [145], [420], [653], [357], [355], [], [109], [], [298], [612], [642], [], [15], [479, 511], [42], [7], [897], [794], [705, 547], [571], [428], [233], [916, 664], [681, 620, 508], [46], [522], [229], [124], [609], [924], [399, 501], [156], [926], [258], [688], [808], [411], [751, 479], [510], [651], [302], [851], [], [864], [787], [103], [652], [], [], [40], [280], [606], [836, 837, 869], [868, 987, 809, 923], [975], [850], [551, 629], [729], [975, 703], [155, 204], [887], [981, 781], [703], [920, 717], [920, 414], [872, 818, 759], [816], [673], [669], [339], [636], [498], [392], [545], [592], [34], [38], [481, 453, 485, 632], [966], [429], [], [388], [], [836, 837, 775], [617], [464], [255], [375], [115], [195, 790], [479, 656], [213], [603], [711], [293], [822, 871], [35], [273], [875], [314], [], [86], [144], [856], [548, 851, 598, 632, 281, 285], [908, 718, 888], [659], [572, 966], [213], [849], [905], [215], [805], [872], [496], [766], [713], [36], [304], [821], [724], [182], [88], [652], [846], [150], [375], [71], [311], [725], [189, 191], [97], [542], [650, 818, 819, 822], [], [689, 501], [909, 926], [400, 667], [214], [103], [132], [191, 189], [950, 951], [259], [489], [577], [769], [617, 731, 823], [113], [927], [456], [103], [528], [], [], [203], [673, 508], [222], [822], [59], [270], [300], [111], [455, 907, 440], [766], [298], [835], [711], [670], [264, 253], [762, 532], [222], [589], [901], [15], [63], [424], [714], [292], [232, 217], [807, 654], [130], [984], [919], [928, 949], [129], [757], [371], [394], [84], [738], [421], [980], [341, 719], [388], [454, 487, 728], [229], [856], [495], [275], [812], [], [636], [64], [], [152], [95], [649], [92], [92], [710], [], [439], [908], [616], [694], [890], [822, 542], [770, 478], [270], [], [], [688], [578, 601, 982], [772], [243], [180], [482], [804], [417], [134], [526], [838], [987, 998], [486], [800], [33], [48], [904], [567, 827], [645], [236], [223], [910], [798], [842], [697], [904], [784], [56], [442], [949], [273], [67, 68], [948], [], [56], [152], [511, 479], [135], [], [548, 851], [228], [701], [451], [322], [209, 
805], [520], [218], [173, 180], [437], [56], [14], [775], [871], [75], [], [920], [499, 955], [997], [874], [958], [], [613, 526, 527, 664], [858], [182, 185], [401], [238, 239], [156], [49, 50], [591], [730], [33], [682], [115, 327], [469], [66, 68], [174], [], [404], [134], [636], [284], [995], [], [571], [946], [642], [517], [457], [258], [601], [50], [903], [590], [175], [649], [551], [654], [3], [], [], [642], [769], [690], [753], [981], [440, 441, 572], [581], [877], [43], [593], [372], [197], [727], [347], [275], [887], [211, 210], [834, 906, 762, 638], [344], [773], [719], [], [856], [252], [439, 461, 465], [849, 505], [715, 251], [789], [659, 760], [967], [505], [], [511], [34, 977, 978], [765], [233], [132], [539], [175], [736], [893], [119], [22], [447, 600], [], [829], [459, 445], [555, 652], [620, 681, 470], [772, 679], [463], [208, 282], [785], [121], [195, 151], [466], [939], [828], [643], [664, 527, 761], [123], [674], [715, 764], [77, 319], [85], [834], [677], [801, 842, 433, 983], [951], [537], [722], [104], [713], [954], [267], [154], [678], [443], [247], [945], [814], [495], [], [701], [590], [946], [996], [], [59], [137], [230, 231], [], [659], [923], [591], [713], [795], [328], [], [656, 479], [735], [201], [485], [923], [298], [31], [624, 453, 454], [759], [994], [234], [431], [922], [562, 663, 442], [528], [149], [208], [115], [699], [660], [452, 793], [634], [118], [672], [], [779], [999], [190], [192], [858], [171], [], [], [143], [146], [518], [877], [87], [263], [104], [742, 553], [356], [346], [767], [295, 297], [485, 664, 526, 527, 851, 632], [499], [576], [529], [774, 655, 836, 837, 636], [618], [156], [133], [793], [796], [24], [], [41], [251], [196], [696], [884, 406], [], [309, 599], [579], [892], [124], [460, 718, 468, 733], [515], [618], [997], [260], [280], [12], [714], [904], [879], [41], [149, 150], [], [], [145], [685], [311], [800], [817, 751, 479], [369], [723], [896], [734], [356], [88], [692], [633], [957], [393], [862], [905], [578, 585], [172], [566], [126], [515], [323], [113], [230], [814], [688], [250], [191], [70], [318], [429], [84], [584], [81], [511, 479], [29], [308], [147], [682], [669], [243], [934, 567], [904], [858], [331], [750, 726], [906], [440], [939], [164], [91], [28], [154], [740, 477], [882], [366], [574], [], [699], [661], [662], [521], [472, 693], [155], [896], [774, 655], [931], [461], [327], [915], [89], [734], [246, 159], [488], [358], [546, 921], [49], [788], [], [446], [59], [919], [], [63], [505], [478], [], [832], [178], [64], [528], [363], [674], [427, 756], [752, 852], [456], [356], [], [822], [581, 479], [690], [], [864], [997], [849], [35], [33], [233], [915], [470], [987], [697], [195, 245], [719], [857], [248, 249, 250], [896, 859, 495, 827], [281], [668], [808], [198], [177, 170, 676], [104], [598], [477], [735], [135], [415], [361], [5], [86], [], [156], [600, 769], [777, 623], [], [385], [992], [314], [986], [964], [684], [642, 542], [710], [349], [267], [194], [400], [659], [565], [945, 943], [], [279], [12], [860], [466], [769, 455], [274], [967], [74], [281], [576], [915], [821], [619, 846], [52], [602], [911], [455], [993], [], [386], [218, 215], [841], [971], [267], [378], [166], [977, 978], [], [382], [474], [717], [946, 309], [], [385], [252], [977, 978], [909], [29], [137], [554], [564], [670], [67], [137], [612], [], [183], [25], [751], [46], [146], [], [803], [854], [75], [], [611], [498], [329], [276], [309], [723], [255], [334], [568], [263], [131], [456], [63], [73, 74, 815], [802], [557, 442], 
[585, 838], [605], [834, 906], [996], [776], [564], [349], [393], [832], [], [395], [673, 681, 526, 527, 664, 508], [20], [711, 563], [256, 676], [404], [235, 658], [196, 198], [907, 572, 966], [322], [982], [349], [175], [109], [896], [761], [514, 788], [400, 667], [879], [434], [], [859, 651, 760, 827], [], [695], [585], [851], [245], [], [430], [479], [663], [723], [648], [257, 249, 222], [276], [178], [774], [690], [937], [923], [802], [191], [220], [306], [508], [54], [237], [785], [452], [349], [914], [], [], [581, 479], [136], [214], [951], [178], [943], [455], [24], [105], [497], [298], [333], [596, 763, 764], [], [918], [227], [576], [557], [331], [905], [458], [489], [709], [490], [547], [611], [306], [905], [967, 968], [79], [944], [323], [199], [819, 566], [734], [633], [680], [942], [410], [342], [950], [827], [503], [339], [624, 453], [236], [134], [150], [879], [670, 628], [367, 379], [4], [801], [246], [], [103], [638, 639], [28], [150], [923], [855], [323], [52], [289], [895], [950], [], [2, 148], [473], [750, 282], [789], [630], [459], [717], [83], [55], [83], [842, 457], [168, 159], [738], [741], [724], [243], [655, 445, 638, 639], [701], [64], [786], [532, 762], [323], [504], [265], [360], [600, 825], [403], [945], [89], [361], [673, 508], [170], [706], [570], [481], [13], [488, 843], [898], [22], [340], [714], [444], [696], [219], [416], [742], [718, 536], [960], [65], [939, 943], [770, 806], [570], [968, 504, 415], [], [91], [821], [39], [503], [616], [834, 522], [844], [507], [430], [577], [93], [168], [640, 868], [649, 825], [45], [207], [641], [473], [12], [325], [192], [712, 790], [467], [526, 786], [], [881], [958], [164, 166, 167], [], [814, 970, 484], [685], [181], [830], [610, 838], [451], [181], [180], [756], [103], [320], [683], [2, 3], [553], [14], [306], [831, 968, 846, 619, 504], [394], [703], [926], [756], [379], [458], [336], [853], [267], [141], [679], [25], [273], [165], [69], [809], [113], [114], [148], [], [340], [137], [291, 340], [149], [9], [574], [781], [834, 400, 667], [196], [], [954], [436], [695], [945], [767], [350], [140], [698], [292], [810, 878], [315], [243], [774], [780], [], [232], [723], [993], [351], [6, 842], [658], [838, 434], [], [40], [77], [492], [48, 49], [23], [], [512], [161], [121], [351], [], [748], [32], [184, 189], [812], [315], [274], [106], [684], [367], [577], [121], [628], [444], [281, 282], [263, 230], [724], [213], [678], [388], [573, 518], [679], [682], [424], [930, 963, 868, 923, 813, 415], [473], [886], [421], [300], [908], [174], [529], [172], [614], [726], [605], [225], [453], [497, 406], [936, 909, 926], [791], [243], [66], [337], [684], [767], [299], [840], [74], [834, 630], [188], [971], [382], [669], [276], [30], [787], [166], [989], [347], [661, 479], [470], [625], [908, 404], [358], [519, 907], [147], [205, 197], [528], [480], [734], [549], [421], [588], [170], [229], [885], [826], [570], [596], [539], [908], [425], [934], [700], [82], [118], [184], [43], [87], [487], [80], [426], [221], [89], [529], [475], [601], [341, 342], [142], [553], [788, 502], [428], [], [162], [490], [419, 720], [339], [40, 46], [215], [44], [965], [299], [721], [888], [730], [530], [459], [96], [481, 482], [63], [855], [342], [562], [489], [857], [589], [590], [316], [763], [335], [412], [72], [550], [425, 912], [943], [575], [96], [332], [536], [285], [], [917], [563], [921], [385], [754], [132], [183], [834], [327], [962], [230], [393], [189], [339], [918], [977], [272], [914, 536], [98], [510], [273], [116], [510], [521], 
[707], [341], [488, 843], [886], [555], [200, 244], [76], [365], [609], [], [898], [564], [79], [994], [993], [78], [207], [522], [249], [14], [998], [8], [10], [], [2, 3], [], [298], [710], [501], [973, 991], [331], [41, 26], [690], [], [644], [128], [660], [602], [524, 461], [186], [685], [428], [384], [754], [386], [72], [911, 824], [], [488], [397], [155], [980], [208], [], [], [436], [594], [79], [491], [956], [777], [395], [788], [777], [112], [751], [838, 692], [200], [688], [26], [350], [582, 412], [905], [143], [685], [790], [803], [424], [970, 979], [845], [438], [87], [836, 837], [130], [466], [604], [726], [558], [468], [820], [836, 837, 617], [745], [294], [949, 923], [185], [366], [184], [265], [625], [987], [359], [108], [], [443], [323], [610, 899], [911], [245], [952], [180, 435], [732], [], [378], [815], [213], [806], [429, 981], [709], [744, 657], [556], [238], [548, 851], [179], [], [254], [72], [814], [263], [523, 728], [977, 978, 853], [547], [120], [7], [127], [154], [223], [39], [656], [192, 852], [192], [0], [112], [209], [538], [54], [78], [916], [362], [688], [561], [256], [], [468, 718, 839], [58], [14], [693], [842], [530], [209, 210], [860], [869], [737, 455], [537], [228], [118], [907, 818], [689], [792], [704], [688], [385], [736], [897], [823], [895], [986], [375], [200], [336], [280], [609], [596], [119], [93], [228], [119], [836, 837], [692, 943], [9], [427], [614], [558], [205], [610, 796], [941], [787, 524, 461], [250], [781], [145], [664], [884], [251], [770], [], [], [], [482], [950], [477], [437], [], [], [775], [], [481], [206], [966], [298], [86], [750], [696], [967, 968], [743], [376], [531], [818, 862], [562], [951, 503, 572], [559, 799], [842], [644], [301], [774, 414, 842, 464, 978], [349], [252], [34], [348], [361], [57], [154], [], [934, 415], [261], [], [879, 344], [478], [], [207], [698], [970, 979], [565], [900], [632], [453], [358, 359], [481], [], [46], [393], [43], [363], [359], [921], [678], [515, 880], [746], [721], [37], [670], [859], [311], [46], [426], [73], [44, 26], [611], [136], [281], [172], [489, 10], [622], [69], [895], [231], [35], [422], [687, 406], [616], [47], [353], [99], [581], [760], [514, 774, 523, 655], [636], [110], [918], [547], [206], [511], [306], [919], [650], [838], [606], [555], [], [210, 164], [], [834], [157], [480], [259], [852], [769], [559], [216], [], [244], [], [949], [246], [652], [593], [418], [344], [], [495], [730], [275], [151, 508, 158], [244], [437], [932], [40, 46], [836, 837, 841, 970], [279], [890], [522], [32], [49], [241, 238], [641], [646], [548, 851], [], [166], [10], [699], [613], [170], [795], [535], [369, 379], [136], [196], [387], [362], [600], [9], [281], [], [404], [177], [192], [874], [], [281], [640], [231], [128, 135], [540], [707], [636], [], [], [532], [94], [996], [310], [241, 238], [948], [259], [376], [356], [953], [89], [396], [513], [870], [915], [292], [163, 168], [297], [271, 272, 273], [906], [322], [261], [607], [], [104], [67], [985], [810, 878], [992], [215], [298], [315], [283], [463], [647, 828], [770], [728, 735], [213], [301], [314], [674], [779], [400, 667], [720], [564], [700], [733, 919, 920], [640], [964], [90], [574], [248], [772], [939, 942, 948], [], [13], [736], [377], [673, 681, 620, 664, 526, 527, 782, 508], [503], [31], [472], [754], [479, 817], [219], [772, 711], [384], [892], [404], [286], [115], [130], [988], [439], [1], [], [285], [711], [524, 461], [448], [483], [528], [172], [515], [951], [608, 487, 824, 502], [], [851, 892], [934, 923], [258], 
[515, 643], [], [673, 592], [], [881], [302, 303], [697], [945], [198], [880], [20], [758], [306], [283], [936, 923], [612], [743], [50], [502], [673, 810, 526, 527, 782, 664, 508], [518, 465, 597, 413], [285], [19], [518], [293], [185], [773], [503], [251], [908, 895], [537], [], [715, 524, 461, 883], [], [], [343], [722], [667], [286], [280], [], [225], [518], [236], [487], [989], [463, 758], [25], [353], [607], [801, 983], [758], [809], [539], [161], [313], [], [12], [209], [973], [17], [705, 547], [143], [948], [171], [685], [125], [836, 837, 650, 819], [], [75], [], [746], [953], [113], [843], [279], [928], [749], [761], [600], [738, 580], [165], [604], [161], [333], [66], [524], [745], [84], [674], [55], [353, 350], [142], [722], [747], [582, 617, 728], [801], [617, 845], [236], [68], [507], [4], [905, 750, 846], [419], [998], [141], [118], [476], [736], [357], [947], [929], [731, 762], [489], [106], [282], [928, 960, 966, 923, 572], [610, 731], [728], [888], [649], [869], [], [340], [832], [987, 121], [], [19], [604], [303], [4], [700], [541], [662], [168, 210], [136], [679], [607], [], [787], [18], [489], [623], [744, 657, 517], [394], [89], [462], [934], [345, 690], [910], [604], [449], [645], [645], [801], [84], [804], [834, 655, 975, 630], [168], [18], [690, 346], [161], [425], [775], [608, 584], [292], [260], [24], [158], [248, 249], [964], [77], [642], [95], [20], [805], [961], [884], [96], [551], [732], [424], [771], [965], [551], [500], [829], [181], [887], [866], [999, 218, 700], [831], [538], [676], [981], [102], [43], [312], [469], [702], [188, 189], [901], [611], [738], [510], [164], [819], [912], [142], [832], [48], [646], [811], [81], [965], [460], [866], [32], [511, 479], [577], [671, 444], [92, 95], [371], [404], [76], [971], [294], [694], [988], [530, 619, 846], [344], [417], [52], [364], [], [886], [90], [299], [489, 600], [861, 435, 285], [991], [151], [610, 903], [286], [948, 572], [557, 733], [690], [582, 879, 692, 954, 955], [684], [820], [425], [376], [410], [404], [759, 447], [377], [814], [795], [738], [181], [263], [50], [619], [256], [909], [252], [641], [801], [673, 526, 527, 664, 508], [173], [949], [], [800], [962, 923], [478], [123], [722], [135], [369], [28], [323], [132], [316], [69], [175], [656], [987], [85], [889, 486], [738], [340], [331], [], [196], [214], [144], [], [610], [932], [962, 923], [280], [252], [188, 190], [532], [676], [360], [], [300], [412], [589], [879, 775], [538, 727], [615], [574], [617], [435, 789], [654], [981, 429], [746], [6], [856], [187], [578], [177], [402], [489], [108], [642], [847], [288, 290], [], [173], [241], [249], [288], [395], [33], [247], [958], [923, 806, 936], [632], [258], [43], [881], [803], [455], [585], [], [586, 652], [291], [0], [991], [842, 977, 978], [], [356], [254], [], [779], [688], [668], [677], [217], [327], [976, 978], [254], [316], [497], [44], [373], [177], [647], [11], [363], [162], [54], [114], [75], [32, 28], [660], [61, 62], [929], [352], [561], [9], [491], [137], [696], [267], [98], [464], [210], [], [259], [892], [350], [995], [743], [426], [44], [264], [585], [744, 657], [691], [669], [357], [892], [944], [230, 231], [711], [], [322], [407], [798], [948], [54], [329], [52], [986], [], [745], [236], [873], [682], [772], [682], [705], [498], [846], [], [], [393], [672], [717, 479], [436], [743], [765], [253], [608], [425], [148], [334], [193, 187], [0], [988], [201], [258], [680], [783], [808], [805], [177], [94], [788], [858], [952], [701], [62], [787], [349], [600, 894], [200, 175], 
[425], [], [564], [351], [433], [169, 172], [611], [19], [110], [923, 925], [271], [695], [607], [972], [44], [674], [673, 526, 527, 664, 508], [222], [547], [809], [], [937], [423], [631], [966, 459, 445], [299], [449], [], [357], [], [343], [770], [105], [515], [809], [88], [515], [128], [630], [999, 191], [379], [750, 281], [746], [245], [102], [839, 975], [877], [884], [917, 453, 454], [391], [81], [330, 331], [], [168, 159], [], [479], [587], [509], [72], [574], [299], [922], [711], [337], [113], [182], [725], [988], [346], [452, 968, 504], [626, 893], [316], [638, 639], [880], [641], [922], [933, 923], [96, 489, 93], [587], [892], [300], [281, 283], [31], [248], [829], [192], [], [403], [767], [40, 489, 46], [521], [382], [84], [966, 907], [656, 627, 468], [810], [492, 750, 831, 414], [929], [486], [967, 968, 504], [281], [376], [504], [], [873], [418], [608], [553, 493], [443], [752, 852], [604], [110], [306], [850], [955], [953], [262], [531], [189], [279], [677], [90], [209], [742, 662], [211], [737], [64], [785], [681, 810, 620], [611, 954], [203], [845, 966], [714], [793], [491], [904], [474], [309], [731], [854], [211], [671, 535], [], [877], [768, 414], [822], [683], [462], [975], [529], [620], [497, 538], [263, 231], [681, 620], [283], [364], [535], [889], [145], [159], [746], [301, 310], [198], [240], [889], [416], [469], [839, 718, 978, 821], [23], [553], [796], [970, 671], [962, 937, 923, 959], [698], [499], [166], [932], [689, 601], [545], [96], [], [323], [925], [278], [95], [309], [500], [222], [896, 999, 861], [488], [324], [593], [], [463], [613], [63], [180], [685], [637], [38], [341], [542], [343], [988], [656], [130], [681, 620], [963], [648], [308], [939], [125], [301], [791], [569], [425], [309, 599], [529], [228], [431], [182], [178], [450], [153], [419], [145], [301], [459, 655, 638, 639], [788], [908, 895], [719], [221], [546, 650, 819], [826], [988], [91], [382], [689], [335], [720], [548], [159], [260], [223], [259], [972], [87], [555], [], [326], [874], [71], [679], [53], [367], [69], [703], [766], [556], [714], [512], [418], [376], [68], [834, 400], [924], [320], [908, 913, 404, 977, 978], [809], [843], [560], [835], [610, 836, 837], [768], [765], [879], [365], [678], [207], [373], [896], [820], [874], [490], [81], [708], [], [762, 868, 659, 532, 470, 923, 924], [787], [138], [192], [203], [731], [836, 837, 839, 460, 718], [928], [562], [791, 254], [20], [938], [287], [330], [628], [471], [174], [513], [684], [630], [360], [590], [200], [344], [934, 959, 923], [656], [330], [562], [963], [515, 652], [881], [186], [352], [37], [809, 925], [926], [], [825, 706], [538, 698], [531], [987, 998], [526, 782, 664], [159], [382], [230], [587], [970, 795], [706], [182], [], [349], [41], [72], [215], [433, 638, 639], [103], [616], [409], [207], [950], [423], [453], [564], [844], [911], [833], [496], [945], [106], [631], [912], [15], [812, 908, 404], [580], [957], [836, 837, 630], [872], [974], [956], [738], [775], [721], [218], [738], [], [772], [6], [632], [543], [], [206], [806], [816], [98], [122], [912], [221], [422], [186], [376], [923, 700], [358, 359], [795, 799], [36], [305], [774], [530], [137], [515], [447], [306], [299], [338], [695], [404], [988], [148], [342], [243], [771], [51], [325], [311], [199, 251], [420], [155, 157], [349], [281], [734], [649], [473], [892], [510], [194], [314], [391], [72], [622, 759], [578, 834, 457, 982], [662], [95], [861], [865], [], [681, 620, 526, 508], [748], [832], [435], [782, 664], [], [637], [251], [], [276], [137], 
[654], [159], [818], [230], [537], [880], [413], [68], [605], [927], [354, 680], [863], [9], [39], [428], [941], [658], [793], [571], [652, 625, 447], [139, 141], [153], [434, 823], [820], [995], [46], [502], [737], [319], [859], [248], [746], [829], [969], [649], [319], [884], [297], [450], [237], [439], [426], [989], [177], [993], [489], [795], [128], [154], [513, 566], [811], [775], [367], [], [547], [679], [375], [91], [730, 603], [411], [96], [912], [307], [62], [591, 659], [783], [3, 983], [505], [832], [727], [330], [581, 479, 436, 511], [646], [738, 532], [942], [248], [839], [926], [643, 876, 435], [117], [317], [681, 620, 526, 664, 508], [116], [660], [981], [74, 77], [482], [], [968, 923], [872, 622, 759, 414], [870], [673, 527, 782, 664, 508], [64, 59], [161], [738], [994], [13], [119], [365], [157], [198], [193], [793], [977, 978, 472], [526, 495, 786], [962, 923, 935], [125], [3], [235], [497], [786], [810, 333, 508], [578], [845], [291], [], [257], [805], [472], [], [375], [443], [], [507], [924, 965], [774], [93], [514], [10, 11, 14], [934], [947], [443], [437], [367], [837], [514], [126], [549], [373], [623], [851], [670], [636], [468], [149], [416], [863], [203], [448], [908, 404], [30], [548, 782, 851, 598, 632], [2], [776], [487, 590], [607], [751, 479], [927], [43], [761], [407], [602], [168], [258], [920, 919], [931], [958], [955], [75], [7], [141], [], [191], [273], [75], [460], [496], [398], [], [262], [667], [63], [645], [712], [776], [], [723], [988], [673, 527, 664, 508], [713], [355], [487, 531], [454], [978, 222], [559], [800], [687], [737], [222], [384], [940], [], [272], [543], [103], [51], [777, 787], [590], [465], [926], [452], [597], [610], [227], [981], [749], [751], [331], [222], [940, 942], [956], [608, 681, 620], [592], [346], [663], [205], [684], [178], [607], [44], [47], [954], [602], [411], [813], [133], [871], [43], [58], [140], [511], [576], [606], [368], [741], [368], [587], [15], [724], [], [876, 435], [898, 680], [72], [879], [776], [385, 907], [900], [909, 926], [445], [21], [], [725], [437], [6], [896], [990], [498], [], [116], [932], [369], [234], [881], [311], [], [491], [682], [267], [220], [734], [279], [148], [997], [], [385, 386], [844], [801], [129], [709], [822], [495], [987, 998], [675], [852], [], [], [], [218], [470], [], [584], [315], [515, 819], [136], [780, 977, 914, 978], [636], [941], [941], [65], [657, 475], [152], [900], [799], [956], [957], [525], [45], [903, 689, 501, 887], [547], [853], [726], [810, 878], [784], [632], [841, 794], [852], [337], [992], [353], [598], [797], [889], [121], [701], [321], [562], [943], [452], [129], [610], [466], [0], [98], [581, 717], [228], [4], [555], [844], [528], [3], [487], [898], [277], [393], [342], [929], [896], [943], [], [211], [898], [590], [], [11], [726], [866], [990], [873], [610], [893], [952], [407], [885], [327], [359], [], [], [165], [449], [174], [281], [804], [176], [975], [757], [530], [397], [875], [619], [516], [687], [627], [243], [220], [], [131], [205], [], [470], [253], [307], [593], [62], [987, 998], [861], [907, 440, 572], [594], [449], [], [897], [619, 846], [755], [82], [510], [754], [613], [], [635], [183], [277], [363], [928], [321], [728, 936], [307], [292], [20], [835], [488, 616], [956], [301], [255], [538], [355], [866, 853], [546, 650, 818, 819], [300, 302], [306], [393], [804], [925], [794], [868, 931, 968, 532, 504], [427], [410], [], [801, 983, 570], [363], [941], [385], [812], [876, 435, 794], [681, 620, 285], [142], [], [551], [581], [253], [749], 
[453], [102], [899, 505], [679], [83], [310], [255], [608, 515], [923, 572], [99], [509], [445, 638], [679], [479, 751], [200], [89], [338], [744, 586, 657, 408], [820], [849], [992], [33], [139], [733], [896, 861], [938], [138], [674, 333], [610], [496], [290], [640], [499], [853], [], [944], [53], [576], [270], [636], [79], [], [201], [884, 406, 857], [127], [568], [], [785], [], [987, 998], [806, 975, 445], [835, 733], [258], [789], [658], [182], [739], [], [986], [767], [326], [762, 572], [229], [112], [685], [373], [873], [], [333], [659], [133], [165], [675, 757], [855], [451], [692, 509], [655, 843], [8], [56], [332, 478], [979], [505], [473], [202, 189], [672], [660], [334], [460], [769, 77, 815, 798], [293], [], [995], [65], [934], [690], [568], [317], [340], [850], [399], [10], [29], [544], [], [746], [352], [221], [717], [396], [315], [875], [720], [557], [92], [17], [441, 572], [455], [303], [834, 906], [442], [65], [534], [684], [974], [96], [889], [679], [857], [856], [679], [831], [40], [569], [412], [125], [322], [], [352], [991], [401], [440], [259], [751], [441, 932], [391], [421], [162], [226], [228, 229], [281, 282], [708, 682], [516, 431], [786], [200], [550], [500], [803], [523], [970], [781], [397], [669], [673, 508], [143], [113], [271, 277], [889], [932], [472], [569], [645], [], [783], [673, 526, 527, 782, 664, 508], [131], [884], [204], [195], [570], [225], [904], [14], [184], [566], [7], [987, 998], [575], [693, 472], [28], [635], [155], [29], [842], [987], [34], [217], [407], [773, 455], [557], [994], [77], [271], [94], [650], [], [827], [449], [299], [75], [809, 942, 659], [821], [418, 709, 838, 767], [336], [757], [779], [786], [49, 50], [688], [817, 511, 479], [165], [67], [145], [407], [369], [216], [58], [695], [239], [622], [19], [740], [], [213], [576], [], [906, 834, 630], [812], [], [485], [456], [851], [10], [549], [773], [143], [28], [218], [840], [86], [], [], [195], [], [337], [254], [935], [561], [599], [651], [613], [11], [75], [862], [], [47], [506], [904], [740, 756], [917, 921], [920], [912], [77], [286], [], [126], [274], [24], [20], [904], [16], [0], [144], [248], [502], [687], [357], [336], [518, 671, 444], [11], [242], [274], [523, 721], [161], [711], [521, 618, 651, 813, 827], [388], [84], [62], [687], [374], [], [504], [216, 219], [158], [216], [672], [559, 818, 819], [], [962, 923], [72], [636], [863], [325], [421, 632], [], [162], [691], [975], [652], [113], [36], [899], [288], [328], [896], [579], [555], [486, 889], [719], [223], [19, 13], [781], [608], [314], [43], [943], [566], [994], [125], [388], [479, 817], [727], [318], [518], [574], [867], [540], [506], [882], [300], [613], [66], [865, 850], [973], [157], [727], [750, 591], [], [398], [198], [602], [259], [512], [905, 854], [36, 37], [420], [162], [564, 750], [382], [95], [244], [715], [596], [247], [409], [], [890], [581], [736], [360], [4], [154], [286], [598], [96], [739], [30], [765], [806, 630], [21], [334], [343], [402], [3], [149], [803], [872, 453], [177], [203], [410], [511], [997], [199], [281], [128], [246], [520], [405], [164], [866], [468], [95], [634, 858], [206, 221], [780, 914, 921], [276], [955], [420], [270], [881], [], [40, 46], [249], [772], [478], [857], [637], [675], [419], [426], [259], [353], [185], [178], [554], [602], [354], [241, 238], [639], [3], [761], [288], [755], [], [264], [19], [937, 938], [306], [416], [168], [880], [], [447], [191], [69], [705, 547], [704], [218], [552], [662], [940, 941, 942], [173, 251], [], [121], [178], [914], [971], 
[206], [610, 890], [719], [31], [159], [619, 846], [225], [610, 465], [113], [281], [], [113], [212], [], [612], [300], [702], [819], [674], [513, 776, 819], [335], [498], [870], [702], [63], [204, 153], [730], [635], [996], [803], [131], [803], [977], [111], [], [792], [357, 358], [681, 819, 620], [], [965], [307], [50], [408], [826], [92], [879], [910, 567, 926], [513], [867], [514, 515, 898, 808], [100], [570, 691, 652], [489], [418], [387], [866], [350], [870], [420], [166], [540], [345], [819, 818, 632], [417], [640], [662], [914], [650, 541, 558, 819], [68], [707, 637], [557, 919], [96], [902], [172], [902], [587], [447], [959], [507], [132], [789], [342], [66], [875, 566, 541], [764], [51], [390], [791], [416], [517], [896], [], [18], [985, 309], [515, 469], [39], [395], [809, 959], [833, 913], [947], [126], [850], [813], [723], [73], [544], [165], [187], [886], [], [37], [147], [912, 824, 447], [864], [842], [], [723], [72], [539], [633], [609], [220], [489], [418], [555], [430], [113], [439], [221], [727], [616], [272, 280], [], [428, 195], [863], [530], [], [251], [], [979], [579], [306], [619, 846], [939], [751], [676], [281], [974], [859], [547], [703], [769], [888], [61], [], [218], [496], [392], [88], [904], [856], [323], [281], [804], [491], [122], [408], [809, 925], [785], [164], [], [968, 721], [259], [284], [11], [], [491], [147], [449], [504], [952], [488, 695], [661], [242, 243, 805], [102], [139], [2], [], [785], [251], [174], [425, 858], [489], [836, 837], [958], [44], [348], [266, 219, 156], [193], [24], [167], [518, 444], [970, 976], [766], [862], [733], [], [951], [934], [450], [], [649], [150], [955], [94], [135], [], [522], [641], [459, 978, 445], [836, 837], [606], [980], [95], [46, 59], [], [], [386], [287], [518], [], [578, 903, 689], [102], [186], [685], [252], [736], [179], [322], [475], [866], [], [427], [278], [602], [582, 950, 790, 953, 954], [120], [372], [641], [910], [626], [448], [803], [983], [319], [3], [202], [658], [528], [956], [], [500], [722], [759], [770, 788], [90], [892], [], [350], [188], [920], [576], [760], [908], [215, 218], [621], [407], [208], [610, 841], [526, 882, 606], [964], [534], [344], [726], [81], [83], [266, 267], [2, 3], [855], [201, 589], [654, 475], [234, 795], [72], [947], [], [426, 635], [16], [681, 620], [379], [765], [736], [888, 821], [27], [152], [53], [540], [903], [85], [64], [95], [834], [786], [908], [243], [253, 273], [479, 436], [74], [652, 847], [417], [711], [583], [639], [], [], [923], [131], [316], [510], [193], [372], [140], [770, 788], [842, 433, 639], [625], [], [34], [22], [259], [744], [878], [472], [470], [216], [690], [179], [30], [288], [518, 491], [694], [522], [1], [320], [809, 659], [850], [95], [529], [204], [890], [93], [865], [868, 495, 572], [546, 650, 664, 527, 819], [946], [629], [815], [661, 479], [488], [311], [130], [781], [90], [93], [250], [239], [684], [137], [94], [707], [570], [572], [268], [673], [449], [198], [787], [618, 926], [965], [930, 934, 923], [333], [344], [128, 131], [464, 787], [], [462], [382], [176], [441, 572], [861], [81], [509, 582], [713], [120], [858], [621], [263, 236], [248, 249], [345], [762], [57], [12], [703], [150], [734], [881], [866], [416, 602], [267], [840], [400, 667], [62], [399], [], [17], [426], [81], [127], [445], [88], [981], [912], [109], [673, 526, 527, 664, 508], [220], [693], [740], [699], [], [182], [213], [201], [243], [], [376], [535], [275], [958], [605, 526, 784, 477], [240, 241, 238], [973], [459], [225], [564], [846], [275], [86], [363, 
501], [640], [512], [564], [355], [968, 505], [738], [636], [630], [142], [10], [], [315], [387], [931], [992, 997, 947], [543], [258], [610], [668], [404], [], [50], [922], [923, 122], [574], [741], [456], [967, 968, 504], [543], [156], [770, 788, 916], [646], [35], [488, 600], [673, 904, 905, 526, 527, 664, 508], [796], [646], [393, 108], [226], [777, 524, 461, 787], [827], [920], [989], [30], [165], [361], [524, 461], [], [387], [432], [], [385, 101], [489, 368], [355], [705], [148], [549], [995], [], [123], [384], [916], [95], [652, 764], [396], [807], [], [992], [783], [299], [529], [958], [211], [961], [87], [232], [369], [664], [130], [], [444], [515], [894], [453, 831], [790], [660], [668], [919], [], [14], [327], [297, 295], [898], [102], [905, 794], [39], [217], [194], [869], [40], [475], [8], [927], [], [108], [588], [638, 639], [745], [232], [11], [], [875], [443], [245], [], [820], [], [577], [277], [494], [], [542, 822], [444, 637], [907], [], [423], [45], [105], [530], [352], [754], [675], [141], [476], [], [681, 620], [683], [388], [111], [497, 663], [171], [139], [530], [189], [125], [804], [994], [581, 479], [939, 943], [553, 493], [], [459], [872], [316], [], [289], [125], [131], [422], [617], [946], [336], [], [963], [539], [960], [812], [727], [128], [150], [127], [472], [936, 909, 926], [263, 253], [448], [923, 968, 849, 762, 828], [77], [416], [890], [311], [709, 767], [417], [479, 661], [216], [407], [138], [], [903], [805], [405], [989], [330], [16], [480], [519], [], [610, 589], [216], [], [810, 508], [216], [588], [938], [604], [341], [82], [651], [847], [], [67], [409, 892], [582, 936, 940], [333], [111], [], [432], [993], [178], [234], [750, 721], [341], [645], [449], [608, 744, 841], [975, 447], [349], [515, 665], [0], [511], [34], [638, 639], [911], [841], [741, 539], [299], [508], [62], [819], [981], [518, 665, 671], [955], [484, 914, 821], [782, 664, 281], [430], [905, 799], [], [131], [192], [48], [726], [92], [155], [362], [510], [607], [588], [238, 241], [187], [508], [862], [873], [911], [842], [809], [538], [866], [733], [977, 978], [], [499], [809, 923, 925], [403], [532], [901], [], [209], [35], [844], [232], [507], [299], [497], [111], [563], [680], [995], [403], [633], [340], [804], [], [517], [139], [936], [452], [17], [609], [247], [], [672], [560], [102], [356], [498, 919], [403], [], [143], [820], [324], [739], [479], [85], [330], [558], [], [433, 842, 639], [340], [67], [90], [318], [4], [532], [76], [544], [403], [764], [], [874], [537], [365], [45], [494], [95], [581, 661, 479], [145], [777, 623, 499], [429], [554], [8], [268], [140], [], [343], [787], [522], [398], [276], [864], [313], [974], [781], [217], [], [892], [364], [180], [44], [587, 784], [923], [676], [], [896], [586], [606], [770, 806, 608, 610], [804], [228], [336], [739], [432], [], [16], [73], [707], [916], [291], [279], [267, 265], [53], [825], [962], [807], [399, 501], [812], [995], [640], [139], [320], [245], [891], [540], [696, 477], [955], [738], [636], [528], [545], [316], [619, 846], [838, 551, 711, 629, 631], [], [53], [761], [491], [768], [701], [489], [468], [355], [24], [726], [812], [245], [55], [896], [332], [938], [614], [356], [56], [311], [317], [494], [150], [720], [139], [486], [118], [], [744, 657], [74], [794], [903], [], [23], [772, 679, 488], [104], [437], [602], [753], [456], [389], [908], [687], [22], [748], [682], [451], [894], [919], [308], [792], [161], [383], [681, 620, 526, 916], [915], [], [401], [439, 873], [235], [754], [662], [621], [821], 
[33], [847], [433], [585], [], [526, 673, 508], [482, 754], [552], [386, 101], [974], [825], [248, 249], [538, 698], [183], [46], [647, 845, 438], [240, 238], [874], [], [932], [763], [608], [17], [842, 459], [955], [758], [990], [38], [354], [853], [], [997], [212], [702], [745, 572], [], [696], [635], [449], [10], [91], [194], [873], [847], [250], [91], [989], [679], [784], [146], [255], [631, 838], [688], [13], [971], [157], [879], [], [165], [836, 837], [], [561], [458], [739], [], [869], [490], [806, 911, 502], [807], [], [], [40, 46], [560], [22], [568, 824, 869], [519, 907], [712], [144], [], [236], [858], [552], [146], [239], [256, 234], [], [957], [704], [791], [567, 926], [827], [377], [], [910], [160], [601, 578], [260], [542], [690], [146], [777], [651], [159], [371], [189], [64], [683], [814], [416], [717], [773, 659], [940], [465, 597, 630, 413], [468], [636], [145], [348], [398], [530], [869, 824], [880], [12], [933], [381], [146], [802], [127], [153], [968, 504], [814], [894], [637], [55], [359], [641], [635], [396], [537], [], [41, 44, 26], [937], [318], [12], [890], [266], [808, 836, 837], [624], [538], [575], [959], [10], [632], [72], [918, 721, 608, 750], [548], [740], [], [321], [661], [38], [991], [444], [573], [205], [619], [667], [807], [602], [757], [205], [], [67], [710], [145], [181], [64, 55], [619, 846, 721, 831], [100], [261], [28], [900], [552, 903], [772], [513, 776, 822, 541, 542], [897], [936], [140], [600], [], [329], [603], [642], [135], [658], [184], [416], [283], [950], [570], [655, 806], [794], [], [954], [921], [563], [], [554], [830], [277], [121], [839], [93], [711], [], [77], [818], [794, 861], [946], [208], [927], [211], [647], [693], [868], [267], [404], [979], [132], [120], [193], [653], [569], [489], [983], [770], [272], [752], [845], [448], [396], [742], [728], [], [321], [], [621], [291], [575], [243, 254], [820], [421, 693], [315], [589], [207], [274], [356], [730], [869], [619, 846, 721, 883, 831], [284], [311], [673, 526, 527, 664, 508], [424, 423], [886], [733], [724], [489, 444], [41], [324], [69], [376], [835], [323], [479], [6], [754], [452, 151], [204, 155], [], [320], [481], [337], [859], [324], [245], [619, 846], [865], [], [717], [459], [86], [118], [355], [], [525], [], [398], [570], [389], [422], [343], [74], [148], [211], [846], [126], [682], [923, 924], [], [293], [263], [699], [491], [], [42], [146], [408], [931], [655, 752, 852], [], [115], [657], [223], [881, 579, 889], [332], [962, 659], [558], [865], [295], [434], [572], [95], [108], [98], [846], [156], [337], [819], [750], [], [648], [], [195], [627], [180], [856], [975, 977, 472], [123], [289, 293], [109], [749], [177], [684], [584], [546, 650, 402, 818, 819], [], [472], [698, 538], [52], [587], [535], [375], [240, 241, 238], [922], [869], [673, 681, 526, 527, 782, 664, 508], [470], [847, 403], [714, 402], [608], [481, 482], [6], [418, 918], [], [90], [496], [903], [174], [], [281], [673, 810, 526, 527, 782, 664, 508], [640, 919, 841, 468, 728, 608], [358], [203], [421], [754, 632], [990], [686], [460], [844], [150], [258], [71], [446], [40, 44], [419], [865], [318], [722], [364], [585], [], [466], [914], [211], [858], [868], [230], [715], [339], [], [696], [482], [84], [909, 910, 926], [581, 479, 436], [], [], [], [226], [861], [882], [341], [792], [], [827], [360], [438], [318], [2], [229], [999, 435, 861], [275], [103], [672], [286], [98], [408], [942], [679], [35], [688], [79], [171], [232, 852], [22], [654], [436], [182], [950], [688], [816], [222], [773], [472], 
[296], [951], [517, 540], [911, 735], [383], [173], [41], [962], [467], [846], [664], [233], [905, 869], [82], [692], [475], [928, 960], [699], [741, 735], [378], [209], [569], [808], [589], [4], [166], [922], [952], [839], [770], [857], [174], [261], [406], [740, 783], [264], [41], [556], [448], [242], [680], [744, 657], [420], [824, 474, 911], [675], [50], [568, 248], [352, 353], [984, 425, 853], [777], [768], [265], [894], [], [619, 846, 470], [793], [12, 14], [967, 968, 504, 923], [823], [61], [419], [569], [656, 858], [431], [315], [508], [746], [453, 454, 624], [654], [74, 815], [444], [3, 4], [74], [199], [35], [232], [231], [524, 461], [111], [256, 218], [994], [], [810, 590], [964], [806, 870, 843, 850], [211], [519], [452], [637], [198], [946], [821], [508], [217], [873], [258, 279], [790], [672], [578], [614], [281], [594], [654], [465, 597], [51], [504], [106], [22], [821], [45], [516], [524, 461, 787], [694], [], [], [363], [767], [39], [7], [585], [647], [722], [510], [457], [174], [439], [919], [516], [215], [119], [233], [245], [871, 536], [929], [946], [71], [842, 445], [281], [123], [58], [497], [205], [], [438], [279], [710], [897], [912], [512], [689], [], [879, 614], [181], [388], [761], [509], [188], [537], [439], [112, 977, 978], [687], [975, 703], [], [773], [859], [14], [552], [190], [549], [500], [385], [524, 461], [802], [332], [49], [397], [913], [945], [176], [198], [26], [], [107], [], [868, 849, 504], [101], [847], [809, 924], [247], [736], [813], [385, 862], [142], [585], [4], [971], [730], [707], [445], [821], [795], [168], [780], [295], [581, 479, 436], [790], [361], [587, 792], [875], [675], [481], [104], [5], [941, 923], [454, 911, 474], [], [262], [456, 970, 445, 638], [508], [981, 429], [707], [475], [325], [851], [292], [412], [], [907, 440], [755], [495], [486], [941], [601, 578, 982], [206], [371], [896, 861], [686], [923], [672, 899, 469, 827], [420], [440, 441], [], [797], [596], [], [354], [944], [464, 676], [338], [462], [930], [731], [680], [679], [938], [413], [438], [455, 600], [162, 167], [164, 166], [813, 567], [921], [7], [106], [321], [897], [131], [921], [110], [453, 454, 559], [737], [259], [71], [690, 345], [144], [453], [370], [267], [640], [968, 504], [941], [411], [695], [225], [205], [10], [704], [72], [876, 435], [307], [650], [987, 923], [455], [728], [734], [680], [497], [877], [317], [591, 868], [595], [635], [852], [987, 998], [654], [970], [417], [56], [479, 511], [280], [256], [394], [422, 559], [205], [962, 923], [], [123], [991], [891], [416], [761], [983], [871], [981, 429], [291], [603], [5, 6], [595], [723], [544], [2], [873], [668], [], [], [898], [458], [880], [962, 467, 499], [179], [340], [515], [729], [700, 999], [245], [97], [330], [655], [629], [919], [71], [421], [519, 907], [977, 978, 445], [], [391], [230], [645], [], [283], [518, 671], [866], [31], [], [], [678], [521], [458], [150], [486], [], [347], [645], [], [466], [288], [745], [702], [562], [618, 909], [719], [918], [335], [344], [575], [499], [602], [952], [520, 680, 431, 529, 850, 443], [], [933], [874], [387], [234], [51], [61], [165, 187], [87], [], [61], [383], [194], [373], [193], [866], [470], [570], [257, 258, 489], [269], [14], [115], [393], [], [772], [937], [625], [673, 553, 526, 527, 664, 508], [979], [10], [511], [916], [388], [279], [], [523], [2], [902, 488], [768], [], [157], [24], [950], [944], [230, 231], [337], [612], [846], [215], [625], [529], [258], [985], [769, 798], [769, 114], [443], [205], [15], [578, 885], [683, 875, 558], 
[800], [281], [889], [434], [770], [519], [508], [673, 664, 526, 527, 508], [325], [803], [760, 415], [360], [743], [640], [729], [573], [731], [91], [], [301], [], [145], [931], [816], [723], [], [669], [941], [810], [730], [811, 281], [605], [22], [945], [678], [911, 658], [751], [], [292], [520, 697], [480], [230], [705], [536], [327], [232], [624], [110], [301], [889], [23], [429], [668], [337], [110], [864], [910], [448], [807], [723], [58], [105], [439], [199], [96], [746], [769, 606], [429], [], [650], [312, 311], [824], [866], [995], [554], [898], [577], [980], [768], [570], [850, 911], [10], [444], [977], [177], [443], [911], [352], [], [24], [708], [170], [860], [56], [936], [5], [318], [589], [648], [937], [668, 538, 607], [692], [836, 879, 822], [270], [543], [228], [923, 947], [933], [567], [920], [907], [880, 972], [615, 543], [568], [320], [927], [957], [329], [88], [104, 489], [461], [591], [896], [338], [971], [608, 518, 734, 465, 413], [797], [969], [999], [129], [373], [159], [366], [844], [647], [482], [142], [983], [129], [205], [245], [717], [52], [908, 404, 895], [], [453, 850], [473], [808], [332], [858], [448], [668], [700], [829], [795], [21, 127], [197, 199, 836, 837], [281, 282], [904], [763], [681, 620, 508], [256], [51], [612], [805], [155], [439], [373], [908], [546, 650, 819], [138], [111], [502, 539], [562], [702], [753], [304], [425], [828, 845], [307], [872, 759], [941], [923, 907, 532, 470, 762, 572], [], [169], [588], [33], [498], [557, 733], [107], [546, 889], [490], [597], [139], [806, 655], [778], [673], [], [287], [97], [332], [463], [33, 983], [636], [486], [183], [950, 951], [], [822, 542], [56], [723], [], [39], [240, 241], [696], [864], [921, 917], [977, 978], [868, 588, 692], [160], [824, 775], [790], [49], [761], [7], [235], [803, 637], [276], [584], [71], [756], [645], [629, 508], [774], [858], [53], [750], [836, 837, 906], [38, 45], [640], [856], [602], [225], [953], [484], [466], [769], [491], [489], [326], [71], [331], [66], [302], [434], [], [409, 531], [511], [745], [519], [114], [], [429], [418], [334], [318], [162], [182], [614, 818], [225], [740, 783, 477], [80], [], [14], [499], [591], [497, 884], [568], [100], [894], [486], [354], [], [521, 926], [514, 515, 597, 763, 445], [924], [63], [477], [676, 173], [888, 718, 839], [277, 278], [60], [], [716, 13], [913], [207], [375], [652, 465, 830], [340], [156], [154], [253], [251], [861], [277], [785], [317], [514, 655], [], [617, 823], [483], [382], [613], [48], [777], [812], [502], [198], [263], [306], [37], [35], [184, 191], [801], [262], [485], [], [576], [150], [700, 950], [333], [30], [23], [130], [50], [619, 750, 846, 721], [677], [249], [557], [35], [108], [], [400, 667], [960, 868], [348], [649], [830], [996], [670], [660], [494], [851], [662], [751, 479], [675], [851], [454, 917], [227], [747], [56], [332], [214], [930], [127], [987, 998], [921], [66, 68], [], [283], [784], [386], [996], [744, 657], [652, 465, 413], [239], [296], [359], [945], [876, 435, 282], [651], [], [509], [124], [66], [], [981], [572], [334], [127], [319], [900], [29], [327], [28], [382], [344], [731], [399], [680, 898], [156], [995], [161], [78], [367], [494], [774, 464], [951], [480], [81], [252], [464], [532, 453], [52], [], [735], [], [301], [354], [338], [653, 665], [482, 485], [992], [562], [676], [], [219], [570], [542], [974], [713], [538, 727], [801, 107], [725, 505], [937], [891], [], [290], [513, 875, 819], [850], [755], [866], [687], [344], [441, 572], [924], [237], [903], [93], [92], [350], 
[923, 951, 762], [162], [267], [335, 845], [], [411], [774], [357], [137], [581, 586], [608, 464], [411], [660], [162, 166], [810, 878], [937], [661], [558], [168], [89], [732, 622, 759], [235], [247], [384], [845], [871], [686], [993], [196], [345], [548], [404], [391], [174], [686], [755], [14], [143], [779], [914], [2], [930], [538], [912, 825], [478, 239], [478], [], [75], [922], [401], [730], [399, 840, 462, 741], [971], [32], [40, 46], [791], [525], [685], [672], [863], [754], [366], [205], [580], [202], [474], [416], [598], [635], [986], [914], [897], [607], [453, 454, 624], [757], [11], [960, 928], [136], [], [747], [311], [784, 587, 740, 477], [249], [326], [], [], [337, 360], [823], [58], [], [189], [936], [886], [762], [402, 593], [], [4], [851], [944], [708], [845], [164], [945], [256], [53], [821], [455], [918], [119], [55], [462], [20], [857], [650, 402, 819], [646], [197], [439], [752], [774, 412, 671, 836, 837, 733], [8], [670], [845], [617, 515, 860], [802], [853], [32], [650, 683], [139], [487], [401], [168], [82], [877], [781, 409], [305], [652, 830, 764, 413], [853], [723], [534, 729], [578, 876, 689, 435, 794], [858, 807], [884], [353], [218], [451], [879], [504, 968], [196, 198], [], [458], [805], [21], [864], [589], [384], [652, 465], [881], [458], [], [659], [847], [813], [923], [506], [198], [103], [912], [854], [674], [673, 664, 526, 527, 632, 508], [0], [84], [183], [5], [37], [840, 462], [478], [270], [541], [81], [927], [810, 878], [677], [471], [649], [416], [929, 509], [251], [], [366], [335], [464], [625], [20], [776, 650], [561], [379], [559], [415], [139], [757], [142], [569], [], [201], [895], [576], [663], [491], [64], [39], [185, 182], [866], [844], [326], [530], [322], [407], [548], [579], [84], [717, 751, 479], [680], [812], [940], [284], [250], [484], [677], [297], [880, 731], [368], [291], [7], [296], [731, 861], [15], [31], [], [783], [431], [244], [16], [377], [639], [628], [908, 404], [185], [730], [660], [362], [647, 969], [519], [323], [978], [509], [721], [], [], [608], [309], [591], [316], [484], [], [496], [836, 837, 853, 762], [976], [922], [956], [619, 818], [422], [103], [624, 453], [871], [326], [270], [986], [478], [907, 440], [843], [685], [311], [426], [792], [764], [908], [280], [280], [], [503], [865, 509], [637], [672], [153], [110], [45], [595], [995], [916], [923], [], [375], [376], [219], [735], [], [406, 857], [], [963], [586], [148], [199], [56], [287], [473], [937], [449], [861], [195], [707], [584], [497], [514, 689], [704], [538], [533], [904], [692], [76], [286], [], [783], [216], [189], [25], [500], [102], [821], [795], [737, 455, 907, 440], [862], [760], [377], [179], [637], [999, 648], [685], [511, 479], [393], [390], [275], [626], [337], [464], [310], [968, 504], [116], [222], [272], [747], [845], [815], [40], [30], [402, 819], [966], [], [580], [873], [580], [448, 494], [957], [893], [557], [139, 140], [], [628, 536], [324], [578], [203], [757], [609], [947], [321], [945], [485], [610], [472, 693], [653, 463], [544, 909, 849, 469], [172], [118], [319], [518], [837, 678], [694], [962, 923], [957], [938], [422], [525], [], [135], [890], [224], [923], [100], [967], [42], [926], [566], [724], [114], [249], [], [913], [407], [804], [528], [254], [480], [441], [207], [607], [357], [85], [396], [694], [543], [875], [519, 956], [257], [873], [5, 6], [553], [105], [268], [], [304], [866], [157], [775], [896], [599], [528], [71], [351], [636], [464], [99], [336], [17], [39], [770], [882], [], [72], [659], [661], [836, 837, 
487], [], [6], [352], [861], [307], [328], [341], [735], [733], [], [152], [732, 759], [924], [717], [867], [229], [], [662], [757], [577], [309], [581, 479], [724], [766], [842, 433], [587], [923], [645], [229], [685], [732], [340], [530], [352], [865], [826], [820], [853], [495], [475, 15], [25], [534], [822, 542], [311], [337, 334], [907, 499, 470], [749], [347], [260], [412], [442], [199], [834, 487], [498], [65, 56], [764], [789], [], [766], [811], [660, 757], [650, 402], [562], [968, 504], [353], [244], [570], [438], [795], [198], [298], [838, 551, 629, 631], [21], [90], [248], [17], [532, 762, 923], [669], [413], [716], [85], [467], [861], [893], [317], [803], [225], [426, 685], [410], [925], [], [185], [814], [351], [578, 452, 689, 538, 601], [974], [80], [343], [496, 529, 411], [84], [884], [433, 639], [322], [927], [550], [651], [512], [940], [988], [790], [791], [909, 567], [638, 639], [367], [400, 667], [], [25], [736], [474, 452], [95], [822], [90], [119, 39], [242], [86], [638, 639], [504, 850], [596], [54], [], [320], [773, 532, 923, 572, 762], [737], [916], [287], [168], [375], [129], [959], [546, 650, 818, 819, 542], [816], [597], [558], [551], [], [], [553], [603], [466], [], [], [80], [27], [162], [434], [82], [222], [532], [15], [730], [595], [382], [785, 464], [881], [753], [76], [112], [204], [], [618, 813], [350], [506], [947], [130], [278], [932], [338], [41], [401], [285], [32], [829], [156], [190], [226], [340], [327], [365], [498], [435, 794], [619, 846], [611], [910], [262], [905], [], [524], [503], [659], [558], [795], [807], [761], [984], [947, 125], [112], [299], [84], [122], [847], [847], [472], [219], [864, 586, 652, 413], [650, 568, 608], [44], [952], [149], [89], [583], [565], [145], [], [806], [31], [232], [703], [858], [73, 74, 815], [644], [70], [745, 620], [513, 875], [685], [173], [840, 587, 758], [836, 837, 842], [479], [742, 620, 664, 527, 508], [798], [79], [746], [198], [316], [727], [252, 262], [258], [597], [302], [859], [932], [637], [761], [209], [297], [442], [993], [32, 152], [350], [989], [815], [432], [779], [], [1], [880], [578, 834, 836, 837, 458], [163], [506], [804], [672], [110], [143], [934], [566], [214], [911, 253, 735], [524, 461, 958], [525, 718, 437], [], [518], [119], [177], [400, 667], [295], [], [789], [234], [929], [638, 639], [597], [937], [494], [257], [62], [], [347], [369], [924], [539], [397], [317], [126], [580], [550, 968], [810, 878], [255], [576, 536], [459], [188], [], [33], [723], [594], [435, 58], [39], [85], [199], [888], [883, 739], [409], [147], [478], [462], [592], [716], [342], [143], [], [494], [74], [666], [464], [218], [411], [365], [900], [247], [754], [174], [515, 790, 636], [868], [12], [535], [887], [300], [39], [938], [427], [287], [203], [52], [94], [361], [317], [793], [935], [928], [977], [431], [776], [61], [505], [51], [63], [71], [116], [914, 536], [672], [], [401], [495, 532], [205, 478], [449, 975], [951], [671], [344], [806], [330], [697], [281], [615], [333], [528], [699], [651, 827], [812], [], [603], [], [337], [457], [948, 950, 954], [808, 842, 977, 978], [], [618, 813], [417], [60], [801], [453], [132], [582], [332], [114], [324], [489, 134], [260], [825], [], [968, 762], [775], [474], [], [87], [702], [579], [544, 827, 469], [], [344], [968], [667], [261], [988], [593], [931], [688], [438, 728], [845], [694], [843], [715, 652], [], [170], [979], [378], [], [582, 936, 943], [872, 759], [740], [378], [108], [127], [935], [203], [931, 933], [351], [254], [915], [633], [967], [429], 
[751], [268], [10], [983], [578, 982, 601], [744, 657], [556], [970], [268], [105], [464, 597], [482, 632], [104], [255], [569], [582], [272], [], [115], [399, 501], [133], [548], [241], [796], [111], [371], [891], [797], [957], [345], [666], [342], [159], [608, 117], [562], [608, 610], [260], [393], [296], [682], [608, 774, 788], [148], [776], [], [537], [203], [207], [765], [517, 821, 536, 510], [459, 434], [478], [683], [495], [875], [683, 566], [233], [985], [511], [710], [959], [973], [988], [673, 526, 527, 782, 664, 508], [235], [424], [206], [224], [539], [396], [945], [281, 285], [884, 406], [702], [542, 541], [605], [484, 814], [774], [601], [9], [652], [950], [993], [374], [603], [616], [206], [586], [930], [647], [343], [269], [328], [156], [153], [], [484, 871], [385, 101], [885], [], [794], [], [291], [664, 782, 662], [981], [410], [47], [364], [290], [708, 517], [852], [115], [916], [528], [115], [754], [459, 608], [360], [], [322], [872, 652, 447], [551], [751, 479], [97], [185, 153, 187], [202], [966], [971], [597], [599], [77], [72], [839], [629], [111], [718], [698], [223], [934], [360], [993], [632, 818, 819], [364], [984], [770, 806], [728], [528], [581], [322], [77], [894], [445], [869], [384], [617, 823], [], [38, 26], [160], [479], [250, 220, 248], [276], [901], [923], [308], [342], [], [838, 711, 648, 585, 631], [], [], [279], [78], [97], [], [746], [532], [688], [568, 831], [599, 955], [109, 973], [357], [919], [466], [908], [339], [573], [643], [715, 524, 461, 787], [301], [677], [], [143], [158], [451], [139], [700], [436], [774, 681, 620, 750, 721, 846], [704], [369], [936], [736], [901], [287], [], [835], [638, 639], [442], [734], [329, 397], [], [310], [970, 518, 671], [110], [580], [709, 710], [735], [265], [565], [560], [43], [282, 478], [800], [388], [177], [275], [340], [766], [290], [196], [148], [865, 850], [63], [157], [825], [240, 241], [920], [], [752], [694], [870], [770, 488, 843], [160], [700, 999], [], [958], [387], [556], [737], [76], [456], [701], [81], [942], [262], [61], [992], [545], [77], [262], [783], [548, 869, 655, 851], [367], [373], [115], [399], [], [], [565], [446], [129], [6], [748], [417], [970], [358], [815], [], [767], [71], [290], [909, 926], [788], [416], [399, 824, 600], [696], [358], [905, 750, 721], [272], [920, 829], [558], [854], [248], [160], [323], [622, 759], [870], [796], [318], [775], [140], [614, 887], [954, 950], [581, 479, 717], [784, 740], [571], [116], [519], [136], [355], [75], [956], [88], [491], [288], [874], [228], [559], [293], [241], [500], [486], [943], [350], [407], [146], [], [47], [99], [303], [402], [879], [], [51], [160], [573], [457], [842, 463], [872, 420], [221], [389], [527, 782, 916, 664, 508], [826], [382], [], [125], [581, 436, 479], [97], [254], [802], [499], [977, 978], [660], [629], [432], [261], [193, 153], [242], [838], [], [698], [257], [923], [222], [157], [570], [112], [359], [451], [292], [513, 776, 683, 875, 822, 541, 542], [378], [496], [775], [879], [836, 837], [752], [723], [724], [202], [295], [447], [801, 842], [185], [810, 878], [93], [380], [984], [964], [122], [384], [151], [], [528], [989], [612], [704], [220], [768], [86], [100], [573], [2, 3], [959], [], [459], [994], [498, 598], [286], [303], [276], [341], [14], [953], [856], [248], [797], [350], [903], [760], [103], [413], [608, 770, 414], [936], [840], [134], [34, 978], [302], [211], [597], [852], [135], [552], [356], [927], [214], [164], [292], [41], [477], [769], [709], [], [488], [500], [640], [918], [483], [117], 
[95], [897], [884], [853], [99], [472, 693], [213], [202], [592], [14], [767], [875], [228], [], [277], [608, 481, 482], [243], [204], [132], [875], [126], [439], [724, 536], [528], [962], [400, 667], [312], [477], [267], [716], [569], [339], [819, 541], [809, 618, 925], [193], [], [711], [967, 968, 923], [779], [533], [330], [], [], [668], [736], [262], [136], [379], [], [671], [281, 285], [955], [573], [968, 504], [331], [132], [784], [592], [621], [215], [172], [458], [150], [303], [799], [], [654], [515], [49], [490], [15], [223], [262], [682], [301], [592], [635], [291], [718, 628, 540], [625], [360], [716], [752], [20], [623, 795], [421], [618, 659], [122], [183], [232], [221], [], [399], [704], [19], [], [568], [317], [542], [822], [561, 950], [968], [719], [151], [], [679], [386], [581, 479, 511], [485], [], [946], [642], [368], [25], [239], [472], [550, 967, 968], [647], [368], [83], [854], [801], [], [772, 748], [118], [87], [873], [772], [114], [935], [218], [], [464], [966, 907, 572], [2], [622], [449], [961], [777, 499], [691], [69], [622, 759], [221], [257], [28], [328], [826], [821], [15], [73], [24], [357], [957], [29], [438], [521], [134], [866], [147], [187], [], [289], [671], [138], [26], [], [758], [738, 211], [617, 823], [613], [777], [217], [], [458], [772], [953], [835], [], [64, 55], [942], [327], [392], [871], [858], [810, 508], [833], [786], [924], [779], [586], [612], [402], [318], [842], [782], [], [673, 526, 527, 782, 664, 508], [501], [536], [153], [928], [819], [2, 3], [55], [138], [57], [661], [659], [173], [683], [655], [114], [669], [357], [887, 857], [277], [114], [616], [145], [355], [607], [2, 3], [385, 101], [859], [94], [813, 909], [896], [875], [652], [0, 389, 758], [984], [], [388], [74, 815], [11], [785], [540], [904], [], [860], [397], [810, 878], [489], [299], [171], [325], [546], [659], [555], [600], [437], [936], [353], [528], [739], [839], [727], [967], [121], [638, 639], [], [980], [196, 197, 198, 199], [258], [714], [], [729], [927], [67], [322], [579], [342], [8], [904, 905], [767], [40, 911, 27], [935], [296], [738], [882], [15], [439], [164], [580], [77], [331, 332, 338], [432], [150], [292], [188], [563], [391], [522], [492], [353], [804, 844], [660], [668], [619], [262], [661], [165], [683], [454, 624], [403], [201], [341], [90], [669], [474], [199], [942], [], [310], [859], [889], [482], [863], [925], [910], [], [491], [350], [183], [795], [586], [260], [681, 620], [265, 266], [439], [735], [32], [984], [], [668], [494], [278], [290], [292], [884], [785], [488], [833], [362], [], [128], [204], [583], [18], [127], [738, 968, 505], [99], [478], [155], [439], [111], [377], [976], [174], [836, 837, 459, 445], [466], [917], [12], [145], [883], [57], [898], [935], [918, 762, 923], [769], [], [130], [335], [760], [376], [937], [224, 223], [130], [621], [], [30], [497], [593], [], [658], [32, 28], [116, 126], [357], [277], [129], [407], [368], [515], [11], [408], [], [103], [57], [865], [506], [849], [770], [827], [730], [207], [562], [159], [184], [976], [74], [737, 651], [333], [309], [203], [533], [994], [25], [467], [771], [897], [332], [584], [269], [673, 527, 664, 508], [618, 562], [581, 479, 717], [454], [204], [267], [346], [706, 532], [959], [885], [434], [643], [142], [249], [505], [99], [341], [805], [], [373], [394], [789], [988], [870], [], [306], [790, 126], [793, 259], [115], [264], [510], [70], [772], [154, 478], [], [755], [36], [638, 639], [523], [765], [335], [911], [119], [], [169], [617], [276], [143], [310], [550], [228], 
[809], [357], [812], [565], [273], [457, 834], [64], [502], [336], [899, 521, 532, 412], [235], [696], [499], [175], [16], [165], [537], [783], [284], [606], [], [483], [], [935], [854], [917], [555], [444], [867], [578, 982], [633, 316], [714], [719], [25, 28], [280], [305], [], [793], [42], [587], [160], [261], [736], [281, 285], [687], [776], [918], [], [534, 729], [497], [987, 998], [352, 353], [661], [332], [143], [], [397], [495, 692], [655, 630, 474], [42], [979], [982, 703], [506], [672], [135], [4], [963], [205], [836, 837, 919], [364], [112], [645], [802], [481, 453], [691], [604], [633], [990], [160], [747], [425], [650], [73], [], [870], [954, 955, 953, 923], [628], [968], [41], [143], [420], [917], [142], [790], [988], [329], [568], [491], [954], [677], [7, 8], [609], [252], [527, 782, 673, 475], [800], [377], [439], [549], [213], [0], [76], [162], [905], [253], [971], [362], [913], [900], [290], [842, 693, 472, 445], [336], [774], [301], [621], [453, 454], [238], [43], [96], [], [989], [487, 620], [881], [761], [970, 795], [736], [80], [455], [651], [858], [228], [867, 919], [932], [401], [631], [827], [771], [999, 700], [651, 760], [875], [242, 243], [651, 631], [85], [165], [141, 142], [615], [244], [28], [575], [59], [700, 999], [928, 923, 960], [338], [557], [267], [868], [354], [601], [685], [520], [933, 934], [88], [714], [181], [459], [711], [762], [860, 919], [358], [13], [96], [472], [165], [694], [519], [], [839], [], [618, 813, 910, 532], [67], [80], [20], [113], [515, 695], [341], [608], [41], [18], [252], [738], [406], [980], [384], [838], [474], [161, 162, 167], [49], [], [84], [149], [406], [], [652, 413], [352], [760], [40], [82], [581, 479], [734], [57], [676], [115], [12], [363], [144], [733], [921], [945, 948, 950, 953], [449, 975], [127], [844], [986], [281], [471], [310], [273], [153, 203], [473], [258], [256], [1], [225], [426], [869, 879], [502], [227], [405], [59], [968, 504], [895], [444], [11], [770], [18], [893], [351], [50], [507], [775], [695], [592], [339], [748, 911, 692], [166], [515, 775], [916], [250], [214], [174], [552], [23], [432], [942], [257, 222], [], [130], [999, 700], [236], [395], [947], [637], [313], [141, 142], [818, 819, 854], [673, 487, 810], [480], [416, 638, 639], [699, 541, 542], [659], [966, 572], [806], [934], [518, 671], [220], [490], [276], [81], [682, 458], [805], [], [815], [581], [515], [543], [751], [142], [880], [2], [634], [792], [], [], [684], [665, 670], [763], [153], [296], [568], [203], [992], [741], [157], [], [737], [570, 830], [663], [496], [735], [88], [879], [], [993], [797], [385], [], [430], [3], [465], [67], [410], [795], [605], [823], [35], [873], [251], [866], [535, 479], [990], [992], [255], [984], [659], [866], [670], [69], [], [524, 787, 915], [882], [389], [991], [], [544], [564], [896, 804], [855], [984], [692], [298], [594], [557], [372], [652, 413], [528], [562], [743], [213], [937], [916], [191], [229], [923], [980], [], [630], [411], [695], [411], [895], [602], [68], [132], [51], [198], [710], [799, 831], [844], [1], [580], [798], [972], [64, 59], [375], [434], [10], [951], [220], [898], [195, 805], [60], [847], [551, 629], [964], [379], [986], [842], [205], [594], [191], [225], [229], [894], [794, 435], [611], [891], [99], [646], [941], [385], [358, 173], [774], [837, 836, 733], [], [703], [560], [268], [974], [150], [114], [], [390, 973], [216], [753], [131], [682], [822], [666], [416], [725], [291], [270], [212], [905], [567], [345, 690], [149], [920], [777], [94], [974], [760], [427], 
[723], [16], [528], [178], [562], [459, 543], [888], [280], [], [215], [], [740], [317], [709], [539], [239], [969, 692], [], [25], [736], [529, 830, 610], [63], [841, 697], [274], [63], [834, 869], [760], [396], [476], [71], [401], [382], [468], [923, 521, 762, 926], [357], [832], [977, 638, 639], [404], [11], [5], [433, 638, 639], [160], [941], [], [165], [247], [434], [576], [892], [259, 462], [938, 939, 943], [75], [619], [373, 463], [538, 668], [497, 884, 406], [892], [190], [392], [615], [30], [37], [364], [616], [414, 518, 535], [821], [487], [538], [], [817], [31], [977, 978], [646], [502], [434], [641], [355], [961], [64, 55], [752, 852], [382], [470], [253], [150], [110], [5], [195], [399, 636], [], [45], [816], [806, 630], [402], [987, 998], [617], [190], [626], [720], [6], [547], [980], [911, 539], [862], [208], [518, 489, 671], [], [640], [107], [917, 921], [90], [138], [508], [193, 186], [113], [118], [88], [520], [179], [98], [132], [196], [265, 267], [106], [76], [33], [180], [120], [848], [724], [404], [170], [], [584], [847], [644], [774], [28], [302], [819, 546], [399], [70], [769, 798], [578, 982], [831], [367], [919], [135], [161, 168], [455], [94], [944], [174], [616], [776], [], [115], [867], [733], [644], [849, 285], [955], [866], [78], [255], [911, 533], [532], [97], [153], [638, 639], [911], [439, 570, 764], [433], [847], [893], [855], [], [335], [302], [477], [543], [446], [333], [354], [940], [314], [560], [126], [358], [6], [553], [], [692, 886], [311], [], [251], [841, 759], [32, 30], [], [833], [518], [233], [21], [348], [296], [873], [748], [355, 489], [762], [853], [137], [495], [610], [279], [563], [707], [], [821, 703, 839, 975], [970, 979], [102], [601], [291], [637], [121], [317], [963], [95], [364], [838, 487, 459, 445, 638], [307], [512, 473], [224, 214], [475], [870], [23], [866], [], [156], [151], [336], [66], [901], [], [229], [757], [117], [221], [521, 809, 909, 987, 926], [844], [287], [404], [524], [756], [629], [], [], [309], [799], [173], [216], [448], [619, 846], [953], [13], [140], [], [768, 836, 842], [561], [573, 518], [629], [594, 982], [398], [68], [588, 790], [204, 185], [836, 837], [685], [279], [350], [688], [271], [55], [70], [645], [673, 526, 527, 664, 508], [607], [162], [97], [985], [515, 643], [139], [942], [278], [553], [291], [749], [], [], [205], [632], [827], [982], [701], [809, 925], [194], [881], [683, 432, 566], [182], [819], [142], [178], [989], [37], [421, 525, 975], [260], [305], [566], [846], [628], [556], [706, 789, 539, 799], [379], [254], [418], [369], [254], [770], [709], [353], [834, 457, 630], [], [360], [270], [950], [928, 868, 923], [760], [761], [23], [262], [194], [795], [31], [68], [252], [735], [358, 359], [682, 781], [725], [96], [763], [533], [809], [206], [873], [392], [798], [208, 179], [307], [606], [834, 515, 836, 837, 906], [628], [231, 156], [742, 872], [405], [651, 760, 827], [541], [802], [178], [70], [455], [292], [819, 854], [995], [393, 983], [775], [280], [13], [3], [946], [204], [], [136], [213], [470], [344], [596], [382], [748, 636], [909], [793], [186], [141], [616], [548, 851, 598, 632], [150], [], [693], [275], [740, 519], [224, 223], [], [], [622], [75], [720], [928, 960, 923], [772], [340], [819], [378], [804, 469], [789, 421], [273], [852, 186], [644], [], [301], [236], [9], [621], [15], [96], [48], [296], [222], [281], [715, 652, 764, 413], [249, 250], [549], [207], [860], [373], [80], [863], [312, 311], [552], [763], [494], [55], [562], [38], [131], [517], [276], [472, 693], [], 
[372], [566], [], [445], [495], [741], [823], [89], [549], [33], [288], [350], [753], [0], [979], [540], [921], [985], [], [194], [216], [866], [737], [779, 506], [802], [928], [416], [614], [713, 742], [218], [164], [951], [810, 878], [874], [435], [850, 732, 759], [729], [796], [734], [521], [698], [526, 453, 454, 608, 740], [], [182], [166], [102], [225, 465], [960], [896], [188], [989], [313], [322], [610, 862], [396], [327], [744, 657], [928, 960, 868, 415], [], [240, 238], [286], [557], [654], [929], [358], [302], [905], [989], [307], [578, 689], [943], [213], [933], [], [827], [694], [705], [360], [710], [341, 342], [836, 837], [538], [780], [205], [762, 923, 122], [70], [], [48], [273], [835], [809, 925], [448], [747], [220], [850], [187], [825], [472, 693], [79], [593], [719], [], [316], [723], [203], [398], [736], [581, 733, 479], [790, 588], [946], [86], [500], [474], [386, 101], [602], [836, 837], [131], [353], [18], [928], [329, 842], [705, 547], [681, 620, 526, 916, 906], [801], [274, 271], [], [713], [], [], [619, 883], [972], [614, 887], [313], [287], [829], [432], [355], [541], [40], [543], [648], [261], [459, 978, 445], [843], [572, 966], [487], [43], [], [964], [229], [254], [138], [870], [827, 849], [332], [247], [148], [434, 912], [599], [731], [100], [], [538], [498], [82], [671], [153], [875], [422], [72], [840], [548], [586, 864], [891], [930, 868, 967, 968, 504], [308], [301, 918], [128], [62], [750], [], [133], [720], [194], [13], [481], [575], [954, 943], [180], [78], [814], [42], [], [221], [301], [], [67], [258], [730], [3], [355], [240], [284], [879], [513], [690], [695], [280], [716], [300], [912], [574], [205], [131], [847], [843], [348], [800], [674], [658], [608, 869], [734], [459], [714], [295], [723], [699], [489], [607], [148], [809], [69], [347], [866], [203, 156], [412], [225], [35], [488], [16], [951], [], [293], [253], [189], [809, 926], [934, 923], [906], [509], [485], [517], [410], [772, 679, 488], [448], [526], [358], [721, 761, 831], [324], [536], [991], [892, 409], [613], [577], [47], [349], [406], [810, 655, 508], [461], [843], [594], [670], [], [483], [471], [248, 249], [668], [46], [392], [948], [], [694], [794], [562], [858, 467], [723], [902, 488], [650], [218, 215], [], [], [292], [40], [948, 572], [71], [918], [517], [146], [642], [181], [868, 923, 968, 725], [809, 659, 729], [819], [520], [440], [258], [792], [308], [795], [701], [304], [70], [518], [619, 846], [165], [276], [364], [392], [698], [930, 931], [105], [], [188], [221], [315], [169], [428], [77], [], [596], [288], [710, 767], [957], [553, 526], [58], [861], [305], [], [612], [619, 846], [907], [611], [152], [44], [456], [3], [814], [362], [896], [866], [523], [489, 274], [844], [905, 789], [], [917], [442], [], [199], [], [11], [686], [485, 685, 754], [334], [293], [505], [], [232, 852], [0, 758], [388], [189], [307], [], [], [375], [424, 423], [518], [38], [617], [111], [421], [752], [725], [908, 895], [107, 108], [291], [386], [707], [44], [578, 689], [114], [73, 74], [166], [668], [421], [259], [601], [908], [428], [881], [836, 837], [198], [302], [], [545], [226], [], [240, 241, 238], [834, 906], [235], [532], [517, 554, 536], [1], [445, 977, 236], [301], [675], [453, 454], [395, 758], [24], [263], [965], [301], [684], [558], [755], [684], [769, 633], [739], [151], [996, 309], [263], [154, 155], [928, 960], [], [613], [979], [813, 501], [779], [458], [728], [681, 620, 526], [319], [], [4], [131], [12], [182], [568], [608, 836, 837, 655, 636], [992], [802], [936, 943], 
[279], [34], [283], [540], [810], [364], [928], [283], [571], [521], [968, 504], [525], [370], [200, 155, 204], [481], [851], [396], [382], [652, 413], [], [232], [278], [625], [924], [342], [242], [829], [577], [264], [435], [440], [771, 507], [616], [674], [56], [472], [], [457], [466], [714, 542], [], [254], [162], [703], [395], [], [267], [140], [147], [303], [916], [616, 695], [971], [559, 764, 413], [835], [803], [469], [29], [341], [310], [], [9], [704], [270], [459], [3], [517, 839, 718], [756], [517, 975, 977, 536], [], [858], [], [947], [703], [228], [294], [963], [394], [864], [915], [979], [253], [380], [896], [117], [], [583], [836, 837], [794], [310], [701], [101], [552], [705, 888], [687], [15], [627], [], [552], [48], [364], [428], [471], [221], [549], [813, 910], [732], [279], [456], [711], [770], [28], [132], [826], [920], [344], [374], [237], [496], [], [96], [622], [503], [910, 659], [171], [585], [135], [393], [266], [669], [], [], [859], [66], [989], [569], [242], [962, 813, 827], [716], [746], [761], [346], [439], [113], [463, 412], [497, 442], [452, 689], [205], [673, 742, 681, 526, 527, 662, 664, 508], [741], [440], [874], [727], [660], [127], [0], [180, 195], [311], [], [525], [442, 494], [353], [453, 818], [786], [100], [240, 241, 238], [916], [160], [757], [164], [293], [654], [476], [919], [926], [713], [783], [262], [388], [829], [902], [438], [], [282], [521], [364], [177], [833], [658], [596], [215, 218], [576], [358], [752], [424, 423], [223], [78], [859], [605], [193], [156], [841], [82], [643], [77], [403], [173], [514, 836, 837], [], [420], [111], [33], [56], [249], [88], [830], [673, 478], [199], [136], [814], [591], [128], [], [984], [158], [372], [205, 750, 721], [555], [824, 633], [833], [125], [693], [589], [216], [571], [681, 491], [269, 249], [206], [602], [363], [538, 668], [155], [], [352], [389, 567], [], [4], [207], [616, 625, 724], [953], [], [300], [551], [586], [81], [190], [97], [104], [362], [714], [550], [160], [949], [883], [759], [989], [628], [741], [884], [803], [142], [95], [945], [208], [567, 827, 926], [204], [819], [654, 671], [55], [449], [235], [450], [793], [240, 241], [], [417], [436], [22], [2], [37], [886], [618], [277], [642, 462], [317], [733], [349], [754], [796], [425], [908, 404], [314], [416, 638], [8], [518], [893], [], [671], [574], [908, 895], [300], [224], [], [341], [292], [972, 976], [971], [918], [578, 689, 501, 885], [846], [444], [938], [], [471], [317], [657], [898], [836, 869], [302], [48], [175], [949], [728], [681, 620, 478], [14], [645], [141], [399], [614], [359], [], [920], [865, 411], [28], [758], [976], [318], [971], [409], [677], [500], [556], [279], [], [178], [616, 494], [206], [], [988], [673, 681, 810, 620, 527, 782, 664, 508], [928, 923], [439], [71], [37], [853], [881], [172], [170], [595], [156], [889, 541], [491], [45], [906], [866], [649], [901], [232, 249], [753], [149], [511], [895], [465], [590], [376], [545], [39], [364], [476], [782, 664], [3], [63], [599], [684], [69], [311], [681, 620, 508], [127], [611], [27], [667], [726], [865], [630], [135], [545], [838], [343], [49], [], [624], [123], [418], [205], [883], [993], [243], [766], [53], [151], [31], [215], [427], [213], [880], [208], [], [475], [621], [526, 673, 681, 620], [], [104], [578, 654], [490], [235], [702], [720], [832], [732], [528], [999, 861], [503], [639], [745], [362], [238], [731], [738], [204], [966, 907], [598], [719], [11], [444], [667], [215], [151], [407], [985], [50], [314], [], [216], [954], [], [962], [108, 
973], [294], [], [945], [317], [16, 19], [388], [788], [806], [23], [883], [210], [836, 837], [929], [525], [], [301], [534], [959], [], [836, 775, 655], [461], [127], [822], [800], [996], [524, 461], [597], [169], [857], [36], [101], [21], [570], [255], [185], [635], [836, 457], [926], [470], [71], [851, 548], [290], [250], [489, 251], [], [333], [373], [820], [175], [], [453, 493], [318], [], [547], [165], [653], [911, 824, 474], [695], [395], [123], [430], [334], [657], [908, 814], [258, 259], [596], [523, 664], [834, 906], [188], [829], [563], [14], [724, 536], [235], [687], [147], [154], [936], [69], [760], [593], [606], [682], [114], [646], [857], [538, 185, 975], [897, 651, 760], [880], [112, 125], [922], [881], [662], [831, 721, 608], [661], [684], [391], [859], [518], [574], [902, 488], [967, 968, 504], [268], [529, 667], [61], [373], [873], [183], [785], [], [], [882], [929], [577], [434], [143], [804], [381], [], [], [436], [306], [953], [749], [367], [479], [522], [16], [815], [36], [58], [85], [610, 770, 862], [927], [58], [651], [954], [864], [830], [298], [212], [], [262], [987], [665], [558, 593, 819], [873], [276], [35, 36], [467], [956], [713], [753], [948], [231], [564], [899, 532, 725], [69], [947], [869], [423], [86], [838, 631], [2], [416, 702], [816], [825], [696], [925], [177], [35], [40, 46], [176], [242], [473], [], [], [115], [162, 167], [92], [682], [], [177], [], [34], [990], [968, 809, 849, 659, 923], [842], [430], [162, 166], [505, 899], [602], [907], [582], [488, 679, 455], [], [941], [780], [681, 620, 760, 508], [750], [190], [761, 831], [98], [], [513, 439], [543], [], [578, 982, 601], [949], [382], [149], [199], [235], [704], [], [794], [892], [784], [143], [268], [274], [138], [455, 440, 444], [171], [694], [779], [899], [883], [813, 942], [821], [230, 231], [], [296], [581], [98], [365], [387], [213], [756], [286], [56], [514], [300], [446], [650], [24], [357], [826], [673, 553, 526, 527, 782, 664, 508], [896, 897, 827], [475], [388], [574], [929], [992], [941], [648], [810, 508], [301], [761], [182], [294], [102], [356], [], [352], [430], [957], [9], [191], [419], [369], [207], [825], [28], [307], [996], [141], [119], [997, 947], [300], [617, 823], [59], [303], [368], [333], [130], [83, 883], [901], [336], [396, 973], [780], [784, 792, 477], [], [], [908, 404, 895], [625], [369], [125], [743], [300], [131], [678], [865], [168, 159], [666], [56], [70], [436, 581, 479], [574], [530], [728, 412], [374], [], [300], [716], [304], [791, 582], [265, 266], [482], [152], [811], [42], [971], [697, 823], [956], [451], [685], [20], [290], [709, 710, 526, 692], [73], [71], [455], [707], [62], [185], [545], [844], [933], [796], [405], [14], [783], [377], [979], [], [], [451], [230], [], [680], [811], [988], [400, 667], [179], [879], [587], [690], [428], [384], [366], [727], [923], [518, 671], [586], [905], [54], [223], [836, 837, 445], [565], [328, 973, 991], [212], [179], [782, 851, 664], [562], [703], [340], [302], [501], [13], [758], [229], [855], [122], [525], [430], [978], [233], [824], [360], [67], [461], [292], [464], [949, 927], [939, 943], [552], [222], [106], [346], [487], [256], [623], [202], [992], [502], [837, 433, 445], [329], [754], [520], [117], [642], [39, 47, 978], [619, 846], [889], [30], [92], [426], [231], [442], [], [373], [323], [894], [], [], [84], [738], [790], [604], [869], [846], [581], [518, 880], [], [791], [99, 8, 730], [626], [587], [673, 742, 664, 526, 527, 782, 508], [53], [489, 981], [387], [985], [286], [76], [110], [619, 846], 
[595], [388], [434, 533], [826], [745], [], [363], [879], [19], [768], [31], [475], [419, 648, 720], [938], [646], [320], [730], [240, 248], [910], [83], [900], [903], [942], [254], [910], [943, 923], [152], [775, 459], [96], [426], [216], [787], [339], [], [87], [44, 26], [3], [920], [181], [368], [858], [36], [825, 409, 892], [521], [921], [115], [911, 796], [616, 87], [832], [935, 923], [], [539], [105], [581, 479, 511], [228], [615], [290], [89, 951], [515], [500], [11], [527], [321], [603], [96], [94], [246], [57], [8, 7], [342], [14], [967, 968, 923], [150], [624], [575], [8], [854], [], [], [483], [124], [921, 667], [976], [], [510], [424, 423], [127], [197, 205], [403], [874], [658], [770, 788], [213], [773], [256], [744, 657], [675], [711], [595], [], [203], [849], [], [], [673, 742, 620, 526, 527, 664, 508], [513, 903], [551], [564], [358], [176], [839, 821], [925], [388], [354], [350], [253], [399, 501], [561], [598], [794], [804], [820], [929], [123], [529], [407], [64], [515, 593], [823], [366], [896], [907, 572], [], [], [596], [453, 454, 905, 750], [992], [169], [190], [557, 692, 509], [], [1], [968, 659], [], [805], [463], [187], [911, 636], [774], [], [976], [497], [999, 861], [636], [693, 472], [], [571], [971], [523], [548], [38], [808], [915], [652, 683], [1], [614], [261], [75], [929, 245], [], [206], [311], [923, 71, 868], [47], [], [538], [984], [608, 514], [230], [372], [549, 742], [981], [949], [196], [641, 642], [521], [267], [919, 858], [823], [297], [405], [289], [674], [410], [2], [758], [233], [199], [915], [407], [610], [99], [332], [723], [970, 979], [326], [897], [226], [898, 681, 620], [306], [248, 249], [], [900], [644], [774], [763], [8], [813, 910], [868], [108], [190], [516, 431], [839], [358], [], [612], [22], [914, 484], [258], [26], [384], [190], [810, 878], [954], [], [699], [902], [721], [959], [214], [177], [129], [232], [909, 827], [436], [781], [911, 658], [], [560], [], [82], [979], [897], [412], [800], [683], [98], [109], [311], [635], [686], [929], [320], [778], [53], [10], [683], [980], [97], [987, 998], [883], [], [970], [575], [177], [394], [948], [914], [125], [971], [374], [396], [703], [449, 979], [122], [550, 967, 968, 505], [], [198], [654], [596], [933, 934], [352], [608, 806], [579], [681, 810, 620, 508], [23], [317], [585], [877], [836, 837, 971], [6], [747], [375], [], [172], [481, 482], [173], [66, 68], [770], [777, 623], [386, 101], [36], [242, 243], [686], [472], [86], [219], [868, 949, 953], [905, 919], [336], [952], [40], [337], [2, 3], [224], [237], [588, 610, 492, 636], [114], [441, 572], [169], [637], [768, 560], [963, 966, 762, 923], [809, 659], [503], [], [792], [777], [], [472], [422], [416], [431], [731], [877], [395], [76], [6], [767], [314], [635], [196], [934], [360], [783], [344], [710], [164], [267], [294], [807], [571], [343], [359], [922, 918], [840], [640], [642], [484], [116], [170], [456], [267], [350], [], [132], [129], [71], [407], [88], [992], [488, 600], [626], [919], [513, 650, 819], [538], [732], [342], [701], [391, 758], [836, 837, 842, 445], [935], [927], [507], [923], [892], [205], [520], [96], [171], [675, 671], [800], [], [492], [518, 691, 570], [106], [62], [988], [463, 696], [], [], [116], [509], [834], [], [713], [425], [568], [492], [39], [631], [785], [908, 895], [124], [963, 335], [390], [969, 474], [221], [241], [876, 435], [199], [763, 597], [681, 620, 508], [162], [278], [], [940], [672], [193], [311], [814, 977, 978], [275], [952], [416], [136], [], [150], [387], [940], [448], [350], 
[967], [974], [120], [], [514], [723], [310], [287], [536], [234], [894], [52], [213], [842], [898], [], [810, 878], [608, 836, 837, 841], [193], [256], [923], [391], [617], [269], [663], [545], [], [], [608], [762, 532], [146], [297], [578, 630, 982, 601], [26], [390], [128], [189], [], [880, 414, 671], [914], [383], [17], [50], [512], [], [694], [248, 250], [252], [458], [162], [511], [600], [459], [961], [553, 446], [472], [673, 664, 526, 527, 632, 508], [522], [351], [421], [679], [360], [918], [914], [], [500], [418, 709, 710], [597], [974], [122], [976], [938], [92], [751], [509], [386], [109], [539], [277], [309], [802], [132], [143], [], [865, 968], [773], [962], [447], [], [805], [778], [737], [285], [992], [636], [44], [477], [607], [127], [71], [470], [250], [], [], [518, 665, 671], [771], [841, 610], [273], [694], [997], [366], [230], [72], [245], [238, 241], [653], [313], [879], [62], [911, 533], [390], [843], [18], [160], [988], [965], [812], [350], [813], [930, 934], [1], [689], [786], [351], [934], [403], [676, 597], [45], [456, 777, 623, 787], [367], [274], [568], [299], [679], [563], [327], [650], [829], [679], [633], [996], [644], [422], [131, 134], [719], [], [923], [17], [929], [416], [311], [708], [679], [467, 766], [898, 596], [11], [345, 690], [289], [673, 526, 527, 782, 664, 508], [], [705, 460, 975], [159], [712], [288, 290], [594], [274, 277], [732], [808, 515], [426], [75], [174], [864], [175], [902], [612], [332], [780], [372], [802], [], [809, 923], [507], [299], [228], [718, 821], [763], [858], [867], [789], [38, 44, 463, 26], [654], [], [578, 601, 415], [747], [269], [995], [466], [935], [579], [56], [546, 650, 819], [62], [483], [272], [608], [450], [776], [295], [677], [110], [708, 862], [70], [192], [428], [529], [791], [505], [447], [321], [156], [658], [724], [299], [898, 664, 527, 782, 508], [149], [144], [664, 508], [736], [78], [218], [234], [715], [789], [19], [462, 734], [514], [611], [926], [175, 185], [], [791], [], [116], [703], [440], [569], [626], [728], [581, 874], [404], [37], [405], [], [969], [713], [526, 664, 508], [933], [], [546, 486, 650, 402, 819, 541], [921], [471], [150], [666], [], [258, 270], [458], [419], [233], [746, 455], [403], [489], [227], [952], [533], [854], [132], [17], [23], [814], [114], [], [825], [205], [993], [581], [390], [445], [832], [89], [110], [711], [496], [226, 698], [911], [9], [252], [836, 837, 123], [112], [759], [263], [11], [724], [647], [693], [159], [814, 975], [711, 631], [322], [881], [27], [575], [823], [342], [137], [288], [422, 543], [304], [640], [613], [371], [24], [739], [440, 831, 455, 721, 737], [472, 693], [99], [586], [568], [], [735], [584], [299], [480], [225], [487], [23], [144], [673, 681, 620, 508], [69], [794], [717], [], [544], [447], [476], [735], [887], [484], [78], [835, 858], [251], [634], [363], [397], [685], [809, 925], [283], [543], [935], [643], [513], [521], [30], [31], [412], [957], [], [440], [394, 758], [223], [872, 745, 761], [292], [647], [326], [792], [758], [8, 7], [561], [604], [557], [47], [725], [942], [327], [267], [987, 924], [488], [956], [347], [135], [817], [819], [530, 531, 409, 892], [489], [155], [859], [420], [359], [298], [553], [608], [456], [349], [178], [578, 601], [217], [950], [673, 526, 527, 664, 508], [496], [920], [368], [772], [32], [518, 671], [742], [476], [], [993], [677, 587, 783], [659], [750, 189], [216], [626], [823], [105], [394], [505], [899], [100], [], [663], [94], [301], [551], [482, 754, 761], [], [755], [249], [71], [11], [673, 526, 
527, 664, 508], [235], [847], [], [962, 923], [816], [150], [], [257], [161], [819, 601], [551], [424, 423], [892], [512], [723], [298], [884], [908, 404], [812], [119], [171], [968, 849], [265], [346], [853], [745], [313], [911, 824], [921], [763], [913], [600], [260], [973, 991], [884], [49], [625], [651], [203], [136], [307], [40, 46], [634], [488], [248, 250], [853], [820], [288, 293], [802], [601], [100], [679], [], [875], [168], [111], [961], [452, 911], [], [727], [588], [376], [239], [39], [84], [658], [497, 442], [], [24], [320], [755], [890], [236], [875], [559], [5], [438], [205], [796], [929], [547], [99], [431], [88], [847], [369], [25], [565], [674], [31], [], [383], [349], [101], [944], [476], [494], [10], [328], [159], [505], [563], [680], [83], [679], [140], [258], [768], [335], [122], [125], [876], [371], [65], [661], [829], [945], [618, 910], [578, 457, 689, 982, 601], [778], [785], [741], [923], [546, 650, 818, 819], [589], [40], [798], [148], [876], [], [66], [], [551], [], [713, 742], [990], [], [549, 616], [604], [50], [918], [673, 742, 620, 664, 526, 527, 632, 508], [784], [562], [411, 849, 762], [], [], [299], [874], [744, 657], [968], [904], [84], [533], [200, 232], [18], [469], [], [484], [622], [232], [398], [65], [680, 529], [69], [857], [908, 404], [85], [990], [239], [905, 750, 894, 799], [869], [82], [861], [872, 759], [572], [796], [572], [901, 907], [431], [722], [621], [], [252], [987], [487], [9], [480], [117], [310, 314], [681, 620], [10, 15], [977], [124], [91], [818], [827], [232], [964], [], [283], [14], [435], [220], [155], [152], [666], [805], [642, 542], [822], [579, 875], [205], [534], [515], [401], [744, 657], [691], [801], [548, 851], [22], [181], [141], [736], [42], [28], [431], [339], [152], [916], [581, 479], [74], [182], [], [280, 985], [578], [687], [], [262], [162], [299], [386, 101], [451], [869], [673, 664, 526, 527, 632, 508], [844, 539], [431], [297], [65], [369], [45], [486, 889], [568], [940], [], [], [260], [405], [353], [450], [104], [825], [144], [99], [], [93], [956], [27], [452, 911], [157], [62], [344], [493], [225], [195], [181], [538], [], [728], [975, 616], [631], [421], [805], [380], [204], [616, 600], [258], [95], [398], [364], [518], [], [847], [570], [890, 445], [548, 851, 831, 598], [798], [623], [731], [581, 479, 817, 511], [5, 390, 973], [195], [824, 834], [673, 526, 527, 664, 508], [366], [864], [214], [882], [991], [239, 222], [766], [54], [744], [17], [155], [757], [153], [183], [223], [980], [451], [805], [472], [326], [5], [179], [107], [518, 880], [350], [339], [626], [489, 919, 412], [666], [932], [295], [890], [417], [101], [631], [141], [234], [382], [959], [6], [293], [871], [90], [224], [874], [97], [354], [633], [454], [331], [948, 950, 951], [486], [383], [604], [996], [998, 987], [936], [570], [392], [255], [694], [308], [311], [337], [359], [901], [571], [495], [881], [423], [334], [289], [741], [309, 599], [19], [109], [653], [929], [581, 734], [455], [167], [125], [102], [722], [40, 46], [443], [937], [371], [341], [867], [463], [882], [773], [111, 52], [54], [82], [244], [601], [39], [481, 482], [638, 639], [449, 718, 733], [716], [738, 559], [652], [331], [236], [147], [765], [], [932], [110], [654], [669], [29], [420], [69], [69], [524, 461], [918], [251], [], [281, 283], [139], [627, 479], [79], [736], [872, 949], [488, 535], [126], [716], [409], [909, 910], [81], [210], [21], [92], [22], [], [26], [650], [102], [], [404], [33], [], [845], [33, 973], [471], [898, 605], [425], [255], [117], [], 
[962], [], [198], [246], [421, 506], [683], [877], [66, 68], [756], [196], [821], [603], [626], [136], [181], [752, 852], [533], [95], [488], [549], [531], [282], [358], [703], [152], [749], [696], [89], [564, 750], [392], [914], [18], [457], [867], [602], [825], [705, 547, 733], [190], [281], [661], [27], [488], [941, 923], [445], [864], [945], [], [724], [905], [229], [530], [82], [948], [342], [887], [397], [354], [497], [143], [669], [119], [368], [360], [17], [], [52], [968, 849], [235], [401], [581, 479, 436], [899], [890], [314], [662], [421], [975], [376], [851], [], [234], [543], [], [937], [395, 758], [537], [367], [8, 7], [553], [850], [395], [292], [736], [772], [771], [75], [54, 60], [330], [754], [399], [967, 968, 504], [], [241], [322], [115], [681, 810, 620, 662], [820], [466], [444], [], [278], [477], [673, 526, 527, 664, 508], [582, 998, 987], [148], [], [785], [497], [537], [461], [900], [927], [], [621], [25], [106], [153], [128], [233], [10], [268], [284], [], [397], [], [107], [610, 976], [191], [717], [931], [718], [528], [326], [728, 458], [241], [42, 44], [676], [966, 907], [149], [703], [561], [852], [238], [100], [851, 532, 825], [324], [450], [552], [852], [549, 633], [162, 166, 167], [417, 975], [535], [483], [], [72], [55], [849, 505], [], [581], [619], [809], [912], [], [279], [259], [530], [386, 101], [432], [907], [514, 841, 608, 610, 630, 636], [310], [162, 167], [251, 246], [753], [935], [863], [945], [946, 322], [38], [277], [231], [652], [227], [494], [688], [832], [743], [592], [365], [169], [679], [742], [487], [405], [975], [481, 482], [71], [291], [340], [937, 923], [557], [849], [961], [729], [], [568], [341], [849], [760], [], [260], [254], [759], [934], [847], [418], [931], [846, 526], [952], [943], [], [179], [58], [960], [320], [293], [462], [297], [240, 241], [16], [520], [295], [488], [323], [512, 473], [438], [39], [732], [602], [721], [31], [247], [621], [308], [45], [], [86], [470, 736], [581, 479, 817, 511], [899], [118], [641], [459], [101], [98], [368], [139], [690], [186], [205, 246], [582, 951], [35, 37], [518, 665], [754], [195], [402], [576], [418, 709], [733], [159], [892], [100], [44, 26], [903], [801], [263], [734], [672], [997], [918], [992], [273], [913], [322], [426], [298], [869, 879], [66], [872, 759], [212], [452], [578], [456], [886], [93], [671], [248, 250], [572], [131], [62], [52], [153, 155], [718, 821], [546], [189], [28], [418], [32], [20], [21], [829], [157], [854], [201], [515], [795], [382], [955], [369, 381], [300], [297], [97], [566], [225], [115], [774, 731, 861], [314], [501], [207], [985], [675], [832], [833], [], [479], [287], [700], [536], [336], [392], [], [405], [867], [741], [603], [951, 949, 950, 954, 923], [119], [942], [460], [522], [800], [708], [456], [91], [104], [70], [886], [73, 815], [208], [75], [440], [100], [582], [821], [314], [363], [661], [94], [651], [], [386], [140, 142], [188], [597], [324], [281], [31], [992], [672], [365], [569], [174], [348, 349], [991], [604], [], [158], [629], [], [603], [789], [568, 715, 716], [385], [983], [197], [725], [38], [884], [784, 477], [386], [], [30], [567], [779], [27], [481, 482], [958], [852, 752], [591], [680], [760], [523, 655, 765], [721, 750], [673, 681, 620, 526, 527, 664, 508], [125], [716, 912], [411], [712], [429], [850], [747], [472], [269], [149], [739], [489], [], [886], [278], [706], [614], [134], [678], [833], [680, 910, 659, 828], [267], [900], [662], [403], [147], [847], [65], [169], [], [662], [293], [392], [205, 589], [435], [271], 
[75], [979], [501], [238], [262], [662], [794], [949], [404], [923], [564], [734], [903], [323], [358, 359], [380], [382], [504], [76], [117], [48], [619, 846], [289], [973], [463], [877], [719], [931], [907, 572], [667], [6, 983], [500], [218], [], [937, 567], [361], [], [432], [636], [374], [], [565], [544, 672, 596], [520], [670, 518], [652], [197], [500], [30], [39], [92], [189, 175], [733], [], [458], [], [737], [295], [810], [506], [652, 413], [424, 423], [117], [428], [455], [95], [458], [659, 666], [305], [521], [239], [775, 842, 616], [355], [908], [755], [72], [293], [330], [907, 966], [764], [707], [627], [952], [614, 887], [789], [183], [347], [441], [], [867], [694], [637], [454], [174], [896, 804], [247], [851], [185], [985], [239], [608, 836, 837, 582], [127], [], [114], [], [552], [10], [683], [70], [], [512], [820], [162, 168], [247], [845], [457], [852], [924], [685], [842, 879, 977, 978, 445, 638, 639], [], [580], [383], [385], [250], [444], [3], [801, 433, 793], [152], [162], [393], [280], [943, 923], [562], [339], [496], [636], [203], [921, 446], [392], [], [682], [57], [179], [262], [586, 652, 413], [578, 903, 689, 885], [], [388], [347], [806, 630], [228], [494], [279], [600], [739], [131], [232], [205], [], [610, 487, 655], [674], [661], [253], [115], [842, 638, 639], [569], [649], [64, 55], [400, 667], [201], [801], [277], [49], [40, 46], [928], [287], [500], [267], [850], [674], [834], [411], [19], [758], [438], [954], [769], [964], [918], [880], [734], [555], [539], [816], [919], [339], [967, 968, 504], [213], [425, 858], [789, 799], [759], [614], [4], [401], [959], [196], [678, 487], [389], [175], [42], [275], [26], [48], [790], [764], [762], [89], [294], [667], [755], [999], [726], [847], [127], [834, 906], [774], [816], [395], [647], [853], [345], [17], [712], [628], [923, 934], [868], [800], [480], [930, 934, 936], [934], [295], [520, 516], [181], [572], [522], [894], [703], [159], [], [924], [883], [435, 794], [757], [196], [867], [22], [729], [430], [165, 852], [137], [724], [63], [324], [127], [], [4], [], [869], [256, 244], [552], [529], [171], [317], [140], [548], [750], [928, 923, 960, 927], [304], [896], [319], [974], [96], [595], [640], [320], [134], [460], [297], [65], [441], [307], [570], [343], [953], [350], [560], [195], [678], [557], [518], [], [645], [367], [576], [852], [147], [207, 219], [497], [238, 241], [748, 414], [145, 146], [235], [393], [32, 30], [559], [966, 441], [], [375], [814, 977], [107], [], [921], [710], [909, 926], [207], [834, 630, 703], [190], [466], [846], [19], [247], [283], [315], [334], [986], [], [832], [11], [57], [673, 526, 527, 782, 664, 282, 508], [513], [284], [69], [292], [], [769, 773], [185], [161, 785], [603], [516, 520, 697], [248], [514], [679, 327], [241, 238], [254], [495], [468], [312], [368], [700], [72, 815], [318], [539], [772], [263], [524], [151], [889], [656, 479], [135], [903, 689], [619, 846], [653], [951], [623], [901], [79], [32, 31], [249], [], [], [336], [377], [162], [], [234], [679], [673, 553, 526, 527, 782, 664, 508], [102], [411], [892], [437], [225], [629], [101], [582, 519, 939, 943], [868], [700], [923, 934, 933], [389], [742], [843], [98], [9], [981], [112, 327], [710], [567], [292], [], [803, 866], [205], [572], [471], [678], [473], [560], [85], [588], [105], [17], [176], [], [524, 461], [887], [488, 718, 536], [644], [555], [], [404], [813, 910, 659], [824], [582], [48], [88], [980], [85], [955], [825], [183], [756], [395], [530], [163], [845], [132], [715], [139], [679], [549], 
[484], [489], [445, 638], [944], [581], [325], [40, 46], [310], [812], [850], [76], [483], [950], [854], [447], [874], [276], [710], [857], [161], [32, 30, 31], [360], [816], [1], [883], [298], [836, 837, 655, 879], [], [921], [884], [778], [754], [482], [715], [525], [927], [898, 784], [], [725], [122], [699], [403], [24], [76], [191], [761], [534], [238, 240], [526, 527, 782, 664], [], [3], [794], [174], [716], [569], [342], [261], [245], [284], [771], [662], [974], [147], [531], [768], [646], [234], [], [641], [912], [493], [975, 822, 541, 542], [752], [212, 217], [18], [353], [653], [512], [141], [388], [57], [398], [486], [836, 837], [540], [119], [901], [], [974], [357], [913], [158], [554], [105], [634], [703], [625], [299], [66, 68], [915], [115], [591], [251], [459, 445], [378], [814], [908], [188], [674], [980], [430], [940], [672], [102], [628], [323], [464], [718], [721], [15], [], [973, 108], [214], [401], [758], [390], [960], [439], [948, 957], [92], [869, 818], [621], [240, 239], [], [301], [], [25], [679], [664, 851], [319], [463], [972, 500], [332], [411], [909], [510], [524, 461], [91], [734], [], [384], [202], [81], [268], [470, 406], [509], [894], [922], [851, 548], [604], [424, 922], [658], [515, 808, 639], [995], [386, 101], [389], [825], [589], [991], [636], [866], [884], [794], [86], [449], [849], [553], [], [384], [31], [935, 937, 923], [701], [397], [972], [531], [130], [729], [891], [275], [17], [164], [], [563], [205], [575], [774], [828], [532, 762], [], [386], [482], [903], [602], [312], [257], [886], [531], [344], [161], [338], [779], [264], [340], [203], [893], [131], [958], [940], [258], [689], [29], [207], [484], [], [57], [427], [291], [], [717, 656, 436, 479], [781], [24], [513], [947, 997, 114], [720], [480], [364], [499], [47], [], [265], [802], [598], [119], [69], [916], [240, 241], [785], [593], [132], [60], [640], [997], [492], [159], [184], [505], [264], [645], [435, 876], [680], [396], [594], [919], [324, 325], [], [245, 254], [579], [551], [92], [624, 453], [], [438], [853], [826], [297], [669], [], [874], [384], [558], [682], [117], [572], [990], [42], [606], [437], [681, 810, 620], [], [87], [496], [0], [41], [632], [776], [526, 786], [627], [], [728], [562], [539], [385], [73, 815], [47], [259], [59], [48], [120], [669], [528], [322], [37], [458], [281], [950], [912], [781], [673, 613, 761, 605], [256], [214], [896], [], [518], [292], [], [318], [958], [162], [195], [800], [43], [439], [], [628], [428], [952], [41], [453, 454], [184], [123], [956, 957], [498], [935], [324], [651], [525], [704], [457], [844], [754], [], [810], [754], [107], [849], [532, 398], [791], [], [582, 790], [315], [520], [874], [138], [448], [491], [], [223], [748], [112], [213], [242], [137], [260], [523], [182], [305, 306], [868], [365], [700, 999], [636], [128], [268], [252], [694], [241, 238], [246, 251], [98], [426], [522], [948], [514], [371, 373], [487], [56], [329], [502], [456, 652], [139], [520], [833], [724], [318], [499], [148], [108], [384, 383], [305], [36], [234], [437], [73], [125], [315], [132], [373], [67], [615], [361], [919], [667, 151], [870], [90], [901], [331], [955], [766], [], [], [962, 923, 935], [136], [43], [37], [33], [178], [244], [886], [], [714], [727], [704], [435, 876], [221], [543], [126], [384], [325], [91], [498], [291], [335], [603], [], [201], [510], [666], [649], [483], [948], [232], [551], [198], [751, 479], [763], [513], [], [259], [560], [473], [877], [421, 904, 905], [], [783], [969], [468], [5], [811], [316], [35], [617], 
[907, 440], [408], [533], [687], [641], [963], [], [209], [782, 664], [213, 248], [899], [672], [645], [987, 998], [270], [679], [779], [849], [330], [618, 909, 828], [973], [614], [], [828], [495], [69], [624, 453, 454], [753], [623], [547], [362], [848, 632], [428], [708, 458], [893], [936], [699], [702], [228], [626], [868, 967, 968, 504], [], [], [871], [897], [481, 482], [379], [633], [904], [455], [287], [262], [432], [308], [985], [489, 733, 919], [49], [989], [316], [117], [471], [519, 907, 440], [544, 909, 828], [917, 921], [172], [697], [267], [], [140], [587, 596], [443, 836, 837], [921], [], [57], [350], [838, 631], [568], [59], [205], [310], [242], [309], [535], [518], [323], [325], [263], [20], [582, 938], [240], [279], [219], [191], [126], [466], [683, 558], [603], [988], [551], [458, 708], [131], [210], [650], [322], [783], [477], [219], [981], [2], [196], [427], [628], [304], [], [], [113], [419], [746], [34], [836, 837, 656, 785], [48], [767], [630], [892], [882], [457], [], [386, 101], [60], [5], [72], [900], [614], [717, 479], [749], [813], [563], [440], [169], [385, 101], [514, 948, 836, 837, 852, 489, 636], [388], [100], [379], [456], [400, 667], [999, 893], [809, 925], [351], [834, 906], [224, 223], [206, 221], [660], [688], [], [209], [396], [549], [], [721], [942, 923], [352], [578], [769, 71], [805], [599], [585], [639], [767], [849, 505], [581, 479], [841, 610], [313], [746], [834, 585], [839], [468], [53], [418, 709, 710, 767], [929, 968], [], [62], [783], [528], [132], [194], [137], [337], [941], [544], [594], [400, 667], [712], [], [250], [824], [], [72], [327], [714], [380], [], [577], [371], [900], [93], [111], [223], [486], [749], [637], [428], [98], [211], [], [695], [815], [890], [15], [868, 505], [891], [73], [617, 823], [], [941], [301], [681, 620], [44, 48], [668], [923], [607], [684], [322], [980], [928], [892], [572], [888], [76], [873], [518], [], [400], [136], [114], [310], [911], [599], [806], [257], [516, 431], [27], [965], [], [87], [660], [24], [540], [462], [340], [48], [], [7], [774], [624, 453], [322], [177, 170], [46], [605], [931], [], [323], [478], [148], [738], [217], [603], [908, 895], [262], [796], [517, 733], [9], [458], [997], [781], [92], [531], [], [543], [15], [548], [759], [343], [530], [335], [385], [395], [136], [105], [8], [892], [839, 718], [860], [507, 695], [664], [467], [706], [938], [735], [8, 7], [651], [72], [279], [], [921, 917], [295], [191], [996], [848, 632], [263], [609, 500], [728, 281], [280], [277], [378], [52], [588, 692, 415], [], [949], [73], [283], [182], [601], [306], [395], [40, 46], [122], [425], [862], [777], [195], [99], [539], [604], [329], [976, 979], [852], [], [659], [126], [289], [654], [551], [611], [579, 881], [563], [728], [315], [792], [931], [114], [902], [372], [424, 423], [205, 653], [149], [792], [121], [540], [], [274], [514], [128], [399], [903], [], [646], [883], [942], [734], [669], [2], [588], [615], [966], [935], [837, 518, 671], [559], [778], [], [677], [237], [645], [59], [802, 518], [251, 575], [911], [352], [619], [510], [571], [357], [330], [347], [389, 395], [300], [414], [449, 975], [85], [597], [207], [938], [11], [911], [933], [173], [823], [440], [194], [315], [308], [470], [53], [52], [491], [909, 926], [136], [118], [658], [608, 869, 824], [945], [918], [96], [242], [392], [426], [168], [989], [348, 825], [39], [638, 639], [], [350], [746], [50], [84], [650], [274], [991], [599], [295], [], [417], [317], [253], [645], [193, 187], [452, 911], [825], [406], [651, 187], [19], 
[932], [27], [494], [834, 906, 982], [316], [479, 511], [123], [72], [106], [514], [361], [160], [539], [54], [682, 538], [219], [162], [218], [811], [109], [312, 311], [], [452], [793], [], [488, 843], [299], [350], [289], [203], [834, 906], [572], [707], [351], [855], [866], [996], [159], [840], [948], [690], [592], [801, 570], [165], [18], [696], [655], [480], [580], [694], [707], [715], [340], [736, 681, 620], [727], [422], [208], [], [632], [142], [733], [62], [], [4], [288], [791], [838, 711], [650], [348], [790], [625], [272], [669], [208], [], [58], [525], [193], [378], [798], [565], [781], [344], [61], [293], [13], [782, 851], [239], [409], [794], [938], [944], [865], [388], [202, 189], [322], [194], [430], [371], [172], [546, 650, 819], [2, 3], [912], [394], [622, 759], [312], [945], [801, 983], [903, 789], [985], [162], [397], [769, 622], [], [331], [574], [141], [649], [567], [], [718, 637], [61], [85], [487], [578, 854], [57], [328], [583], [863], [566], [375], [], [], [240], [990], [277, 278], [625], [961], [], [969], [503, 572], [949], [508], [866], [381], [560], [27], [72], [338], [466], [375], [227], [352], [603], [137], [265], [754], [892], [589], [573], [286], [325], [754], [539], [613], [987, 923], [787], [959], [63], [192], [83], [507], [489, 86], [869], [738, 580], [385], [196], [924], [], [610], [], [414], [536, 484, 871], [538], [168, 211, 159], [143], [132], [961, 659], [858], [371], [292], [2, 3], [902], [301], [544], [100], [675], [335], [92], [309], [737], [418], [522], [907], [842], [769], [232], [482], [70], [866], [576], [732], [480], [146], [324], [574], [699], [82], [540], [591], [479], [409, 892], [], [940], [127], [29], [255], [], [282], [655, 570], [313], [30], [963], [], [213, 205], [661], [13], [741], [93], [366], [974], [329], [387], [790], [435], [328], [911, 533, 539], [564], [263], [183], [944], [98], [578, 982], [952], [656, 784, 477], [353], [786], [372], [], [142], [369], [14], [284], [540], [], [243], [976], [224, 805, 223], [], [78], [420], [687], [55], [285], [736], [203, 246], [521], [911], [320], [769], [132], [258], [891], [650], [809, 618], [514, 763, 445], [905, 493], [295], [230, 231], [74], [], [877], [770], [267], [40, 46], [521], [889], [80], [140, 142], [817], [268], [129], [69], [459, 445], [24], [176], [487], [714], [], [576], [135], [517], [929], [599], [347], [117], [802], [732], [868], [49, 50], [788], [85], [741], [642], [239], [471], [443], [481], [232], [153], [790], [826], [], [], [83], [937], [], [750], [533], [581], [], [560], [619, 844, 846, 761], [98], [514, 515], [752], [825], [75], [493], [371], [29], [328], [234], [738], [32], [644, 470], [630], [786], [354], [407], [33], [239], [], [79], [368], [166], [836, 837], [626], [916], [322], [733, 862], [205], [622, 179, 245], [781], [659, 923, 925, 809, 950], [], [190], [595], [369], [858], [861], [432], [517], [51], [836, 837, 975], [159], [682], [985], [578, 601], [645], [301], [444, 671], [326], [344], [943, 923], [], [147], [856], [597], [508], [261], [187], [722], [344], [451], [], [311, 312], [121], [535], [315], [891], [432], [448], [], [362], [209], [494], [488], [135], [217], [442], [176], [94], [276], [107], [], [793], [826], [], [880], [914], [14], [62], [567, 827], [], [828], [907], [275], [937], [851], [933], [73], [930, 415], [531], [920], [167], [422], [482], [721], [406], [], [774], [426], [438], [967, 968, 911, 504], [578, 834, 982], [382], [858], [112], [340], [169], [891], [146], [162, 167], [273], [716], [227], [662, 632, 761], [], [642], [2], [532], 
[638, 639], [561], [347], [400, 667], [731], [175], [582, 728], [908, 404], [67, 54], [9, 489], [805], [], [627, 654], [749], [138], [652, 465, 792, 413], [577], [180], [205], [185], [437], [302], [886], [368], [], [439], [771], [], [93], [187], [], [15], [554], [324], [], [274], [721], [883], [28], [233], [544, 909, 827], [766], [44], [320], [247], [500, 286], [], [355], [779], [681, 620, 526, 508], [453], [897], [148], [478], [658], [825], [984], [11], [399], [823], [140], [127], [309], [763, 597], [898], [675], [61], [210], [194], [997], [339], [], [962], [374], [801, 836, 445], [986], [871], [109], [619], [115], [116], [452], [751], [205], [896, 804], [382], [998], [506, 117], [656], [464], [779], [784], [289], [905, 619, 846, 831], [309, 599], [394], [10], [824, 735], [900], [683], [780, 976, 914, 405], [], [711], [371], [643], [205], [534], [290], [582], [115], [379], [221], [951], [820], [], [224], [879, 977], [159], [608, 999, 861], [523], [636], [717], [324], [759], [944], [365], [955], [996], [613], [34], [866], [579, 421], [270], [953], [538], [437], [163], [571], [], [822, 542], [86], [], [574], [681, 526, 664, 761], [608, 515, 788], [338, 333], [93], [522], [946], [560], [652, 872], [542], [944], [936], [422], [], [319], [183], [996], [157], [28], [515], [85], [187], [181], [257], [696], [106], [203], [871], [554], [19], [902], [782, 664], [901], [741], [179], [22, 23], [508], [597], [767], [389], [616], [559], [860], [510], [345], [904], [107], [481], [410], [], [588], [], [], [987, 998], [], [213], [84], [647, 968, 809, 659], [63], [368], [], [227], [700], [72], [145], [876, 435], [130], [779], [], [702], [], [489, 85], [364], [719], [658], [933], [76], [943, 692, 963, 868], [951], [172], [837, 454], [276], [622], [453, 454, 553, 917], [164], [839, 660], [271], [301], [509], [591], [13], [444], [144, 540], [801], [157], [576], [788], [18], [397], [863], [842], [196], [731], [854], [800], [153], [487], [561], [394], [460], [], [], [393, 108], [825], [442], [830, 691], [980], [140], [405], [564], [695], [191], [], [332], [13], [], [93], [234, 236], [555, 734], [169], [573], [854], [805], [405], [], [602], [256], [261], [999], [778], [879], [880, 879], [985], [262], [252], [516], [630], [31], [31], [66], [66], [202], [333], [650], [254], [428], [129], [257], [749], [79], [816], [376], [367], [344], [55], [440], [618, 813, 909, 827], [991], [74, 815], [772], [159], [712], [870], [581, 479, 511], [491], [987], [363], [336], [537], [231], [604], [862], [300], [529], [30], [948], [651], [9], [845], [673, 526, 527, 664, 508], [352, 351], [74], [234], [905, 831], [707], [441], [565], [764], [58], [291], [6], [671, 518, 535], [477], [385], [683], [44], [833], [21], [87], [], [55], [194], [713], [194], [83], [452], [830], [590], [643], [845], [613], [288, 290], [221], [362], [939], [], [882], [682], [582, 950, 951], [225], [326], [414], [158], [65], [181], [], [375], [], [710], [6], [313], [256], [673, 613, 681, 620, 526, 527, 662, 632, 508], [419], [98], [780], [805], [898], [52], [836, 837, 552, 459], [961], [97], [995], [574], [576], [304], [664, 782, 527], [559], [185], [687], [352], [81], [581], [173], [836, 837, 850], [584], [473], [896, 567], [306], [574], [], [900], [], [168], [114], [424], [], [34], [], [795], [661, 479], [], [113], [783], [], [911, 533, 539], [468], [834, 650, 851], [739], [104], [480], [781], [988], [518], [], [981], [952], [450], [446], [], [703], [370], [188], [505, 827], [844], [984], [362], [], [532], [82], [748], [497], [532], [], [677], [569], [257], 
[246], [349], [862], [372], [645, 733], [247], [], [907, 720], [379], [287], [65, 395], [524, 461, 728], [], [250], [847], [], [301], [851], [], [187], [844], [535], [335], [398], [323], [453], [528], [520], [948], [222], [305], [230], [157], [281, 282], [], [351], [35], [112], [673, 527, 761, 664, 633], [682], [943], [715, 524, 461], [896], [], [861], [422], [628], [217], [922], [12], [321], [777], [87], [768], [126], [284], [65], [139], [31], [497, 557], [307], [619, 818], [745], [706], [688], [915], [279], [130], [822], [609], [], [552], [567, 926], [959], [716], [300], [916], [920], [622], [145], [977, 978], [], [272], [892], [506], [125], [615], [872], [702], [272], [466], [758], [738, 580, 428], [658, 911], [923], [387], [863], [556], [202], [991], [485, 632], [886], [87], [565], [801], [162], [390], [360], [161], [144], [], [875], [771], [457], [836, 837, 785], [153], [433], [481, 482], [834], [96], [462], [21], [471], [773], [440, 455], [231], [88], [684], [572], [576], [379], [984], [484], [7], [407], [787], [231], [941], [592], [919], [581, 654], [657], [957], [881], [258], [337], [111], [999, 861], [930], [104], [542], [497], [673, 664, 526, 527, 508], [23], [251], [917], [862], [64], [526, 664, 508], [404], [160], [123], [381], [], [843, 702], [971], [289], [799], [753], [711], [303], [480], [72, 815], [215], [581], [887], [748], [453], [786], [273, 274], [], [985], [], [807], [970, 980], [2], [426], [720], [99], [628], [], [635], [781], [612, 879], [547], [51], [14], [570], [152], [308], [908, 404], [386], [763], [955], [], [157], [68], [], [194], [495], [232], [927], [495], [577], [829], [269], [956], [680], [236], [], [49], [682], [138], [], [884], [722], [361], [255], [530, 844], [273], [958], [357], [206], [741], [785], [535], [372], [391], [355], [289], [912], [493], [851], [195], [4], [622], [808], [855], [564], [394], [485], [301], [713], [763, 597], [379], [265], [183], [166, 958], [787, 501], [113], [29], [477], [240, 241, 238], [203], [907, 910, 532, 923, 924, 936, 966, 762], [130], [548, 782, 851, 598, 664, 889], [434], [939, 943], [717], [955], [76], [735], [561], [222], [636], [146], [48], [714, 539], [994], [415], [860], [856], [659], [421, 882], [114], [628, 536], [475], [], [], [683], [284], [288], [372], [515], [599], [384], [990], [288], [19], [58], [514, 836, 837, 703], [], [884], [930], [98], [486], [370], [231], [977], [840], [973], [277], [380], [676], [41], [934], [], [646], [569], [310], [971], [390], [710], [791], [597, 763], [], [378], [186], [654], [496], [431], [376], [834, 457], [588, 285], [], [691, 638, 639], [704], [82], [576], [850], [779], [353], [319], [542], [954, 950], [123], [636], [699, 541], [617], [678], [443], [58], [666], [77], [106], [460, 557, 718, 814], [734], [955], [561], [426], [947], [294], [414, 703, 841, 608], [944], [471], [111], [155], [286], [724], [893], [538], [641], [423, 424], [430], [], [680], [373], [304], [450], [58], [602], [637], [174], [800], [23], [722], [289], [756], [448, 853], [287], [683], [644], [463], [977, 978], [881], [300], [524, 461], [855], [], [111], [514, 792], [651, 700], [], [320], [485, 848], [621], [577], [405], [988], [938], [481], [880], [], [603], [33], [673, 742, 664, 526, 527, 782, 632, 508], [90], [], [714], [221], [708], [], [70], [512], [814], [281], [], [993], [218], [490], [347], [164], [957], [968, 918], [565], [595, 958], [815], [884, 406], [608, 610, 836, 837], [86], [945], [903], [671], [535], [398], [781], [239], [756], [768], [854], [455], [106], [387], [983], [383], [274], 
[682], [], [908, 404], [581, 479, 817], [913], [507], [771], [675, 478], [172], [672], [91], [154], [98], [948], [565], [728], [298], [268], [335], [434, 435], [223], [85, 86], [850, 791], [682], [256], [416], [292], [968], [376], [581], [], [781], [], [], [776], [810, 878], [162], [], [812], [913], [957], [970], [297], [615], [425], [2], [321], [190], [770], [602], [440], [394], [88], [144, 127], [604], [], [462], [9], [654], [155], [], [481, 482], [591], [574], [274], [329], [968, 618], [123], [905], [319], [546], [296], [623], [173, 176], [868, 923, 659, 532], [375], [452], [394], [525], [358, 359], [], [73, 77], [497], [998], [], [418], [105], [647], [437], [218], [636], [559], [300], [762], [620, 594], [], [459, 445, 638], [868, 438], [985], [906], [948, 572, 849], [189], [749], [720], [241], [692, 948, 950, 951], [269], [478], [462], [437], [45], [896, 435, 861], [635], [733], [670], [], [206], [455], [329], [678], [19], [547], [419], [], [724], [212], [852], [20], [661], [989], [919], [369], [626], [650, 818, 819, 632], [101], [422], [116], [691], [496], [907, 760], [314], [122, 123], [983], [89], [892], [494], [371], [769, 911], [414, 455, 631], [542], [], [], [254], [85], [796], [973, 991], [777], [45], [31], [380], [458], [337], [950], [770], [498], [762], [979, 972], [242, 243], [952], [], [658], [328], [901], [200], [634], [414], [292], [776], [868], [357], [742, 728], [134], [85], [553], [198], [729], [28], [88], [314], [160], [], [799], [510], [913], [707], [205], [204, 155], [553, 621, 882], [152], [349], [619, 846], [56], [667, 263], [801, 983], [653], [269], [429, 981], [42], [448], [], [853], [444], [776], [847], [870], [159], [], [494], [66], [148], [], [162], [507], [31], [670], [811], [257], [198], [863], [958], [776], [], [321], [986], [322], [940], [712], [825], [518], [501, 568], [195], [287], [340], [796], [836, 837, 775, 759, 445], [551], [162, 167], [819], [424], [489], [32], [793], [37], [236], [710, 767], [777], [591], [], [433], [459], [886], [380], [834, 982], [532], [434, 631], [878], [308, 309], [194], [586, 847], [284], [418], [33], [234], [647], [834, 570], [105], [99], [146], [122], [], [97], [429], [], [539], [996], [216], [811], [894], [77], [749], [66], [524, 461], [], [650], [668], [790], [93], [179], [313], [889], [524], [713], [489, 381], [843], [343], [272], [412], [16], [679], [777], [94], [680], [290], [279], [], [719], [], [117], [270], [693, 472, 445], [], [8], [841], [822], [523], [102], [712], [467], [343], [838], [602], [478], [86], [461, 465], [426], [626], [742], [752, 852], [786], [92], [288], [189], [908], [425], [192], [27], [257], [986], [836, 837, 793], [460], [311], [870], [115], [579], [29], [], [810, 878], [911], [2], [877], [189], [321], [347], [], [321], [854], [459], [205], [670], [911], [681, 810, 620, 508], [599], [943], [931], [985], [425], [], [191], [7], [23], [800], [876], [813], [231, 232], [831], [967, 504], [716, 573], [961], [277], [241], [900], [225], [378], [922, 441, 762], [587], [240, 241], [820], [236], [312, 311], [149], [518, 671], [896], [594], [962, 923], [949], [256, 220], [868], [], [874], [159], [936], [226], [782, 851], [849], [110], [373], [989], [543], [533, 824, 735], [181], [436], [], [488, 679], [381], [10], [245], [269], [81], [995], [968], [359], [904, 905, 831], [789], [647], [303], [23], [609], [650, 906, 834, 632], [340], [458], [861], [296], [193], [2], [581, 479, 717], [170], [768], [361], [917], [612], [901, 427], [979], [125], [90], [390], [346], [881], [98], [547], [974], [234], [188], 
[35], [298], [369], [683, 432], [771], [757], [436], [778, 943], [910, 659], [697], [236, 237], [500], [49], [979], [524, 461], [489, 429, 981], [653], [381], [400, 667], [434], [590], [], [904, 309], [107], [457, 869], [805], [661], [324], [217], [441, 572], [914, 780], [174], [759], [64, 55], [88], [], [605], [188, 189], [], [727], [198], [190], [497], [236], [310], [675], [42], [723], [187, 201], [944], [895], [809], [722], [143], [400, 667], [810, 620, 526, 508], [44], [221], [365], [930, 588], [346], [836, 837], [276], [925], [811, 753], [381], [40], [121], [908, 895], [732], [470], [763, 597], [816], [997, 947], [365], [122], [152], [611], [517, 733], [136], [673, 526, 527, 664, 508], [123], [819], [879], [], [13], [], [711], [845], [208], [96, 489], [110], [533], [950], [518, 671], [564], [219], [729], [156], [296], [913], [435], [195], [487], [704], [23], [109, 973], [47], [48], [748, 893], [48], [276], [487], [830], [49, 50], [307], [888], [449, 853], [], [40], [984], [272], [370], [196], [790], [489, 59], [76], [911, 658], [73], [727], [672], [851], [981], [883, 942], [], [336], [], [861], [444], [540], [927], [352], [375], [78], [902], [], [688], [546, 650, 402, 818, 819], [504, 850], [343], [480, 608, 539, 799], [166], [857], [495], [993], [], [40, 41, 44, 46], [70], [738], [], [632], [752, 852], [], [192], [179], [466], [670, 518], [732], [], [262], [45], [255], [513, 650], [676, 488], [19], [389], [223], [167], [659], [179], [346], [883], [459, 445], [98], [425], [], [354], [483], [], [279], [843], [735, 223], [783], [191], [], [820], [548, 664, 851, 632], [225, 235], [437], [162], [275], [617, 501], [312], [766], [105], [109], [697, 470], [334], [585], [], [513], [518, 429], [547], [54], [612], [574], [765], [391], [496], [831], [872], [0, 391, 758], [841], [], [922], [134], [355], [325], [523], [0], [893], [605], [], [759], [244], [933], [465], [514, 788], [49], [189], [894], [358, 359], [233], [], [501], [851], [702], [808], [507], [515, 451], [703, 578, 601], [816], [640], [390], [82], [774], [230], [599], [293], [120], [787], [830, 836, 837, 610], [948], [45], [323], [842], [19], [978], [904], [481, 482], [945], [866], [], [899], [232, 231], [756], [467], [], [757], [444], [502], [603], [18], [265], [671], [767, 692], [183], [729], [246], [959], [442], [997, 947], [473], [357], [439], [695], [197], [272, 62, 67], [450], [302, 314], [14], [207], [257], [627], [673, 526, 527, 782, 664, 508], [181], [573], [520], [257], [], [62], [698, 538], [565], [371], [], [52], [351], [94], [774, 608, 610], [995], [149], [340], [963], [975, 979], [489, 429], [], [336], [256], [790], [305], [900], [], [130], [617], [2], [299], [191], [985, 309], [656], [], [972], [489, 429, 981], [928], [980], [560], [580], [98], [789], [473], [987, 998], [651, 655], [305], [739], [614], [430], [402], [42], [659], [631], [588, 850], [722], [828], [3], [], [107], [786], [616], [993], [949], [851], [84], [922], [616], [988], [682], [], [769], [595], [914], [433], [], [370], [535], [757], [240, 241, 238], [938], [], [983], [842, 433, 445], [640], [], [834, 906, 630], [331], [920], [859], [825], [529], [875], [132], [62], [714], [571], [536, 403], [334], [], [], [37], [983], [845], [807, 561], [376], [382], [606], [560], [], [7], [], [315], [98], [673, 526, 527, 664, 508], [913], [711], [76], [], [550], [117], [224], [3], [], [197], [405], [771], [584], [623, 563], [], [317], [557], [987, 998], [566], [237], [421], [248], [0], [514], [916], [], [384], [793], [554], [593], [480], [433, 639], [24], [977], 
[422], [165], [316], [11], [608, 836, 837, 869, 464], [777, 490, 461, 464], [489], [385], [616], [271], [552, 619, 493, 846], [744, 657], [742], [812], [480], [286], [325], [549], [38], [299], [677], [491], [269], [528], [112], [286], [265], [440], [314], [513], [384], [608], [], [], [149], [342], [726], [403, 895], [457], [331, 478], [230, 222], [944], [362], [619], [581], [33], [783], [42], [352], [424], [444], [385, 386], [109], [802], [409, 892], [509], [923, 926], [955], [59], [], [108], [], [491], [752], [90], [835], [], [498], [174], [52], [127], [695], [449], [779], [601], [887, 884, 406], [121], [678], [44], [916], [38], [702], [937], [868], [391], [], [811], [470], [677], [619, 846], [9], [743], [809, 926], [0, 515, 853], [636], [], [337], [4], [630], [472], [910], [741], [98], [138], [545], [302], [132], [680], [870], [76], [384], [8], [], [651], [], [192], [644], [521, 659, 950], [627], [73, 74, 815], [897], [49], [632], [299], [653], [283], [670], [770, 478], [929], [715], [382], [37], [6], [542], [713], [335], [441], [577], [146], [968], [10], [856], [], [593, 541], [140], [3, 6], [940, 943, 948], [], [850, 765], [726], [70], [575], [681, 841, 620], [763, 413], [], [789], [57], [125], [128], [268], [307], [710], [701], [210], [354], [313, 315], [962, 923], [658], [724], [718], [], [288, 290], [3], [], [942], [451], [528], [398], [109, 973], [70], [421], [624], [367], [], [981, 429], [607], [64], [22], [471], [164], [6], [225], [], [908, 404], [605], [423], [], [], [18], [313], [640], [55], [642], [243], [37], [483], [800], [736], [351], [492], [843], [771], [169], [111], [895], [653], [18], [145], [], [483], [578, 885], [656], [575, 479], [130], [319], [342], [0], [], [297, 295], [608], [193], [810], [486], [], [], [840], [653], [467, 499], [1], [524], [971], [835], [288], [894], [85], [155], [763], [168], [608, 728], [174], [241], [623], [448], [0], [484], [966], [550], [], [720], [650, 818, 819], [540], [480], [946], [], [985], [707], [], [835], [325], [603], [21], [719], [122], [443], [117], [654], [876, 435], [259], [340], [847], [659], [305], [976], [185], [84], [311], [37], [771], [265, 266], [518], [149], [], [418], [363], [123], [642], [618], [559], [280], [228], [882], [558], [464, 608, 610], [666], [586], [147], [907, 671], [3], [242], [552], [640], [744, 657], [18], [629], [890], [921, 917], [768], [], [988], [613], [438], [560], [305], [236], [920], [78], [936], [769, 77], [579], [711], [768], [17], [383], [9], [628], [215], [528], [869, 742, 526, 655, 630], [346], [], [389], [275], [584], [383], [479, 817], [517], [604], [780], [677, 587], [485, 685], [319], [980], [152, 155], [567], [726], [761], [673, 681, 526, 527, 664, 508], [326], [489, 747], [744, 657], [544, 964, 926], [940], [247], [740, 477], [], [121], [699], [387], [548, 613, 664, 526, 527, 851], [606], [320, 985], [778], [911], [546, 650, 819], [460], [363], [492], [225, 419], [878], [305], [], [175], [130], [428], [776, 439], [314], [], [650, 822, 542], [471], [281], [2], [193], [475], [112], [141], [873], [350], [574], [745], [286], [655], [137], [310], [766], [974], [680], [539], [913], [342], [941], [256], [881], [396], [645], [180], [86], [], [597], [805, 205], [870], [336], [238], [789], [618], [750], [395], [422], [308], [518], [], [517], [607], [941], [749], [546, 402], [964, 987], [210], [903], [], [380], [224], [717], [693], [342, 343], [381], [910], [273], [581, 479, 436, 511], [241], [1], [632], [113], [581, 479, 817, 511], [836, 837], [], [617], [325], [334], [514, 876, 435], [636], [67], 
[138], [514, 515, 655, 958], [985], [960], [857], [730], [263], [643], [672], [], [964], [954], [838], [681, 620], [773], [807], [37], [417], [380], [866], [666, 924], [316], [12], [102], [], [652], [968, 504], [592], [915, 853], [834, 630], [759], [578, 894], [822], [68], [428], [681, 620], [629], [8, 792, 958], [158], [827], [789], [149], [21], [257, 258], [618], [432, 683], [900], [183], [452, 850, 610], [144], [126], [852], [795], [168], [67, 68], [292], [410], [100], [825], [660], [309], [195], [467, 125], [330], [479, 436], [387], [481], [495], [566], [480], [704], [], [777], [314], [178], [750, 564], [748], [655], [758], [492], [412], [356], [762], [548], [147], [122], [153], [9], [151], [832], [10], [12], [739], [47], [355], [914], [398], [725], [182], [284], [87], [309], [610, 758], [839], [344], [199], [573], [672], [854], [101], [783, 535], [69], [306], [], [75], [948], [610, 836, 837], [291], [948], [49, 679], [820], [400], [462], [19], [232], [267], [922], [171], [133], [744, 657], [361], [815], [259], [28], [372], [937], [234], [847], [474], [649], [779], [731], [694], [950, 951], [920], [115], [6], [777, 764], [150], [224, 235], [419], [834, 906], [587], [713], [384], [807], [615, 890], [979], [159], [593, 650], [735], [37], [244], [119], [551], [896], [861], [326], [190], [376], [], [905, 846, 721, 831], [146], [360], [485, 848, 851, 632], [594], [127], [694], [152], [48], [495], [21], [960, 470], [758], [203], [], [], [988], [152], [559], [829], [704], [646], [], [294], [809, 923], [50], [68], [937], [569], [521], [58], [768], [73, 74], [515], [694], [814, 977, 978], [], [974], [775], [727], [242], [644], [603], [75], [835], [], [345], [49], [92], [459], [137], [294], [647, 967, 606], [968], [204], [879], [831], [471], [643, 881], [112], [967], [328], [781], [338], [61], [856], [578, 982], [976, 972], [41, 44, 26], [118, 119], [939, 943], [149, 150], [898], [503], [231], [940], [615], [431], [696], [880], [596], [833], [916], [768], [], [951], [162, 230], [996], [185], [911, 658], [103], [668], [821], [495], [509], [158], [560], [876, 912, 435], [937], [705, 825], [999], [930], [67], [938], [479], [585], [756], [621], [923, 499], [690, 345], [681, 620], [773], [869], [869], [311], [56], [], [673, 742, 664, 526, 527, 782, 632, 508], [739], [969], [867], [208], [862], [804], [840], [54], [495, 725], [685], [474], [585, 655], [761], [150], [12], [451], [477], [47], [494], [857], [719], [972], [742, 713, 664, 526], [535], [766], [847], [956], [825], [794], [934, 478], [665], [840], [751], [822], [581, 479], [], [858], [401], [272], [169], [977, 978], [870], [107], [853], [785], [204], [942], [17], [177], [492], [608, 903, 841], [444], [502], [723, 549], [927], [336], [], [444], [133], [191], [95], [596], [924], [947], [816, 911], [166], [981, 429], [777], [90], [537], [680], [820], [209], [139], [953], [696], [205], [674], [926], [], [171, 237], [770, 788], [70], [452], [560], [94], [715], [597, 413, 671], [32], [574], [252], [27], [814, 693], [871], [299], [907, 440], [122], [243], [526], [186], [578, 834, 523, 906, 630], [28], [882], [846], [980], [301], [111], [495], [320], [70, 985], [778], [], [561], [780, 975], [501], [837, 670], [723], [391], [93], [243], [47], [233], [63], [650], [991], [805], [866, 730], [427], [192], [360], [15], [273], [575], [407], [916], [74], [], [354], [828], [451], [236], [758], [170], [], [825], [419], [79], [97], [779], [626], [820], [108], [932], [655], [703], [920], [503], [557], [988], [804], [937], [314], [431], [773], [138], [945], 
[507], [599], [], [], [896], [970, 979], [919], [840], [474], [637], [120], [489, 791], [721], [313], [380], [577], [809], [980], [698], [780], [796, 837, 836], [992], [610, 697], [938, 942, 943], [967, 504], [34], [258], [917], [187], [87], [175], [68], [60], [19], [403], [], [713], [867], [647], [140, 94], [865, 692], [], [376], [261], [787], [217], [440], [761], [102], [901], [], [763], [131], [952], [386], [128], [200], [955], [522], [268], [690, 345], [741], [43], [704, 581, 919], [770], [250], [235, 465], [], [851], [794], [926], [628], [744, 657], [546], [], [977], [130], [373], [940], [872], [258], [997, 623, 696], [119], [840], [458], [835, 855], [520], [], [812], [680], [142], [128, 144], [101], [], [329], [398], [50], [636], [335], [509], [693], [199], [242], [807], [], [610, 770, 862, 733], [592], [337], [386, 101], [110], [], [123], [22], [254], [91], [], [999], [679], [695], [769, 418, 709, 600], [44], [469], [896, 999, 861], [49], [331], [169], [887], [737], [349], [348], [220], [581, 479], [846], [608, 806], [27], [568], [], [281], [312], [101], [90], [39], [494], [44], [189], [746], [662], [63], [282], [292], [], [286], [166, 167], [621], [890], [581, 479], [196, 198], [89], [513], [281], [355], [96], [719], [417], [952], [670], [255], [928, 960], [651, 760], [551, 629, 696], [119], [688], [], [301], [992], [738], [450], [726], [501], [723], [255], [177], [703], [293], [114], [842, 529, 562], [364], [810, 508], [867, 675], [996], [344], [649], [312, 311], [525], [321], [321], [385], [325], [455], [621], [933], [146], [150], [566], [], [826], [], [584], [564], [936], [970], [342], [85], [], [], [871], [279], [108], [28], [808], [932], [232, 231], [457], [344], [740], [926], [421, 765], [249], [670, 415], [526, 400], [431], [], [967, 968, 923], [383], [363], [176], [363], [164], [235], [651], [392, 393, 108, 973], [318], [95], [615], [574], [367], [73], [512], [863], [301], [308], [766], [531], [891], [879], [166], [333], [207], [400, 667], [589], [363], [], [204], [872], [959], [231], [574], [344], [398], [132], [13], [517], [986], [836, 837], [139], [], [462], [127], [513, 875], [549, 968, 504], [22], [894], [813, 659], [549], [682], [526, 787], [770, 605], [436, 733], [288], [459, 445, 638], [228, 265], [51], [367], [561], [308], [868, 954], [928, 927], [296], [401, 881], [67], [749], [297, 295], [7], [722], [216], [681, 810, 620, 508], [645], [548], [306], [79], [662], [722], [430], [756], [638], [378], [760], [507], [844], [923], [840], [666], [900], [618, 926], [221], [], [510], [], [928], [974], [643, 692, 478], [807], [50], [950], [923], [60, 68], [861], [398], [646], [144], [146], [728], [690, 345], [204], [206], [370], [960], [983], [945], [371], [329], [67], [817, 511, 479], [968, 504], [33], [], [676], [513], [155], [373], [198], [820], [134], [770], [588], [362], [64], [847], [474], [866], [581, 817, 479], [10], [255], [512], [933], [430], [631], [108], [367], [317], [603], [999], [621], [484, 871], [915], [983], [375], [186], [195], [287], [340], [56], [975, 497], [560], [295], [987], [206], [861], [770], [969, 659], [292], [506], [188], [784], [397], [300], [815], [349, 350], [419], [174], [573], [6], [926], [897], [], [895], [678], [520, 516], [701], [899, 849], [167], [866, 730], [652, 764, 413], [644], [137], [874], [494], [557, 602, 733], [721], [636], [923, 960], [128], [817, 511], [611], [869], [31], [409], [297, 295], [997], [562], [521], [897], [499], [452], [492], [388], [], [], [449], [394], [173], [920], [285], [584], [813, 910], [891], [711], 
[144], [222], [813, 909, 910], [351], [633], [273], [362], [638, 639], [261], [489], [225], [196], [335], [], [148], [966], [713], [40, 44], [203], [555], [806], [630], [474], [18], [764], [651], [], [198], [512], [164], [644, 720], [112], [181], [709], [582, 953], [813], [609, 586, 413], [601], [], [], [82], [168], [453, 454], [37], [73, 74, 815], [67], [159], [], [], [324], [718], [311], [534], [976], [230, 231], [607], [476], [400, 667], [274], [765], [814], [143], [109], [806], [701], [433], [], [171], [41], [582, 950, 954], [581, 661, 479], [560], [497], [815], [341, 342], [753], [248], [102], [680], [262], [738, 633], [157], [329], [516, 850], [821], [715], [8], [], [569], [426], [946], [770], [333], [754], [839, 978], [275], [486, 819, 889], [321], [461], [123], [453, 885, 887], [827], [139], [281], [276], [241], [], [836, 837], [611, 207], [948], [696], [317], [77], [614, 879], [684], [707], [479], [618], [851], [680], [553], [138], [362], [927], [381], [47], [989], [920], [359], [793], [881], [890], [81], [608, 615, 792], [244], [652], [347], [984], [681, 620, 508], [581, 717, 479], [377], [720], [258], [194], [784], [478], [451], [660], [416], [308], [914], [532], [412], [662], [361], [688], [985], [121], [754], [863], [577], [231], [443], [200], [104], [203], [189, 191], [547], [673], [209], [621], [105], [450], [752], [810, 878], [], [300], [149], [73], [840], [946], [447], [464, 608], [234], [], [783], [739], [979, 525], [292], [971], [145], [608], [256], [926], [408], [691], [273], [360], [434, 533], [835], [326], [299], [679], [852], [59], [594], [616], [151], [308], [557], [529], [212], [729], [907, 440], [868], [], [971], [661], [], [244], [307], [974], [46], [226], [748], [563], [128], [535], [882], [830], [623], [200], [404], [995], [326], [489, 395], [288], [652, 847, 465, 413], [18], [140], [886], [278], [], [643, 759], [373], [933], [294], [830], [737], [723], [75], [140], [172], [], [234], [838, 631], [625], [690], [677], [261], [76], [161], [943], [61], [487], [851], [162, 166], [555, 475], [354], [], [987, 998], [309], [372], [686], [], [187], [95], [936], [339], [716, 765], [95], [527, 592, 782, 664, 508], [980], [627], [], [86], [311], [214], [692], [548], [690, 345], [406], [908, 404, 812], [892], [850], [431], [950, 951], [465, 652, 413], [570], [], [95], [46], [299], [984], [835], [625], [623], [589], [946], [584], [254], [753], [679], [864], [379], [755], [909], [70], [84], [904], [520, 850], [382], [122], [2], [484, 814], [639], [222, 207], [6], [733, 920], [745], [422], [797], [861], [107], [587], [], [714], [921], [811], [624, 453], [726], [], [928, 960, 954, 572], [712], [173, 253], [650, 402, 818, 819, 632], [995], [686], [962], [681, 620, 664, 508, 477], [803], [306], [620, 526, 664, 508], [982], [324], [971, 724], [920], [440], [405], [440], [954], [417], [581, 734], [974], [791], [369], [581], [423], [637], [990], [858], [791], [400, 667, 733], [818, 872, 622, 759], [9], [748], [328], [670, 518], [250], [326], [531], [673, 526, 527, 664, 508], [139], [469], [317], [352], [704], [694], [148], [74], [935], [130], [987, 998], [136], [813], [259, 265, 153], [465, 763], [38], [928, 960], [628], [248, 250], [471], [476], [41], [432], [188], [715], [801, 983], [135], [283], [607], [935, 567, 923], [], [], [840], [963], [245], [128], [297], [344], [749], [249], [], [126], [419], [], [861], [195], [357], [770, 811], [617, 823], [86], [839], [679], [992], [], [558, 402, 699], [628], [628], [673], [358], [485, 754], [346, 351], [905, 825], [487], [100], [820], 
[673, 664, 526, 527, 632, 508], [769], [582], [815], [999], [651], [113], [823, 836], [851], [467], [727], [151], [410], [], [], [54], [907, 532, 440, 966, 762], [2], [514], [447], [], [555], [458], [], [], [634], [277, 278], [59], [946], [336], [], [378], [237], [938], [810, 878], [95], [961], [81], [19], [629], [], [539], [538], [245], [693], [290], [339], [49, 50], [999, 281, 861], [], [32], [47], [859, 521, 651, 760], [693, 472], [639], [713], [616], [], [510], [210], [79], [], [783], [655], [236], [550], [953], [744, 657], [929], [894], [253], [851], [732], [659, 809], [], [415], [205], [188], [], [210], [717, 581, 479], [529], [185, 186], [897], [999, 700], [275], [130], [206], [293], [531], [776], [538], [360], [863], [57], [407], [148], [6], [760], [855], [167], [778], [68], [91], [801], [45], [2], [792], [35], [863], [683], [146], [163], [519], [899, 951], [205], [489], [367], [], [151, 188], [874], [546, 650, 402, 818, 819], [619, 314], [831], [377], [282], [279], [272], [161], [73], [775], [554], [602], [149], [782, 851], [373], [297], [384], [535], [785], [347], [175], [853], [14], [472], [275], [608, 806, 841, 831], [658], [609], [888], [48], [322], [284], [457], [969, 470], [383], [955], [259], [603], [705], [2], [307], [531], [229], [487], [760], [247], [286], [862], [483], [685], [721], [624], [66], [674], [137], [517], [22], [739], [519], [274], [744, 657], [], [980, 975], [307], [600], [868, 968, 504], [882], [996], [953], [41], [770], [270, 279], [281, 282], [], [611], [373], [506], [882], [428], [516], [448], [138], [733], [743], [406], [679], [708, 975], [384], [859], [213], [888], [502], [150], [646], [], [981], [], [651, 813, 567, 827], [939, 943], [20], [902], [846], [64], [573], [651, 567, 760], [605], [638, 639], [779], [711], [458, 708], [162], [778], [75], [259], [872], [552, 515], [659, 937], [199], [659], [184], [61, 62], [518, 671], [123], [110], [308], [635], [993], [912], [613], [385, 386], [583], [889], [849, 505], [125], [662], [147], [334], [841], [244], [950], [842, 977, 978], [], [358], [193], [741, 687, 884, 406], [312], [153], [224], [349], [947], [203], [264], [185], [454], [693], [79], [421], [144], [968], [45], [258], [625], [200], [985, 301], [610], [579], [913], [867], [579, 582], [931], [259], [564], [900], [987, 998], [864], [81], [813], [528], [626], [770], [883], [257], [732], [54], [338], [836, 837, 841], [792, 428], [272], [413], [804], [922], [546, 650, 402, 819, 541], [], [441], [852], [316], [277], [457], [605], [658], [40], [515], [], [755], [25], [992], [809, 925], [887], [898, 680], [877], [16], [506], [230, 231], [796], [615], [324], [918], [333], [374], [392], [545], [760], [640], [359], [], [763, 597], [112], [190], [294], [791], [496], [64], [], [225], [646], [129], [908, 895], [], [486], [167], [651], [905], [736], [594], [], [206], [294], [], [145], [593], [72], [], [13], [138], [834], [326], [770], [222], [155], [375], [560, 768], [433], [907], [248, 250], [794], [15], [824], [810, 878], [549], [], [295], [710, 809], [481, 482], [176], [206], [981], [83], [499, 923], [434], [775], [977, 842, 978], [866], [43], [], [291], [951], [531], [848], [840], [370], [393], [], [767], [818], [314], [768], [806], [347], [263, 151], [493], [496], [268], [173], [195], [786], [], [873], [816], [447], [910, 567], [22], [758], [696], [190], [207], [573], [2], [377], [546], [986], [], [200], [868, 813], [857], [766], [972, 976], [], [], [281], [622, 759], [966], [684], [546, 819], [433], [85], [807], [269], [38], [426], [49, 50], [819], [942], 
[585], [743], [207], [206], [642], [816], [519], [316], [762, 532], [205], [], [28], [845], [539, 741], [0], [847], [99], [510], [836, 837], [866], [], [], [15], [500], [115], [719], [903], [123], [703, 463, 738], [659], [], [29], [872], [767], [372], [195], [191], [252], [546, 819], [659], [284], [487], [28], [165], [726], [355], [272], [879], [271], [600], [], [241], [8, 912], [958], [472], [105], [810, 878], [313], [562], [870], [259], [744, 657, 812], [392], [814], [608, 770], [888], [98], [333], [725], [233], [447], [139], [717], [82], [540], [77], [329], [333], [244], [517, 625], [325], [639], [789], [115], [536], [965], [840, 462], [389], [284], [882], [896], [588, 948], [933], [711], [360], [756], [508], [95], [636], [276], [420], [962, 987, 923], [207], [796], [660, 557], [32, 31], [923], [646], [980], [82], [958], [85], [244], [182], [294], [804], [697], [366], [479, 535], [999], [824], [], [724], [92], [232], [877], [946], [], [496], [812], [985], [813, 567], [662], [883], [100], [359], [444], [771], [426], [917], [811], [775, 977, 978], [983], [871], [322], [187], [940], [8], [343], [207], [859], [630], [155], [306], [322], [681, 620, 526], [69], [59], [336], [393, 327], [93], [174], [170], [68], [206], [85], [477], [185], [112], [934], [463], [730], [650, 819, 822, 632, 542], [562], [870], [977], [928, 923], [451], [168], [898, 655], [14], [89, 414], [715], [775], [734], [765], [38], [681, 620, 526, 846, 632, 508], [396], [951], [815], [], [149], [597], [594], [546, 650, 819], [354], [266, 267], [488, 695, 508], [363], [455], [247], [813, 910], [55], [471], [249], [536], [2, 3, 973], [503], [288], [795, 970], [673, 681, 620, 526, 527, 664, 508], [85], [47], [160], [722], [452], [655], [201], [788, 502], [648], [643], [477], [203], [42], [210], [23], [], [342], [741], [77], [], [113], [704], [501], [721, 697], [534, 729], [382], [89], [], [161], [303], [579, 881], [38], [137], [908, 404], [661], [496], [333], [217], [924], [899, 868, 968, 809, 463, 659], [609, 465, 413], [728], [50], [937], [369], [630], [778], [153], [968, 504], [], [459], [581, 479], [578], [304, 301], [899], [22], [863], [384], [308], [619, 846], [471], [], [334], [679], [382], [87], [460, 718, 975, 977, 978], [499], [842, 977, 978], [842], [649], [26], [761], [738, 825], [460], [157], [719], [864], [585], [634], [88], [618, 809, 926], [161], [933, 923], [195], [950], [243], [395], [879], [285], [991], [333], [934], [648, 760], [992], [907, 440], [578], [250], [176], [570], [80], [879], [23], [51], [700, 999], [290], [91], [208], [596], [814], [764], [77], [480, 785, 731, 414], [366], [411], [330], [836, 837, 774, 655], [557], [56], [240], [119], [695], [586], [669], [331], [361], [526, 527, 664, 508], [671], [37], [382], [105], [458], [768], [658], [518, 671], [281], [], [], [72], [22], [921], [132], [369], [547], [41, 48], [], [248, 250], [783], [282], [350], [608, 792], [584], [], [933], [], [385, 386, 101], [], [], [83], [921, 917], [], [369], [972], [92], [], [623, 923], [382], [738, 580], [593], [], [347], [647, 968, 532], [902], [514, 515, 476, 765], [476], [308], [390], [954], [790, 952, 954], [957], [616, 972], [935], [138], [224], [37], [320], [381], [425], [216], [923, 572], [441, 572], [891], [627], [715, 652, 764], [791], [187], [980], [834, 869, 906], [36], [581], [145], [989], [818], [427], [728], [216], [888], [131], [903], [427, 756], [261], [36], [544, 909], [], [], [447], [418], [537], [], [337], [293], [917], [437], [247], [489, 275, 276], [923], [805], [512], [346], [847], [871], [82], 
[190], [465, 597, 728], [892, 721], [347], [682], [641], [858], [5, 6], [652, 465, 413], [944], [864], [562], [295], [300], [439], [888], [135], [40], [218], [548], [763], [168, 159], [82], [0], [88], [900], [417], [673], [984], [437], [400], [479], [931], [257], [558], [400], [511, 479], [287], [935, 469, 923], [695], [385, 386], [294], [633], [882], [539], [854], [151], [52], [641, 808], [716], [329], [226], [823], [3], [953], [141], [], [648], [], [332], [907], [160], [308], [758], [522], [219], [806], [842], [602], [22], [258], [734], [520], [148], [252], [248], [348], [17], [916], [793], [659], [617, 823], [], [650, 401, 819], [178], [32], [944], [], [757], [634], [23], [15], [239], [471], [697], [419], [151, 158], [941], [746], [24], [553], [481, 482], [773], [700], [18], [391], [757], [480], [680], [42], [66, 68], [873], [], [117], [], [232], [], [331], [], [274], [129], [804], [454], [538], [654], [], [411], [797], [259], [236], [443], [657], [211], [55], [936], [25], [140], [643], [836, 837], [22], [936], [809, 925], [453], [997, 947], [410], [697], [], [], [617, 438], [118], [995], [387], [801], [777], [989], [562], [472], [348], [513], [720], [755], [215], [939], [865], [], [893], [761], [582], [277], [113], [110], [772], [794], [709], [521, 947], [739], [347], [656], [83], [898], [164], [490], [], [684], [304], [72], [839], [552], [472, 693], [86], [422], [977, 978], [947], [490], [910], [], [346], [39, 47], [312, 311], [703], [270], [600], [720], [15], [890], [986], [563], [447], [976], [839], [440], [217], [404], [461], [153], [863], [349], [902], [898], [836, 837, 617, 789], [132], [268], [490], [], [866, 596], [486, 559], [541], [716], [28], [], [198], [498], [5], [768], [546], [188], [920, 475], [260], [634], [772], [776, 819], [559], [928, 927], [747], [728], [579, 881], [567], [241], [695], [78], [161], [115], [157], [561], [183], [164], [413], [27], [116], [489, 815], [381], [], [610, 430], [251], [462], [], [738], [605], [573], [605], [702], [13], [291], [22], [343], [991], [], [123], [198], [23], [276], [40], [679], [230], [104], [840], [70], [209], [332], [926], [947], [628], [291], [222, 257], [652, 413], [292], [975], [289], [798], [641], [673, 418, 526, 527, 664, 508], [570], [998, 939, 943], [380], [209], [173], [189], [299], [630], [181], [245], [], [291], [829], [956], [176], [575], [324], [4, 391], [915], [464, 597, 763], [83], [740], [52], [713], [205], [504, 441, 572], [], [], [138], [681, 810, 620], [761], [777], [109, 973], [], [541], [223], [613], [180], [512, 623], [540], [154], [], [], [217], [594], [903], [], [465, 796], [964], [923], [], [], [896, 804, 631], [398], [523], [550], [917], [840], [574], [608, 796, 806, 478], [704], [572, 966], [357, 958], [597], [399], [84], [], [364], [40], [429], [436], [920], [163], [680], [2], [557], [190], [156], [722], [571], [894], [439], [756], [871], [198], [564], [438], [373], [149], [232], [391], [269], [933], [721], [], [151], [151], [248, 539], [814], [866, 595], [655], [421], [698], [454], [568], [865], [267], [183], [553, 493], [281], [], [237], [721], [477, 587, 784], [333], [663], [382], [296], [652, 764, 413], [951], [987, 998], [270], [996], [952], [620, 662], [929], [369], [79], [118], [], [359], [697], [761], [29], [617, 438], [37], [222], [233], [106], [76], [36], [53], [310], [608, 515, 610, 841], [283], [], [795], [19], [290], [326], [], [36], [290], [118], [293], [506], [989], [996], [366], [], [], [746], [391], [834, 655], [73, 77], [782, 664], [255], [85], [953], [316], [123], [919, 860], [347], 
[178], [274], [84], [], [418, 709, 748, 563], [323], [632], [724, 536], [841], [402], [638, 639], [25], [345, 730], [588], [874], [], [363], [130], [532], [673, 526, 527, 782, 664, 508], [707], [401], [576], [413], [0], [603], [674, 630], [197], [828], [978, 437], [], [498], [298], [325], [], [413, 439], [863], [97], [680], [89, 284, 799], [97], [709], [573], [368], [805], [284], [683], [222], [411, 828], [659], [86], [633], [642], [792], [459], [155, 204], [], [756], [798], [673], [], [338], [296], [178], [462], [843], [187, 201], [218], [117], [169], [711], [], [389, 391], [634], [713], [330], [553], [772, 869, 488, 464], [122], [523], [625], [54], [402], [889], [692], [29], [356, 359], [578, 515, 689, 982, 601], [97], [213], [98], [729], [], [459, 445], [20], [358], [], [950], [485], [169], [240], [548], [891], [650, 819], [125], [], [184], [588], [476], [666], [140], [754], [559], [937], [385], [913], [643, 906], [], [718], [516, 669], [673, 504, 508], [50], [596], [866, 803], [135], [496], [667], [486], [211], [18], [387], [563], [931], [142], [767], [310], [910, 411], [448, 489], [245], [64, 55], [439], [64], [157], [240], [578], [922], [288], [842, 523, 433, 795], [808], [108], [934], [861], [209], [517, 540], [112], [769], [423], [652], [], [187, 201], [839], [22], [130], [289], [746], [780], [447], [995], [780, 914], [888], [179, 180], [], [27], [], [373], [879], [536], [582, 936, 939, 943], [146], [518], [659, 949, 950], [218], [], [475], [684], [820], [75], [], [960, 968], [49], [650], [173], [], [565], [405], [690], [345], [652, 822, 541, 542], [889], [343], [], [944], [300], [784], [780, 724], [768], [514], [], [35], [741], [], [983], [83], [906], [518], [229], [487, 590], [218], [864], [91], [147], [617, 823], [237], [920], [866], [469], [746], [581], [892], [], [911], [962], [89], [], [154], [487], [714], [378], [627], [515, 348], [247], [343], [18], [529], [], [142], [739], [332], [491], [517], [926], [220], [930], [926], [977, 978], [581, 479, 817, 511], [974, 468], [912, 977, 978], [606], [577], [40], [464], [488, 600], [], [784], [492], [996], [589], [439], [650, 632], [438], [588, 790], [251], [780], [130], [319], [521], [543], [357], [137], [622, 759], [399], [689], [], [240, 238], [608, 681, 620], [752], [39], [116], [263], [842], [522], [684], [], [], [665], [713], [], [], [791, 582], [850], [336], [823], [971], [588, 813, 910], [9], [227], [945], [307], [194], [578, 689, 601, 831], [379], [726], [695], [800], [831], [802], [131], [71], [686], [485, 848], [352], [501], [810, 878, 658], [185], [765], [18], [496], [209], [437], [698, 483], [964], [103], [276], [388], [243], [841, 911], [578, 982], [228], [799], [773], [741], [575], [15], [424, 919], [581, 479], [], [296], [203], [586], [62], [888], [227], [695], [771], [40, 46], [335, 412], [486], [637, 879], [8], [42], [854], [136], [39, 47], [325], [299], [609], [231], [577], [233], [814], [382], [978], [472, 693], [524, 461], [], [355], [], [979, 821], [537], [249, 250], [], [965], [398], [553], [850], [811], [804], [83], [613], [680], [94], [803, 586], [56], [608, 610, 841], [407], [151], [18], [615], [489, 818], [889, 831], [], [390], [741], [712, 126], [821], [471], [63], [578, 982], [983], [762], [275], [396], [459, 445], [], [172, 173], [148], [834, 522], [472], [], [578, 689, 601], [901], [539], [378], [239], [], [], [894], [897, 534, 729], [532], [896], [522], [459], [439], [344], [691], [372], [875], [513, 776, 875], [5, 6], [], [314], [198], [14], [42], [429], [], [555], [832], [986], [591], [359], [311], 
[446], [349], [222], [518, 671], [602], [290], [399], [682], [413], [750], [351], [568], [792], [], [581, 479], [389], [842, 977, 978], [31], [577], [996], [592], [821], [117], [140, 142], [], [431], [81], [759], [618, 813, 910], [523], [914, 780], [66, 54], [379], [672], [293], [567], [29], [673, 664, 526, 527, 782, 508], [], [189], [731], [745], [899], [156], [240, 241], [], [17], [77, 815], [806], [76], [596], [580, 807], [581, 717], [905], [117], [865], [897], [893], [431], [928, 923], [588], [454], [490, 524, 787], [497, 406], [563], [783], [646], [82], [44], [416], [339], [669], [8], [220], [722], [255], [494], [230], [826, 488], [273], [181], [349], [391], [995], [265, 266], [283], [57], [810, 651, 508], [518], [170], [913], [436], [], [464], [], [619, 846], [203], [138], [421], [], [], [564], [213], [736], [125], [789], [], [744], [899], [575], [483], [516, 520, 721], [470], [82], [489], [258], [330], [53], [291], [303], [730], [52], [229], [75], [854], [330], [702], [781], [325], [612], [515, 841], [211], [727], [668], [818], [775], [831], [649], [4], [107], [420], [900], [], [751, 479], [257], [750], [894], [949], [628], [410], [533], [874], [745], [293], [224], [896, 804, 711, 585, 631], [28], [422], [810], [619, 846], [254], [967], [583], [541], [618], [281], [697], [638, 639], [479], [316], [582, 936], [452], [470], [738], [172], [868], [206], [], [417], [364], [131], [464], [514], [104], [667], [325], [864], [664], [110], [539], [69], [747], [941], [503], [], [565], [338], [720], [215], [409, 892], [989], [606], [871, 913], [], [860], [421], [683], [144], [957], [596], [211, 243], [218], [], [904, 281, 282], [289], [376], [569], [417, 557, 562], [258], [505], [673, 526, 527, 782, 664, 508], [530], [449], [], [860], [865, 692], [946], [694], [656], [353], [984], [258, 222], [465], [], [636], [867], [94], [], [403], [379], [455], [206], [722], [230], [303], [922], [705, 547], [600, 517], [334], [392, 109], [627], [270], [159], [711], [101], [884], [404], [492], [335], [288], [], [699], [245], [650, 819], [617], [233], [316], [153], [778], [624], [905], [728], [], [143], [727], [640], [331], [541], [27], [46, 47], [987, 998], [322], [633], [879], [847], [892], [108], [78], [669], [191], [414], [], [400, 667], [845], [88], [], [533], [522], [683], [395], [398, 529], [343], [131], [347], [321], [503], [199, 197], [182], [281], [711], [509], [172, 173], [41], [349], [685], [86], [270], [281], [156], [616], [979], [69], [967], [732], [578], [614, 879], [867], [923], [753], [168, 159], [565], [114], [870], [14], [313], [298], [903, 584], [], [515, 910], [269], [927], [459], [793], [198], [213], [366], [544], [546], [930], [649], [77], [409], [469], [614], [66], [965], [537], [454], [179], [83], [350], [179], [771], [517], [581, 479, 817, 511], [390], [766], [], [467], [519], [896, 281], [357], [114], [2, 3], [403], [843], [978], [474], [40], [33], [151], [269], [543], [293], [770, 774, 655], [362], [772, 679], [250], [425], [723], [530], [193], [], [955], [561], [581, 489, 479], [], [40], [777, 623, 542], [961], [917], [819], [664], [], [672], [758], [566], [53], [910], [764], [574], [207], [], [946], [756], [242], [809, 910, 925], [419], [918], [727], [903], [634], [], [615, 652, 465, 413], [], [628], [168, 159], [877], [109], [811], [579], [638, 639], [594], [888], [753], [93], [], [884], [234], [509], [224], [450], [373], [152], [701], [632], [344], [849], [42], [843], [602], [33, 973], [909, 659, 951], [409, 892], [987], [400], [], [455], [492, 857], [618, 567], [644], [494, 
442], [430], [646], [514], [962], [306], [868, 968, 923], [44], [872], [765], [707], [836, 837, 842], [175], [554], [], [995], [], [293], [], [448], [57], [920], [], [213, 852], [999], [247, 151], [372], [471], [386, 101], [441], [924], [144], [58], [368], [767], [480], [258], [861], [], [189], [431], [376], [816], [315], [346], [397], [40], [577], [], [424, 423], [], [402, 889], [692], [183], [273], [805], [877], [746], [427], [581], [793], [961, 910, 659], [896], [372], [599], [840, 462], [515, 824], [744, 657], [986], [333], [453, 831], [674], [236, 852], [764], [131], [], [386], [814], [976], [547], [322], [323], [968], [466], [713], [481, 482], [944], [237], [708], [535], [295], [559], [481], [532], [], [358, 359], [151], [258], [748, 600], [659], [624, 453, 454], [999], [788], [666], [103], [855], [57], [472], [275], [83], [841], [], [344], [10], [947], [97], [685], [250], [995], [410], [429, 527, 916, 664], [], [84], [581, 436, 479], [], [156], [459], [98], [300], [8], [751], [614], [644], [53], [402], [616], [759], [], [], [427], [749], [91], [229], [], [91], [710], [], [220], [318], [6], [468], [836, 837], [771], [587], [282], [], [669], [258], [127], [631], [857], [679, 616], [737, 898, 886], [], [48], [772], [108], [789], [433, 842], [123], [481, 482], [161, 195], [609], [562], [979], [644], [908], [968], [733, 557], [897], [572], [893], [355], [578, 654, 982], [813], [227], [82], [403], [462], [], [], [271], [655], [765], [750], [], [32], [468, 479], [109], [51], [579, 881], [], [701], [928, 923, 960], [403], [219], [232], [402], [64, 55], [551], [], [], [554], [626], [863], [849], [0], [870], [754, 507], [407], [747], [15], [939], [330], [233], [905, 619], [982], [315, 311], [802], [987, 926, 998], [252], [489], [154], [498], [346], [566], [917, 794], [85], [287], [456], [], [719], [870], [638, 639], [738], [775, 819, 842, 602], [901], [730], [868, 532, 923, 572], [835], [826], [766], [117], [274], [9], [831], [479, 436], [49], [770, 518, 414, 842, 978], [], [84], [611], [], [683], [215], [474], [340], [], [224], [839], [544, 521, 926], [68], [836, 837, 869], [924], [672], [219], [957], [129], [720], [626], [222], [107], [190], [853], [654], [715], [389, 390, 395], [311], [928], [822], [909, 544, 336, 469], [954], [309], [763, 764], [573], [886], [325], [], [86], [192], [36], [366], [926], [131], [49], [545], [42], [210], [710], [11], [338], [657, 812], [491], [484], [872], [82], [596], [603], [599], [102], [333], [364], [365], [366], [71], [16], [37], [890], [832], [872], [175], [647], [], [482], [756], [393], [375], [496], [943], [616], [562], [690], [616], [], [562], [373], [743], [40, 46], [88], [819, 822], [33], [880], [157], [], [848], [831], [], [941], [866], [53], [140], [134], [830], [367], [922], [691, 692], [673, 681, 620, 526, 782, 664], [247], [827], [736], [546], [743], [260], [770, 774, 655], [160], [270], [171], [52], [540], [], [372], [468], [], [647], [875], [896, 553, 493, 894], [63], [330], [63], [338], [136], [802], [450], [514], [582], [192], [533], [189], [952], [855], [755], [48], [28], [396], [302], [100], [345, 475], [], [], [], [193], [57], [8, 7], [773], [481], [391], [723], [357], [952], [40], [975, 976], [], [], [495, 894], [440], [260], [712], [880], [518], [689], [147], [433], [110], [136], [479], [177], [121], [383], [970, 976, 979], [64], [554], [554], [845], [829], [874, 654], [963], [551], [], [418], [270], [328], [495], [150], [449], [191], [801], [500], [302], [193], [], [991], [211], [540], [315], [335], [327], [449], [388], [114], [672], 
[133], [538, 668], [776, 513], [625], [949], [412], [992], [793], [], [41], [814], [881], [186], [246], [497], [346], [662], [264], [384], [703], [259], [874], [467], [33], [601], [644], [528], [228], [944], [430], [311], [], [68], [87], [937], [364], [512], [748], [354], [283], [268], [512], [339], [918], [582], [573], [781], [171], [419, 823, 845], [392], [592], [901], [6], [19], [778], [18], [262, 243], [562], [], [159], [786], [], [835], [], [750], [162], [602], [585], [830], [701], [960], [497], [698], [736], [275], [909], [686], [999, 700], [824], [849], [296], [294], [], [116], [320], [], [755], [379], [71], [576], [962, 532, 923], [59], [718], [254], [882], [983], [], [463], [951], [993], [972, 500], [263], [738, 939], [92], [901], [671], [303], [842], [65], [29], [256], [49], [632], [883], [393], [652, 691, 895], [852], [93], [805], [53], [430], [626], [123], [892], [184, 170], [167], [209], [296], [987], [646], [320], [], [273], [832], [414], [34], [729], [340], [38], [136], [501], [335], [480], [103], [321], [849], [241], [726], [847], [], [836, 837], [240], [3], [76], [848], [651], [615, 465], [574], [656], [57], [594], [915], [362], [608, 515], [272], [457], [822], [888], [297], [640], [449], [425, 716], [517], [734], [440], [862], [], [12], [792], [738, 723], [773], [621, 412], [571], [], [479, 817], [1, 728], [], [22], [699], [857], [64], [66, 68], [6], [739], [], [619], [559], [240, 241, 238], [244], [921], [671], [166], [930], [568], [860], [821], [994], [255], [352], [646], [783], [786], [319], [358], [591], [923], [250], [23], [207, 692], [934], [269], [172], [834, 451, 457], [436], [692], [224], [451], [738, 580], [223], [815], [], [678], [443], [671], [946], [361], [432], [278], [], [349], [309], [269], [435, 876], [321], [738, 716], [315], [868, 968, 849, 505, 828], [], [386], [159], [361], [983], [874], [980], [849], [103], [236], [669], [201], [583], [941], [681, 810, 620], [505], [393, 108, 973], [671], [988], [538, 727], [277, 278], [471], [265, 266], [634], [745], [696], [578, 971, 982], [607], [582, 950], [437], [644], [543], [974], [], [894], [274], [126], [261], [947], [401], [100], [876, 445], [754, 632], [356], [289], [106], [676, 570], [569], [486], [385, 865], [650, 819], [973, 123], [809], [768], [227], [537], [444], [551], [760], [], [61], [908, 404, 812], [576], [734], [104], [], [19], [110], [], [83], [827, 534, 411], [350], [424, 423], [348], [973], [744, 657, 812], [382], [953], [911, 474, 894, 735], [324], [567], [], [420], [155], [], [841], [834, 906, 907], [505], [804], [286], [967, 441], [162], [128], [239], [6], [894], [123], [903], [478], [729], [], [99], [517], [677, 587], [2], [559], [437], [6], [651], [660, 757], [105], [872], [532], [119], [862], [583], [155], [316], [162, 882], [549], [809, 925], [235, 434], [440, 441, 455], [597], [86], [259], [450], [120], [8], [456, 872], [290], [723], [959], [756], [713], [631], [243], [962], [77], [], [714], [399], [940], [634], [986], [530], [98], [196], [360], [935], [148], [917], [606], [139], [973], [370], [446], [734], [529], [704, 444], [], [250], [41], [897], [193], [401], [821], [195, 811], [946], [819], [302], [251], [681, 620, 761, 508], [636], [273], [388], [911], [292], [546, 650, 818, 819], [993], [897], [487, 635], [384], [218, 215], [309], [258], [859], [221], [202], [462], [70], [997], [514, 655, 824], [463], [467], [732], [492], [368], [], [329, 126], [685], [408], [186], [700, 999], [647], [728, 703], [672], [492], [482], [928, 659, 949], [626], [715], [339], [581, 717, 479], [328], 
[431], [824], [247], [488], [279], [903, 650, 819, 851], [101], [692], [553], [35], [89], [888], [328], [580], [481], [278], [748], [40], [], [], [814], [722], [123], [410], [90], [874], [538, 727], [866, 595], [901], [276], [999, 700, 861], [508], [296], [654], [640], [226, 170], [651], [], [102], [433], [659], [], [614], [40], [278], [311], [852], [740], [135], [934], [139], [], [513, 875, 566], [382], [117], [734], [984], [592], [], [896, 804, 648, 861], [792], [353], [579], [947], [369], [879], [941, 942], [543], [521], [], [831], [890], [976], [840, 882], [763], [481], [273], [864], [221], [322], [705, 850], [521, 809, 827, 926], [284, 453], [993], [912], [728], [343], [], [575], [], [178], [400, 667], [670], [431], [], [955], [148], [329, 126], [836, 837, 869], [757], [68, 58], [775], [625], [129], [331], [138], [661], [969], [], [320], [621], [893], [603], [223], [505], [773], [], [913], [573, 479], [314, 126], [459, 445], [], [693], [805], [360], [913], [171], [615], [40], [785], [868, 968, 504], [72], [388], [23], [417], [793], [581, 479], [992], [853], [], [882], [317], [834, 630], [428], [503], [], [833], [569], [292], [309, 599], [938], [940], [695], [786], [104], [218], [314], [777], [398], [773], [498], [269], [], [175], [438, 728], [963], [475], [857], [912, 348], [], [138], [644], [851], [929, 452], [135], [116], [76], [31], [192], [271], [412], [790], [711], [205], [366], [995], [311], [627], [987], [581, 479], [], [321], [], [715], [], [357], [214], [840], [247], [], [191], [714, 679], [928], [140], [80], [232, 247], [606], [610], [486], [800], [568], [747], [527, 916, 664, 508], [288], [255], [915], [782, 664], [453, 454, 624], [463], [983], [583], [324], [652], [145], [385], [728, 636], [695], [295], [692], [209], [526, 799], [], [260], [204], [114], [228], [675], [782, 664], [347], [34], [780], [397], [314], [], [78], [114], [637], [865], [727], [], [417], [], [491], [891], [], [810, 878], [530], [262], [607], [531, 692], [391], [883], [104], [614], [453], [907, 440], [916], [254], [966], [], [502], [9], [659], [107], [953], [105], [749], [79], [376], [601], [861], [690], [942], [140], [638, 639], [326], [433], [515, 402], [342], [484], [915], [366], [883], [716], [983], [660], [673, 742, 526, 527, 782, 664, 508], [797], [343], [894], [558], [970], [548, 851, 598, 632], [830], [481], [945], [546, 402, 819], [991], [927], [521], [309], [133], [414], [327], [413], [70], [352], [160], [462], [918], [673, 526, 527, 664, 508], [711], [264], [274], [424, 423], [653], [335], [754], [465, 413], [], [544], [747], [893], [463], [869, 885, 568, 894], [433, 691, 983, 570], [594], [706], [647, 332], [441], [], [836, 837], [530], [84], [943], [829], [], [760], [], [396], [984], [629], [180], [229], [172], [520], [], [39], [207], [281], [356, 358], [297], [841], [106], [776], [], [225], [312, 311], [343], [405], [362], [993], [770, 842, 610], [870], [698], [220], [983], [424], [400, 667], [434], [748], [], [376], [203], [929], [887], [86], [], [47], [856], [575], [948], [751], [700, 999], [], [205], [284], [121], [442], [235], [622, 759], [541, 542], [537], [681, 620, 526, 527, 782, 664, 508], [991], [525], [926], [87], [72], [], [588, 790], [788], [375], [547], [739], [604], [777], [208], [], [236], [65], [132], [159], [], [276], [997], [129], [647], [637], [], [21], [489, 429, 981], [849], [868, 923], [208], [284], [960, 827], [773], [652, 413], [484, 871], [138], [613], [149], [358, 359], [454], [364], [429], [653], [677, 783], [308], [654], [850], [896], [146], [914], [45], [27], 
[690], [918], [734], [855], [423], [288], [33], [66, 68], [958], [481, 605], [581, 479], [990], [789], [948], [376], [279], [649], [588], [136], [244], [191], [472], [304], [], [320, 319], [629], [83], [265], [933], [259], [854], [736], [799], [284], [505], [172], [299], [543], [], [456, 489], [256], [], [205], [72], [903], [188], [85], [71], [545], [612], [982], [616], [475, 815], [505], [894], [815, 126], [776], [], [117], [180], [76], [], [858], [518], [622], [745, 851, 598], [946, 309], [445], [92], [127], [294], [472, 693], [570], [808, 879], [], [547], [595], [886], [551], [31], [966, 907, 572], [385], [214], [97], [491], [71], [652], [182], [505, 827], [152], [548], [661], [896, 943], [170], [165, 237], [671], [993], [258], [619], [619, 846, 504], [796], [798], [40], [649], [244], [912], [326], [280], [434], [36], [234], [958], [541, 542], [960], [659], [791], [908], [6, 976], [714], [150], [242], [187], [238], [987, 998, 463], [608, 652, 465, 597, 413], [650, 541, 819, 822], [395], [677], [329], [82], [404], [997, 947], [471], [58], [44], [33], [931], [333], [304], [974], [], [559], [280], [905, 721, 831], [555], [384], [82], [860], [592], [], [642], [995], [570], [923], [315], [762, 923], [513, 875], [868, 923, 521, 809, 926], [257], [801], [211], [580], [798], [227], [359], [746], [566], [660, 977, 978], [489], [247], [555, 247], [453], [565], [531], [762], [652], [43], [253], [443], [544, 469], [323], [702], [319], [485, 632], [820], [751, 573, 479], [972], [339], [15], [116], [111], [31], [989], [159], [973], [128], [525], [692, 790], [66], [565], [30], [212], [638], [], [456], [485, 851, 632], [225], [253], [807], [607], [467], [6], [641], [881, 579], [297], [492], [554], [418], [860], [379], [537], [67], [169], [673, 681, 620, 905, 526, 508], [803], [853], [751, 479], [757], [642], [], [194, 175], [984, 985], [562], [915], [675], [937], [71], [61], [288], [592], [711], [106], [59], [], [477], [873], [860], [79], [496], [435], [], [], [185], [331], [493], [232], [637], [31], [172], [469, 567, 505], [659], [670], [657], [193], [944], [559], [937, 941], [460, 437], [924], [326], [589], [228], [682], [907, 440], [764, 413], [7], [168], [570], [673, 681, 810, 620], [167], [12], [229], [907], [89], [896, 285], [514], [621], [], [391, 758], [], [670, 518], [671], [185], [599], [343], [326], [624, 884], [594], [306], [965], [581], [210], [410], [90], [485], [261], [584], [798], [141], [280], [51], [774], [320], [358, 360], [260], [456], [49], [810, 508], [551, 629, 631], [621], [295], [946], [], [821, 444], [606], [331], [711], [591], [], [333], [227], [329], [37], [948], [906, 834, 501, 630], [388], [789], [638, 639], [636], [203, 186], [246], [638, 639], [494], [110], [136], [154], [626], [866, 661], [484], [620, 681], [85], [425], [151], [94], [24], [380], [594], [590], [144], [488, 778, 600], [463], [49, 50], [193], [132], [201], [137, 975], [431, 281], [867, 517, 536, 510], [34], [], [218], [84], [97], [933], [320, 319], [599], [709], [698], [818], [255], [814], [905], [211], [711], [782, 851], [409, 892], [], [809, 923], [585], [588, 790], [554], [970], [34], [117], [673, 526, 527, 664, 508], [470], [101], [96], [371, 382], [228], [335], [414], [327], [126], [265], [], [618], [720], [803], [357], [582], [], [], [713], [851], [479], [431], [548], [721, 831], [330], [842, 764], [591], [236], [589], [505], [106], [228], [503], [713], [331], [651], [222], [149], [], [284], [810, 878], [707], [150], [467], [547], [850], [964], [586], [630], [180], [708], [342], [66], [], [874], 
[558], [], [195], [950], [20], [524, 461], [645], [514, 655], [169], [504], [612], [733], [965], [157], [382], [212, 251], [40, 46], [476], [166], [578, 982], [394], [187], [449, 536], [19], [752, 852], [809], [825], [447], [745], [208], [545], [407], [670, 518], [802], [941], [140], [200], [267], [73, 74], [985], [593], [], [387], [238], [826], [741, 697], [721], [642], [167], [930], [915], [585], [573], [546], [310], [927], [538, 668], [71], [887], [810, 878], [551, 629], [351], [44], [623, 784], [738], [160], [561], [164], [461], [136], [284], [86], [], [93], [], [835], [755], [57], [537], [243, 254], [923, 959], [248, 250], [], [], [], [974], [295], [90], [975, 698], [979], [719], [900], [710], [302], [449, 536, 557, 733], [383], [434, 435], [579], [276], [773], [280], [649], [338], [824, 735], [865], [431, 850], [144], [834, 435], [305], [978], [211], [538], [406], [676], [677], [389], [765], [819], [564], [992], [718], [792], [347], [870], [874], [371], [267], [795], [421, 981], [758], [33], [856], [60], [382], [985], [500], [605], [979], [770], [375], [6], [666], [884], [300], [702], [274], [789], [877], [479], [], [673, 681, 620, 526], [643], [340], [732], [842, 433, 638, 639], [145], [809], [], [849], [687], [397], [237], [839], [763], [487, 590], [222], [801], [616], [294], [482], [327], [532, 762], [], [881, 579], [807], [926], [781], [836, 837], [534], [869], [356], [514, 836, 837, 869, 501, 636], [924], [574], [494], [401], [588], [825], [266, 267], [7], [349], [635], [484], [893], [652, 847], [], [345, 690, 462, 463], [743], [619, 846], [842], [], [211], [9], [], [], [910], [309], [139], [906], [73], [930, 907, 470], [934, 923], [40, 46], [759], [152], [397], [936], [22], [], [963], [773], [630], [352], [954], [684], [918], [10], [191], [653], [242, 243], [], [561], [88], [145], [198], [147], [43], [773], [913], [23], [43], [264, 263], [915], [60], [930, 931, 415], [740], [84], [68], [479], [652, 465, 413], [745], [253], [828], [], [699], [254], [702], [41], [922], [457], [379], [83], [479, 511], [160], [796], [203], [447], [494], [770], [944], [839], [834, 836, 837, 457], [871], [236], [62], [184], [260], [153], [715], [144], [176], [], [966, 532, 470, 762, 923, 572], [262], [578, 903], [760], [605], [797], [64], [154], [570], [872, 759], [301], [659], [294], [575], [990], [552], [31], [336], [884], [954], [758], [901], [], [315], [873], [549], [406], [517], [151], [223], [920, 405], [898], [616, 913], [416, 602], [227], [918], [215], [565], [841], [991], [962], [648], [769], [214], [168], [483], [504], [284], [593], [337], [966, 907], [415], [665], [968, 532, 762, 923], [283], [98], [457], [], [690, 345], [468], [114], [608, 836, 837], [382], [344], [877], [569], [608, 617, 438], [459, 445], [211], [383], [977], [], [60], [171, 173], [280], [735], [949], [414], [792], [327], [343], [45], [830, 678], [127], [784], [327], [438], [949], [641], [252], [993], [340], [773], [111], [956], [656], [257], [803], [104], [673, 742, 526, 527, 782, 664, 508], [995], [379], [814], [673, 742, 508, 526, 664, 782, 412], [798], [635], [582, 631], [30], [822], [512], [689], [888], [349], [533], [537], [940], [537], [399], [429], [334], [694], [14], [52], [677], [99], [62], [412], [469], [834, 650, 402], [471], [199], [89], [321], [976, 977, 978], [46], [638, 639], [712], [401], [362], [450], [349], [805], [45], [433], [566], [987, 998], [85], [945], [928, 923, 960], [160], [217], [17], [659], [798], [769, 533, 824], [890], [], [359], [663], [179], [485], [353], [962], [51], [294], [], [309], 
[94], [483], [984], [964], [716], [500], [726], [718], [], [582, 519, 945, 948, 950], [272], [714], [894], [418], [723], [933, 923], [282], [974], [530], [962, 813, 567, 505, 827], [176], [627], [836, 875], [], [363], [], [852], [19], [177, 172], [926], [739], [348], [507], [362], [400], [991], [605], [66], [409], [383], [622, 759], [2, 3], [963], [478, 592], [580], [581, 479, 627], [845], [49], [216], [984], [148], [], [729], [385, 716], [425], [990], [622], [809, 618, 926, 959], [790], [182, 607], [534], [], [560], [350], [], [376], [92], [699], [801], [671], [417], [90], [484], [916], [572], [57], [877], [625], [479], [810, 508], [262], [118], [426], [159], [1], [905], [283], [508], [553], [879, 638, 639], [724], [389], [332], [250], [739], [475], [192], [91], [715, 652], [438], [396], [61], [401], [362], [920], [533], [327], [517], [145], [364], [572], [771], [689, 578], [518], [821], [296], [107], [582, 939, 940, 943], [297], [148], [829], [608], [916], [793, 830], [226], [941], [178], [845], [665], [159], [497], [535], [641], [839], [366], [514], [758], [673, 664, 526, 527, 782, 508], [756], [800], [855], [518], [], [], [], [905], [722], [319], [744, 652, 847, 657], [805], [771], [753, 282], [713], [872, 622, 759], [347], [808], [606], [530], [867], [76], [977, 978], [958], [850], [], [], [174], [707], [604], [424, 423, 636], [702], [559], [343], [662], [404, 895], [502], [931], [268, 179], [849], [418], [909, 923, 926], [501, 665], [18], [424], [423], [291], [568], [581], [708], [481], [187], [803, 555], [192], [595], [491], [737, 455], [], [641], [782, 664, 281, 285], [93], [783], [], [603], [731], [713], [], [84], [268], [567], [556], [618], [568, 655], [901], [321], [155], [981, 429], [692], [159], [318], [866, 575], [669], [201], [181], [968], [668], [641], [942], [263], [98], [958], [143], [681, 620, 526], [869], [927], [437], [662], [537], [358], [270], [248], [833, 913], [685], [419, 719], [404], [923, 934, 933], [928, 572], [], [695], [], [486], [962], [563], [26], [758], [752], [336], [752], [610], [95], [104], [], [633], [400, 667], [495], [533], [192], [8], [224], [594], [533], [884], [909, 532, 883], [518], [482], [194], [168, 159], [841, 894], [105], [31], [984], [678], [652], [669], [737, 901, 440], [682], [857], [310], [], [738, 834, 906], [], [326], [93], [913], [427], [583], [372], [878], [375], [242], [418], [643, 454, 917], [15], [962], [397], [316], [548], [520], [93], [468], [548, 851, 789, 632], [101], [38], [446], [481, 482], [], [568, 748], [440], [473], [150], [488], [95], [431], [578], [64], [510], [895], [], [597], [724], [399], [334], [35], [951], [288], [182], [177], [417], [820], [205], [], [654], [415], [123], [201], [207, 208], [], [241], [204], [615], [511, 581, 479], [512], [], [494], [89], [276], [], [713], [850], [999, 700], [699], [571], [453, 606], [732], [754], [285], [452], [], [139], [15], [755], [240], [851], [812], [723], [65], [139], [753], [221, 206], [581, 717], [972], [306], [339], [653], [80], [422], [390], [521], [514], [], [182], [611], [542], [342], [646], [311], [], [230, 478], [316], [402], [858], [612], [645], [330], [544, 521], [497], [584], [521], [607], [531], [], [593], [813], [100], [157], [364], [616], [521], [724], [440, 441], [199], [241, 238], [890], [523], [532], [585], [263], [662], [687], [349], [751], [90], [237], [781], [909, 828, 926], [168], [880], [956], [619], [486, 401], [523], [603], [846, 883, 532], [632], [187], [433, 693], [579, 881], [770, 979], [469], [718], [64], [589], [202], [238], [293], [229], [863], 
[], [416], [922], [892], [418], [573, 479], [515], [262], [958], [55], [303], [787], [529, 793, 831], [926], [], [141], [902], [9], [], [948], [845, 531], [746], [423, 424, 892], [462], [318], [314], [920], [759], [995], [484], [275, 276], [658], [541], [831], [690, 346], [205], [583], [424], [29], [87], [824], [18], [521], [513, 683, 558, 432], [451], [361], [745], [162], [316], [100], [438], [861], [666], [770], [605], [543], [129], [528], [782, 664], [583], [722], [724], [959], [177, 170], [888], [495], [189], [109], [54], [243], [531], [473], [659], [809, 969], [14], [594], [67], [193, 191], [58], [770], [208], [547], [232], [834, 836, 837, 906], [762], [423], [877], [274], [163], [928], [347], [54], [589], [407], [671], [740], [553, 493], [391], [962, 987, 923], [133], [836, 837, 629], [353], [], [554], [192], [633], [855], [232, 250], [307], [531], [713], [64], [311], [490], [62], [329], [694], [587], [102], [259], [378], [627], [], [49], [421], [82], [547], [203], [], [626], [993], [888, 839], [566], [62], [272], [891], [450], [319], [], [163], [985], [668, 562], [961], [681, 620, 526], [296], [], [428], [802], [930, 966, 907], [192], [522], [736], [240, 241, 238, 239], [314], [933], [], [552, 283], [535], [9], [387], [638, 639], [875], [519], [], [443], [], [], [704], [453], [811], [383], [116], [474], [205], [425], [632], [152], [], [160], [935], [334], [932], [705, 537, 248], [79], [981, 429], [723], [681, 620, 526, 664, 508], [349], [841], [860], [806], [], [406], [39], [389], [155], [238], [984], [654], [413], [676, 246], [872], [532], [885], [723], [111], [264], [546, 818, 819, 541], [426, 635], [454], [75], [98], [205, 246], [270], [992], [537], [510], [843], [271], [892], [321], [750], [565], [750, 735], [847], [924], [51], [575], [142], [574], [203], [683], [44], [626], [490], [208], [589], [878], [377], [799], [11], [275], [115], [265], [692], [321], [938], [496], [200, 244], [285], [], [179], [217], [616], [], [660], [698, 538], [708], [672], [664, 851], [131], [204], [362], [582, 943], [219], [868, 935, 809, 923], [273], [402], [571], [603], [626], [546, 402, 819], [957], [], [827], [496], [946], [187], [881], [281], [995], [475], [280], [146], [551], [425], [892], [618], [], [34], [390, 149], [758], [595, 730], [960], [307], [87], [819], [765], [727], [269], [175], [673, 681, 268, 620, 508], [683], [355], [841], [353], [262], [195], [383], [810, 878], [203], [328], [899], [733], [827], [367], [718], [986], [605], [757, 535], [618, 909, 827], [907, 440], [4], [521], [923], [], [674], [491], [758], [243], [806], [23], [679], [573], [816], [780, 914], [466], [644], [188], [], [873], [619], [419], [884], [567], [259, 261], [735], [801], [118], [424, 589], [521, 618, 809], [910], [181], [204], [], [741, 765], [44], [612], [313], [531], [529], [377], [902], [973], [921, 917], [934], [339], [803], [609], [820], [119], [676], [505], [110], [540], [682], [271], [488], [843], [629], [174], [651, 504], [], [874], [701], [667], [27], [577], [201], [31], [979], [927], [836, 837, 970], [435, 281], [918], [526], [38], [857], [476], [605], [628], [539, 316], [572], [233], [771], [666], [867], [596], [], [164], [388], [992], [412], [802], [988], [877], [268], [523], [87], [517, 600], [513, 650, 819], [], [569], [970], [219], [86], [320, 319], [44], [436], [962, 923], [2], [178], [424, 423], [853], [525], [], [589], [93], [190], [931, 868], [901], [722], [], [794], [809, 923, 925], [905], [821, 693], [224], [374], [775], [98], [886], [752], [139], [578, 585, 982], [22], [57], [460, 437], 
[810, 878], [287], [988], [451], [587], [361], [459, 445], [479], [822, 542], [100], [], [647], [574], [546], [], [604], [629], [557], [683, 558], [654, 734], [170], [629], [397], [297], [333], [252], [597], [823], [324], [421], [277], [834, 432], [858], [280], [430], [392], [941], [548, 851], [494], [158], [], [515], [89], [583], [266], [719], [467, 499], [264], [628], [788], [], [9], [569], [182], [162], [764, 413], [43], [760], [], [364], [920], [871], [351], [45], [], [591], [], [115], [141], [32], [516, 431, 797], [987, 998], [40], [686], [613], [352, 138], [576], [451], [539], [557], [908], [235], [142], [90], [659, 700], [300], [343], [], [409, 826], [718], [557], [826], [725], [522], [602], [63], [827], [406], [], [481], [777], [345, 730], [270, 279], [923], [327], [387], [779], [113], [867], [467], [989], [203], [108], [372], [474], [508], [760, 737, 886], [776], [61], [83], [220], [54], [721], [195], [765], [355], [644, 470], [93], [597], [763], [135], [608], [230, 232], [889], [376], [184], [], [111], [17], [364], [826], [53], [496], [797], [263], [505], [105], [717, 733], [639], [681, 620, 508], [762], [], [755], [49, 50], [897], [450], [240], [850], [693, 472], [880], [672], [217], [337], [948], [142], [989], [740, 440], [156], [591], [950], [204], [697], [234], [], [297], [926], [978], [25, 28], [324], [385], [454], [762], [673, 664, 526, 527, 508], [7, 8], [342], [159], [592], [806], [818], [613], [950], [900], [142], [878], [462], [501], [25], [915], [942], [373], [109], [87], [953], [364], [487, 619, 526, 846, 504], [62], [849], [605], [390], [4], [153], [340], [836, 837], [899], [606], [288], [102], [174], [587, 784, 477], [791], [557, 858, 738], [237], [405, 538, 603], [913], [436], [951], [10, 15], [208], [671], [670], [823], [154], [366], [40], [880], [672], [244], [392], [740], [830], [], [932], [992], [650], [811], [478], [624, 453, 454], [280], [15], [631], [351], [279], [963, 966, 532, 762, 923, 572], [902], [912, 716], [974], [387], [608, 741], [670], [111], [738], [286], [738], [944], [54], [294], [652, 764], [723], [112], [361, 759, 794], [], [246], [777], [777, 499], [19], [462], [92], [564], [109], [449], [254], [727], [428], [168], [415], [590], [479], [615], [66], [524, 461], [602], [990], [990], [], [733, 127], [983], [573], [474], [147], [], [539], [468, 407], [981], [309], [996], [160], [373], [663], [620, 508], [466], [85], [660], [792], [865, 850], [242, 180], [281], [502], [789], [524], [803], [682], [104], [729], [], [228], [259], [252], [339], [417, 866, 595], [208], [], [776, 650, 819], [57], [], [608], [643], [], [232, 248], [738], [90], [808], [820], [999], [640], [610], [611], [294], [617], [], [834, 906, 893], [784], [334], [403], [931], [389], [289], [188], [], [], [707], [987, 998], [503], [576], [524, 461], [619, 846], [338], [524], [], [267], [449], [15, 91], [277], [111], [73, 815], [613], [383], [143], [496], [968, 504], [849, 827], [365], [239], [666], [109], [550, 521, 651], [888], [77], [], [661], [968, 114, 504], [512], [672, 970], [490], [748], [272], [658], [962, 942], [373], [463], [140], [809, 567], [568, 825, 608], [620, 508], [], [560], [932, 415], [853], [745], [713], [981, 429], [679, 488, 695], [106], [536, 540, 510], [578, 689, 982], [50], [524, 461, 715], [263], [560], [525], [117], [732], [826], [137], [284], [608, 423], [64], [795], [], [185], [571], [89], [210], [339], [244], [151], [670], [711], [101], [50], [213], [715, 524], [708], [676, 269], [534], [479, 751], [], [520], [440], [977], [], [948], [], [70], [890], 
[489], [358], [868], [823], [171], [921, 764], [779], [887, 497, 406], [967], [370], [780], [10], [714], [890], [81], [92], [785], [587, 477], [737, 582, 440], [416], [138], [452], [444], [532], [9], [986], [667], [395], [897], [423], [89], [339], [764], [709, 836, 837, 767], [672], [370], [618, 469], [10], [991], [971], [67], [616], [281, 282], [659], [909], [832], [834, 906, 400], [837, 582, 954], [927], [699], [458], [110], [867], [], [690, 345], [335], [], [150], [221], [580], [308], [544], [271], [176], [316], [102], [], [346], [234], [714], [552], [828], [813], [26], [], [269], [232], [522], [437], [249], [708], [], [836, 542, 822], [600], [446], [125], [857], [278], [], [418], [655], [162], [477], [623], [970], [508], [697, 478], [756], [985], [], [593], [338], [13], [57], [230, 231], [649], [987, 943], [], [860], [193], [290], [318], [675], [360], [436, 479], [], [589], [238], [772, 488], [481], [947], [441], [770, 674], [491], [5], [86], [424], [100], [537], [332], [596], [783], [43], [563], [117], [305], [259], [869, 457], [687], [988], [186], [804], [99], [213], [554], [933], [400, 667], [318], [652], [619], [], [123], [988], [829], [280], [223], [578], [818], [534], [230], [552], [673], [672, 669], [698], [308], [144], [211], [222], [916], [8], [234], [301], [321], [8], [487], [44, 633], [346], [514], [640], [803], [882], [571], [820], [494], [673, 620, 527, 664, 508], [70], [519], [166], [582], [590], [19], [316], [524, 461], [80], [724], [931], [], [127], [888], [756], [458], [688], [4], [20], [773], [398], [203], [395], [795, 615], [735], [905], [23], [631], [772], [555], [263], [64], [796], [467], [727, 538], [222], [], [], [277], [358], [471], [328], [832], [289], [741], [399], [112], [867], [10], [22], [832], [234], [647, 332], [896, 804], [], [241, 238], [], [326], [523], [], [12], [65, 973], [477], [370], [681, 620, 526, 664], [267], [728], [834], [615], [920], [553], [201], [822], [789], [710], [], [715], [387], [458], [418, 623], [95], [898, 762, 572], [485, 526], [363], [380], [74], [538, 858], [392], [769, 438], [389], [930], [563], [426], [29], [798], [844], [696], [470], [194], [383], [], [922], [198], [880], [543], [291], [40, 46], [953], [980], [297], [310], [183], [849], [174], [], [433], [679], [835], [725], [546, 806], [156], [235], [727], [418], [260], [529], [517], [21], [553], [97], [771], [780], [945], [], [388], [822], [605], [891], [207], [], [319], [943], [672], [643, 903], [905, 532, 799], [208], [292], [478], [156], [], [89], [883], [545], [875], [448, 637], [230], [520], [184], [190], [561], [965], [317], [759], [35, 37], [], [99], [993], [2], [868], [692], [76], [244], [169], [646], [903], [], [205], [772], [185], [145], [80], [936], [236], [21], [263], [873], [696], [960, 910], [582], [994], [], [464], [193, 189], [419], [486], [342], [831], [199], [1], [735], [], [807], [809, 925], [572], [677, 587, 783, 784], [251], [778], [311], [325], [777], [768], [143], [311], [45], [], [420], [609], [961, 499, 728], [644], [881], [913], [130], [16], [472], [836, 837, 445], [862], [675], [187], [896], [884, 501], [695], [610], [391], [696], [867], [779], [167], [904], [812], [761], [652, 597, 764, 413], [835], [], [735], [126], [634], [998], [927], [0], [540], [659, 556, 827], [101], [48], [586], [811], [187], [131], [442], [576], [484, 536], [842], [738], [393], [367], [], [973], [284], [467], [58], [38], [985], [720], [644], [90], [97], [260], [38], [915], [479], [561], [616], [497, 406, 857], [68], [595], [344], [303], [490], [59], [842], [829], [584], 
[356], [544], [673], [80], [60], [253, 846], [504], [188], [902], [834, 906], [329], [624], [0], [795], [865], [697, 610], [641], [389], [547], [20], [235, 174], [754], [], [608], [165], [381], [0], [978], [658], [650, 402, 819], [209], [432], [561], [241], [], [426], [117], [295], [662], [382], [236], [637], [394], [793], [358], [544], [305, 302], [165], [427, 756], [181], [918], [645], [585], [808], [69], [993], [303], [135], [165], [87], [324], [679, 455], [814], [198], [918], [223], [240, 238], [370], [462], [979], [29], [4], [122], [], [338], [411], [211], [772], [557], [879, 242, 850], [531], [688], [5], [251], [761], [158], [491], [591], [384], [225], [571], [113], [259], [], [18, 86], [815], [955], [133], [294], [63], [795, 703], [483], [265], [910], [292], [140], [905], [270, 207], [535], [205], [603], [537], [804], [553], [165], [654], [155], [164], [], [996], [913], [971], [42], [714], [182], [54], [240, 241, 238, 239], [938], [744, 657], [908, 404], [240, 241], [318], [784], [185], [591], [424], [920], [375], [492], [471], [687, 406], [238, 241], [501], [], [327], [774], [41], [718], [], [133], [89], [736], [79], [627, 795], [], [768], [417], [769, 418, 772, 623], [595], [], [753, 894], [135], [416], [77], [63], [495], [766], [], [972, 825], [892], [997, 947], [588], [895], [692], [952], [54], [938], [909], [288, 290], [732], [892, 409], [383], [297], [], [731, 861], [64], [80], [98], [766, 341], [204], [257, 222], [524, 461], [933], [648], [242], [329], [478], [355, 912], [535], [35], [311], [884], [464], [760], [527, 664, 508], [], [453], [386], [800], [191], [716, 765], [329], [698], [578, 982], [851, 548], [33], [710], [14], [161], [105], [73], [854], [410], [102], [], [], [136], [137], [841], [310], [400, 667], [47], [506], [572], [270], [85], [764], [692, 969, 588, 728], [21, 22], [325], [798], [33], [141], [109], [673, 681, 620, 526, 527, 664], [794], [500], [567], [335], [506], [829], [33, 983], [45], [965], [550], [447], [510], [933], [976], [109], [643], [987, 998], [400, 667], [371], [686], [25], [], [864], [37], [681, 620], [196], [744], [473], [849], [612], [542], [675], [58], [], [612], [253], [805], [], [368], [412], [647], [768], [93], [260], [], [200], [277, 278], [775], [902], [382], [36], [357], [198], [568], [374], [171], [105], [762], [474], [166], [], [846], [855], [25], [727], [893], [432], [820], [801], [962, 923], [405], [721], [2], [911], [16], [652, 465], [], [741], [849, 725], [571], [582], [122], [555], [909, 567], [957], [360], [38], [332], [760, 664], [855], [644], [930], [591], [610], [66], [728], [524, 461], [511], [253], [769], [224], [694], [817, 479], [595], [949, 953], [692], [683, 558], [212], [294], [615], [626], [441], [57], [233], [], [363], [821], [], [], [149], [], [207], [842, 500], [679], [756], [64], [676], [399], [710], [852], [548], [946], [320], [884], [389, 391], [404], [945], [485], [392], [], [83], [648], [864], [466], [25], [], [748], [450], [239], [113], [666], [755], [91, 14], [0], [323], [393, 108], [547], [809, 923, 926], [52], [42], [428, 670], [9], [471], [736], [476], [685], [458, 401], [420], [300], [], [625], [986], [553, 493, 883], [915], [164], [122], [], [545], [42], [780], [963, 964, 567, 572], [17], [565], [458], [761, 223], [669], [95], [], [224], [886], [27], [196], [353, 372], [782, 664, 810, 508], [456], [860], [217], [183], [281], [975], [71], [763, 597], [634], [881], [937, 923, 963], [857], [738, 949], [527, 664, 508], [92], [21], [716], [229], [67], [384], [752], [], [544], [98], [983], [273], [881], 
[400, 667], [754], [680], [139], [], [329, 108], [554], [756], [544], [173], [394], [506], [986], [140], [768], [776], [799], [865], [], [481], [272], [880], [810, 878], [532], [125], [350], [39], [950], [852], [41], [911], [528], [759, 622], [700], [844], [477], [90], [554, 628], [720], [652], [737, 455, 760, 440], [499], [104], [], [964], [459], [557], [41], [795], [], [67, 58], [833], [322], [490], [948], [964], [507], [429], [926], [917], [84], [716], [716], [411], [74], [707], [642], [472], [280], [61, 62], [262], [342], [618], [7], [670], [341, 342], [841], [622, 759, 478], [195], [236, 237], [], [517], [226], [357], [864, 717], [938], [450, 462], [39], [134], [763], [83], [900], [323], [353], [739], [19], [668], [317], [865], [], [222], [896], [19], [], [654], [319], [463], [30], [905], [879], [27], [548, 851], [168], [122], [977], [882], [67], [949, 953], [96], [584], [57], [716], [711], [], [533], [420, 559], [232], [849], [866], [793], [836, 837, 151], [981], [385, 101], [581, 818], [162, 167], [396], [367], [281], [964], [898], [530], [619, 750, 846, 721], [143], [], [509], [52], [206], [552], [574], [707, 886], [275], [429], [448], [512], [515], [], [709], [226], [543], [477], [973], [544], [853], [265], [812], [306], [], [519], [640], [62], [59], [392], [548], [827], [372], [364], [835], [5, 6], [297], [0], [], [328], [497], [946], [721], [138], [593], [904], [93], [862], [769], [403], [487], [969], [488, 635], [898], [53], [39], [358, 359], [686], [666], [493], [391], [771], [7], [864, 919, 733], [124], [380], [928, 923, 960], [78], [174], [230, 231], [437], [127], [88], [608, 718, 975], [178], [135], [66], [64, 55], [484], [], [586], [975], [327], [543], [789], [179], [43], [518, 671], [497], [113], [238], [354, 676], [968], [289], [891], [849, 725], [389], [791], [652, 465], [554], [457, 338], [722], [22], [382], [891], [948], [377], [686], [985, 324], [374], [284], [915], [], [954], [561], [711], [710], [], [789], [], [480], [171], [382], [628], [893], [115], [642], [488], [368], [256], [636], [158], [714], [], [726], [504, 985], [], [137], [668], [581], [133], [962, 923], [631], [708, 698, 671], [125], [907, 440], [772], [86], [], [118], [288, 290], [487], [438], [50], [28], [379], [108], [], [696], [83], [626], [253], [673, 681, 620, 664, 526, 527, 508], [112], [396], [770], [865, 850], [152], [847], [936], [22], [593], [776], [562], [769], [549], [339], [793, 697], [41], [654], [], [863], [888], [370], [962, 762, 966, 532, 923, 572], [146], [489], [51], [755], [491], [526], [940], [9], [383], [575], [181], [347], [496], [783], [513, 683, 875, 558], [600], [789], [960, 923], [529], [372], [815], [295], [566], [189], [845], [170], [589], [377], [], [750, 655], [139], [684], [595], [869, 618, 824], [842, 445], [517], [223], [903], [833], [800], [250], [], [], [454], [163, 168], [299], [432], [719], [417], [62], [980], [575], [], [850, 220], [156], [298], [367], [167], [194], [407], [547], [481], [36, 58], [91], [700, 999], [203], [633, 937, 333], [354], [678, 636], [315], [187, 700], [948], [849], [543], [807], [351], [918], [328], [453, 633], [834, 906, 630], [802, 518], [677], [520], [236, 237], [924], [129], [21], [595, 866], [832], [], [], [646], [182], [46], [367], [985], [995], [760], [384], [556], [722], [], [412], [], [10], [354], [168], [545, 589, 861], [96], [25], [101], [150], [225], [472, 693], [235], [194], [294], [491], [109], [416], [150], [249], [140], [285], [867], [787], [886], [986], [437], [244], [482, 754], [52], [497], [], [811], [305], [368], [302], 
[160], [290], [469], [], [531], [565], [677], [933], [656], [756], [223], [834, 906], [13], [582, 692, 790], [983], [889], [], [92], [35], [254], [685], [552], [578], [504], [781], [430], [696], [690], [928], [898, 836, 837, 774, 842, 502], [288], [560], [208], [453], [153], [995], [562], [216], [267], [523], [252], [566], [893], [467, 341], [108], [72], [545], [597], [10], [659], [475], [228], [736], [807], [883, 532, 762, 923, 572], [930], [355], [562], [847], [872, 447], [], [365], [64], [], [271], [145], [613], [325], [261], [654], [670], [301], [412], [994], [747], [470], [], [404], [859], [682, 562], [853], [997], [232], [78], [399], [922], [], [946], [371], [855], [615], [734], [601, 578, 689], [212], [], [864], [165], [47], [113], [418], [741], [735], [738, 944], [742], [789], [651], [572], [142], [299], [110], [502], [916], [171], [484], [673, 526, 527, 664, 508], [738], [823], [235], [97], [990], [595], [459], [930], [104], [555], [594], [624, 454], [126], [569], [827], [1], [343], [18], [], [139], [643, 474], [420], [458], [], [853], [], [886], [698], [939, 943], [812], [342], [74], [132], [82], [279], [228], [802], [947], [425], [844], [399], [25], [381], [394], [63], [297], [], [477], [21], [552, 151], [135], [338], [424, 423], [482], [876, 435], [524, 461, 501], [498], [178], [84], [602], [328], [900], [103], [703, 323, 998], [281], [588], [145], [441], [440], [1], [450], [404], [680], [864, 627], [231], [122], [29], [900], [97], [624], [546], [171, 268], [894], [887, 406], [508], [25, 28], [723], [671], [148], [269], [889], [514, 464], [557], [709, 748], [419], [198], [160], [166], [836, 837], [188, 189], [68], [359], [462, 792], [], [160], [454, 917], [487], [762], [583], [97], [46], [630], [877], [652], [109], [400, 667], [110], [254], [754], [253], [296], [293], [359], [448], [117], [99], [267], [606], [990], [11], [531], [79], [507], [802], [719], [488], [], [798], [953], [323], [398], [130], [226], [311], [65], [287], [901], [144], [361], [218], [650], [673, 681, 620, 526, 527, 664, 508], [130], [621], [739], [577], [465, 413], [530], [106], [122], [491, 634], [68], [256], [688], [836, 837, 869], [307], [758], [573], [687], [641], [792], [], [855], [99], [564], [309], [913], [700, 999], [451], [725, 505], [5, 6], [431], [420], [713], [190], [], [548, 851], [630], [722], [28], [646], [606], [428], [587, 784, 477], [91], [959], [718], [626], [668], [476], [500], [920], [555], [284], [524, 461], [33], [89], [991], [99], [188], [221], [476], [274], [607], [184], [304], [], [165], [299], [153, 266], [457, 920], [20], [75], [766], [489], [162], [376], [108], [818], [630], [535], [899, 725, 572], [630], [910], [833], [], [412], [], [893], [589], [938], [783], [384], [], [333], [716], [262], [701], [249], [240, 241], [221], [369], [], [133], [378], [437], [131], [698], [921, 692, 917], [623, 917], [619, 846, 721, 831], [889], [441], [549, 692], [70], [459], [784, 587], [147], [841], [318], [627], [343], [507], [489, 243], [391, 758], [911], [736], [884], [883], [991], [618], [758], [754], [873], [39], [379], [518], [261], [830], [569], [], [21], [451], [291], [72], [753], [489], [802], [531], [674], [698], [832], [157], [47], [], [888], [406], [778], [515, 836, 837], [694], [596], [], [424], [907, 470], [573], [225], [56], [850, 760], [465], [684], [850, 765], [950, 951], [365], [881], [43], [], [108], [61, 62], [952], [92], [82], [913], [503], [567], [589], [481], [246], [756], [53], [599], [793], [200], [244, 537], [58], [178], [904], [363], [988], [616], [126], [], [551], 
[83], [632], [400], [442], [], [70], [81], [716], [311], [391], [335], [965], [], [433], [685], [334], [343], [474], [395], [180], [14], [977, 978, 437], [761], [247], [152], [221], [973], [355], [341, 572], [374], [661], [412], [977], [558], [742], [133], [], [259], [895], [442], [105], [117], [530], [216], [847], [772], [805], [849], [], [44], [824], [298], [983], [340], [960, 931, 923], [903], [353], [162], [], [364], [843, 602], [348], [494], [485, 632], [402], [100], [352], [704], [4], [51], [855], [732], [176], [214], [], [849, 505], [107], [79], [730], [185], [757], [13], [844], [708], [624, 453, 454], [319], [79], [527, 592, 664], [215, 218], [943], [250], [992], [519], [533], [986], [922], [610], [633, 769], [21], [475], [309], [716], [42], [560], [222], [796], [514, 515], [325], [498], [216], [497], [591, 721, 885], [839], [378], [612], [893], [653], [956], [], [878], [374], [417], [569], [], [686], [328], [], [], [688], [725], [423], [], [65], [711], [761], [643], [987, 998], [720], [543], [], [303], [], [389], [235], [1], [86], [760], [38], [9], [612], [785], [780], [80], [50], [103], [939, 943, 945], [564, 750], [854], [857], [253], [113], [981], [178], [470], [119], [210, 852], [448, 637], [882], [526, 784], [651], [], [878], [351], [33], [771], [578], [735], [681, 620, 508], [], [4], [107], [311], [71], [270], [967, 504], [102], [246], [699], [408], [940], [], [510], [578, 689, 601], [182], [], [881], [946], [186], [683], [517, 540, 510], [946], [508], [630], [281], [323], [155], [297], [], [619, 846], [555], [882], [], [992], [613], [843], [796], [733, 541, 542], [546, 650, 402, 818, 819], [133], [465], [85], [436], [16], [731], [119], [441], [457, 834], [252], [529, 667], [982], [909, 567, 827], [619, 750, 846, 721], [360], [726], [822], [190], [108], [637], [417], [90], [172], [836, 837], [339], [764], [804], [90], [30], [234], [331], [600], [506], [960, 582], [419], [797], [620], [173], [744, 657], [135], [863], [813], [935], [824], [494, 442], [261], [787], [], [191], [781], [372], [753], [526], [148], [963, 945], [375], [770, 788], [296], [854], [908, 895], [488], [439], [121], [314], [101], [275], [618], [558], [582, 937, 938], [404, 895], [386, 101], [305], [733], [165, 234], [608, 428], [396], [], [88], [430], [625], [594, 579], [74], [309], [638, 639], [], [298, 63], [622], [821], [658], [617], [348, 349], [1], [694], [695], [809, 926], [785], [244], [951], [520, 431], [487, 605], [606], [406], [199], [624], [76], [609], [743], [933], [131], [548, 598, 632], [836, 837, 655], [762], [553], [254], [178], [197], [553], [88], [555], [6], [514, 515], [245, 183], [15], [488, 679], [], [717], [920], [267], [989], [999], [634], [313], [400, 667], [366], [839], [635], [851, 632], [713], [597], [71], [435], [888], [457, 834, 906], [787], [292], [400, 667], [808], [176], [975], [106], [748], [487, 590], [104], [30], [973], [818], [20], [866], [], [514, 655], [643, 306], [251], [518, 652, 465, 413], [597], [565], [407], [738], [21], [814], [883], [956], [365, 379], [431], [312], [872], [695], [621], [928], [187], [199], [625], [578], [179], [750, 414], [263], [972], [51], [367], [], [402, 881], [961, 963, 964], [655], [852], [887], [891], [809, 925, 923], [], [458], [702], [144], [], [205], [579], [708, 596], [114], [45], [717], [426], [821], [588, 790], [890], [33], [392], [325], [738], [145], [800], [511], [12], [518], [272], [398], [560], [853], [868, 532, 441, 762, 923], [], [88], [741], [563], [267], [545, 745, 619, 818, 831], [607], [970, 795], [795], [313], [701], 
[763], [169], [839], [351], [641], [], [833], [26], [], [644], [], [318], [517], [986], [], [], [321], [895], [7], [800], [454], [233], [850, 211], [153], [874], [740], [20], [323], [573], [946], [133], [207], [889], [492], [371], [905], [811], [576], [218], [562], [], [713], [920], [495], [346], [530], [294], [8], [343], [253], [713], [194], [959], [903], [661], [321], [245], [890], [694], [141, 142], [813], [270], [322, 946], [333], [714, 402], [], [486], [418, 563], [602], [456], [328], [956], [701], [], [691], [774], [824], [836, 837, 979], [141], [489], [557], [825], [382], [765], [476], [780], [476], [933], [697], [], [388], [945], [738, 653], [799], [358], [945], [446], [548, 664, 526, 527, 508], [277], [826], [350], [616], [244], [], [56], [644, 470], [44], [708], [351], [742], [989], [485], [767], [586], [306], [873], [236], [352], [567], [968, 504], [788], [18], [954], [404], [965], [728], [34], [], [990], [586], [205, 174, 223], [420], [415], [], [761], [649], [552], [972], [898, 692], [60], [974], [411], [699], [659], [271], [945, 939, 943], [53], [382], [76], [730], [944], [462], [573], [346], [328], [827], [153, 265], [315], [776], [908, 895], [578, 650, 818], [770], [452], [683], [319], [329], [747], [], [885], [], [713], [315], [10], [455], [119], [661], [744, 657], [901], [963], [533], [515, 655, 818, 731, 608, 630], [805], [555], [363], [513, 875, 402], [120], [391], [751, 479], [], [110], [245], [761], [682], [], [693], [301, 304], [403], [320], [462, 655], [495], [820], [481], [46, 47], [891], [656], [], [479], [156], [10], [666], [637], [563], [261], [264], [106], [895], [905, 619, 846, 831], [72, 815], [], [282], [851], [109], [304], [738], [634], [695], [400], [659, 940, 813], [780], [643, 570], [596], [938], [], [318], [495], [36], [790], [], [518, 665], [101], [487], [772, 949], [75], [994], [177], [929, 338], [313, 414], [517, 540], [865, 850], [287], [365], [631], [910], [822], [845], [554], [874, 779, 920], [24], [824], [763, 597], [953], [352], [650], [91], [51], [758], [102], [271], [481, 482], [929], [182], [234], [111], [154], [955], [162], [653], [150], [70], [514, 652], [604], [661], [635], [962], [211], [195], [603], [892], [], [772], [322], [838, 551, 629], [993], [393], [582, 945], [322], [349], [997, 588, 947, 790], [451], [583], [703], [167], [128], [136], [466], [34], [964, 937, 945], [683], [605], [625], [553], [405], [252], [789], [784], [846], [770, 543], [949], [145], [547, 716], [301], [90], [896, 905, 435], [499], [252], [896], [206], [814, 977, 978], [594], [369], [770, 830, 608], [442], [703], [100], [980], [66], [890], [715, 524, 461], [329], [827, 926], [28], [608, 489], [578, 689, 703], [8], [963], [224], [], [132], [674], [641], [740], [452], [301], [], [835, 708], [770, 791, 480, 502], [465], [684], [898], [972], [947], [635], [74], [], [334], [205, 213], [312], [883], [602], [222], [303], [299], [773], [305], [255], [923, 928], [665, 671], [], [364], [524], [296], [197], [336], [945], [386], [313], [942], [826], [823], [506], [357], [644, 532], [458], [376], [642], [194], [181], [], [45], [296], [118], [19], [386], [604], [752], [404], [66], [678], [572], [618], [147], [690], [295], [5], [245], [552], [683, 558, 432, 566], [152, 155], [748], [257], [588], [781], [244], [553], [731], [358, 359], [622, 759], [618], [38], [12, 475], [710], [662], [566], [919], [], [206], [92], [851], [], [246], [957], [224, 223], [29], [892], [433], [673, 929, 681, 620, 526], [12], [994], [781], [92], [555, 570], [195], [692], [955], [284], [812], [272], 
[137], [741], [902], [972], [162], [929], [875], [914], [949, 927], [146], [735], [297], [24], [896, 435, 794], [405], [16], [], [575], [276], [824, 836, 837], [190], [325], [800], [292], [341], [], [718, 975, 536], [950], [979], [853], [449], [226], [132], [], [900], [422], [832], [388], [699], [759], [96], [0], [], [831], [997], [489, 733], [], [432], [672], [51], [44], [710], [503], [559], [357], [904], [272], [866], [33, 35], [962, 935, 937, 923], [364], [300], [912, 339], [533], [762, 884], [863], [95], [829], [573], [822], [288], [89], [], [251], [920], [200, 204], [130], [], [864, 479], [8], [750, 564], [577], [533], [], [33], [680], [429], [67, 68], [681, 620], [97], [977], [157], [], [523], [34], [681, 810, 620, 508], [338], [699], [142], [746], [812], [951], [88], [349], [431], [911], [107], [475], [766], [674, 333], [162], [647], [384], [819], [122], [754], [907], [153], [652, 764], [342], [406], [430], [56], [25], [], [252], [897], [302], [365], [108], [788, 502], [365], [28], [680], [863], [955], [68], [433], [], [539], [566], [212], [893], [76], [508], [479, 817], [742], [255], [267], [], [815], [920], [230], [637], [465], [516, 520], [876, 435, 794], [750], [], [572], [489], [668], [798], [306], [619, 846], [929], [774, 788, 502], [236, 165], [636], [666], [154], [532], [491], [765], [220], [115], [952], [135], [889, 486], [], [403], [792], [], [144], [94], [146], [554], [688], [118], [768], [517, 847], [640], [197], [], [69], [327], [790], [17], [199], [628], [135], [226], [933, 923], [735], [432], [286], [698], [189], [554], [346], [252], [433, 639], [292], [828], [849], [995], [], [292], [652], [884, 406], [241], [680], [275], [905, 750, 721], [713], [373], [399], [487], [897], [659, 969], [102], [289], [477], [216], [651], [868], [930], [247], [319], [673], [235], [829], [524, 461], [75], [735], [111], [591], [], [886], [711], [922], [318], [629], [797], [434], [867], [989], [203], [], [328], [318], [823], [770], [421], [251], [802], [938], [890], [553, 493], [173], [394], [914], [489], [262], [12], [274], [216], [278], [803], [592], [546, 402], [654], [25], [], [839], [347], [615], [662], [706], [840], [886], [535, 479], [472], [513], [871], [882], [352], [880], [607], [975], [25], [898], [977, 978], [], [39], [146], [219], [517], [], [528], [477], [721], [371], [192], [300], [], [820], [], [], [31], [629], [822], [614], [239], [820], [210], [615], [685], [], [836, 837, 879, 535], [197], [], [663], [356], [540], [273], [276], [299], [263], [291], [887], [768], [76], [466], [513], [863], [51], [850], [347], [256], [218], [490], [239], [581, 751, 479], [574], [819], [478], [], [655], [685], [75], [545], [358], [987], [265], [738, 470], [786], [226], [702], [535], [165], [977, 978], [346], [218, 215], [224], [928, 923, 960], [907, 440], [133], [735], [50], [827], [752, 852], [891], [327], [386], [51], [96], [838], [802], [20], [129], [514], [475], [581, 656, 475, 479], [], [928, 712], [488, 695], [338], [], [119], [604], [802], [617], [640], [523], [515], [518], [306], [414], [829], [403], [64, 55], [138], [4], [598], [232, 239], [949], [406], [361], [704], [756, 412], [629], [], [968, 967], [748, 636], [197, 198, 199], [738], [790], [251], [166], [78], [332], [527], [941], [502], [878], [492], [9], [82], [966, 907], [783], [806], [453, 454, 526, 527, 782, 664], [127], [190], [], [723], [656, 475, 479], [674], [650], [184], [117], [649], [418], [659], [354], [770], [681, 620, 526, 527, 782, 664, 508], [876], [], [704], [728], [267], [741], [257, 222], [542], [213], [937], 
[513, 776, 875], [402], [], [599], [983], [240, 241], [], [], [188, 190], [134], [322], [12], [896], [128], [470, 862], [668], [350], [608], [], [230], [], [], [843], [467], [872, 622, 759], [720], [], [106], [14], [399], [257, 222], [373], [223], [144], [800], [129], [434], [983, 801], [335], [9, 340], [953], [751, 479], [588], [972], [47], [303], [990], [547], [637], [686], [517], [530], [918], [834, 906], [963], [820], [318], [379], [983], [815], [780], [839], [272], [210], [740, 783, 784], [321], [570], [661], [593], [84], [581, 661, 479], [611], [730], [868, 567], [794], [348], [906], [70], [], [11], [674], [872], [666], [72], [805], [290], [289], [357], [392], [206], [859], [86], [237], [638, 639], [140], [493], [136], [990], [577], [728, 412], [689], [14], [806], [544], [139], [560, 981], [194], [844], [428], [692], [366], [682], [320, 319], [883], [838, 631], [894], [622], [191], [94], [67], [992], [103], [603], [43], [881], [278], [348], [804], [746], [262], [218], [619, 532, 846], [522], [389], [757], [300], [860], [781], [692], [210], [349], [], [472], [660], [398], [489], [], [523], [251], [], [386, 101], [847], [202], [896], [608], [979], [911], [959], [], [997], [51], [335], [731], [148], [693, 919, 472, 733], [450], [731], [984], [847], [71], [223], [142], [955], [], [], [576], [684], [438, 126], [739], [328], [890], [778], [496], [131], [923], [734], [374], [244], [673], [499], [700], [986], [964, 923], [92], [81], [836, 593], [326], [49, 50], [14], [250], [793], [391], [937], [210, 178], [823], [753], [122], [11], [435, 876], [1], [933], [263, 247], [719], [708], [761], [], [644], [766], [961], [774], [693], [738], [646, 884, 406], [533], [373], [], [712], [], [231], [604], [148], [595], [], [1], [905, 789, 799], [727], [804, 896], [842, 978], [986], [769], [532, 762], [434], [636], [205], [504, 968, 254], [532, 495], [132], [583], [619], [992], [783], [904, 905, 968, 610, 504], [294], [456], [444], [], [56], [140], [478], [3], [784, 499], [336], [639], [304], [319], [692], [379], [759], [738, 580], [788], [857], [114], [464, 608, 610], [192, 186], [791], [905], [85], [128], [38], [782, 851, 664], [584], [690], [72], [217], [160], [428], [959], [653], [491], [391], [923], [234], [757], [41], [169], [902], [790], [992], [553], [720], [795, 796], [], [575], [985], [604], [460], [934], [783], [368], [296], [792], [608, 610, 531], [67], [630], [721], [688], [651], [225], [648], [391], [468], [8, 7], [696], [371], [190], [882], [55], [859], [985], [22], [595], [326], [189], [228], [772], [635], [677], [915], [0], [774], [273], [208], [435], [197], [985], [922], [571], [66, 68], [295], [247], [740], [986], [934], [747], [631], [], [570, 691], [780], [524], [73, 815], [35], [988], [879], [623], [31], [918], [258], [757], [768], [2, 3], [671], [744, 657], [959], [177], [434], [827], [354], [587], [635], [168, 211], [833], [398], [761], [732], [222], [127], [52, 111], [291], [368], [107], [700], [351], [752], [335], [834], [454], [154], [872], [767], [334], [893, 446], [555], [830], [661], [428], [180], [479], [529], [603], [242], [320], [310], [349], [268], [840], [256], [955], [], [892], [423], [], [102], [514, 655], [421], [536, 718, 814, 977, 978], [83], [506], [777, 623], [977], [490], [400, 667], [404], [197], [229], [25], [425], [], [764, 597], [477], [584, 523], [], [42], [970, 795, 796], [745], [854], [864], [129], [831], [136], [939], [], [339], [470], [918], [319], [580], [769], [990], [188, 189], [9], [851], [460], [96], [893], [933], [968], [908, 404], [421, 825], 
[923], [519], [], [642], [28], [811], [110], [481], [102], [797], [868], [762, 934], [375], [], [581, 479, 874, 751], [876], [267], [40], [545], [495, 532], [30], [352], [433], [413], [872, 764], [365], [], [322], [719], [650], [], [212], [517], [863], [325], [], [791], [275], [562], [854], [168], [606], [19], [862], [], [175], [619], [], [252], [868, 415], [669], [526], [132], [310], [786], [618, 926], [708], [901], [109], [774], [151], [544, 909], [370], [62], [960, 954], [954], [628], [2], [52], [306], [963, 809, 923], [146], [803], [673, 681, 620, 526, 664, 508], [774], [], [235], [334], [779, 654], [617], [10, 478], [123], [7], [64, 55], [580], [], [841], [458], [46], [512], [221], [229, 200], [111], [443], [171], [376], [991], [178], [740], [165], [636], [257, 222], [319], [347], [], [641], [691], [177], [495], [875, 671], [540], [850], [923, 959], [516, 750, 431], [896, 804, 905, 700, 799], [891], [162], [393], [105], [217], [934, 923], [358], [125], [702], [469, 919], [131], [654], [897], [143], [439], [166], [632, 733], [630], [907, 440], [677], [614], [80], [], [446], [463], [214], [792], [874], [258], [810, 878], [25], [255], [990], [522], [507], [993], [217], [827], [890], [6], [769, 695], [488, 695], [872], [510], [803], [305], [35], [898, 671], [277], [174], [655], [87], [], [758], [385], [930, 868, 968, 923], [971], [451], [226], [543], [330], [770, 788, 630], [421], [760, 827], [345], [292], [330], [197], [477], [], [603], [238], [132], [305], [335], [789, 799], [171], [212], [604], [420], [818, 819], [529, 669], [269], [460], [277], [479, 817, 475], [682], [116], [661, 479], [299], [674], [650], [727], [582, 941, 951], [469], [572, 966], [938], [61], [834], [87], [451], [127], [32], [427], [614], [533], [345], [512], [607], [366], [171], [809, 618, 923], [219], [256], [], [912], [673, 526, 527, 782, 664, 508], [104], [348], [50], [253], [746], [], [903], [131, 134], [641], [822, 887], [581], [], [879, 977], [163], [294], [617], [290], [708], [678], [29], [234], [99], [], [215], [548, 664, 851, 894], [247], [707], [924], [891], [720], [923, 924], [744, 657], [888], [188], [768], [809, 925], [412], [715], [115], [948], [621], [328], [49, 50], [178], [449, 975], [833], [365], [], [965], [719], [652, 733], [518, 444], [840], [307], [760], [816], [771], [522], [289], [385], [766], [673, 526, 527, 782, 664, 508], [], [659, 923, 926], [51], [990], [253], [854], [391], [852], [891], [834, 895], [625], [373, 377], [155], [851, 921], [], [191], [452], [113], [600, 116, 126], [851], [433], [157], [], [97], [239], [323], [746], [48], [158], [703], [784, 508], [849], [386, 101], [299], [817], [722], [440], [408], [674], [868], [871], [736], [246], [985], [829], [410], [], [119, 121], [412], [320], [393], [], [843], [966], [884, 538], [63], [713], [774, 788], [748], [792], [893], [847], [782, 664], [464], [962, 932, 923], [530, 719], [788], [323], [109], [373], [434], [739], [431], [76], [859], [608, 602], [430], [755], [288], [933], [786], [567], [536], [], [291], [72], [848, 632], [138], [767], [509], [287], [255], [179], [896, 794, 861], [], [131], [438], [950], [587, 813, 910], [538, 668], [486, 594, 501], [479], [51], [816], [930], [238], [321], [992], [614], [642], [487], [329], [], [653], [653], [327], [797], [435, 876], [251], [891], [357], [452], [61], [176], [279], [515, 808], [576], [696], [143], [108], [217], [997], [581, 479, 717], [650], [809], [808], [], [934], [22], [679], [890], [275], [73, 77], [726], [869, 975], [549, 623], [838], [652], [67], [64], [484, 536, 628], 
[590], [357], [442], [965], [442, 494], [975], [651, 909, 827], [618, 926], [468], [96], [], [987, 998], [655], [650], [204], [840], [396], [806], [349], [899, 647, 849, 505], [872], [698], [809, 910], [870], [10], [822, 541, 542], [242], [548, 485, 851, 632], [], [], [241], [403], [327], [], [], [413], [537], [349, 350], [759], [612], [84], [1], [212], [783], [806], [], [588], [892], [955], [594], [891], [82], [673, 810, 527, 508], [672], [119], [417], [712], [626], [48], [372], [162], [339], [954, 951], [921], [557], [56], [812], [302], [717], [295], [159], [], [747], [1], [356], [458], [512], [102], [922], [], [309, 599], [644], [983], [255], [], [276], [488], [292], [894], [509], [665], [44], [359], [30], [312], [24], [167], [424], [218], [272], [947], [723], [35], [781], [672], [], [262], [995], [43], [201], [248], [670], [733], [1, 124], [492], [], [757], [124], [831], [829], [546, 650, 819], [84], [911], [183], [873], [20], [476], [475], [208], [435], [665], [817], [834, 683], [956], [640], [109], [579, 881], [752, 852], [89], [543], [332], [926], [], [539, 741], [991], [493], [440], [518], [442], [719], [425], [880], [397], [963], [840, 462, 463], [641], [751], [804], [923, 928, 291, 737], [35], [349, 350], [638, 639], [336], [923], [760], [621], [945], [133], [1], [886], [437], [265, 266], [971], [827, 840], [812], [256], [977], [442, 437], [302], [62], [434], [231], [149], [], [872], [603], [245], [270], [50], [581], [428], [721, 285, 831], [467], [412], [395, 758], [330], [391], [634], [325], [494], [169], [518, 570], [143], [511], [849], [454], [671], [515, 420], [673, 526, 527, 782, 664, 508], [883], [812], [248, 249, 537], [160], [199], [748], [530], [190], [103], [163], [117], [892], [], [616, 159], [400, 667], [796], [703], [335], [834], [673, 742, 664, 526, 527, 632, 508], [33], [849, 505], [], [15], [602], [172], [], [], [298], [37], [130], [527, 664], [], [465], [838], [294], [581, 717, 479], [746], [743], [220], [572], [], [451, 679], [931], [843], [794], [641], [154], [148], [75], [16], [790], [216], [612, 741], [873], [810, 878], [162, 166], [786], [259], [789], [484, 628], [710, 767], [224, 223], [423, 424], [658], [670], [162], [], [547], [294], [63], [926], [591], [227, 235], [437], [763, 597], [161, 676], [342], [698], [928, 659, 923], [8], [205], [788, 502], [804], [537], [464], [826], [874, 555], [248], [583], [408], [616], [304], [185], [682], [520], [169], [769], [40], [562], [463, 434], [753], [207], [676, 248], [], [995], [871], [568], [169], [990], [840], [522], [335], [346], [479], [215], [515], [858], [230], [967], [546], [673, 526, 527, 664, 508], [940, 941, 942], [797], [939], [160], [963], [658], [251, 805], [982, 439], [524, 461], [253], [979], [277], [540], [], [407], [783, 784], [583], [544, 827], [239], [160], [245], [419], [331], [25], [22], [988], [243], [], [458], [455], [116], [986], [899, 505], [268], [416], [640], [420], [354], [739], [111], [384], [616], [810, 878], [541, 542], [910], [480], [897], [], [780], [629], [866], [185], [], [966], [898, 195], [588], [238, 207], [738], [65], [222], [646], [391, 758], [100], [521], [252], [535], [884], [232, 761], [497], [881], [457, 667], [823], [577], [330], [602], [], [725, 505], [879], [522], [49], [813], [239], [886], [347], [208], [294], [320], [87], [715, 652, 671], [929], [212], [94], [533], [903], [812], [921, 917], [583], [748], [295], [372], [], [361], [108, 973], [], [455], [49, 50], [987, 998], [919, 733], [282], [274, 277], [367], [430], [44], [81], [399], [24], [120], [357], [531], 
[101], [644], [283], [], [], [982], [355]] \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/results/imagenet_synsets.txt b/PyTorch/contrib/cv/classification/convmixer/results/imagenet_synsets.txt new file mode 100644 index 0000000000..88aa58f966 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/imagenet_synsets.txt @@ -0,0 +1,1000 @@ +n01440764 +n01443537 +n01484850 +n01491361 +n01494475 +n01496331 +n01498041 +n01514668 +n01514859 +n01518878 +n01530575 +n01531178 +n01532829 +n01534433 +n01537544 +n01558993 +n01560419 +n01580077 +n01582220 +n01592084 +n01601694 +n01608432 +n01614925 +n01616318 +n01622779 +n01629819 +n01630670 +n01631663 +n01632458 +n01632777 +n01641577 +n01644373 +n01644900 +n01664065 +n01665541 +n01667114 +n01667778 +n01669191 +n01675722 +n01677366 +n01682714 +n01685808 +n01687978 +n01688243 +n01689811 +n01692333 +n01693334 +n01694178 +n01695060 +n01697457 +n01698640 +n01704323 +n01728572 +n01728920 +n01729322 +n01729977 +n01734418 +n01735189 +n01737021 +n01739381 +n01740131 +n01742172 +n01744401 +n01748264 +n01749939 +n01751748 +n01753488 +n01755581 +n01756291 +n01768244 +n01770081 +n01770393 +n01773157 +n01773549 +n01773797 +n01774384 +n01774750 +n01775062 +n01776313 +n01784675 +n01795545 +n01796340 +n01797886 +n01798484 +n01806143 +n01806567 +n01807496 +n01817953 +n01818515 +n01819313 +n01820546 +n01824575 +n01828970 +n01829413 +n01833805 +n01843065 +n01843383 +n01847000 +n01855032 +n01855672 +n01860187 +n01871265 +n01872401 +n01873310 +n01877812 +n01882714 +n01883070 +n01910747 +n01914609 +n01917289 +n01924916 +n01930112 +n01943899 +n01944390 +n01945685 +n01950731 +n01955084 +n01968897 +n01978287 +n01978455 +n01980166 +n01981276 +n01983481 +n01984695 +n01985128 +n01986214 +n01990800 +n02002556 +n02002724 +n02006656 +n02007558 +n02009229 +n02009912 +n02011460 +n02012849 +n02013706 +n02017213 +n02018207 +n02018795 +n02025239 +n02027492 +n02028035 +n02033041 +n02037110 +n02051845 +n02056570 +n02058221 +n02066245 +n02071294 +n02074367 +n02077923 +n02085620 +n02085782 +n02085936 +n02086079 +n02086240 +n02086646 +n02086910 +n02087046 +n02087394 +n02088094 +n02088238 +n02088364 +n02088466 +n02088632 +n02089078 +n02089867 +n02089973 +n02090379 +n02090622 +n02090721 +n02091032 +n02091134 +n02091244 +n02091467 +n02091635 +n02091831 +n02092002 +n02092339 +n02093256 +n02093428 +n02093647 +n02093754 +n02093859 +n02093991 +n02094114 +n02094258 +n02094433 +n02095314 +n02095570 +n02095889 +n02096051 +n02096177 +n02096294 +n02096437 +n02096585 +n02097047 +n02097130 +n02097209 +n02097298 +n02097474 +n02097658 +n02098105 +n02098286 +n02098413 +n02099267 +n02099429 +n02099601 +n02099712 +n02099849 +n02100236 +n02100583 +n02100735 +n02100877 +n02101006 +n02101388 +n02101556 +n02102040 +n02102177 +n02102318 +n02102480 +n02102973 +n02104029 +n02104365 +n02105056 +n02105162 +n02105251 +n02105412 +n02105505 +n02105641 +n02105855 +n02106030 +n02106166 +n02106382 +n02106550 +n02106662 +n02107142 +n02107312 +n02107574 +n02107683 +n02107908 +n02108000 +n02108089 +n02108422 +n02108551 +n02108915 +n02109047 +n02109525 +n02109961 +n02110063 +n02110185 +n02110341 +n02110627 +n02110806 +n02110958 +n02111129 +n02111277 +n02111500 +n02111889 +n02112018 +n02112137 +n02112350 +n02112706 +n02113023 +n02113186 +n02113624 +n02113712 +n02113799 +n02113978 +n02114367 +n02114548 +n02114712 +n02114855 +n02115641 +n02115913 +n02116738 +n02117135 +n02119022 +n02119789 +n02120079 +n02120505 +n02123045 +n02123159 +n02123394 +n02123597 +n02124075 +n02125311 
+n02127052 +n02128385 +n02128757 +n02128925 +n02129165 +n02129604 +n02130308 +n02132136 +n02133161 +n02134084 +n02134418 +n02137549 +n02138441 +n02165105 +n02165456 +n02167151 +n02168699 +n02169497 +n02172182 +n02174001 +n02177972 +n02190166 +n02206856 +n02219486 +n02226429 +n02229544 +n02231487 +n02233338 +n02236044 +n02256656 +n02259212 +n02264363 +n02268443 +n02268853 +n02276258 +n02277742 +n02279972 +n02280649 +n02281406 +n02281787 +n02317335 +n02319095 +n02321529 +n02325366 +n02326432 +n02328150 +n02342885 +n02346627 +n02356798 +n02361337 +n02363005 +n02364673 +n02389026 +n02391049 +n02395406 +n02396427 +n02397096 +n02398521 +n02403003 +n02408429 +n02410509 +n02412080 +n02415577 +n02417914 +n02422106 +n02422699 +n02423022 +n02437312 +n02437616 +n02441942 +n02442845 +n02443114 +n02443484 +n02444819 +n02445715 +n02447366 +n02454379 +n02457408 +n02480495 +n02480855 +n02481823 +n02483362 +n02483708 +n02484975 +n02486261 +n02486410 +n02487347 +n02488291 +n02488702 +n02489166 +n02490219 +n02492035 +n02492660 +n02493509 +n02493793 +n02494079 +n02497673 +n02500267 +n02504013 +n02504458 +n02509815 +n02510455 +n02514041 +n02526121 +n02536864 +n02606052 +n02607072 +n02640242 +n02641379 +n02643566 +n02655020 +n02666196 +n02667093 +n02669723 +n02672831 +n02676566 +n02687172 +n02690373 +n02692877 +n02699494 +n02701002 +n02704792 +n02708093 +n02727426 +n02730930 +n02747177 +n02749479 +n02769748 +n02776631 +n02777292 +n02782093 +n02783161 +n02786058 +n02787622 +n02788148 +n02790996 +n02791124 +n02791270 +n02793495 +n02794156 +n02795169 +n02797295 +n02799071 +n02802426 +n02804414 +n02804610 +n02807133 +n02808304 +n02808440 +n02814533 +n02814860 +n02815834 +n02817516 +n02823428 +n02823750 +n02825657 +n02834397 +n02835271 +n02837789 +n02840245 +n02841315 +n02843684 +n02859443 +n02860847 +n02865351 +n02869837 +n02870880 +n02871525 +n02877765 +n02879718 +n02883205 +n02892201 +n02892767 +n02894605 +n02895154 +n02906734 +n02909870 +n02910353 +n02916936 +n02917067 +n02927161 +n02930766 +n02939185 +n02948072 +n02950826 +n02951358 +n02951585 +n02963159 +n02965783 +n02966193 +n02966687 +n02971356 +n02974003 +n02977058 +n02978881 +n02979186 +n02980441 +n02981792 +n02988304 +n02992211 +n02992529 +n02999410 +n03000134 +n03000247 +n03000684 +n03014705 +n03016953 +n03017168 +n03018349 +n03026506 +n03028079 +n03032252 +n03041632 +n03042490 +n03045698 +n03047690 +n03062245 +n03063599 +n03063689 +n03065424 +n03075370 +n03085013 +n03089624 +n03095699 +n03100240 +n03109150 +n03110669 +n03124043 +n03124170 +n03125729 +n03126707 +n03127747 +n03127925 +n03131574 +n03133878 +n03134739 +n03141823 +n03146219 +n03160309 +n03179701 +n03180011 +n03187595 +n03188531 +n03196217 +n03197337 +n03201208 +n03207743 +n03207941 +n03208938 +n03216828 +n03218198 +n03220513 +n03223299 +n03240683 +n03249569 +n03250847 +n03255030 +n03259280 +n03271574 +n03272010 +n03272562 +n03290653 +n03291819 +n03297495 +n03314780 +n03325584 +n03337140 +n03344393 +n03345487 +n03347037 +n03355925 +n03372029 +n03376595 +n03379051 +n03384352 +n03388043 +n03388183 +n03388549 +n03393912 +n03394916 +n03400231 +n03404251 +n03417042 +n03424325 +n03425413 +n03443371 +n03444034 +n03445777 +n03445924 +n03447447 +n03447721 +n03450230 +n03452741 +n03457902 +n03459775 +n03461385 +n03467068 +n03476684 +n03476991 +n03478589 +n03481172 +n03482405 +n03483316 +n03485407 +n03485794 +n03492542 +n03494278 +n03495258 +n03496892 +n03498962 +n03527444 +n03529860 +n03530642 +n03532672 +n03534580 +n03535780 +n03538406 +n03544143 +n03584254 +n03584829 +n03590841 +n03594734 +n03594945 
+n03595614 +n03598930 +n03599486 +n03602883 +n03617480 +n03623198 +n03627232 +n03630383 +n03633091 +n03637318 +n03642806 +n03649909 +n03657121 +n03658185 +n03661043 +n03662601 +n03666591 +n03670208 +n03673027 +n03676483 +n03680355 +n03690938 +n03691459 +n03692522 +n03697007 +n03706229 +n03709823 +n03710193 +n03710637 +n03710721 +n03717622 +n03720891 +n03721384 +n03724870 +n03729826 +n03733131 +n03733281 +n03733805 +n03742115 +n03743016 +n03759954 +n03761084 +n03763968 +n03764736 +n03769881 +n03770439 +n03770679 +n03773504 +n03775071 +n03775546 +n03776460 +n03777568 +n03777754 +n03781244 +n03782006 +n03785016 +n03786901 +n03787032 +n03788195 +n03788365 +n03791053 +n03792782 +n03792972 +n03793489 +n03794056 +n03796401 +n03803284 +n03804744 +n03814639 +n03814906 +n03825788 +n03832673 +n03837869 +n03838899 +n03840681 +n03841143 +n03843555 +n03854065 +n03857828 +n03866082 +n03868242 +n03868863 +n03871628 +n03873416 +n03874293 +n03874599 +n03876231 +n03877472 +n03877845 +n03884397 +n03887697 +n03888257 +n03888605 +n03891251 +n03891332 +n03895866 +n03899768 +n03902125 +n03903868 +n03908618 +n03908714 +n03916031 +n03920288 +n03924679 +n03929660 +n03929855 +n03930313 +n03930630 +n03933933 +n03935335 +n03937543 +n03938244 +n03942813 +n03944341 +n03947888 +n03950228 +n03954731 +n03956157 +n03958227 +n03961711 +n03967562 +n03970156 +n03976467 +n03976657 +n03977966 +n03980874 +n03982430 +n03983396 +n03991062 +n03992509 +n03995372 +n03998194 +n04004767 +n04005630 +n04008634 +n04009552 +n04019541 +n04023962 +n04026417 +n04033901 +n04033995 +n04037443 +n04039381 +n04040759 +n04041544 +n04044716 +n04049303 +n04065272 +n04067472 +n04069434 +n04070727 +n04074963 +n04081281 +n04086273 +n04090263 +n04099969 +n04111531 +n04116512 +n04118538 +n04118776 +n04120489 +n04125021 +n04127249 +n04131690 +n04133789 +n04136333 +n04141076 +n04141327 +n04141975 +n04146614 +n04147183 +n04149813 +n04152593 +n04153751 +n04154565 +n04162706 +n04179913 +n04192698 +n04200800 +n04201297 +n04204238 +n04204347 +n04208210 +n04209133 +n04209239 +n04228054 +n04229816 +n04235860 +n04238763 +n04239074 +n04243546 +n04251144 +n04252077 +n04252225 +n04254120 +n04254680 +n04254777 +n04258138 +n04259630 +n04263257 +n04264628 +n04265275 +n04266014 +n04270147 +n04273569 +n04275548 +n04277352 +n04285008 +n04286575 +n04296562 +n04310018 +n04311004 +n04311174 +n04317175 +n04325704 +n04326547 +n04328186 +n04330267 +n04332243 +n04335435 +n04336792 +n04344873 +n04346328 +n04347754 +n04350905 +n04355338 +n04355933 +n04356056 +n04357314 +n04366367 +n04367480 +n04370456 +n04371430 +n04371774 +n04372370 +n04376876 +n04380533 +n04389033 +n04392985 +n04398044 +n04399382 +n04404412 +n04409515 +n04417672 +n04418357 +n04423845 +n04428191 +n04429376 +n04435653 +n04442312 +n04443257 +n04447861 +n04456115 +n04458633 +n04461696 +n04462240 +n04465501 +n04467665 +n04476259 +n04479046 +n04482393 +n04483307 +n04485082 +n04486054 +n04487081 +n04487394 +n04493381 +n04501370 +n04505470 +n04507155 +n04509417 +n04515003 +n04517823 +n04522168 +n04523525 +n04525038 +n04525305 +n04532106 +n04532670 +n04536866 +n04540053 +n04542943 +n04548280 +n04548362 +n04550184 +n04552348 +n04553703 +n04554684 +n04557648 +n04560804 +n04562935 +n04579145 +n04579432 +n04584207 +n04589890 +n04590129 +n04591157 +n04591713 +n04592741 +n04596742 +n04597913 +n04599235 +n04604644 +n04606251 +n04612504 +n04613696 +n06359193 +n06596364 +n06785654 +n06794110 +n06874185 +n07248320 +n07565083 +n07579787 +n07583066 +n07584110 +n07590611 +n07613480 +n07614500 +n07615774 +n07684084 +n07693725 +n07695742 
+n07697313 +n07697537 +n07711569 +n07714571 +n07714990 +n07715103 +n07716358 +n07716906 +n07717410 +n07717556 +n07718472 +n07718747 +n07720875 +n07730033 +n07734744 +n07742313 +n07745940 +n07747607 +n07749582 +n07753113 +n07753275 +n07753592 +n07754684 +n07760859 +n07768694 +n07802026 +n07831146 +n07836838 +n07860988 +n07871810 +n07873807 +n07875152 +n07880968 +n07892512 +n07920052 +n07930864 +n07932039 +n09193705 +n09229709 +n09246464 +n09256479 +n09288635 +n09332890 +n09399592 +n09421951 +n09428293 +n09468604 +n09472597 +n09835506 +n10148035 +n10565667 +n11879895 +n11939491 +n12057211 +n12144580 +n12267677 +n12620546 +n12768682 +n12985857 +n12998815 +n13037406 +n13040303 +n13044778 +n13052670 +n13054560 +n13133613 +n15075141 diff --git a/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-a-clean.csv b/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-a-clean.csv new file mode 100644 index 0000000000..e97b7b05c2 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-a-clean.csv @@ -0,0 +1,421 @@ +model,top1,top1_err,top5,top5_err,param_count,img_size,cropt_pct,interpolation +tf_efficientnet_l2_ns,98.550,1.450,99.820,0.180,480.31,800,0.960,bicubic +tf_efficientnet_l2_ns_475,98.500,1.500,99.830,0.170,480.31,475,0.936,bicubic +vit_large_patch16_384,98.220,1.780,99.800,0.200,304.72,384,1.000,bicubic +swin_large_patch4_window12_384,98.040,1.960,99.690,0.310,196.74,384,1.000,bicubic +tf_efficientnet_b7_ns,97.910,2.090,99.720,0.280,66.35,600,0.949,bicubic +swin_base_patch4_window12_384,97.890,2.110,99.710,0.290,87.90,384,1.000,bicubic +vit_large_r50_s32_384,97.860,2.140,99.670,0.330,329.09,384,1.000,bicubic +vit_base_patch16_384,97.840,2.160,99.670,0.330,86.86,384,1.000,bicubic +tf_efficientnetv2_l_in21ft1k,97.700,2.300,99.670,0.330,118.52,480,1.000,bicubic +swin_large_patch4_window7_224,97.650,2.350,99.580,0.420,196.53,224,0.900,bicubic +vit_large_patch16_224,97.640,2.360,99.590,0.410,304.33,224,0.900,bicubic +tf_efficientnet_b6_ns,97.630,2.370,99.580,0.420,43.04,528,0.942,bicubic +ig_resnext101_32x48d,97.620,2.380,99.700,0.300,828.41,224,0.875,bilinear +dm_nfnet_f6,97.600,2.400,99.550,0.450,438.36,576,0.956,bicubic +dm_nfnet_f4,97.580,2.420,99.510,0.490,316.07,512,0.951,bicubic +dm_nfnet_f5,97.540,2.460,99.570,0.430,377.21,544,0.954,bicubic +tf_efficientnet_b5_ns,97.500,2.500,99.630,0.370,30.39,456,0.934,bicubic +resnetv2_152x4_bitm,97.490,2.510,99.610,0.390,936.53,480,1.000,bilinear +cait_m48_448,97.480,2.520,99.550,0.450,356.46,448,1.000,bicubic +tf_efficientnetv2_m_in21ft1k,97.480,2.520,99.530,0.470,54.14,480,1.000,bicubic +cait_m36_384,97.400,2.600,99.510,0.490,271.22,384,1.000,bicubic +ig_resnext101_32x32d,97.360,2.640,99.680,0.320,468.53,224,0.875,bilinear +dm_nfnet_f3,97.350,2.650,99.560,0.440,254.92,416,0.940,bicubic +cait_s36_384,97.330,2.670,99.530,0.470,68.37,384,1.000,bicubic +tf_efficientnetv2_l,97.280,2.720,99.550,0.450,118.52,480,1.000,bicubic +swin_base_patch4_window7_224,97.250,2.750,99.530,0.470,87.77,224,0.900,bicubic +tf_efficientnet_b8,97.200,2.800,99.500,0.500,87.41,672,0.954,bicubic +swsl_resnext101_32x8d,97.200,2.800,99.570,0.430,88.79,224,0.875,bilinear +tf_efficientnet_b7_ap,97.200,2.800,99.540,0.460,66.35,600,0.949,bicubic +vit_base_r50_s16_384,97.180,2.820,99.560,0.440,98.95,384,1.000,bicubic +tf_efficientnetv2_m,97.140,2.860,99.410,0.590,54.14,480,1.000,bicubic +tf_efficientnet_b8_ap,97.110,2.890,99.660,0.340,87.41,672,0.954,bicubic +eca_nfnet_l2,97.090,2.910,99.510,0.490,56.72,384,1.000,bicubic 
+ecaresnet269d,97.080,2.920,99.470,0.530,102.09,352,1.000,bicubic +tf_efficientnet_b6_ap,97.080,2.920,99.620,0.380,43.04,528,0.942,bicubic +cait_s24_384,97.070,2.930,99.430,0.570,47.06,384,1.000,bicubic +dm_nfnet_f2,97.020,2.980,99.440,0.560,193.78,352,0.920,bicubic +resnetv2_152x2_bitm,97.010,2.990,99.590,0.410,236.34,448,1.000,bilinear +tf_efficientnet_b7,97.010,2.990,99.520,0.480,66.35,600,0.949,bicubic +resnetv2_101x3_bitm,96.990,3.010,99.490,0.510,387.93,448,1.000,bilinear +efficientnetv2_rw_m,96.980,3.020,99.540,0.460,53.24,416,1.000,bicubic +deit_base_distilled_patch16_384,96.960,3.040,99.480,0.520,87.63,384,1.000,bicubic +tf_efficientnet_b4_ns,96.950,3.050,99.580,0.420,19.34,380,0.922,bicubic +dm_nfnet_f1,96.920,3.080,99.410,0.590,132.63,320,0.910,bicubic +resnetrs420,96.910,3.090,99.460,0.540,191.89,416,1.000,bicubic +vit_base_patch16_224,96.880,3.120,99.530,0.470,86.57,224,0.900,bicubic +resnetv2_152x2_bit_teacher_384,96.830,3.170,99.450,0.550,236.34,384,1.000,bicubic +ig_resnext101_32x16d,96.820,3.180,99.590,0.410,194.03,224,0.875,bilinear +vit_large_r50_s32_224,96.790,3.210,99.350,0.650,328.99,224,0.900,bicubic +seresnet152d,96.770,3.230,99.450,0.550,66.84,320,1.000,bicubic +resnetrs350,96.760,3.240,99.370,0.630,163.96,384,1.000,bicubic +tf_efficientnetv2_s_in21ft1k,96.730,3.270,99.420,0.580,21.46,384,1.000,bicubic +resnet200d,96.720,3.280,99.330,0.670,64.69,320,1.000,bicubic +resnetv2_50x3_bitm,96.710,3.290,99.550,0.450,217.32,448,1.000,bilinear +eca_nfnet_l1,96.700,3.300,99.290,0.710,41.41,320,1.000,bicubic +vit_small_patch16_384,96.700,3.300,99.480,0.520,22.20,384,1.000,bicubic +resnetrs270,96.690,3.310,99.350,0.650,129.86,352,1.000,bicubic +pit_b_distilled_224,96.680,3.320,99.350,0.650,74.79,224,0.900,bicubic +tf_efficientnet_b5_ap,96.680,3.320,99.460,0.540,30.39,456,0.934,bicubic +vit_small_r26_s32_384,96.680,3.320,99.570,0.430,36.47,384,1.000,bicubic +tf_efficientnet_b6,96.670,3.330,99.370,0.630,43.04,528,0.942,bicubic +resmlp_big_24_224_in22ft1k,96.620,3.380,99.510,0.490,129.14,224,0.875,bicubic +resnest200e,96.610,3.390,99.350,0.650,70.20,320,0.909,bicubic +swsl_resnext101_32x16d,96.600,3.400,99.520,0.480,194.03,224,0.875,bilinear +resnetrs152,96.580,3.420,99.240,0.760,86.62,320,1.000,bicubic +cait_xs24_384,96.550,3.450,99.420,0.580,26.67,384,1.000,bicubic +efficientnetv2_rw_s,96.540,3.460,99.360,0.640,23.94,384,1.000,bicubic +resnetrs200,96.530,3.470,99.350,0.650,93.21,320,1.000,bicubic +resnest269e,96.520,3.480,99.350,0.650,110.93,416,0.928,bicubic +vit_base_patch32_384,96.490,3.510,99.410,0.590,88.30,384,1.000,bicubic +vit_base_patch16_224_miil,96.460,3.540,99.300,0.700,86.54,224,0.875,bilinear +resmlp_big_24_distilled_224,96.450,3.550,99.310,0.690,129.14,224,0.875,bicubic +swsl_resnext101_32x4d,96.420,3.580,99.470,0.530,44.18,224,0.875,bilinear +tf_efficientnet_b3_ns,96.390,3.610,99.350,0.650,12.23,300,0.904,bicubic +cait_s24_224,96.380,3.620,99.150,0.850,46.92,224,1.000,bicubic +resnet152d,96.360,3.640,99.390,0.610,60.21,320,1.000,bicubic +regnety_160,96.350,3.650,99.330,0.670,83.59,288,1.000,bicubic +tf_efficientnet_b5,96.350,3.650,99.310,0.690,30.39,456,0.934,bicubic +tf_efficientnetv2_s,96.340,3.660,99.200,0.800,21.46,384,1.000,bicubic +ig_resnext101_32x8d,96.320,3.680,99.430,0.570,88.79,224,0.875,bilinear +resnet101d,96.290,3.710,99.230,0.770,44.57,320,1.000,bicubic +twins_svt_large,96.270,3.730,99.170,0.830,99.27,224,0.900,bicubic +tf_efficientnet_b4_ap,96.160,3.840,99.280,0.720,19.34,380,0.922,bicubic 
+twins_svt_base,96.160,3.840,99.060,0.940,56.07,224,0.900,bicubic +deit_base_patch16_384,96.150,3.850,99.140,0.860,86.86,384,1.000,bicubic +dm_nfnet_f0,96.150,3.850,99.250,0.750,71.49,256,0.900,bicubic +efficientnet_b4,96.150,3.850,99.200,0.800,19.34,384,1.000,bicubic +twins_pcpvt_large,96.150,3.850,99.180,0.820,60.99,224,0.900,bicubic +resnetv2_50x1_bit_distilled,96.130,3.870,99.280,0.720,25.55,224,0.875,bicubic +nfnet_l0,96.120,3.880,99.240,0.760,35.07,288,1.000,bicubic +resnetv2_152x2_bit_teacher,96.100,3.900,99.280,0.720,236.34,224,0.875,bicubic +resnetv2_101x1_bitm,96.100,3.900,99.280,0.720,44.54,448,1.000,bilinear +deit_base_distilled_patch16_224,96.090,3.910,99.190,0.810,87.34,224,0.900,bicubic +regnety_032,95.970,4.030,99.190,0.810,19.44,288,1.000,bicubic +tresnet_xl_448,95.970,4.030,99.130,0.870,78.44,448,0.875,bilinear +eca_nfnet_l0,95.950,4.050,99.210,0.790,24.14,288,1.000,bicubic +swin_small_patch4_window7_224,95.910,4.090,99.020,0.980,49.61,224,0.900,bicubic +tf_efficientnet_b4,95.900,4.100,99.170,0.830,19.34,380,0.922,bicubic +swsl_resnext50_32x4d,95.870,4.130,99.250,0.750,25.03,224,0.875,bilinear +resnest101e,95.860,4.140,99.210,0.790,48.28,256,0.875,bilinear +resnet51q,95.860,4.140,99.120,0.880,35.70,288,1.000,bilinear +tresnet_l_448,95.860,4.140,99.120,0.880,55.99,448,0.875,bilinear +cait_xxs36_384,95.850,4.150,99.090,0.910,17.37,384,1.000,bicubic +vit_large_patch32_384,95.830,4.170,99.150,0.850,306.63,384,1.000,bicubic +ssl_resnext101_32x16d,95.800,4.200,99.180,0.820,194.03,224,0.875,bilinear +twins_pcpvt_base,95.790,4.210,99.130,0.870,43.83,224,0.900,bicubic +tf_efficientnet_b2_ns,95.770,4.230,99.120,0.880,9.11,260,0.890,bicubic +tresnet_m,95.720,4.280,99.030,0.970,31.39,224,0.875,bilinear +efficientnet_b3,95.710,4.290,99.040,0.960,12.23,320,1.000,bicubic +pnasnet5large,95.710,4.290,98.920,1.080,86.06,331,0.911,bicubic +nasnetalarge,95.680,4.320,98.930,1.070,88.75,331,0.911,bicubic +pit_b_224,95.640,4.360,98.660,1.340,73.76,224,0.900,bicubic +vit_small_r26_s32_224,95.630,4.370,99.190,0.810,36.43,224,0.900,bicubic +convit_base,95.550,4.450,98.870,1.130,86.54,224,0.875,bicubic +coat_lite_small,95.540,4.460,98.860,1.140,19.84,224,0.900,bicubic +ecaresnet101d,95.530,4.470,99.130,0.870,44.57,224,0.875,bicubic +levit_384,95.530,4.470,99.050,0.950,39.13,224,0.900,bicubic +ecaresnet50t,95.510,4.490,99.120,0.880,25.57,320,0.950,bicubic +visformer_small,95.490,4.510,98.900,1.100,40.22,224,0.900,bicubic +ssl_resnext101_32x8d,95.470,4.530,99.110,0.890,88.79,224,0.875,bilinear +deit_base_patch16_224,95.440,4.560,98.840,1.160,86.57,224,0.900,bicubic +ssl_resnext101_32x4d,95.440,4.560,99.130,0.870,44.18,224,0.875,bilinear +tresnet_xl,95.440,4.560,99.050,0.950,78.44,224,0.875,bilinear +resnetrs101,95.430,4.570,99.030,0.970,63.62,288,0.940,bicubic +swsl_resnet50,95.410,4.590,99.290,0.710,25.56,224,0.875,bilinear +vit_small_patch16_224,95.370,4.630,99.150,0.850,22.05,224,0.900,bicubic +tf_efficientnet_b3_ap,95.320,4.680,98.900,1.100,12.23,300,0.904,bicubic +mixer_b16_224_miil,95.300,4.700,98.880,1.120,59.88,224,0.875,bilinear +tresnet_l,95.290,4.710,99.010,0.990,55.99,224,0.875,bilinear +cait_xxs24_384,95.260,4.740,98.960,1.040,12.03,384,1.000,bicubic +pit_s_distilled_224,95.240,4.760,99.050,0.950,24.04,224,0.900,bicubic +twins_pcpvt_small,95.210,4.790,98.880,1.120,24.11,224,0.900,bicubic +convit_small,95.200,4.800,98.900,1.100,27.78,224,0.875,bicubic +twins_svt_small,95.200,4.800,98.880,1.120,24.06,224,0.900,bicubic +tf_efficientnet_b1_ns,95.170,4.830,99.110,0.890,7.79,240,0.882,bicubic 
+tf_efficientnetv2_b3,95.160,4.840,98.820,1.180,14.36,300,0.904,bicubic +swin_tiny_patch4_window7_224,95.140,4.860,98.850,1.150,28.29,224,0.900,bicubic +efficientnet_el,95.120,4.880,98.990,1.010,10.59,300,0.904,bicubic +gernet_l,95.090,4.910,98.900,1.100,31.08,256,0.875,bilinear +ecaresnet101d_pruned,95.080,4.920,98.980,1.020,24.88,224,0.875,bicubic +wide_resnet50_2,95.080,4.920,98.970,1.030,68.88,224,0.875,bicubic +legacy_senet154,95.070,4.930,98.830,1.170,115.09,224,0.875,bilinear +vit_small_patch32_384,95.050,4.950,98.990,1.010,22.92,384,1.000,bicubic +seresnext50_32x4d,95.040,4.960,98.880,1.120,27.56,224,0.875,bicubic +tnt_s_patch16_224,95.040,4.960,98.830,1.170,23.76,224,0.900,bicubic +gluon_resnet152_v1s,95.040,4.960,98.930,1.070,60.32,224,0.875,bicubic +levit_256,95.010,4.990,98.890,1.110,18.89,224,0.900,bicubic +resnetv2_50x1_bitm,95.010,4.990,99.060,0.940,25.55,448,1.000,bilinear +tf_efficientnet_b3,95.010,4.990,98.910,1.090,12.23,300,0.904,bicubic +vit_base_patch32_224,95.000,5.000,99.030,0.970,88.22,224,0.900,bicubic +tresnet_m_448,94.990,5.010,98.980,1.020,31.39,448,0.875,bilinear +coat_mini,94.970,5.030,98.780,1.220,10.34,224,0.900,bicubic +resnest50d_4s2x40d,94.960,5.040,99.070,0.930,30.42,224,0.875,bicubic +rexnet_200,94.940,5.060,99.010,0.990,16.37,224,0.875,bicubic +gluon_seresnext101_64x4d,94.930,5.070,98.830,1.170,88.23,224,0.875,bicubic +gluon_senet154,94.920,5.080,98.760,1.240,115.09,224,0.875,bicubic +gluon_seresnext101_32x4d,94.920,5.080,98.810,1.190,48.96,224,0.875,bicubic +tf_efficientnet_lite4,94.890,5.110,99.020,0.980,13.01,380,0.920,bilinear +resmlp_36_distilled_224,94.890,5.110,98.850,1.150,44.69,224,0.875,bicubic +ssl_resnext50_32x4d,94.870,5.130,98.880,1.120,25.03,224,0.875,bilinear +resnest50d,94.830,5.170,98.880,1.120,27.48,224,0.875,bilinear +ecaresnetlight,94.770,5.230,98.800,1.200,30.16,224,0.875,bicubic +resnest50d_1s4x24d,94.750,5.250,98.980,1.020,25.68,224,0.875,bicubic +gluon_resnet152_v1d,94.740,5.260,98.740,1.260,60.21,224,0.875,bicubic +gluon_resnet101_v1s,94.720,5.280,98.820,1.180,44.67,224,0.875,bicubic +deit_small_distilled_patch16_224,94.710,5.290,99.030,0.970,22.44,224,0.900,bicubic +gluon_resnext101_64x4d,94.670,5.330,98.650,1.350,83.46,224,0.875,bicubic +cspdarknet53,94.660,5.340,98.800,1.200,27.64,256,0.887,bilinear +resmlp_big_24_224,94.660,5.340,98.480,1.520,129.14,224,0.875,bicubic +ecaresnet50d,94.630,5.370,98.890,1.110,25.58,224,0.875,bicubic +efficientnet_b3_pruned,94.630,5.370,98.760,1.240,9.86,300,0.904,bicubic +gernet_m,94.620,5.380,98.860,1.140,21.14,224,0.875,bilinear +efficientnet_b2,94.610,5.390,98.710,1.290,9.11,288,1.000,bicubic +pit_s_224,94.590,5.410,98.710,1.290,23.46,224,0.900,bicubic +repvgg_b3,94.570,5.430,98.780,1.220,123.09,224,0.875,bilinear +nf_resnet50,94.560,5.440,98.790,1.210,25.56,288,0.940,bicubic +seresnet50,94.550,5.450,98.750,1.250,28.09,224,0.875,bicubic +inception_resnet_v2,94.540,5.460,98.790,1.210,55.84,299,0.897,bicubic +regnety_320,94.540,5.460,98.850,1.150,145.05,224,0.875,bicubic +gluon_resnext101_32x4d,94.530,5.470,98.630,1.370,44.18,224,0.875,bicubic +repvgg_b3g4,94.520,5.480,98.970,1.030,83.83,224,0.875,bilinear +tf_efficientnet_b2_ap,94.490,5.510,98.620,1.380,9.11,260,0.890,bicubic +regnety_120,94.480,5.520,98.810,1.190,51.82,224,0.875,bicubic +rexnet_150,94.480,5.520,98.790,1.210,9.73,224,0.875,bicubic +cspresnext50,94.480,5.520,98.680,1.320,20.57,224,0.875,bilinear +resmlp_24_distilled_224,94.460,5.540,98.770,1.230,30.02,224,0.875,bicubic 
+regnetx_320,94.460,5.540,98.740,1.260,107.81,224,0.875,bicubic +ssl_resnet50,94.450,5.550,98.920,1.080,25.56,224,0.875,bilinear +tf_efficientnetv2_b2,94.420,5.580,98.570,1.430,10.10,260,0.890,bicubic +tf_efficientnet_el,94.410,5.590,98.710,1.290,10.59,300,0.904,bicubic +deit_small_patch16_224,94.400,5.600,98.690,1.310,22.05,224,0.900,bicubic +efficientnet_el_pruned,94.400,5.600,98.740,1.260,10.59,300,0.904,bicubic +inception_v4,94.380,5.620,98.580,1.420,42.68,299,0.875,bicubic +legacy_seresnext101_32x4d,94.370,5.630,98.650,1.350,48.96,224,0.875,bilinear +tf_efficientnet_b2,94.360,5.640,98.610,1.390,9.11,260,0.890,bicubic +gluon_seresnext50_32x4d,94.340,5.660,98.610,1.390,27.56,224,0.875,bicubic +resnetrs50,94.310,5.690,98.640,1.360,35.69,224,0.910,bicubic +dpn107,94.310,5.690,98.480,1.520,86.92,224,0.875,bicubic +ecaresnet26t,94.310,5.690,98.720,1.280,16.01,320,0.950,bicubic +xception71,94.280,5.720,98.640,1.360,42.34,299,0.903,bicubic +cait_xxs36_224,94.260,5.740,98.720,1.280,17.30,224,1.000,bicubic +gluon_xception65,94.260,5.740,98.570,1.430,39.92,299,0.903,bicubic +resnet50d,94.260,5.740,98.720,1.280,25.58,224,0.875,bicubic +skresnext50_32x4d,94.260,5.740,98.460,1.540,27.48,224,0.875,bicubic +regnetx_120,94.240,5.760,98.650,1.350,46.11,224,0.875,bicubic +dpn92,94.230,5.770,98.730,1.270,37.67,224,0.875,bicubic +ecaresnet50d_pruned,94.220,5.780,98.730,1.270,19.94,224,0.875,bicubic +gluon_resnet101_v1d,94.220,5.780,98.550,1.450,44.57,224,0.875,bicubic +tf_efficientnet_lite3,94.200,5.800,98.640,1.360,8.20,300,0.904,bilinear +resmlp_36_224,94.190,5.810,98.660,1.340,44.69,224,0.875,bicubic +mixnet_xl,94.190,5.810,98.340,1.660,11.90,224,0.875,bicubic +resnext50d_32x4d,94.180,5.820,98.570,1.430,25.05,224,0.875,bicubic +levit_192,94.170,5.830,98.540,1.460,10.95,224,0.900,bicubic +regnety_080,94.170,5.830,98.680,1.320,39.18,224,0.875,bicubic +ens_adv_inception_resnet_v2,94.160,5.840,98.600,1.400,55.84,299,0.897,bicubic +gluon_resnet152_v1c,94.160,5.840,98.640,1.360,60.21,224,0.875,bicubic +regnety_064,94.150,5.850,98.730,1.270,30.58,224,0.875,bicubic +efficientnet_b2_pruned,94.140,5.860,98.530,1.470,8.31,260,0.890,bicubic +dpn98,94.130,5.870,98.570,1.430,61.57,224,0.875,bicubic +nf_regnet_b1,94.120,5.880,98.630,1.370,10.22,288,0.900,bicubic +regnetx_160,94.120,5.880,98.750,1.250,54.28,224,0.875,bicubic +resnext50_32x4d,94.100,5.900,98.350,1.650,25.03,224,0.875,bicubic +ese_vovnet39b,94.090,5.910,98.660,1.340,24.57,224,0.875,bicubic +gluon_resnet152_v1b,94.080,5.920,98.450,1.550,60.19,224,0.875,bicubic +coat_lite_mini,94.060,5.940,98.560,1.440,11.01,224,0.900,bicubic +resmlp_24_224,94.020,5.980,98.330,1.670,30.02,224,0.875,bicubic +dpn131,94.010,5.990,98.720,1.280,79.25,224,0.875,bicubic +hrnet_w64,94.010,5.990,98.610,1.390,128.06,224,0.875,bilinear +resnetblur50,93.960,6.040,98.590,1.410,25.56,224,0.875,bicubic +dla102x2,93.950,6.050,98.490,1.510,41.28,224,0.875,bilinear +tf_efficientnetv2_b1,93.940,6.060,98.620,1.380,8.14,240,0.882,bicubic +hrnet_w48,93.920,6.080,98.610,1.390,77.47,224,0.875,bilinear +rexnet_130,93.900,6.100,98.400,1.600,7.56,224,0.875,bicubic +tf_efficientnet_cc_b1_8e,93.900,6.100,98.260,1.740,39.72,240,0.882,bicubic +regnetx_064,93.890,6.110,98.630,1.370,26.21,224,0.875,bicubic +regnetx_080,93.870,6.130,98.520,1.480,39.57,224,0.875,bicubic +repvgg_b2g4,93.860,6.140,98.590,1.410,61.76,224,0.875,bilinear +regnety_040,93.860,6.140,98.650,1.350,20.65,224,0.875,bicubic +efficientnet_em,93.840,6.160,98.810,1.190,6.90,240,0.882,bicubic 
+resnext101_32x8d,93.830,6.170,98.580,1.420,88.79,224,0.875,bilinear +gluon_resnext50_32x4d,93.810,6.190,98.410,1.590,25.03,224,0.875,bicubic +pit_xs_distilled_224,93.810,6.190,98.670,1.330,11.00,224,0.900,bicubic +resnet50,93.810,6.190,98.390,1.610,25.56,224,0.875,bicubic +gluon_resnet50_v1d,93.770,6.230,98.390,1.610,25.58,224,0.875,bicubic +xception65,93.760,6.240,98.370,1.630,39.92,299,0.903,bicubic +gluon_resnet101_v1b,93.750,6.250,98.380,1.620,44.55,224,0.875,bicubic +res2net101_26w_4s,93.750,6.250,98.310,1.690,45.21,224,0.875,bilinear +cspresnet50,93.740,6.260,98.640,1.360,21.62,256,0.887,bilinear +legacy_seresnext50_32x4d,93.730,6.270,98.580,1.420,27.56,224,0.875,bilinear +wide_resnet101_2,93.720,6.280,98.540,1.460,126.89,224,0.875,bilinear +tf_efficientnet_b1_ap,93.690,6.310,98.360,1.640,7.79,240,0.882,bicubic +dpn68b,93.690,6.310,98.510,1.490,12.61,224,0.875,bicubic +gluon_resnet101_v1c,93.670,6.330,98.420,1.580,44.57,224,0.875,bicubic +vit_tiny_patch16_384,93.650,6.350,98.600,1.400,5.79,384,1.000,bicubic +tf_efficientnet_b0_ns,93.630,6.370,98.640,1.360,5.29,224,0.875,bicubic +gluon_resnet50_v1s,93.620,6.380,98.460,1.540,25.68,224,0.875,bicubic +cait_xxs24_224,93.600,6.400,98.440,1.560,11.96,224,1.000,bicubic +coat_tiny,93.590,6.410,98.430,1.570,5.50,224,0.900,bicubic +regnetx_040,93.560,6.440,98.540,1.460,22.12,224,0.875,bicubic +hrnet_w44,93.550,6.450,98.700,1.300,67.06,224,0.875,bilinear +res2net50_26w_8s,93.540,6.460,98.260,1.740,48.40,224,0.875,bilinear +hrnet_w32,93.530,6.470,98.450,1.550,41.23,224,0.875,bilinear +dla102x,93.520,6.480,98.510,1.490,26.31,224,0.875,bilinear +repvgg_b2,93.500,6.500,98.730,1.270,89.02,224,0.875,bilinear +tf_efficientnet_b1,93.500,6.500,98.360,1.640,7.79,240,0.882,bicubic +hrnet_w40,93.490,6.510,98.580,1.420,57.56,224,0.875,bilinear +gluon_inception_v3,93.460,6.540,98.570,1.430,23.83,299,0.875,bicubic +xception,93.460,6.540,98.530,1.470,22.86,299,0.897,bicubic +mixnet_l,93.450,6.550,98.220,1.780,7.33,224,0.875,bicubic +xception41,93.430,6.570,98.430,1.570,26.97,299,0.903,bicubic +res2net50_26w_6s,93.410,6.590,98.280,1.720,37.05,224,0.875,bilinear +legacy_seresnet152,93.400,6.600,98.350,1.650,66.82,224,0.875,bilinear +dla169,93.340,6.660,98.600,1.400,53.39,224,0.875,bilinear +levit_128,93.340,6.660,98.380,1.620,9.21,224,0.900,bicubic +repvgg_b1,93.330,6.670,98.510,1.490,57.42,224,0.875,bilinear +resnest26d,93.330,6.670,98.630,1.370,17.07,224,0.875,bilinear +tf_inception_v3,93.320,6.680,98.030,1.970,23.83,299,0.875,bicubic +tf_mixnet_l,93.310,6.690,98.030,1.970,7.33,224,0.875,bicubic +selecsls60b,93.300,6.700,98.280,1.720,32.77,224,0.875,bicubic +tv_resnet152,93.300,6.700,98.390,1.610,60.19,224,0.875,bilinear +legacy_seresnet101,93.280,6.720,98.510,1.490,49.33,224,0.875,bilinear +efficientnet_b1,93.250,6.750,98.290,1.710,7.79,256,1.000,bicubic +coat_lite_tiny,93.240,6.760,98.260,1.740,5.72,224,0.900,bicubic +hrnet_w30,93.200,6.800,98.410,1.590,37.71,224,0.875,bilinear +dla60_res2net,93.180,6.820,98.420,1.580,20.85,224,0.875,bilinear +dla60_res2next,93.180,6.820,98.410,1.590,17.03,224,0.875,bilinear +efficientnet_es,93.140,6.860,98.420,1.580,5.44,224,0.875,bicubic +dla60x,93.120,6.880,98.510,1.490,17.35,224,0.875,bilinear +regnetx_032,93.120,6.880,98.390,1.610,15.30,224,0.875,bicubic +tf_efficientnetv2_b0,93.110,6.890,98.390,1.610,7.14,224,0.875,bicubic +pit_xs_224,93.110,6.890,98.310,1.690,10.62,224,0.900,bicubic +dla102,93.060,6.940,98.540,1.460,33.27,224,0.875,bilinear +gluon_resnet50_v1c,93.030,6.970,98.390,1.610,25.58,224,0.875,bicubic 
+regnety_016,93.030,6.970,98.360,1.640,11.20,224,0.875,bicubic +rexnet_100,93.030,6.970,98.190,1.810,4.80,224,0.875,bicubic +selecsls60,93.030,6.970,98.300,1.700,30.67,224,0.875,bicubic +repvgg_b1g4,92.980,7.020,98.430,1.570,39.97,224,0.875,bilinear +legacy_seresnet50,92.960,7.040,98.190,1.810,28.09,224,0.875,bilinear +hardcorenas_f,92.950,7.050,98.160,1.840,8.20,224,0.875,bilinear +tf_efficientnet_em,92.930,7.070,98.190,1.810,6.90,240,0.882,bicubic +adv_inception_v3,92.880,7.120,98.140,1.860,23.83,299,0.875,bicubic +res2next50,92.840,7.160,98.180,1.820,24.67,224,0.875,bilinear +tf_efficientnet_cc_b0_8e,92.830,7.170,98.180,1.820,24.01,224,0.875,bicubic +resmlp_12_distilled_224,92.830,7.170,98.140,1.860,15.35,224,0.875,bicubic +gmixer_24_224,92.830,7.170,97.880,2.120,24.72,224,0.875,bicubic +seresnext26t_32x4d,92.820,7.180,98.370,1.630,16.81,224,0.875,bicubic +tv_resnet101,92.810,7.190,98.250,1.750,44.55,224,0.875,bilinear +efficientnet_b1_pruned,92.770,7.230,98.040,1.960,6.33,240,0.882,bicubic +densenet201,92.750,7.250,98.230,1.770,20.01,224,0.875,bicubic +res2net50_14w_8s,92.740,7.260,98.180,1.820,25.06,224,0.875,bilinear +tv_resnext50_32x4d,92.740,7.260,98.270,1.730,25.03,224,0.875,bilinear +inception_v3,92.720,7.280,97.970,2.030,23.83,299,0.875,bicubic +seresnext26d_32x4d,92.700,7.300,98.150,1.850,16.81,224,0.875,bicubic +efficientnet_b0,92.690,7.310,98.070,1.930,5.29,224,0.875,bicubic +resnet34d,92.680,7.320,98.310,1.690,21.82,224,0.875,bicubic +tf_efficientnet_lite2,92.650,7.350,98.230,1.770,6.09,260,0.890,bicubic +legacy_seresnext26_32x4d,92.640,7.360,98.130,1.870,16.79,224,0.875,bicubic +tf_efficientnet_lite1,92.620,7.380,98.080,1.920,5.42,240,0.882,bicubic +tf_efficientnet_cc_b0_4e,92.590,7.410,98.080,1.920,13.31,224,0.875,bicubic +hardcorenas_e,92.570,7.430,98.110,1.890,8.07,224,0.875,bilinear +res2net50_48w_2s,92.550,7.450,98.080,1.920,25.29,224,0.875,bilinear +gluon_resnet50_v1b,92.540,7.460,98.170,1.830,25.56,224,0.875,bicubic +densenet161,92.500,7.500,98.290,1.710,28.68,224,0.875,bicubic +res2net50_26w_4s,92.500,7.500,98.060,1.940,25.70,224,0.875,bilinear +mixnet_m,92.430,7.570,97.870,2.130,5.01,224,0.875,bicubic +hardcorenas_d,92.400,7.600,98.070,1.930,7.50,224,0.875,bilinear +mobilenetv2_120d,92.400,7.600,98.050,1.950,5.83,224,0.875,bicubic +skresnet34,92.390,7.610,98.150,1.850,22.28,224,0.875,bicubic +tf_mixnet_m,92.330,7.670,97.890,2.110,5.01,224,0.875,bicubic +hrnet_w18,92.320,7.680,98.240,1.760,21.30,224,0.875,bilinear +ese_vovnet19b_dw,92.290,7.710,98.090,1.910,6.54,224,0.875,bicubic +selecsls42b,92.280,7.720,98.150,1.850,32.46,224,0.875,bicubic +mobilenetv3_large_100_miil,92.260,7.740,97.640,2.360,5.48,224,0.875,bilinear +tf_efficientnet_b0,92.250,7.750,98.000,2.000,5.29,224,0.875,bicubic +dla60,92.230,7.770,98.110,1.890,22.04,224,0.875,bilinear +resmlp_12_224,92.210,7.790,98.160,1.840,15.35,224,0.875,bicubic +tf_efficientnet_b0_ap,92.200,7.800,98.020,1.980,5.29,224,0.875,bicubic +regnetx_016,92.170,7.830,98.210,1.790,9.19,224,0.875,bicubic +gernet_s,92.140,7.860,98.190,1.810,8.17,224,0.875,bilinear +resnet26d,92.070,7.930,97.960,2.040,16.01,224,0.875,bicubic +vit_small_patch32_224,92.040,7.960,98.230,1.770,22.88,224,0.900,bicubic +vit_tiny_r_s16_p8_384,92.040,7.960,98.290,1.710,6.36,384,1.000,bicubic +hardcorenas_c,92.020,7.980,97.840,2.160,5.52,224,0.875,bilinear +dpn68,92.010,7.990,98.050,1.950,12.61,224,0.875,bicubic +tf_efficientnet_es,91.980,8.020,97.860,2.140,5.44,224,0.875,bicubic +levit_128s,91.970,8.030,98.060,1.940,7.78,224,0.900,bicubic 
+repvgg_a2,91.940,8.060,98.150,1.850,28.21,224,0.875,bilinear +densenet169,91.930,8.070,98.100,1.900,14.15,224,0.875,bicubic +densenetblur121d,91.910,8.090,98.070,1.930,8.00,224,0.875,bicubic +tv_resnet50,91.880,8.120,98.040,1.960,25.56,224,0.875,bilinear +mixer_b16_224,91.870,8.130,97.250,2.750,59.88,224,0.875,bicubic +mixnet_s,91.830,8.170,97.690,2.310,4.13,224,0.875,bicubic +mobilenetv2_140,91.830,8.170,97.860,2.140,6.11,224,0.875,bicubic +hardcorenas_b,91.770,8.230,97.780,2.220,5.18,224,0.875,bilinear +vit_tiny_patch16_224,91.760,8.240,98.040,1.960,5.72,224,0.900,bicubic +regnety_008,91.750,8.250,98.180,1.820,6.26,224,0.875,bicubic +resnest14d,91.720,8.280,97.870,2.130,10.61,224,0.875,bilinear +densenet121,91.570,8.430,98.030,1.970,7.98,224,0.875,bicubic +tf_mixnet_s,91.510,8.490,97.620,2.380,4.13,224,0.875,bicubic +repvgg_b0,91.430,8.570,97.990,2.010,15.82,224,0.875,bilinear +regnety_006,91.370,8.630,97.710,2.290,6.06,224,0.875,bicubic +hardcorenas_a,91.350,8.650,97.860,2.140,5.26,224,0.875,bilinear +mobilenetv3_large_100,91.320,8.680,97.710,2.290,5.48,224,0.875,bicubic +semnasnet_100,91.280,8.720,97.560,2.440,3.89,224,0.875,bicubic +tf_mobilenetv3_large_100,91.240,8.760,97.660,2.340,5.48,224,0.875,bilinear +mobilenetv3_rw,91.210,8.790,97.660,2.340,5.48,224,0.875,bicubic +hrnet_w18_small_v2,91.190,8.810,97.900,2.100,15.60,224,0.875,bilinear +efficientnet_es_pruned,91.180,8.820,97.750,2.250,5.44,224,0.875,bicubic +efficientnet_lite0,91.140,8.860,97.630,2.370,4.65,224,0.875,bicubic +resnet34,91.130,8.870,97.620,2.380,21.80,224,0.875,bilinear +resnet26,91.110,8.890,97.740,2.260,16.00,224,0.875,bicubic +regnetx_008,91.050,8.950,97.710,2.290,7.26,224,0.875,bicubic +tf_efficientnet_lite0,91.040,8.960,97.590,2.410,4.65,224,0.875,bicubic +gluon_resnet34_v1b,90.960,9.040,97.630,2.370,21.80,224,0.875,bicubic +mobilenetv2_110d,90.950,9.050,97.550,2.450,4.52,224,0.875,bicubic +pit_ti_distilled_224,90.900,9.100,97.700,2.300,5.10,224,0.900,bicubic +legacy_seresnet34,90.890,9.110,97.580,2.420,21.96,224,0.875,bilinear +tv_densenet121,90.890,9.110,97.710,2.290,7.98,224,0.875,bicubic +dla34,90.760,9.240,97.660,2.340,15.74,224,0.875,bilinear +deit_tiny_distilled_patch16_224,90.700,9.300,97.570,2.430,5.91,224,0.900,bicubic +fbnetc_100,90.700,9.300,97.210,2.790,5.57,224,0.875,bilinear +swsl_resnet18,90.690,9.310,97.700,2.300,11.69,224,0.875,bilinear +convit_tiny,90.630,9.370,97.740,2.260,5.71,224,0.875,bicubic +mnasnet_100,90.510,9.490,97.470,2.530,4.38,224,0.875,bicubic +regnety_004,90.500,9.500,97.540,2.460,4.34,224,0.875,bicubic +regnetx_006,90.350,9.650,97.430,2.570,6.20,224,0.875,bicubic +spnasnet_100,90.350,9.650,97.190,2.810,4.42,224,0.875,bilinear +ssl_resnet18,90.220,9.780,97.550,2.450,11.69,224,0.875,bilinear +vgg16_bn,90.090,9.910,97.370,2.630,138.37,224,0.875,bilinear +vgg19_bn,90.080,9.920,97.580,2.420,143.68,224,0.875,bilinear +ghostnet_100,90.020,9.980,97.370,2.630,5.18,224,0.875,bilinear +pit_ti_224,89.940,10.060,97.450,2.550,4.85,224,0.900,bicubic +tv_resnet34,89.940,10.060,97.340,2.660,21.80,224,0.875,bilinear +tf_mobilenetv3_large_075,89.680,10.320,97.210,2.790,3.99,224,0.875,bilinear +deit_tiny_patch16_224,89.670,10.330,97.450,2.550,5.72,224,0.900,bicubic +skresnet18,89.660,10.340,97.230,2.770,11.96,224,0.875,bicubic +mobilenetv2_100,89.600,10.400,97.140,2.860,3.50,224,0.875,bicubic +resnet18d,89.280,10.720,97.150,2.850,11.71,224,0.875,bicubic +vit_tiny_r_s16_p8_224,89.170,10.830,97.230,2.770,6.34,224,0.900,bicubic +hrnet_w18_small,89.050,10.950,97.110,2.890,13.19,224,0.875,bilinear 
+vgg19,89.040,10.960,96.870,3.130,143.67,224,0.875,bilinear +tf_mobilenetv3_large_minimal_100,88.970,11.030,96.860,3.140,3.92,224,0.875,bilinear +regnetx_004,88.900,11.100,97.120,2.880,5.16,224,0.875,bicubic +legacy_seresnet18,88.880,11.120,96.980,3.020,11.78,224,0.875,bicubic +vgg13_bn,88.760,11.240,96.970,3.030,133.05,224,0.875,bilinear +vgg16,88.550,11.450,96.790,3.210,138.36,224,0.875,bilinear +gluon_resnet18_v1b,88.400,11.600,96.680,3.320,11.69,224,0.875,bicubic +vgg11_bn,87.500,12.500,96.820,3.180,132.87,224,0.875,bilinear +resnet18,87.390,12.610,96.290,3.710,11.69,224,0.875,bilinear +regnety_002,87.380,12.620,96.590,3.410,3.16,224,0.875,bicubic +mixer_l16_224,87.150,12.850,93.520,6.480,208.20,224,0.875,bicubic +vgg13,87.050,12.950,96.320,3.680,133.05,224,0.875,bilinear +vgg11,86.550,13.450,96.280,3.720,132.86,224,0.875,bilinear +dla60x_c,86.290,13.710,96.160,3.840,1.32,224,0.875,bilinear +regnetx_002,86.190,13.810,95.980,4.020,2.68,224,0.875,bicubic +tf_mobilenetv3_small_100,85.190,14.810,95.770,4.230,2.54,224,0.875,bilinear +dla46x_c,84.250,15.750,95.270,4.730,1.07,224,0.875,bilinear +dla46_c,83.650,16.350,94.920,5.080,1.30,224,0.875,bilinear +tf_mobilenetv3_small_075,83.520,16.480,94.790,5.210,2.04,224,0.875,bilinear +tf_mobilenetv3_small_minimal_100,81.380,18.620,93.670,6.330,2.04,224,0.875,bilinear diff --git a/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-a.csv b/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-a.csv new file mode 100644 index 0000000000..e8409327f3 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-a.csv @@ -0,0 +1,421 @@ +model,top1,top1_err,top5,top5_err,param_count,img_size,cropt_pct,interpolation,top1_diff,top5_diff,rank_diff +tf_efficientnet_l2_ns,84.760,15.240,96.147,3.853,480.31,800,0.960,bicubic,-13.790,-3.673,0 +tf_efficientnet_l2_ns_475,83.373,16.627,95.453,4.547,480.31,475,0.936,bicubic,-15.127,-4.377,0 +vit_large_patch16_384,71.227,28.773,89.840,10.160,304.72,384,1.000,bicubic,-26.993,-9.960,0 +swin_large_patch4_window12_384,69.627,30.373,89.560,10.440,196.74,384,1.000,bicubic,-28.413,-10.130,0 +tf_efficientnet_b7_ns,67.040,32.960,88.667,11.333,66.35,600,0.949,bicubic,-30.870,-11.053,0 +tf_efficientnetv2_l_in21ft1k,66.333,33.667,87.853,12.147,118.52,480,1.000,bicubic,-31.367,-11.817,+3 +swin_base_patch4_window12_384,64.480,35.520,87.493,12.507,87.90,384,1.000,bicubic,-33.410,-12.217,-1 +vit_base_patch16_384,63.693,36.307,86.707,13.293,86.86,384,1.000,bicubic,-34.147,-12.963,0 +cait_m48_448,62.373,37.627,86.453,13.547,356.46,448,1.000,bicubic,-35.107,-13.097,+10 +tf_efficientnet_b6_ns,62.267,37.733,85.173,14.827,43.04,528,0.942,bicubic,-35.363,-14.407,+2 +vit_large_r50_s32_384,61.507,38.493,83.960,16.040,329.09,384,1.000,bicubic,-36.353,-15.710,-4 +tf_efficientnetv2_m_in21ft1k,61.387,38.613,85.413,14.587,54.14,480,1.000,bicubic,-36.093,-14.117,+8 +ig_resnext101_32x48d,61.013,38.987,83.347,16.653,828.41,224,0.875,bilinear,-36.607,-16.353,0 +swin_large_patch4_window7_224,60.893,39.107,85.840,14.160,196.53,224,0.900,bicubic,-36.757,-13.740,-4 +resnetv2_152x4_bitm,60.787,39.213,83.573,16.427,936.53,480,1.000,bilinear,-36.703,-16.037,+3 +tf_efficientnet_b5_ns,60.320,39.680,84.493,15.507,30.39,456,0.934,bicubic,-37.180,-15.137,+1 +dm_nfnet_f6,59.160,40.840,82.333,17.667,438.36,576,0.956,bicubic,-38.440,-17.217,-3 +dm_nfnet_f5,58.573,41.427,82.773,17.227,377.21,544,0.954,bicubic,-38.967,-16.797,-2 
+dm_nfnet_f4,58.120,41.880,81.987,18.013,316.07,512,0.951,bicubic,-39.460,-17.523,-4 +ig_resnext101_32x32d,58.093,41.907,80.653,19.347,468.53,224,0.875,bilinear,-39.267,-19.027,+2 +cait_m36_384,57.840,42.160,84.813,15.187,271.22,384,1.000,bicubic,-39.560,-14.697,0 +dm_nfnet_f3,55.827,44.173,80.947,19.053,254.92,416,0.940,bicubic,-41.523,-18.613,+1 +vit_large_patch16_224,55.627,44.373,80.093,19.907,304.33,224,0.900,bicubic,-42.013,-19.497,-12 +vit_base_r50_s16_384,54.627,45.373,81.213,18.787,98.95,384,1.000,bicubic,-42.553,-18.347,+6 +cait_s36_384,54.413,45.587,81.360,18.640,68.37,384,1.000,bicubic,-42.917,-18.170,-1 +resnetv2_101x3_bitm,54.027,45.973,81.027,18.973,387.93,448,1.000,bilinear,-42.963,-18.463,+14 +resnetv2_152x2_bitm,54.013,45.987,82.000,18.000,236.34,448,1.000,bilinear,-42.997,-17.590,+11 +tf_efficientnetv2_l,53.187,46.813,79.133,20.867,118.52,480,1.000,bicubic,-44.093,-20.417,-3 +ig_resnext101_32x16d,53.067,46.933,76.907,23.093,194.03,224,0.875,bilinear,-43.753,-22.683,+19 +swin_base_patch4_window7_224,51.453,48.547,79.973,20.027,87.77,224,0.900,bicubic,-45.797,-19.557,-4 +tf_efficientnet_b4_ns,51.213,48.787,79.187,20.813,19.34,380,0.922,bicubic,-45.737,-20.393,+12 +resnetv2_152x2_bit_teacher_384,51.187,48.813,78.493,21.507,236.34,384,1.000,bicubic,-45.643,-20.957,+15 +swsl_resnext101_32x8d,51.187,48.813,78.240,21.760,88.79,224,0.875,bilinear,-46.013,-21.300,-5 +cait_s24_384,49.733,50.267,78.733,21.267,47.06,384,1.000,bicubic,-47.337,-20.697,+2 +deit_base_distilled_patch16_384,49.333,50.667,79.253,20.747,87.63,384,1.000,bicubic,-47.627,-20.227,+7 +tf_efficientnet_b8,48.947,51.053,77.240,22.760,87.41,672,0.954,bicubic,-48.253,-22.330,-9 +dm_nfnet_f2,48.920,51.080,77.160,22.840,193.78,352,0.920,bicubic,-48.100,-22.280,0 +tf_efficientnetv2_s_in21ft1k,48.507,51.493,77.880,22.120,21.46,384,1.000,bicubic,-48.223,-21.540,+14 +resnest269e,48.187,51.813,74.333,25.667,110.93,416,0.928,bicubic,-48.333,-25.017,+30 +resnetv2_50x3_bitm,47.293,52.707,77.333,22.667,217.32,448,1.000,bilinear,-49.417,-22.217,+14 +tf_efficientnet_b8_ap,46.893,53.107,76.507,23.493,87.41,672,0.954,bicubic,-50.217,-23.153,-9 +efficientnetv2_rw_m,46.280,53.720,75.707,24.293,53.24,416,1.000,bicubic,-50.700,-23.833,-1 +swsl_resnext101_32x16d,46.200,53.800,72.200,27.800,194.03,224,0.875,bilinear,-50.400,-27.320,+21 +vit_small_patch16_384,45.933,54.067,76.720,23.280,22.20,384,1.000,bicubic,-50.767,-22.760,+12 +ecaresnet269d,45.893,54.107,75.133,24.867,102.09,352,1.000,bicubic,-51.187,-24.337,-11 +vit_small_r26_s32_384,45.720,54.280,76.067,23.933,36.47,384,1.000,bicubic,-50.960,-23.503,+14 +tf_efficientnetv2_m,45.533,54.467,74.533,25.467,54.14,480,1.000,bicubic,-51.607,-24.877,-16 +tf_efficientnet_b7_ap,45.373,54.627,74.213,25.787,66.35,600,0.949,bicubic,-51.827,-25.287,-19 +dm_nfnet_f1,45.333,54.667,74.107,25.893,132.63,320,0.910,bicubic,-51.587,-25.303,-5 +ig_resnext101_32x8d,45.320,54.680,70.867,29.133,88.79,224,0.875,bilinear,-51.000,-28.563,+30 +eca_nfnet_l2,44.960,55.040,75.893,24.107,56.72,384,1.000,bicubic,-52.130,-23.617,-18 +resnest200e,44.147,55.853,73.467,26.533,70.20,320,0.909,bicubic,-52.463,-25.883,+11 +cait_xs24_384,43.947,56.053,75.187,24.813,26.67,384,1.000,bicubic,-52.603,-24.233,+13 +tresnet_xl_448,43.480,56.520,72.453,27.547,78.44,448,0.875,bilinear,-52.490,-26.677,+41 +vit_base_patch16_224,43.240,56.760,72.920,27.080,86.57,224,0.900,bicubic,-53.640,-26.610,-9 +resnetrs420,43.147,56.853,70.453,29.547,191.89,416,1.000,bicubic,-53.763,-29.007,-11 
+tf_efficientnet_b7,42.960,57.040,73.133,26.867,66.35,600,0.949,bicubic,-54.050,-26.387,-18 +vit_large_r50_s32_224,41.640,58.360,70.227,29.773,328.99,224,0.900,bicubic,-55.150,-29.123,-9 +swsl_resnext101_32x4d,41.560,58.440,71.760,28.240,44.18,224,0.875,bilinear,-54.860,-27.710,+14 +tf_efficientnet_b6_ap,40.800,59.200,71.627,28.373,43.04,528,0.942,bicubic,-56.280,-27.993,-25 +resmlp_big_24_224_in22ft1k,40.373,59.627,74.760,25.240,129.14,224,0.875,bicubic,-56.247,-24.750,+1 +tresnet_l_448,40.200,59.800,69.893,30.107,55.99,448,0.875,bilinear,-55.660,-29.227,+40 +deit_base_patch16_384,40.173,59.827,70.760,29.240,86.86,384,1.000,bicubic,-55.977,-28.380,+22 +resnetrs350,39.960,60.040,68.907,31.093,163.96,384,1.000,bicubic,-56.800,-30.463,-13 +vit_large_patch32_384,38.933,61.067,68.920,31.080,306.63,384,1.000,bicubic,-56.897,-30.230,+39 +resnetv2_101x1_bitm,38.920,61.080,71.040,28.960,44.54,448,1.000,bilinear,-57.180,-28.240,+26 +resnet200d,38.147,61.853,68.613,31.387,64.69,320,1.000,bicubic,-58.573,-30.717,-14 +seresnet152d,37.640,62.360,69.480,30.520,66.84,320,1.000,bicubic,-59.130,-29.970,-18 +eca_nfnet_l1,37.533,62.467,70.947,29.053,41.41,320,1.000,bicubic,-59.167,-28.343,-14 +twins_svt_large,37.200,62.800,69.227,30.773,99.27,224,0.900,bicubic,-59.070,-29.943,+12 +vit_base_patch32_384,37.080,62.920,69.760,30.240,88.30,384,1.000,bicubic,-59.410,-29.650,-1 +efficientnetv2_rw_s,36.787,63.213,68.320,31.680,23.94,384,1.000,bicubic,-59.753,-31.040,-5 +regnety_160,36.747,63.253,69.107,30.893,83.59,288,1.000,bicubic,-59.603,-30.223,+4 +cait_xxs36_384,36.227,63.773,67.800,32.200,17.37,384,1.000,bicubic,-59.623,-31.290,+29 +pit_b_distilled_224,35.627,64.373,69.120,30.880,74.79,224,0.900,bicubic,-61.053,-30.230,-17 +tf_efficientnet_b3_ns,35.520,64.480,67.773,32.227,12.23,300,0.904,bicubic,-60.870,-31.577,-2 +tf_efficientnet_b6,35.213,64.787,67.720,32.280,43.04,528,0.942,bicubic,-61.457,-31.650,-16 +resnetrs270,35.013,64.987,65.480,34.520,129.86,352,1.000,bicubic,-61.677,-33.870,-21 +tf_efficientnet_b5_ap,34.787,65.213,67.493,32.507,30.39,456,0.934,bicubic,-61.893,-31.967,-20 +vit_base_patch16_224_miil,34.507,65.493,65.000,35.000,86.54,224,0.875,bilinear,-61.953,-34.300,-9 +resnet152d,34.320,65.680,65.907,34.093,60.21,320,1.000,bicubic,-62.040,-33.483,-5 +tresnet_m_448,34.107,65.893,64.493,35.507,31.39,448,0.875,bilinear,-60.883,-34.487,+69 +resmlp_big_24_distilled_224,34.067,65.933,69.600,30.400,129.14,224,0.875,bicubic,-62.383,-29.710,-11 +twins_pcpvt_large,33.387,66.613,67.933,32.067,60.99,224,0.900,bicubic,-62.763,-31.247,+4 +pit_b_224,33.173,66.827,62.320,37.680,73.76,224,0.900,bicubic,-62.467,-36.340,+27 +twins_svt_base,33.173,66.827,65.773,34.227,56.07,224,0.900,bicubic,-62.987,-33.287,-2 +resnetv2_152x2_bit_teacher,33.053,66.947,64.267,35.733,236.34,224,0.875,bicubic,-63.047,-35.013,+4 +swsl_resnext50_32x4d,33.013,66.987,65.067,34.933,25.03,224,0.875,bilinear,-62.857,-34.183,+11 +ssl_resnext101_32x16d,32.600,67.400,64.000,36.000,194.03,224,0.875,bilinear,-63.200,-35.180,+16 +swin_small_patch4_window7_224,32.600,67.400,65.440,34.560,49.61,224,0.900,bicubic,-63.310,-33.580,+7 +tf_efficientnet_b5,31.840,68.160,65.293,34.707,30.39,456,0.934,bicubic,-64.510,-34.017,-13 +resnest101e,31.413,68.587,64.360,35.640,48.28,256,0.875,bilinear,-64.447,-34.850,+8 +cait_s24_224,31.200,68.800,64.560,35.440,46.92,224,1.000,bicubic,-65.180,-34.590,-18 +efficientnet_b4,30.867,69.133,64.600,35.400,19.34,384,1.000,bicubic,-65.283,-34.600,-7 
+resnetrs200,30.773,69.227,63.320,36.680,93.21,320,1.000,bicubic,-65.757,-36.030,-27 +dm_nfnet_f0,30.547,69.453,62.867,37.133,71.49,256,0.900,bicubic,-65.603,-36.383,-10 +cait_xxs24_384,30.027,69.973,63.933,36.067,12.03,384,1.000,bicubic,-65.233,-35.027,+33 +twins_pcpvt_base,29.960,70.040,64.587,35.413,43.83,224,0.900,bicubic,-65.830,-34.543,+8 +swsl_resnet50,29.867,70.133,63.853,36.147,25.56,224,0.875,bilinear,-65.543,-35.437,+26 +deit_base_distilled_patch16_224,29.600,70.400,64.453,35.547,87.34,224,0.900,bicubic,-66.490,-34.737,-7 +convit_base,29.520,70.480,61.787,38.213,86.54,224,0.875,bicubic,-66.030,-37.083,+13 +ssl_resnext101_32x8d,29.040,70.960,60.973,39.027,88.79,224,0.875,bilinear,-66.430,-38.137,+18 +tf_efficientnetv2_s,29.040,70.960,61.213,38.787,21.46,384,1.000,bicubic,-67.300,-37.987,-24 +resnet101d,28.987,71.013,62.053,37.947,44.57,320,1.000,bicubic,-67.303,-37.177,-23 +resnetrs152,28.920,71.080,60.520,39.480,86.62,320,1.000,bicubic,-67.660,-38.720,-40 +coat_lite_small,27.547,72.453,58.547,41.453,19.84,224,0.900,bicubic,-67.993,-40.313,+9 +deit_base_patch16_224,27.440,72.560,58.893,41.107,86.57,224,0.900,bicubic,-68.000,-39.947,+14 +resnetv2_50x1_bitm,27.293,72.707,62.853,37.147,25.55,448,1.000,bilinear,-67.717,-36.207,+40 +vit_small_patch16_224,27.053,72.947,59.213,40.787,22.05,224,0.900,bicubic,-68.317,-39.937,+17 +tf_efficientnet_b4,26.293,73.707,60.107,39.893,19.34,380,0.922,bicubic,-69.607,-39.063,-12 +tf_efficientnet_b4_ap,26.240,73.760,60.227,39.773,19.34,380,0.922,bicubic,-69.920,-39.053,-28 +nfnet_l0,26.213,73.787,61.720,38.280,35.07,288,1.000,bicubic,-69.907,-37.520,-22 +regnety_032,26.213,73.787,60.987,39.013,19.44,288,1.000,bicubic,-69.757,-38.203,-19 +ecaresnet50t,26.133,73.867,60.027,39.973,25.57,320,0.950,bicubic,-69.377,-39.093,+4 +ecaresnet101d,26.027,73.973,58.987,41.013,44.57,224,0.875,bicubic,-69.503,-40.143,+1 +visformer_small,25.840,74.160,58.907,41.093,40.22,224,0.900,bicubic,-69.650,-39.993,+3 +coat_mini,25.520,74.480,57.693,42.307,10.34,224,0.900,bicubic,-69.450,-41.087,+35 +resnetv2_50x1_bit_distilled,25.107,74.893,59.613,40.387,25.55,224,0.875,bicubic,-71.023,-39.667,-29 +convit_small,25.093,74.907,57.280,42.720,27.78,224,0.875,bicubic,-70.107,-41.620,+14 +eca_nfnet_l0,24.827,75.173,60.093,39.907,24.14,288,1.000,bicubic,-71.123,-39.117,-24 +tnt_s_patch16_224,24.733,75.267,58.187,41.813,23.76,224,0.900,bicubic,-70.307,-40.693,+24 +ssl_resnext101_32x4d,24.173,75.827,57.413,42.587,44.18,224,0.875,bilinear,-71.267,-41.717,0 +twins_svt_small,24.133,75.867,57.147,42.853,24.06,224,0.900,bicubic,-71.067,-41.733,+11 +vit_small_r26_s32_224,24.080,75.920,56.213,43.787,36.43,224,0.900,bicubic,-71.550,-42.977,-11 +tf_efficientnet_b2_ns,24.013,75.987,57.293,42.707,9.11,260,0.890,bicubic,-71.757,-41.827,-18 +vit_small_patch32_384,23.773,76.227,57.307,42.693,22.92,384,1.000,bicubic,-71.277,-41.683,+17 +nasnetalarge,23.493,76.507,55.027,44.973,88.75,331,0.911,bicubic,-72.187,-43.903,-16 +levit_384,23.440,76.560,56.387,43.613,39.13,224,0.900,bicubic,-72.090,-42.663,-11 +pnasnet5large,23.333,76.667,53.640,46.360,86.06,331,0.911,bicubic,-72.377,-45.280,-19 +efficientnet_b3,23.213,76.787,55.960,44.040,12.23,320,1.000,bicubic,-72.497,-43.080,-21 +resmlp_big_24_224,22.853,77.147,54.307,45.693,129.14,224,0.875,bicubic,-71.807,-44.173,+38 +twins_pcpvt_small,22.720,77.280,56.853,43.147,24.11,224,0.900,bicubic,-72.490,-42.027,0 +vit_base_patch32_224,22.400,77.600,53.933,46.067,88.22,224,0.900,bicubic,-72.600,-45.097,+17 
+pit_s_distilled_224,22.360,77.640,57.120,42.880,24.04,224,0.900,bicubic,-72.880,-41.930,-3 +tresnet_m,21.680,78.320,53.840,46.160,31.39,224,0.875,bilinear,-74.040,-45.190,-27 +swin_tiny_patch4_window7_224,21.173,78.827,55.973,44.027,28.29,224,0.900,bicubic,-73.967,-42.877,+1 +pit_s_224,21.080,78.920,53.573,46.427,23.46,224,0.900,bicubic,-73.510,-45.137,+37 +resnet51q,20.960,79.040,55.720,44.280,35.70,288,1.000,bilinear,-74.900,-43.400,-37 +resnetrs101,20.893,79.107,52.813,47.187,63.62,288,0.940,bicubic,-74.537,-46.217,-15 +deit_small_distilled_patch16_224,20.707,79.293,55.133,44.867,22.44,224,0.900,bicubic,-74.003,-43.897,+26 +resnest50d_4s2x40d,20.387,79.613,52.800,47.200,30.42,224,0.875,bicubic,-74.573,-46.270,+12 +ssl_resnext50_32x4d,20.000,80.000,53.613,46.387,25.03,224,0.875,bilinear,-74.870,-45.267,+18 +tresnet_xl,19.640,80.360,53.133,46.867,78.44,224,0.875,bilinear,-75.800,-45.917,-20 +gluon_senet154,19.307,80.693,47.533,52.467,115.09,224,0.875,bicubic,-75.613,-51.227,+12 +rexnet_200,19.227,80.773,52.720,47.280,16.37,224,0.875,bicubic,-75.713,-46.290,+9 +levit_256,19.200,80.800,50.067,49.933,18.89,224,0.900,bicubic,-75.810,-48.823,+1 +repvgg_b3,19.107,80.893,50.253,49.747,123.09,224,0.875,bilinear,-75.463,-48.527,+28 +legacy_senet154,19.053,80.947,47.947,52.053,115.09,224,0.875,bilinear,-76.017,-50.883,-6 +mixer_b16_224_miil,19.053,80.947,51.227,48.773,59.88,224,0.875,bilinear,-76.247,-47.653,-21 +deit_small_patch16_224,18.907,81.093,51.413,48.587,22.05,224,0.900,bicubic,-75.493,-47.277,+41 +gluon_seresnext101_64x4d,18.907,81.093,49.187,50.813,88.23,224,0.875,bicubic,-76.023,-49.643,+4 +tf_efficientnet_b1_ns,18.693,81.307,51.667,48.333,7.79,240,0.882,bicubic,-76.477,-47.443,-17 +seresnext50_32x4d,18.360,81.640,50.973,49.027,27.56,224,0.875,bicubic,-76.680,-47.957,-9 +cait_xxs36_224,18.253,81.747,49.427,50.573,17.30,224,1.000,bicubic,-76.007,-49.293,+47 +ecaresnet50d,18.227,81.773,51.880,48.120,25.58,224,0.875,bicubic,-76.403,-47.010,+15 +tf_efficientnet_lite4,18.133,81.867,50.707,49.293,13.01,380,0.920,bilinear,-76.757,-48.143,+2 +vit_tiny_patch16_384,18.027,81.973,50.307,49.693,5.79,384,1.000,bicubic,-75.623,-48.293,+97 +resnest50d_1s4x24d,17.693,82.307,49.800,50.200,25.68,224,0.875,bicubic,-77.057,-49.180,+5 +resnest50d,17.373,82.627,50.707,49.293,27.48,224,0.875,bilinear,-77.457,-48.173,+1 +gluon_seresnext101_32x4d,17.373,82.627,46.373,53.627,48.96,224,0.875,bicubic,-77.547,-52.437,-2 +efficientnet_el,17.347,82.653,49.987,50.013,10.59,300,0.904,bicubic,-77.773,-49.003,-23 +inception_v4,17.267,82.733,45.920,54.080,42.68,299,0.875,bicubic,-77.113,-52.660,+31 +tf_efficientnet_b3_ap,17.187,82.813,49.680,50.320,12.23,300,0.904,bicubic,-78.133,-49.220,-36 +tf_efficientnet_b3,17.000,83.000,49.267,50.733,12.23,300,0.904,bicubic,-78.010,-49.643,-15 +xception71,17.000,83.000,45.520,54.480,42.34,299,0.903,bicubic,-77.280,-53.120,+35 +resmlp_36_distilled_224,16.880,83.120,51.467,48.533,44.69,224,0.875,bicubic,-78.010,-47.553,-7 +gluon_resnext101_64x4d,16.853,83.147,44.213,55.787,83.46,224,0.875,bicubic,-77.817,-54.437,0 +tf_efficientnetv2_b3,16.667,83.333,48.680,51.320,14.36,300,0.904,bicubic,-78.493,-50.140,-32 +tresnet_l,16.600,83.400,49.920,50.080,55.99,224,0.875,bilinear,-78.690,-49.090,-40 +gluon_resnet152_v1d,16.573,83.427,44.280,55.720,60.21,224,0.875,bicubic,-78.167,-54.460,-6 +gluon_resnet152_v1s,16.573,83.427,44.533,55.467,60.32,224,0.875,bicubic,-78.467,-54.297,-25 +inception_resnet_v2,16.573,83.427,44.960,55.040,55.84,299,0.897,bicubic,-77.967,-53.830,+6 
+resmlp_24_distilled_224,16.467,83.533,50.387,49.613,30.02,224,0.875,bicubic,-77.993,-48.353,+13 +gluon_xception65,16.440,83.560,46.027,53.973,39.92,299,0.903,bicubic,-77.820,-52.543,+28 +gernet_l,16.373,83.627,47.213,52.787,31.08,256,0.875,bilinear,-78.717,-51.687,-36 +wide_resnet50_2,16.280,83.720,48.347,51.653,68.88,224,0.875,bicubic,-78.800,-50.623,-35 +ens_adv_inception_resnet_v2,16.240,83.760,43.640,56.360,55.84,299,0.897,bicubic,-77.920,-54.960,+38 +repvgg_b3g4,16.213,83.787,47.653,52.347,83.83,224,0.875,bilinear,-78.307,-51.317,+3 +xception65,16.027,83.973,43.773,56.227,39.92,299,0.903,bicubic,-77.733,-54.597,+66 +ssl_resnet50,15.960,84.040,49.467,50.533,25.56,224,0.875,bilinear,-78.490,-49.453,+8 +regnety_320,15.627,84.373,44.827,55.173,145.05,224,0.875,bicubic,-78.913,-54.023,-2 +ecaresnet101d_pruned,15.600,84.400,48.027,51.973,24.88,224,0.875,bicubic,-79.480,-50.953,-42 +ecaresnet26t,15.467,84.533,47.920,52.080,16.01,320,0.950,bicubic,-78.843,-50.720,+16 +coat_tiny,15.413,84.587,45.600,54.400,5.50,224,0.900,bicubic,-78.177,-52.830,+74 +skresnext50_32x4d,15.373,84.627,44.493,55.507,27.48,224,0.875,bicubic,-78.887,-53.967,+19 +ecaresnetlight,15.160,84.840,45.827,54.173,30.16,224,0.875,bicubic,-79.610,-52.973,-25 +cait_xxs24_224,15.160,84.840,44.960,55.040,11.96,224,1.000,bicubic,-78.440,-53.480,+71 +levit_192,14.893,85.107,44.920,55.080,10.95,224,0.900,bicubic,-79.277,-53.620,+25 +rexnet_150,14.720,85.280,46.907,53.093,9.73,224,0.875,bicubic,-79.760,-51.903,-5 +coat_lite_mini,14.507,85.493,44.507,55.493,11.01,224,0.900,bicubic,-79.553,-54.053,+35 +efficientnet_el_pruned,14.480,85.520,46.120,53.880,10.59,300,0.904,bicubic,-79.920,-52.620,+1 +efficientnet_b2,14.440,85.560,46.080,53.920,9.11,288,1.000,bicubic,-80.170,-52.630,-19 +legacy_seresnext101_32x4d,14.147,85.853,42.973,57.027,48.96,224,0.875,bilinear,-80.223,-55.677,+1 +seresnet50,14.147,85.853,45.467,54.533,28.09,224,0.875,bicubic,-80.403,-53.283,-17 +gernet_m,14.013,85.987,46.067,53.933,21.14,224,0.875,bilinear,-80.607,-52.793,-23 +gluon_resnext101_32x4d,13.867,86.133,41.653,58.347,44.18,224,0.875,bicubic,-80.663,-56.977,-16 +gluon_seresnext50_32x4d,13.600,86.400,43.760,56.240,27.56,224,0.875,bicubic,-80.740,-54.850,-1 +resmlp_36_224,13.507,86.493,46.693,53.307,44.69,224,0.875,bicubic,-80.683,-51.647,+12 +repvgg_b2g4,13.440,86.560,43.787,56.213,61.76,224,0.875,bilinear,-80.420,-54.863,+38 +ese_vovnet39b,13.320,86.680,43.813,56.187,24.57,224,0.875,bicubic,-80.770,-54.847,+23 +regnetx_320,13.307,86.693,40.720,59.280,107.81,224,0.875,bicubic,-81.153,-58.050,-14 +pit_xs_distilled_224,13.240,86.760,44.573,55.427,11.00,224,0.900,bicubic,-80.570,-54.097,+40 +efficientnet_b3_pruned,13.173,86.827,45.213,54.787,9.86,300,0.904,bicubic,-81.457,-53.547,-32 +gluon_resnet101_v1d,13.160,86.840,41.493,58.507,44.57,224,0.875,bicubic,-81.060,-57.057,+4 +mixnet_xl,13.120,86.880,43.253,56.747,11.90,224,0.875,bicubic,-81.070,-55.407,+6 +nf_regnet_b1,12.947,87.053,44.400,55.600,10.22,288,0.900,bicubic,-81.173,-54.230,+14 +pit_xs_224,12.813,87.187,42.840,57.160,10.62,224,0.900,bicubic,-80.297,-55.550,+84 +gluon_inception_v3,12.640,87.360,40.493,59.507,23.83,299,0.875,bicubic,-80.820,-58.077,+59 +coat_lite_tiny,12.520,87.480,41.160,58.840,5.72,224,0.900,bicubic,-80.720,-57.100,+74 +resmlp_24_224,12.493,87.507,43.427,56.573,30.02,224,0.875,bicubic,-81.527,-54.903,+16 +regnety_120,12.427,87.573,42.200,57.800,51.82,224,0.875,bicubic,-82.053,-56.480,-28 +efficientnet_em,12.360,87.640,43.880,56.120,6.90,240,0.882,bicubic,-81.480,-54.930,+27 
+hrnet_w64,12.027,87.973,40.787,59.213,128.06,224,0.875,bilinear,-81.983,-57.823,+15 +cspdarknet53,12.013,87.987,43.253,56.747,27.64,256,0.887,bilinear,-82.647,-55.547,-46 +gluon_resnet101_v1s,11.880,88.120,40.973,59.027,44.67,224,0.875,bicubic,-82.840,-57.847,-50 +gmixer_24_224,11.853,88.147,37.773,62.227,24.72,224,0.875,bicubic,-80.977,-60.407,+89 +nf_resnet50,11.760,88.240,45.933,54.067,25.56,288,0.940,bicubic,-82.800,-52.857,-41 +resnet50d,11.693,88.307,42.453,57.547,25.58,224,0.875,bicubic,-82.567,-56.267,-15 +dpn92,11.627,88.373,40.267,59.733,37.67,224,0.875,bicubic,-82.603,-58.463,-13 +xception41,11.600,88.400,39.133,60.867,26.97,299,0.903,bicubic,-81.830,-59.297,+50 +dla102x2,11.573,88.427,41.293,58.707,41.28,224,0.875,bilinear,-82.377,-57.197,+9 +vit_small_patch32_224,11.480,88.520,39.573,60.427,22.88,224,0.900,bicubic,-80.560,-58.657,+119 +levit_128,11.427,88.573,40.267,59.733,9.21,224,0.900,bicubic,-81.913,-58.113,+51 +regnety_080,11.413,88.587,40.613,59.387,39.18,224,0.875,bicubic,-82.757,-58.067,-10 +efficientnet_b2_pruned,11.360,88.640,42.027,57.973,8.31,260,0.890,bicubic,-82.780,-56.503,-7 +tf_efficientnet_el,11.333,88.667,42.040,57.960,10.59,300,0.904,bicubic,-83.077,-56.670,-36 +gluon_resnet152_v1c,11.093,88.907,37.120,62.880,60.21,224,0.875,bicubic,-83.067,-61.520,-11 +vit_tiny_r_s16_p8_384,11.093,88.907,39.987,60.013,6.36,384,1.000,bicubic,-80.947,-58.303,+114 +dpn107,11.080,88.920,38.693,61.307,86.92,224,0.875,bicubic,-83.230,-60.027,-31 +hrnet_w48,11.080,88.920,40.320,59.680,77.47,224,0.875,bilinear,-82.840,-58.290,+2 +ecaresnet50d_pruned,11.027,88.973,41.947,58.053,19.94,224,0.875,bicubic,-83.193,-56.783,-24 +tf_efficientnetv2_b2,11.027,88.973,39.760,60.240,10.10,260,0.890,bicubic,-83.393,-58.810,-43 +adv_inception_v3,11.013,88.987,36.720,63.280,23.83,299,0.875,bicubic,-81.867,-61.420,+68 +tf_efficientnet_b0_ns,10.933,89.067,40.067,59.933,5.29,224,0.875,bicubic,-82.697,-58.573,+21 +tf_inception_v3,10.840,89.160,36.853,63.147,23.83,299,0.875,bicubic,-82.480,-61.177,+42 +resnext50_32x4d,10.800,89.200,40.307,59.693,25.03,224,0.875,bicubic,-83.300,-58.043,-14 +dpn131,10.787,89.213,37.200,62.800,79.25,224,0.875,bicubic,-83.223,-61.520,-10 +tf_efficientnet_b2_ap,10.533,89.467,40.107,59.893,9.11,260,0.890,bicubic,-83.957,-58.513,-56 +resnext50d_32x4d,10.413,89.587,39.733,60.267,25.05,224,0.875,bicubic,-83.767,-58.837,-27 +rexnet_130,10.400,89.600,41.547,58.453,7.56,224,0.875,bicubic,-83.500,-56.853,-7 +hrnet_w44,10.320,89.680,39.507,60.493,67.06,224,0.875,bilinear,-83.230,-59.193,+19 +resnext101_32x8d,10.187,89.813,37.827,62.173,88.79,224,0.875,bilinear,-83.643,-60.753,-2 +regnetx_160,10.147,89.853,38.000,62.000,54.28,224,0.875,bicubic,-83.973,-60.750,-22 +dpn98,10.133,89.867,36.587,63.413,61.57,224,0.875,bicubic,-83.997,-61.983,-25 +cspresnext50,10.120,89.880,40.373,59.627,20.57,224,0.875,bilinear,-84.360,-58.417,-60 +legacy_seresnext50_32x4d,10.107,89.893,39.200,60.800,27.56,224,0.875,bilinear,-83.623,-59.380,+3 +resnetrs50,10.093,89.907,37.507,62.493,35.69,224,0.910,bicubic,-84.217,-60.973,-50 +inception_v3,10.027,89.973,35.227,64.773,23.83,299,0.875,bicubic,-82.693,-62.743,+64 +efficientnet_b1,10.013,89.987,37.547,62.453,7.79,256,1.000,bicubic,-83.237,-60.743,+33 +xception,9.987,90.013,38.027,61.973,22.86,299,0.897,bicubic,-83.473,-60.503,+18 +regnety_064,9.947,90.053,39.067,60.933,30.58,224,0.875,bicubic,-84.203,-59.663,-34 +dpn68b,9.787,90.213,38.053,61.947,12.61,224,0.875,bicubic,-83.903,-60.307,0 
+gluon_resnet152_v1b,9.747,90.253,36.067,63.933,60.19,224,0.875,bicubic,-84.333,-62.383,-29 +tf_efficientnet_lite3,9.667,90.333,39.000,61.000,8.20,300,0.904,bilinear,-84.533,-59.640,-45 +tf_efficientnet_b2,9.653,90.347,38.880,61.120,9.11,260,0.890,bicubic,-84.707,-59.730,-60 +tf_efficientnet_cc_b1_8e,9.573,90.427,36.773,63.227,39.72,240,0.882,bicubic,-84.327,-61.487,-22 +res2net101_26w_4s,9.520,90.480,35.027,64.973,45.21,224,0.875,bilinear,-84.230,-63.283,-10 +legacy_seresnet152,9.347,90.653,37.413,62.587,66.82,224,0.875,bilinear,-84.053,-60.937,+14 +cspresnet50,9.253,90.747,39.640,60.360,21.62,256,0.887,bilinear,-84.487,-59.000,-11 +hrnet_w40,9.227,90.773,36.893,63.107,57.56,224,0.875,bilinear,-84.263,-61.687,+6 +regnetx_120,9.187,90.813,37.200,62.800,46.11,224,0.875,bicubic,-85.053,-61.450,-56 +seresnext26d_32x4d,9.147,90.853,36.840,63.160,16.81,224,0.875,bicubic,-83.553,-61.310,+51 +resnest26d,9.080,90.920,37.853,62.147,17.07,224,0.875,bilinear,-84.250,-60.777,+13 +vit_tiny_patch16_224,9.067,90.933,34.573,65.427,5.72,224,0.900,bicubic,-82.693,-63.467,+91 +regnety_040,9.000,91.000,37.053,62.947,20.65,224,0.875,bicubic,-84.860,-61.537,-27 +gluon_resnext50_32x4d,8.947,91.053,36.333,63.667,25.03,224,0.875,bicubic,-84.863,-62.077,-25 +rexnet_100,8.893,91.107,36.373,63.627,4.80,224,0.875,bicubic,-84.137,-61.817,+28 +seresnext26t_32x4d,8.893,91.107,36.907,63.093,16.81,224,0.875,bicubic,-83.927,-61.463,+38 +mixnet_l,8.853,91.147,36.187,63.813,7.33,224,0.875,bicubic,-84.597,-62.033,0 +convit_tiny,8.840,91.160,34.360,65.640,5.71,224,0.875,bicubic,-81.790,-63.380,+113 +mobilenetv3_large_100_miil,8.840,91.160,32.973,67.027,5.48,224,0.875,bilinear,-83.420,-64.667,+62 +levit_128s,8.653,91.347,33.107,66.893,7.78,224,0.900,bicubic,-83.317,-64.953,+74 +dla169,8.640,91.360,36.040,63.960,53.39,224,0.875,bilinear,-84.700,-62.560,0 +hrnet_w30,8.613,91.387,37.040,62.960,37.71,224,0.875,bilinear,-84.587,-61.370,+10 +mixer_b16_224,8.600,91.400,29.413,70.587,59.88,224,0.875,bicubic,-83.270,-67.837,+76 +legacy_seresnet101,8.533,91.467,36.013,63.987,49.33,224,0.875,bilinear,-84.747,-62.497,+5 +tf_efficientnet_b1_ap,8.453,91.547,35.253,64.747,7.79,240,0.882,bicubic,-85.237,-63.257,-26 +repvgg_b2,8.427,91.573,36.467,63.533,89.02,224,0.875,bilinear,-85.073,-62.263,-14 +resmlp_12_distilled_224,8.307,91.693,36.853,63.147,15.35,224,0.875,bicubic,-84.523,-61.287,+25 +resnetblur50,8.240,91.760,37.400,62.600,25.56,224,0.875,bicubic,-85.720,-61.190,-51 +dla102x,8.200,91.800,37.013,62.987,26.31,224,0.875,bilinear,-85.320,-61.497,-18 +hrnet_w32,8.040,91.960,37.507,62.493,41.23,224,0.875,bilinear,-85.490,-60.943,-20 +res2net50_26w_8s,8.000,92.000,33.853,66.147,48.40,224,0.875,bilinear,-85.540,-64.407,-22 +gluon_resnet101_v1c,7.987,92.013,33.360,66.640,44.57,224,0.875,bicubic,-85.683,-65.060,-31 +gluon_resnet50_v1d,7.920,92.080,35.000,65.000,25.58,224,0.875,bicubic,-85.850,-63.390,-41 +dla60_res2next,7.787,92.213,34.987,65.013,17.03,224,0.875,bilinear,-85.393,-63.423,0 +densenetblur121d,7.720,92.280,34.733,65.267,8.00,224,0.875,bicubic,-84.190,-63.337,+62 +deit_tiny_distilled_patch16_224,7.707,92.293,33.560,66.440,5.91,224,0.900,bicubic,-82.993,-64.010,+92 +tf_efficientnetv2_b1,7.693,92.307,34.653,65.347,8.14,240,0.882,bicubic,-86.247,-63.967,-58 +dla60_res2net,7.560,92.440,34.627,65.373,20.85,224,0.875,bilinear,-85.620,-63.793,-5 +efficientnet_b1_pruned,7.440,92.560,34.533,65.467,6.33,240,0.882,bicubic,-85.330,-63.507,+17 +wide_resnet101_2,7.360,92.640,34.147,65.853,126.89,224,0.875,bilinear,-86.360,-64.393,-42 
+regnetx_064,7.333,92.667,34.373,65.627,26.21,224,0.875,bicubic,-86.557,-64.257,-58 +deit_tiny_patch16_224,7.307,92.693,30.707,69.293,5.72,224,0.900,bicubic,-82.363,-66.743,+101 +hardcorenas_e,7.240,92.760,33.293,66.707,8.07,224,0.875,bilinear,-85.330,-64.817,+25 +gluon_resnet101_v1b,7.227,92.773,32.773,67.227,44.55,224,0.875,bicubic,-86.523,-65.607,-50 +efficientnet_b0,7.213,92.787,34.013,65.987,5.29,224,0.875,bicubic,-85.477,-64.057,+17 +gluon_resnet50_v1s,7.213,92.787,33.507,66.493,25.68,224,0.875,bicubic,-86.407,-64.953,-42 +tf_mixnet_l,7.147,92.853,31.613,68.387,7.33,224,0.875,bicubic,-86.163,-66.417,-21 +tf_efficientnet_b1,7.133,92.867,33.040,66.960,7.79,240,0.882,bicubic,-86.367,-65.320,-35 +tf_efficientnet_cc_b0_8e,7.120,92.880,31.787,68.213,24.01,224,0.875,bicubic,-85.710,-66.093,+2 +resmlp_12_224,7.013,92.987,33.947,66.053,15.35,224,0.875,bicubic,-85.197,-64.213,+34 +hardcorenas_f,6.827,93.173,34.093,65.907,8.20,224,0.875,bilinear,-86.123,-64.067,-4 +ese_vovnet19b_dw,6.733,93.267,33.413,66.587,6.54,224,0.875,bicubic,-85.557,-64.677,+27 +selecsls60b,6.733,93.267,33.267,66.733,32.77,224,0.875,bicubic,-86.567,-65.013,-26 +efficientnet_es,6.707,93.293,33.840,66.160,5.44,224,0.875,bicubic,-86.433,-64.580,-19 +res2net50_26w_6s,6.693,93.307,31.653,68.347,37.05,224,0.875,bilinear,-86.717,-66.627,-36 +legacy_seresnext26_32x4d,6.627,93.373,33.253,66.747,16.79,224,0.875,bicubic,-86.013,-64.877,+9 +mixnet_m,6.627,93.373,32.053,67.947,5.01,224,0.875,bicubic,-85.803,-65.817,+16 +pit_ti_distilled_224,6.627,93.373,30.760,69.240,5.10,224,0.900,bicubic,-84.273,-66.940,+66 +skresnet34,6.480,93.520,31.547,68.453,22.28,224,0.875,bicubic,-85.910,-66.603,+17 +repvgg_b1,6.467,93.533,33.827,66.173,57.42,224,0.875,bilinear,-86.863,-64.683,-37 +hardcorenas_d,6.440,93.560,32.213,67.787,7.50,224,0.875,bilinear,-85.960,-65.857,+13 +dla60x,6.427,93.573,34.080,65.920,17.35,224,0.875,bilinear,-86.693,-64.430,-26 +resnet34d,6.400,93.600,31.493,68.507,21.82,224,0.875,bicubic,-86.280,-66.817,0 +regnetx_080,6.307,93.693,32.320,67.680,39.57,224,0.875,bicubic,-87.563,-66.200,-80 +swsl_resnet18,6.240,93.760,31.600,68.400,11.69,224,0.875,bilinear,-84.450,-66.100,+65 +legacy_seresnet50,6.187,93.813,32.653,67.347,28.09,224,0.875,bilinear,-86.773,-65.537,-20 +pit_ti_224,6.120,93.880,30.227,69.773,4.85,224,0.900,bicubic,-83.820,-67.223,+73 +tv_resnet152,6.040,93.960,32.053,67.947,60.19,224,0.875,bilinear,-87.260,-66.337,-40 +regnetx_040,5.973,94.027,31.547,68.453,22.12,224,0.875,bicubic,-87.587,-66.993,-62 +tf_efficientnet_cc_b0_4e,5.973,94.027,29.600,70.400,13.31,224,0.875,bicubic,-86.617,-68.480,-3 +resnet50,5.933,94.067,29.093,70.907,25.56,224,0.875,bicubic,-87.877,-69.297,-80 +tf_efficientnetv2_b0,5.893,94.107,30.773,69.227,7.14,224,0.875,bicubic,-87.217,-67.537,-34 +dla102,5.880,94.120,32.707,67.293,33.27,224,0.875,bilinear,-87.180,-65.833,-33 +mixer_l16_224,5.867,94.133,18.533,81.467,208.20,224,0.875,bicubic,-81.283,-74.987,+85 +regnety_016,5.680,94.320,30.413,69.587,11.20,224,0.875,bicubic,-87.350,-67.947,-33 +selecsls60,5.653,94.347,32.507,67.493,30.67,224,0.875,bicubic,-87.377,-65.793,-32 +hardcorenas_c,5.640,94.360,30.400,69.600,5.52,224,0.875,bilinear,-86.380,-67.440,+14 +res2next50,5.627,94.373,30.867,69.133,24.67,224,0.875,bilinear,-87.213,-67.313,-28 +hrnet_w18,5.493,94.507,30.960,69.040,21.30,224,0.875,bilinear,-86.827,-67.280,-1 +resnest14d,5.480,94.520,28.547,71.453,10.61,224,0.875,bilinear,-86.240,-69.323,+25 +tf_efficientnet_lite2,5.360,94.640,30.907,69.093,6.09,260,0.890,bicubic,-87.290,-67.323,-17 
+tf_efficientnet_em,5.347,94.653,31.107,68.893,6.90,240,0.882,bicubic,-87.583,-67.083,-34 +gernet_s,5.307,94.693,30.133,69.867,8.17,224,0.875,bilinear,-86.833,-68.057,+4 +tf_efficientnet_b0_ap,5.307,94.693,28.813,71.187,5.29,224,0.875,bicubic,-86.893,-69.207,+1 +densenet121,5.293,94.707,29.907,70.093,7.98,224,0.875,bicubic,-86.277,-68.123,+21 +repvgg_b1g4,5.293,94.707,30.813,69.187,39.97,224,0.875,bilinear,-87.687,-67.617,-41 +res2net50_26w_4s,5.160,94.840,29.360,70.640,25.70,224,0.875,bilinear,-87.340,-68.700,-15 +tf_mixnet_m,5.080,94.920,28.147,71.853,5.01,224,0.875,bicubic,-87.250,-69.743,-11 +vit_tiny_r_s16_p8_224,5.080,94.920,27.080,72.920,6.34,224,0.900,bicubic,-84.090,-70.150,+58 +tf_efficientnet_b0,5.067,94.933,28.800,71.200,5.29,224,0.875,bicubic,-87.183,-69.200,-9 +mobilenetv3_large_100,5.067,94.933,28.187,71.813,5.48,224,0.875,bicubic,-86.253,-69.523,+21 +res2net50_14w_8s,5.040,94.960,28.773,71.227,25.06,224,0.875,bilinear,-87.700,-69.407,-34 +hardcorenas_b,4.947,95.053,28.120,71.880,5.18,224,0.875,bilinear,-86.823,-69.660,+9 +mixnet_s,4.907,95.093,28.573,71.427,4.13,224,0.875,bicubic,-86.923,-69.117,+6 +mobilenetv3_rw,4.907,95.093,29.853,70.147,5.48,224,0.875,bicubic,-86.303,-67.807,+19 +gluon_resnet50_v1c,4.893,95.107,28.147,71.853,25.58,224,0.875,bicubic,-88.137,-70.243,-55 +hardcorenas_a,4.867,95.133,28.093,71.907,5.26,224,0.875,bilinear,-86.483,-69.767,+13 +regnetx_032,4.853,95.147,30.280,69.720,15.30,224,0.875,bicubic,-88.267,-68.110,-61 +tv_resnext50_32x4d,4.840,95.160,30.307,69.693,25.03,224,0.875,bilinear,-87.900,-67.963,-40 +tv_resnet101,4.707,95.293,29.333,70.667,44.55,224,0.875,bilinear,-88.103,-68.917,-45 +densenet161,4.693,95.307,29.547,70.453,28.68,224,0.875,bicubic,-87.807,-68.743,-30 +selecsls42b,4.667,95.333,28.587,71.413,32.46,224,0.875,bicubic,-87.613,-69.563,-22 +tf_efficientnet_lite1,4.613,95.387,28.387,71.613,5.42,240,0.882,bicubic,-88.007,-69.693,-37 +mobilenetv2_120d,4.533,95.467,29.280,70.720,5.83,224,0.875,bicubic,-87.867,-68.770,-29 +efficientnet_es_pruned,4.187,95.813,26.520,73.480,5.44,224,0.875,bicubic,-86.993,-71.230,+11 +fbnetc_100,4.133,95.867,25.933,74.067,5.57,224,0.875,bilinear,-86.567,-71.277,+23 +densenet201,4.120,95.880,27.547,72.453,20.01,224,0.875,bicubic,-88.630,-70.683,-50 +gluon_resnet50_v1b,4.120,95.880,26.933,73.067,25.56,224,0.875,bicubic,-88.420,-71.237,-38 +resnet26d,4.040,95.960,28.520,71.480,16.01,224,0.875,bicubic,-88.030,-69.440,-21 +semnasnet_100,3.960,96.040,26.947,73.053,3.89,224,0.875,bicubic,-87.320,-70.613,+2 +repvgg_a2,3.947,96.053,27.267,72.733,28.21,224,0.875,bilinear,-87.993,-70.883,-16 +tf_mixnet_s,3.880,96.120,25.253,74.747,4.13,224,0.875,bicubic,-87.630,-72.367,-5 +dpn68,3.867,96.133,26.080,73.920,12.61,224,0.875,bicubic,-88.143,-71.970,-21 +tf_efficientnet_es,3.827,96.173,26.107,73.893,5.44,224,0.875,bicubic,-88.153,-71.753,-21 +regnety_008,3.813,96.187,27.133,72.867,6.26,224,0.875,bicubic,-87.937,-71.047,-11 +dla60,3.773,96.227,27.933,72.067,22.04,224,0.875,bilinear,-88.457,-70.177,-33 +ssl_resnet18,3.747,96.253,25.427,74.573,11.69,224,0.875,bilinear,-86.473,-72.123,+19 +mobilenetv2_140,3.720,96.280,26.747,73.253,6.11,224,0.875,bicubic,-88.110,-71.113,-17 +densenet169,3.707,96.293,25.613,74.387,14.15,224,0.875,bicubic,-88.223,-72.487,-23 +regnetx_016,3.627,96.373,26.293,73.707,9.19,224,0.875,bicubic,-88.543,-71.917,-34 +res2net50_48w_2s,3.587,96.413,26.613,73.387,25.29,224,0.875,bilinear,-88.963,-71.467,-52 +spnasnet_100,3.547,96.453,24.293,75.707,4.42,224,0.875,bilinear,-86.803,-72.897,+13 
+tf_mobilenetv3_large_100,3.547,96.453,25.053,74.947,5.48,224,0.875,bilinear,-87.693,-72.607,-10 +regnety_006,3.467,96.533,24.893,75.107,6.06,224,0.875,bicubic,-87.903,-72.817,-15 +legacy_seresnet34,3.333,96.667,23.800,76.200,21.96,224,0.875,bilinear,-87.557,-73.780,0 +efficientnet_lite0,3.253,96.747,25.867,74.133,4.65,224,0.875,bicubic,-87.887,-71.763,-9 +dla34,3.227,96.773,23.573,76.427,15.74,224,0.875,bilinear,-87.533,-74.087,0 +ghostnet_100,3.227,96.773,24.853,75.147,5.18,224,0.875,bilinear,-86.793,-72.517,+11 +regnety_004,3.200,96.800,22.653,77.347,4.34,224,0.875,bicubic,-87.300,-74.887,+4 +mobilenetv2_110d,3.173,96.827,24.587,75.413,4.52,224,0.875,bicubic,-87.777,-72.963,-7 +mnasnet_100,3.120,96.880,24.227,75.773,4.38,224,0.875,bicubic,-87.390,-73.243,+1 +tf_efficientnet_lite0,3.080,96.920,22.907,77.093,4.65,224,0.875,bicubic,-87.960,-74.683,-11 +skresnet18,3.013,96.987,22.800,77.200,11.96,224,0.875,bicubic,-86.647,-74.430,+11 +vgg19_bn,2.947,97.053,23.480,76.520,143.68,224,0.875,bilinear,-87.133,-74.100,+4 +resnet34,2.920,97.080,23.680,76.320,21.80,224,0.875,bilinear,-88.210,-73.940,-17 +tf_mobilenetv3_large_075,2.867,97.133,21.573,78.427,3.99,224,0.875,bilinear,-86.813,-75.637,+6 +hrnet_w18_small_v2,2.720,97.280,23.693,76.307,15.60,224,0.875,bilinear,-88.470,-74.207,-22 +gluon_resnet34_v1b,2.667,97.333,21.680,78.320,21.80,224,0.875,bicubic,-88.293,-75.950,-16 +regnetx_008,2.653,97.347,22.453,77.547,7.26,224,0.875,bicubic,-88.397,-75.257,-19 +vgg16_bn,2.653,97.347,23.773,76.227,138.37,224,0.875,bilinear,-87.437,-73.597,-3 +vgg16,2.640,97.360,20.427,79.573,138.36,224,0.875,bilinear,-85.910,-76.363,+13 +resnet18d,2.600,97.400,21.613,78.387,11.71,224,0.875,bicubic,-86.680,-75.537,+4 +tv_densenet121,2.560,97.440,22.667,77.333,7.98,224,0.875,bicubic,-88.330,-75.043,-17 +repvgg_b0,2.547,97.453,24.013,75.987,15.82,224,0.875,bilinear,-88.883,-73.977,-36 +regnetx_006,2.507,97.493,20.653,79.347,6.20,224,0.875,bicubic,-87.843,-76.777,-11 +legacy_seresnet18,2.493,97.507,20.080,79.920,11.78,224,0.875,bicubic,-86.387,-76.900,+6 +resnet26,2.480,97.520,22.987,77.013,16.00,224,0.875,bicubic,-88.630,-74.753,-28 +mobilenetv2_100,2.147,97.853,19.907,80.093,3.50,224,0.875,bicubic,-87.453,-77.233,-3 +regnety_002,2.147,97.853,18.880,81.120,3.16,224,0.875,bicubic,-85.233,-77.710,+9 +vgg19,2.107,97.893,20.733,79.267,143.67,224,0.875,bilinear,-86.933,-76.137,-1 +vgg13_bn,2.093,97.907,20.307,79.693,133.05,224,0.875,bilinear,-86.667,-76.663,+2 +tf_mobilenetv3_small_100,2.013,97.987,15.867,84.133,2.54,224,0.875,bilinear,-83.177,-79.903,+12 +tf_mobilenetv3_small_075,2.000,98.000,14.813,85.187,2.04,224,0.875,bilinear,-81.520,-79.977,+14 +regnetx_004,1.960,98.040,19.173,80.827,5.16,224,0.875,bicubic,-86.940,-77.947,-3 +vgg13,1.867,98.133,17.960,82.040,133.05,224,0.875,bilinear,-85.183,-78.360,+4 +tv_resnet34,1.867,98.133,20.000,80.000,21.80,224,0.875,bilinear,-88.073,-77.340,-14 +dla46x_c,1.760,98.240,16.480,83.520,1.07,224,0.875,bilinear,-82.490,-78.790,+8 +vgg11_bn,1.720,98.280,18.093,81.907,132.87,224,0.875,bilinear,-85.780,-78.727,-2 +tf_mobilenetv3_large_minimal_100,1.627,98.373,17.120,82.880,3.92,224,0.875,bilinear,-87.343,-79.740,-9 +dla60x_c,1.613,98.387,18.040,81.960,1.32,224,0.875,bilinear,-84.677,-78.120,+2 +vgg11,1.560,98.440,16.227,83.773,132.86,224,0.875,bilinear,-84.990,-80.053,0 +gluon_resnet18_v1b,1.547,98.453,16.613,83.387,11.69,224,0.875,bicubic,-86.853,-80.067,-7 +hrnet_w18_small,1.533,98.467,18.120,81.880,13.19,224,0.875,bilinear,-87.517,-78.990,-15 
+dla46_c,1.520,98.480,15.267,84.733,1.30,224,0.875,bilinear,-82.130,-79.653,+2 +regnetx_002,1.373,98.627,15.027,84.973,2.68,224,0.875,bicubic,-84.817,-80.953,-2 +resnet18,1.160,98.840,16.213,83.787,11.69,224,0.875,bilinear,-86.230,-80.077,-9 +tf_mobilenetv3_small_minimal_100,1.013,98.987,11.493,88.507,2.04,224,0.875,bilinear,-80.367,-82.177,+1 +tv_resnet50,0.000,100.000,14.453,85.547,25.56,224,0.875,bilinear,-91.880,-83.587,-70 diff --git a/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-r-clean.csv b/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-r-clean.csv new file mode 100644 index 0000000000..b76f0d58c9 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-r-clean.csv @@ -0,0 +1,421 @@ +model,top1,top1_err,top5,top5_err,param_count,img_size,cropt_pct,interpolation +tf_efficientnet_l2_ns,97.780,2.220,99.890,0.110,480.31,800,0.960,bicubic +tf_efficientnet_l2_ns_475,97.750,2.250,99.820,0.180,480.31,475,0.936,bicubic +vit_large_patch16_384,97.420,2.580,99.780,0.220,304.72,384,1.000,bicubic +tf_efficientnet_b7_ns,97.200,2.800,99.700,0.300,66.35,600,0.949,bicubic +swin_large_patch4_window12_384,97.170,2.830,99.680,0.320,196.74,384,1.000,bicubic +swin_base_patch4_window12_384,97.120,2.880,99.780,0.220,87.90,384,1.000,bicubic +tf_efficientnetv2_l_in21ft1k,97.110,2.890,99.710,0.290,118.52,480,1.000,bicubic +tf_efficientnet_b6_ns,97.020,2.980,99.710,0.290,43.04,528,0.942,bicubic +vit_base_patch16_384,97.020,2.980,99.710,0.290,86.86,384,1.000,bicubic +ig_resnext101_32x48d,96.970,3.030,99.670,0.330,828.41,224,0.875,bilinear +tf_efficientnetv2_m_in21ft1k,96.970,3.030,99.610,0.390,54.14,480,1.000,bicubic +swin_large_patch4_window7_224,96.950,3.050,99.660,0.340,196.53,224,0.900,bicubic +vit_large_r50_s32_384,96.950,3.050,99.710,0.290,329.09,384,1.000,bicubic +dm_nfnet_f6,96.920,3.080,99.720,0.280,438.36,576,0.956,bicubic +cait_m48_448,96.880,3.120,99.620,0.380,356.46,448,1.000,bicubic +resnetv2_152x4_bitm,96.870,3.130,99.660,0.340,936.53,480,1.000,bilinear +tf_efficientnet_b5_ns,96.870,3.130,99.640,0.360,30.39,456,0.934,bicubic +cait_m36_384,96.830,3.170,99.660,0.340,271.22,384,1.000,bicubic +dm_nfnet_f5,96.810,3.190,99.670,0.330,377.21,544,0.954,bicubic +dm_nfnet_f4,96.780,3.220,99.620,0.380,316.07,512,0.951,bicubic +ig_resnext101_32x32d,96.780,3.220,99.530,0.470,468.53,224,0.875,bilinear +dm_nfnet_f3,96.730,3.270,99.630,0.370,254.92,416,0.940,bicubic +tf_efficientnet_b4_ns,96.710,3.290,99.640,0.360,19.34,380,0.922,bicubic +vit_large_patch16_224,96.710,3.290,99.650,0.350,304.33,224,0.900,bicubic +tf_efficientnet_b8,96.700,3.300,99.530,0.470,87.41,672,0.954,bicubic +swin_base_patch4_window7_224,96.680,3.320,99.660,0.340,87.77,224,0.900,bicubic +tf_efficientnetv2_l,96.650,3.350,99.560,0.440,118.52,480,1.000,bicubic +cait_s36_384,96.630,3.370,99.600,0.400,68.37,384,1.000,bicubic +tf_efficientnet_b7,96.580,3.420,99.510,0.490,66.35,600,0.949,bicubic +cait_s24_384,96.570,3.430,99.550,0.450,47.06,384,1.000,bicubic +tf_efficientnet_b8_ap,96.550,3.450,99.540,0.460,87.41,672,0.954,bicubic +tf_efficientnetv2_m,96.550,3.450,99.570,0.430,54.14,480,1.000,bicubic +resnetv2_152x2_bitm,96.520,3.480,99.590,0.410,236.34,448,1.000,bilinear +deit_base_distilled_patch16_384,96.510,3.490,99.590,0.410,87.63,384,1.000,bicubic +tf_efficientnetv2_s_in21ft1k,96.470,3.530,99.570,0.430,21.46,384,1.000,bicubic +dm_nfnet_f2,96.460,3.540,99.540,0.460,193.78,352,0.920,bicubic +ecaresnet269d,96.460,3.540,99.610,0.390,102.09,352,1.000,bicubic 
+eca_nfnet_l2,96.450,3.550,99.620,0.380,56.72,384,1.000,bicubic +vit_base_r50_s16_384,96.450,3.550,99.660,0.340,98.95,384,1.000,bicubic +ig_resnext101_32x16d,96.440,3.560,99.540,0.460,194.03,224,0.875,bilinear +resnetrs420,96.400,3.600,99.540,0.460,191.89,416,1.000,bicubic +dm_nfnet_f1,96.390,3.610,99.470,0.530,132.63,320,0.910,bicubic +tf_efficientnet_b6_ap,96.370,3.630,99.550,0.450,43.04,528,0.942,bicubic +resmlp_big_24_224_in22ft1k,96.350,3.650,99.520,0.480,129.14,224,0.875,bicubic +tf_efficientnet_b7_ap,96.350,3.650,99.590,0.410,66.35,600,0.949,bicubic +seresnet152d,96.310,3.690,99.510,0.490,66.84,320,1.000,bicubic +vit_base_patch16_224,96.300,3.700,99.560,0.440,86.57,224,0.900,bicubic +tf_efficientnet_b6,96.290,3.710,99.520,0.480,43.04,528,0.942,bicubic +efficientnetv2_rw_m,96.270,3.730,99.560,0.440,53.24,416,1.000,bicubic +resnetv2_50x3_bitm,96.270,3.730,99.630,0.370,217.32,448,1.000,bilinear +swsl_resnext101_32x16d,96.270,3.730,99.500,0.500,194.03,224,0.875,bilinear +resnetv2_101x3_bitm,96.250,3.750,99.590,0.410,387.93,448,1.000,bilinear +swsl_resnext101_32x8d,96.240,3.760,99.590,0.410,88.79,224,0.875,bilinear +resnetrs350,96.240,3.760,99.470,0.530,163.96,384,1.000,bicubic +resnetv2_152x2_bit_teacher_384,96.190,3.810,99.500,0.500,236.34,384,1.000,bicubic +vit_large_r50_s32_224,96.180,3.820,99.530,0.470,328.99,224,0.900,bicubic +resnest269e,96.120,3.880,99.520,0.480,110.93,416,0.928,bicubic +resnet200d,96.110,3.890,99.460,0.540,64.69,320,1.000,bicubic +tf_efficientnet_b3_ns,96.100,3.900,99.480,0.520,12.23,300,0.904,bicubic +tf_efficientnet_b5_ap,96.080,3.920,99.540,0.460,30.39,456,0.934,bicubic +pit_b_distilled_224,96.070,3.930,99.380,0.620,74.79,224,0.900,bicubic +resnest200e,96.070,3.930,99.480,0.520,70.20,320,0.909,bicubic +resnetrs270,96.060,3.940,99.490,0.510,129.86,352,1.000,bicubic +vit_small_r26_s32_384,96.060,3.940,99.560,0.440,36.47,384,1.000,bicubic +swsl_resnext101_32x4d,96.050,3.950,99.530,0.470,44.18,224,0.875,bilinear +vit_base_patch16_224_miil,96.030,3.970,99.350,0.650,86.54,224,0.875,bilinear +cait_xs24_384,96.010,3.990,99.430,0.570,26.67,384,1.000,bicubic +resnetrs200,95.990,4.010,99.440,0.560,93.21,320,1.000,bicubic +tf_efficientnet_b5,95.980,4.020,99.450,0.550,30.39,456,0.934,bicubic +vit_small_patch16_384,95.980,4.020,99.590,0.410,22.20,384,1.000,bicubic +resnetrs152,95.960,4.040,99.380,0.620,86.62,320,1.000,bicubic +eca_nfnet_l1,95.940,4.060,99.490,0.510,41.41,320,1.000,bicubic +ig_resnext101_32x8d,95.930,4.070,99.380,0.620,88.79,224,0.875,bilinear +vit_base_patch32_384,95.900,4.100,99.440,0.560,88.30,384,1.000,bicubic +regnety_160,95.880,4.120,99.560,0.440,83.59,288,1.000,bicubic +resmlp_big_24_distilled_224,95.870,4.130,99.440,0.560,129.14,224,0.875,bicubic +resnet152d,95.870,4.130,99.430,0.570,60.21,320,1.000,bicubic +resnet101d,95.750,4.250,99.440,0.560,44.57,320,1.000,bicubic +resnetv2_152x2_bit_teacher,95.750,4.250,99.430,0.570,236.34,224,0.875,bicubic +deit_base_distilled_patch16_224,95.750,4.250,99.280,0.720,87.34,224,0.900,bicubic +twins_pcpvt_large,95.720,4.280,99.490,0.510,60.99,224,0.900,bicubic +twins_svt_large,95.720,4.280,99.370,0.630,99.27,224,0.900,bicubic +swin_small_patch4_window7_224,95.720,4.280,99.290,0.710,49.61,224,0.900,bicubic +efficientnetv2_rw_s,95.710,4.290,99.380,0.620,23.94,384,1.000,bicubic +tf_efficientnetv2_s,95.710,4.290,99.400,0.600,21.46,384,1.000,bicubic +dm_nfnet_f0,95.690,4.310,99.330,0.670,71.49,256,0.900,bicubic +cait_s24_224,95.650,4.350,99.390,0.610,46.92,224,1.000,bicubic 
+deit_base_patch16_384,95.650,4.350,99.240,0.760,86.86,384,1.000,bicubic +swsl_resnext50_32x4d,95.620,4.380,99.440,0.560,25.03,224,0.875,bilinear +tf_efficientnet_b4,95.590,4.410,99.330,0.670,19.34,380,0.922,bicubic +resnest101e,95.570,4.430,99.270,0.730,48.28,256,0.875,bilinear +twins_svt_base,95.570,4.430,99.230,0.770,56.07,224,0.900,bicubic +tf_efficientnet_b2_ns,95.520,4.480,99.340,0.660,9.11,260,0.890,bicubic +efficientnet_b4,95.520,4.480,99.390,0.610,19.34,384,1.000,bicubic +tresnet_xl_448,95.510,4.490,99.340,0.660,78.44,448,0.875,bilinear +tf_efficientnet_b4_ap,95.490,4.510,99.390,0.610,19.34,380,0.922,bicubic +regnety_032,95.470,4.530,99.320,0.680,19.44,288,1.000,bicubic +twins_pcpvt_base,95.460,4.540,99.390,0.610,43.83,224,0.900,bicubic +eca_nfnet_l0,95.450,4.550,99.390,0.610,24.14,288,1.000,bicubic +ssl_resnext101_32x16d,95.410,4.590,99.410,0.590,194.03,224,0.875,bilinear +tresnet_l_448,95.410,4.590,99.300,0.700,55.99,448,0.875,bilinear +nfnet_l0,95.390,4.610,99.420,0.580,35.07,288,1.000,bicubic +resnetv2_50x1_bit_distilled,95.390,4.610,99.430,0.570,25.55,224,0.875,bicubic +tresnet_m,95.380,4.620,99.150,0.850,31.39,224,0.875,bilinear +pnasnet5large,95.360,4.640,99.130,0.870,86.06,331,0.911,bicubic +ssl_resnext101_32x8d,95.340,4.660,99.320,0.680,88.79,224,0.875,bilinear +resnetv2_101x1_bitm,95.320,4.680,99.370,0.630,44.54,448,1.000,bilinear +resnetrs101,95.250,4.750,99.210,0.790,63.62,288,0.940,bicubic +vit_large_patch32_384,95.240,4.760,99.320,0.680,306.63,384,1.000,bicubic +cait_xxs36_384,95.220,4.780,99.320,0.680,17.37,384,1.000,bicubic +levit_384,95.210,4.790,99.160,0.840,39.13,224,0.900,bicubic +resnet51q,95.200,4.800,99.280,0.720,35.70,288,1.000,bilinear +swsl_resnet50,95.200,4.800,99.390,0.610,25.56,224,0.875,bilinear +ecaresnet101d,95.160,4.840,99.230,0.770,44.57,224,0.875,bicubic +ssl_resnext101_32x4d,95.160,4.840,99.300,0.700,44.18,224,0.875,bilinear +nasnetalarge,95.150,4.850,99.130,0.870,88.75,331,0.911,bicubic +efficientnet_b3,95.140,4.860,99.210,0.790,12.23,320,1.000,bicubic +vit_small_r26_s32_224,95.130,4.870,99.220,0.780,36.43,224,0.900,bicubic +tf_efficientnetv2_b3,95.120,4.880,99.200,0.800,14.36,300,0.904,bicubic +convit_base,95.100,4.900,99.140,0.860,86.54,224,0.875,bicubic +coat_lite_small,95.080,4.920,99.020,0.980,19.84,224,0.900,bicubic +ecaresnet50t,95.070,4.930,99.290,0.710,25.57,320,0.950,bicubic +tresnet_xl,95.060,4.940,99.260,0.740,78.44,224,0.875,bilinear +deit_base_patch16_224,95.010,4.990,98.980,1.020,86.57,224,0.900,bicubic +tf_efficientnet_b3_ap,94.970,5.030,99.110,0.890,12.23,300,0.904,bicubic +visformer_small,94.960,5.040,99.210,0.790,40.22,224,0.900,bicubic +gernet_l,94.930,5.070,99.200,0.800,31.08,256,0.875,bilinear +cait_xxs24_384,94.920,5.080,99.140,0.860,12.03,384,1.000,bicubic +convit_small,94.920,5.080,99.110,0.890,27.78,224,0.875,bicubic +tf_efficientnet_b3,94.910,5.090,99.110,0.890,12.23,300,0.904,bicubic +tresnet_l,94.900,5.100,99.030,0.970,55.99,224,0.875,bilinear +vit_small_patch16_224,94.880,5.120,99.270,0.730,22.05,224,0.900,bicubic +mixer_b16_224_miil,94.880,5.120,99.080,0.920,59.88,224,0.875,bilinear +tf_efficientnet_lite4,94.870,5.130,99.090,0.910,13.01,380,0.920,bilinear +tf_efficientnet_b1_ns,94.860,5.140,99.250,0.750,7.79,240,0.882,bicubic +seresnext50_32x4d,94.820,5.180,99.130,0.870,27.56,224,0.875,bicubic +pit_b_224,94.790,5.210,98.820,1.180,73.76,224,0.900,bicubic +coat_mini,94.770,5.230,98.950,1.050,10.34,224,0.900,bicubic +twins_svt_small,94.770,5.230,99.080,0.920,24.06,224,0.900,bicubic 
+legacy_senet154,94.730,5.270,99.100,0.900,115.09,224,0.875,bilinear +pit_s_distilled_224,94.730,5.270,99.190,0.810,24.04,224,0.900,bicubic +resnetv2_50x1_bitm,94.730,5.270,99.180,0.820,25.55,448,1.000,bilinear +gluon_resnet152_v1s,94.720,5.280,99.060,0.940,60.32,224,0.875,bicubic +gluon_senet154,94.710,5.290,98.970,1.030,115.09,224,0.875,bicubic +resnest50d_4s2x40d,94.710,5.290,99.130,0.870,30.42,224,0.875,bicubic +ssl_resnext50_32x4d,94.700,5.300,99.240,0.760,25.03,224,0.875,bilinear +efficientnet_el,94.670,5.330,99.130,0.870,10.59,300,0.904,bicubic +wide_resnet50_2,94.670,5.330,99.050,0.950,68.88,224,0.875,bicubic +rexnet_200,94.660,5.340,99.090,0.910,16.37,224,0.875,bicubic +tresnet_m_448,94.660,5.340,99.150,0.850,31.39,448,0.875,bilinear +gluon_seresnext101_64x4d,94.650,5.350,98.980,1.020,88.23,224,0.875,bicubic +resnest50d,94.620,5.380,99.030,0.970,27.48,224,0.875,bilinear +swin_tiny_patch4_window7_224,94.620,5.380,99.120,0.880,28.29,224,0.900,bicubic +twins_pcpvt_small,94.600,5.400,99.150,0.850,24.11,224,0.900,bicubic +deit_small_distilled_patch16_224,94.590,5.410,99.100,0.900,22.44,224,0.900,bicubic +pit_s_224,94.590,5.410,98.930,1.070,23.46,224,0.900,bicubic +vit_small_patch32_384,94.590,5.410,99.140,0.860,22.92,384,1.000,bicubic +tnt_s_patch16_224,94.580,5.420,99.180,0.820,23.76,224,0.900,bicubic +efficientnet_b3_pruned,94.580,5.420,99.070,0.930,9.86,300,0.904,bicubic +resmlp_36_distilled_224,94.570,5.430,99.160,0.840,44.69,224,0.875,bicubic +gernet_m,94.550,5.450,98.930,1.070,21.14,224,0.875,bilinear +repvgg_b3,94.550,5.450,98.910,1.090,123.09,224,0.875,bilinear +regnety_320,94.520,5.480,99.170,0.830,145.05,224,0.875,bicubic +repvgg_b3g4,94.490,5.510,99.020,0.980,83.83,224,0.875,bilinear +ecaresnet101d_pruned,94.450,5.550,99.100,0.900,24.88,224,0.875,bicubic +gluon_seresnext101_32x4d,94.450,5.550,99.090,0.910,48.96,224,0.875,bicubic +gluon_resnet152_v1d,94.440,5.560,99.010,0.990,60.21,224,0.875,bicubic +levit_256,94.400,5.600,99.060,0.940,18.89,224,0.900,bicubic +nf_resnet50,94.400,5.600,99.070,0.930,25.56,288,0.940,bicubic +vit_base_patch32_224,94.390,5.610,99.060,0.940,88.22,224,0.900,bicubic +resnest50d_1s4x24d,94.390,5.610,99.070,0.930,25.68,224,0.875,bicubic +inception_v4,94.380,5.620,98.820,1.180,42.68,299,0.875,bicubic +efficientnet_b2,94.370,5.630,99.050,0.950,9.11,288,1.000,bicubic +tf_efficientnet_el,94.360,5.640,99.100,0.900,10.59,300,0.904,bicubic +gluon_resnext101_64x4d,94.350,5.650,98.880,1.120,83.46,224,0.875,bicubic +inception_resnet_v2,94.340,5.660,98.800,1.200,55.84,299,0.897,bicubic +resmlp_24_distilled_224,94.330,5.670,99.090,0.910,30.02,224,0.875,bicubic +ssl_resnet50,94.310,5.690,99.150,0.850,25.56,224,0.875,bilinear +regnetx_120,94.270,5.730,99.190,0.810,46.11,224,0.875,bicubic +rexnet_150,94.270,5.730,99.080,0.920,9.73,224,0.875,bicubic +tf_efficientnet_b2_ap,94.270,5.730,98.950,1.050,9.11,260,0.890,bicubic +resmlp_big_24_224,94.260,5.740,98.820,1.180,129.14,224,0.875,bicubic +mixnet_xl,94.230,5.770,98.820,1.180,11.90,224,0.875,bicubic +tf_efficientnet_b2,94.210,5.790,99.030,0.970,9.11,260,0.890,bicubic +regnetx_320,94.210,5.790,99.050,0.950,107.81,224,0.875,bicubic +dpn92,94.190,5.810,98.930,1.070,37.67,224,0.875,bicubic +ecaresnet50d,94.190,5.810,99.020,0.980,25.58,224,0.875,bicubic +gluon_resnet101_v1d,94.170,5.830,98.940,1.060,44.57,224,0.875,bicubic +gluon_resnet101_v1s,94.170,5.830,99.010,0.990,44.67,224,0.875,bicubic +gluon_seresnext50_32x4d,94.170,5.830,98.910,1.090,27.56,224,0.875,bicubic 
+ecaresnetlight,94.140,5.860,98.950,1.050,30.16,224,0.875,bicubic +regnety_064,94.140,5.860,99.030,0.970,30.58,224,0.875,bicubic +ens_adv_inception_resnet_v2,94.130,5.870,98.790,1.210,55.84,299,0.897,bicubic +legacy_seresnext101_32x4d,94.130,5.870,98.970,1.030,48.96,224,0.875,bilinear +tf_efficientnet_lite3,94.130,5.870,98.960,1.040,8.20,300,0.904,bilinear +gluon_resnext101_32x4d,94.120,5.880,98.930,1.070,44.18,224,0.875,bicubic +efficientnet_el_pruned,94.090,5.910,99.010,0.990,10.59,300,0.904,bicubic +cspdarknet53,94.090,5.910,98.980,1.020,27.64,256,0.887,bilinear +seresnet50,94.080,5.920,98.970,1.030,28.09,224,0.875,bicubic +resnet50d,94.070,5.930,98.920,1.080,25.58,224,0.875,bicubic +tf_efficientnetv2_b2,94.070,5.930,98.930,1.070,10.10,260,0.890,bicubic +gluon_resnet152_v1b,94.030,5.970,98.740,1.260,60.19,224,0.875,bicubic +hrnet_w48,94.030,5.970,99.040,0.960,77.47,224,0.875,bilinear +resnetrs50,94.020,5.980,98.850,1.150,35.69,224,0.910,bicubic +gluon_xception65,94.010,5.990,99.020,0.980,39.92,299,0.903,bicubic +regnety_120,94.010,5.990,99.030,0.970,51.82,224,0.875,bicubic +deit_small_patch16_224,94.000,6.000,98.960,1.040,22.05,224,0.900,bicubic +dla102x2,94.000,6.000,99.030,0.970,41.28,224,0.875,bilinear +dpn107,93.960,6.040,98.840,1.160,86.92,224,0.875,bicubic +skresnext50_32x4d,93.950,6.050,98.820,1.180,27.48,224,0.875,bicubic +ecaresnet26t,93.940,6.060,98.920,1.080,16.01,320,0.950,bicubic +cait_xxs36_224,93.940,6.060,98.890,1.110,17.30,224,1.000,bicubic +dpn98,93.940,6.060,98.920,1.080,61.57,224,0.875,bicubic +xception71,93.890,6.110,98.950,1.050,42.34,299,0.903,bicubic +regnety_080,93.890,6.110,99.000,1.000,39.18,224,0.875,bicubic +gluon_resnet152_v1c,93.880,6.120,98.800,1.200,60.21,224,0.875,bicubic +regnetx_160,93.880,6.120,99.090,0.910,54.28,224,0.875,bicubic +nf_regnet_b1,93.880,6.120,98.740,1.260,10.22,288,0.900,bicubic +cspresnet50,93.860,6.140,98.870,1.130,21.62,256,0.887,bilinear +ese_vovnet39b,93.850,6.150,98.900,1.100,24.57,224,0.875,bicubic +resnext50_32x4d,93.840,6.160,98.830,1.170,25.03,224,0.875,bicubic +hrnet_w64,93.830,6.170,98.930,1.070,128.06,224,0.875,bilinear +ecaresnet50d_pruned,93.820,6.180,99.000,1.000,19.94,224,0.875,bicubic +repvgg_b2g4,93.820,6.180,98.930,1.070,61.76,224,0.875,bilinear +resnext50d_32x4d,93.810,6.190,98.740,1.260,25.05,224,0.875,bicubic +efficientnet_b2_pruned,93.800,6.200,98.910,1.090,8.31,260,0.890,bicubic +dla169,93.800,6.200,98.840,1.160,53.39,224,0.875,bilinear +regnetx_080,93.790,6.210,98.910,1.090,39.57,224,0.875,bicubic +resnext101_32x8d,93.770,6.230,98.950,1.050,88.79,224,0.875,bilinear +cspresnext50,93.760,6.240,98.840,1.160,20.57,224,0.875,bilinear +dpn131,93.760,6.240,98.800,1.200,79.25,224,0.875,bicubic +gluon_resnet101_v1b,93.760,6.240,98.700,1.300,44.55,224,0.875,bicubic +xception65,93.760,6.240,98.860,1.140,39.92,299,0.903,bicubic +efficientnet_em,93.740,6.260,98.930,1.070,6.90,240,0.882,bicubic +tf_efficientnet_b0_ns,93.740,6.260,98.980,1.020,5.29,224,0.875,bicubic +wide_resnet101_2,93.730,6.270,98.810,1.190,126.89,224,0.875,bilinear +resnetblur50,93.710,6.290,98.810,1.190,25.56,224,0.875,bicubic +tf_efficientnetv2_b1,93.710,6.290,98.820,1.180,8.14,240,0.882,bicubic +tf_efficientnet_b1,93.710,6.290,98.800,1.200,7.79,240,0.882,bicubic +levit_192,93.710,6.290,98.790,1.210,10.95,224,0.900,bicubic +hrnet_w40,93.710,6.290,98.800,1.200,57.56,224,0.875,bilinear +gluon_resnet101_v1c,93.690,6.310,98.760,1.240,44.57,224,0.875,bicubic +regnetx_040,93.680,6.320,98.940,1.060,22.12,224,0.875,bicubic 
+rexnet_130,93.670,6.330,98.710,1.290,7.56,224,0.875,bicubic +gluon_resnext50_32x4d,93.650,6.350,98.690,1.310,25.03,224,0.875,bicubic +resmlp_36_224,93.650,6.350,98.950,1.050,44.69,224,0.875,bicubic +xception,93.640,6.360,98.770,1.230,22.86,299,0.897,bicubic +regnetx_064,93.630,6.370,99.050,0.950,26.21,224,0.875,bicubic +tf_efficientnet_b1_ap,93.630,6.370,98.800,1.200,7.79,240,0.882,bicubic +hrnet_w44,93.620,6.380,98.960,1.040,67.06,224,0.875,bilinear +regnety_040,93.620,6.380,98.950,1.050,20.65,224,0.875,bicubic +dpn68b,93.620,6.380,98.700,1.300,12.61,224,0.875,bicubic +gluon_resnet50_v1s,93.590,6.410,98.840,1.160,25.68,224,0.875,bicubic +repvgg_b2,93.590,6.410,99.070,0.930,89.02,224,0.875,bilinear +res2net50_26w_6s,93.590,6.410,98.750,1.250,37.05,224,0.875,bilinear +dla60_res2next,93.570,6.430,98.800,1.200,17.03,224,0.875,bilinear +tf_efficientnet_cc_b1_8e,93.570,6.430,98.690,1.310,39.72,240,0.882,bicubic +gluon_inception_v3,93.540,6.460,98.830,1.170,23.83,299,0.875,bicubic +dla102x,93.530,6.470,98.850,1.150,26.31,224,0.875,bilinear +gluon_resnet50_v1d,93.530,6.470,98.710,1.290,25.58,224,0.875,bicubic +res2net101_26w_4s,93.520,6.480,98.600,1.400,45.21,224,0.875,bilinear +coat_tiny,93.510,6.490,98.690,1.310,5.50,224,0.900,bicubic +selecsls60b,93.500,6.500,98.840,1.160,32.77,224,0.875,bicubic +cait_xxs24_224,93.490,6.510,98.770,1.230,11.96,224,1.000,bicubic +xception41,93.480,6.520,98.750,1.250,26.97,299,0.903,bicubic +resnet50,93.460,6.540,98.600,1.400,25.56,224,0.875,bicubic +res2net50_26w_8s,93.450,6.550,98.700,1.300,48.40,224,0.875,bilinear +coat_lite_mini,93.450,6.550,98.780,1.220,11.01,224,0.900,bicubic +legacy_seresnet152,93.440,6.560,98.850,1.150,66.82,224,0.875,bilinear +resmlp_24_224,93.440,6.560,98.810,1.190,30.02,224,0.875,bicubic +legacy_seresnext50_32x4d,93.430,6.570,98.800,1.200,27.56,224,0.875,bilinear +vit_tiny_patch16_384,93.420,6.580,98.830,1.170,5.79,384,1.000,bicubic +repvgg_b1,93.410,6.590,98.790,1.210,57.42,224,0.875,bilinear +dla60_res2net,93.380,6.620,98.860,1.140,20.85,224,0.875,bilinear +hrnet_w30,93.370,6.630,98.830,1.170,37.71,224,0.875,bilinear +dla102,93.260,6.740,98.780,1.220,33.27,224,0.875,bilinear +legacy_seresnet101,93.260,6.740,98.740,1.260,49.33,224,0.875,bilinear +mixnet_l,93.260,6.740,98.700,1.300,7.33,224,0.875,bicubic +regnetx_032,93.250,6.750,98.730,1.270,15.30,224,0.875,bicubic +tv_resnet152,93.240,6.760,98.750,1.250,60.19,224,0.875,bilinear +pit_xs_distilled_224,93.240,6.760,98.820,1.180,11.00,224,0.900,bicubic +resnest26d,93.240,6.760,98.850,1.150,17.07,224,0.875,bilinear +tf_inception_v3,93.200,6.800,98.480,1.520,23.83,299,0.875,bicubic +dla60x,93.190,6.810,98.710,1.290,17.35,224,0.875,bilinear +res2net50_26w_4s,93.180,6.820,98.670,1.330,25.70,224,0.875,bilinear +tf_efficientnet_em,93.170,6.830,98.670,1.330,6.90,240,0.882,bicubic +res2next50,93.150,6.850,98.660,1.340,24.67,224,0.875,bilinear +tf_efficientnetv2_b0,93.060,6.940,98.700,1.300,7.14,224,0.875,bicubic +levit_128,93.050,6.950,98.690,1.310,9.21,224,0.900,bicubic +tf_mixnet_l,93.040,6.960,98.540,1.460,7.33,224,0.875,bicubic +res2net50_14w_8s,93.030,6.970,98.700,1.300,25.06,224,0.875,bilinear +repvgg_b1g4,93.030,6.970,98.820,1.180,39.97,224,0.875,bilinear +efficientnet_b1,93.030,6.970,98.710,1.290,7.79,256,1.000,bicubic +adv_inception_v3,93.010,6.990,98.490,1.510,23.83,299,0.875,bicubic +selecsls60,93.010,6.990,98.830,1.170,30.67,224,0.875,bicubic +regnety_016,93.000,7.000,98.680,1.320,11.20,224,0.875,bicubic +efficientnet_b1_pruned,92.980,7.020,98.530,1.470,6.33,240,0.882,bicubic 
+hardcorenas_f,92.980,7.020,98.620,1.380,8.20,224,0.875,bilinear +hardcorenas_e,92.950,7.050,98.570,1.430,8.07,224,0.875,bilinear +hrnet_w32,92.950,7.050,98.840,1.160,41.23,224,0.875,bilinear +efficientnet_es,92.910,7.090,98.690,1.310,5.44,224,0.875,bicubic +gluon_resnet50_v1c,92.910,7.090,98.710,1.290,25.58,224,0.875,bicubic +pit_xs_224,92.910,7.090,98.780,1.220,10.62,224,0.900,bicubic +tv_resnext50_32x4d,92.900,7.100,98.720,1.280,25.03,224,0.875,bilinear +inception_v3,92.900,7.100,98.330,1.670,23.83,299,0.875,bicubic +densenet161,92.900,7.100,98.810,1.190,28.68,224,0.875,bicubic +tv_resnet101,92.880,7.120,98.660,1.340,44.55,224,0.875,bilinear +resmlp_12_distilled_224,92.870,7.130,98.630,1.370,15.35,224,0.875,bicubic +tf_efficientnet_cc_b0_8e,92.870,7.130,98.460,1.540,24.01,224,0.875,bicubic +coat_lite_tiny,92.850,7.150,98.640,1.360,5.72,224,0.900,bicubic +rexnet_100,92.850,7.150,98.620,1.380,4.80,224,0.875,bicubic +tf_efficientnet_cc_b0_4e,92.840,7.160,98.440,1.560,13.31,224,0.875,bicubic +seresnext26t_32x4d,92.820,7.180,98.560,1.440,16.81,224,0.875,bicubic +res2net50_48w_2s,92.790,7.210,98.470,1.530,25.29,224,0.875,bilinear +hrnet_w18,92.760,7.240,98.660,1.340,21.30,224,0.875,bilinear +densenet201,92.690,7.310,98.650,1.350,20.01,224,0.875,bicubic +repvgg_a2,92.680,7.320,98.520,1.480,28.21,224,0.875,bilinear +gmixer_24_224,92.680,7.320,98.280,1.720,24.72,224,0.875,bicubic +dla60,92.670,7.330,98.630,1.370,22.04,224,0.875,bilinear +legacy_seresnet50,92.670,7.330,98.650,1.350,28.09,224,0.875,bilinear +resnet34d,92.640,7.360,98.420,1.580,21.82,224,0.875,bicubic +mobilenetv2_120d,92.610,7.390,98.510,1.490,5.83,224,0.875,bicubic +tf_efficientnet_b0_ap,92.610,7.390,98.370,1.630,5.29,224,0.875,bicubic +hardcorenas_d,92.600,7.400,98.430,1.570,7.50,224,0.875,bilinear +tf_efficientnet_lite2,92.590,7.410,98.550,1.450,6.09,260,0.890,bicubic +legacy_seresnext26_32x4d,92.570,7.430,98.420,1.580,16.79,224,0.875,bicubic +skresnet34,92.570,7.430,98.520,1.480,22.28,224,0.875,bicubic +gluon_resnet50_v1b,92.560,7.440,98.550,1.450,25.56,224,0.875,bicubic +regnetx_016,92.540,7.460,98.550,1.450,9.19,224,0.875,bicubic +selecsls42b,92.480,7.520,98.440,1.560,32.46,224,0.875,bicubic +efficientnet_b0,92.480,7.520,98.680,1.320,5.29,224,0.875,bicubic +gernet_s,92.440,7.560,98.500,1.500,8.17,224,0.875,bilinear +seresnext26d_32x4d,92.440,7.560,98.540,1.460,16.81,224,0.875,bicubic +densenetblur121d,92.400,7.600,98.410,1.590,8.00,224,0.875,bicubic +tf_efficientnet_b0,92.400,7.600,98.470,1.530,5.29,224,0.875,bicubic +hardcorenas_c,92.330,7.670,98.340,1.660,5.52,224,0.875,bilinear +tf_efficientnet_lite1,92.310,7.690,98.490,1.510,5.42,240,0.882,bicubic +densenet169,92.300,7.700,98.590,1.410,14.15,224,0.875,bicubic +mixnet_m,92.270,7.730,98.350,1.650,5.01,224,0.875,bicubic +mobilenetv3_large_100_miil,92.250,7.750,98.250,1.750,5.48,224,0.875,bilinear +dpn68,92.240,7.760,98.610,1.390,12.61,224,0.875,bicubic +resnet26d,92.230,7.770,98.450,1.550,16.01,224,0.875,bicubic +tf_mixnet_m,92.200,7.800,98.420,1.580,5.01,224,0.875,bicubic +vit_small_patch32_224,92.150,7.850,98.510,1.490,22.88,224,0.900,bicubic +tv_resnet50,92.140,7.860,98.420,1.580,25.56,224,0.875,bilinear +resmlp_12_224,92.120,7.880,98.570,1.430,15.35,224,0.875,bicubic +tf_efficientnet_es,92.100,7.900,98.440,1.560,5.44,224,0.875,bicubic +mobilenetv2_140,92.030,7.970,98.250,1.750,6.11,224,0.875,bicubic +ese_vovnet19b_dw,92.010,7.990,98.510,1.490,6.54,224,0.875,bicubic +densenet121,91.940,8.060,98.280,1.720,7.98,224,0.875,bicubic 
+hardcorenas_b,91.940,8.060,98.400,1.600,5.18,224,0.875,bilinear +vit_tiny_patch16_224,91.930,8.070,98.340,1.660,5.72,224,0.900,bicubic +regnety_008,91.900,8.100,98.420,1.580,6.26,224,0.875,bicubic +mixnet_s,91.780,8.220,98.300,1.700,4.13,224,0.875,bicubic +vit_tiny_r_s16_p8_384,91.730,8.270,98.430,1.570,6.36,384,1.000,bicubic +efficientnet_es_pruned,91.700,8.300,98.420,1.580,5.44,224,0.875,bicubic +tf_mixnet_s,91.680,8.320,98.240,1.760,4.13,224,0.875,bicubic +repvgg_b0,91.680,8.320,98.450,1.550,15.82,224,0.875,bilinear +semnasnet_100,91.660,8.340,98.270,1.730,3.89,224,0.875,bicubic +hardcorenas_a,91.620,8.380,98.170,1.830,5.26,224,0.875,bilinear +regnety_006,91.570,8.430,98.430,1.570,6.06,224,0.875,bicubic +mobilenetv3_rw,91.550,8.450,98.270,1.730,5.48,224,0.875,bicubic +levit_128s,91.500,8.500,98.400,1.600,7.78,224,0.900,bicubic +legacy_seresnet34,91.480,8.520,98.200,1.800,21.96,224,0.875,bilinear +mobilenetv3_large_100,91.480,8.520,98.320,1.680,5.48,224,0.875,bicubic +resnet26,91.440,8.560,98.280,1.720,16.00,224,0.875,bicubic +tf_mobilenetv3_large_100,91.420,8.580,98.260,1.740,5.48,224,0.875,bilinear +tv_densenet121,91.400,8.600,98.250,1.750,7.98,224,0.875,bicubic +mobilenetv2_110d,91.350,8.650,98.190,1.810,4.52,224,0.875,bicubic +tf_efficientnet_lite0,91.300,8.700,98.090,1.910,4.65,224,0.875,bicubic +fbnetc_100,91.270,8.730,97.830,2.170,5.57,224,0.875,bilinear +efficientnet_lite0,91.260,8.740,98.250,1.750,4.65,224,0.875,bicubic +dla34,91.240,8.760,98.180,1.820,15.74,224,0.875,bilinear +mnasnet_100,91.200,8.800,98.050,1.950,4.38,224,0.875,bicubic +resnet34,91.200,8.800,98.240,1.760,21.80,224,0.875,bilinear +regnetx_008,91.180,8.820,98.380,1.620,7.26,224,0.875,bicubic +hrnet_w18_small_v2,91.170,8.830,98.340,1.660,15.60,224,0.875,bilinear +mixer_b16_224,91.140,8.860,97.400,2.600,59.88,224,0.875,bicubic +resnest14d,91.130,8.870,98.330,1.670,10.61,224,0.875,bilinear +deit_tiny_distilled_patch16_224,91.100,8.900,98.270,1.730,5.91,224,0.900,bicubic +gluon_resnet34_v1b,91.100,8.900,98.180,1.820,21.80,224,0.875,bicubic +swsl_resnet18,91.090,8.910,98.210,1.790,11.69,224,0.875,bilinear +vgg19_bn,91.000,9.000,98.110,1.890,143.68,224,0.875,bilinear +pit_ti_distilled_224,90.900,9.100,98.220,1.780,5.10,224,0.900,bicubic +regnety_004,90.780,9.220,98.080,1.920,4.34,224,0.875,bicubic +regnetx_006,90.760,9.240,98.100,1.900,6.20,224,0.875,bicubic +ssl_resnet18,90.700,9.300,98.020,1.980,11.69,224,0.875,bilinear +spnasnet_100,90.610,9.390,97.950,2.050,4.42,224,0.875,bilinear +vgg16_bn,90.540,9.460,97.990,2.010,138.37,224,0.875,bilinear +convit_tiny,90.530,9.470,98.210,1.790,5.71,224,0.875,bicubic +ghostnet_100,90.440,9.560,97.830,2.170,5.18,224,0.875,bilinear +pit_ti_224,90.420,9.580,98.010,1.990,4.85,224,0.900,bicubic +tf_mobilenetv3_large_075,90.320,9.680,97.870,2.130,3.99,224,0.875,bilinear +tv_resnet34,90.290,9.710,97.980,2.020,21.80,224,0.875,bilinear +skresnet18,90.160,9.840,97.780,2.220,11.96,224,0.875,bicubic +resnet18d,89.990,10.010,97.830,2.170,11.71,224,0.875,bicubic +hrnet_w18_small,89.880,10.120,97.900,2.100,13.19,224,0.875,bilinear +mobilenetv2_100,89.830,10.170,97.830,2.170,3.50,224,0.875,bicubic +vgg19,89.680,10.320,97.550,2.450,143.67,224,0.875,bilinear +deit_tiny_patch16_224,89.620,10.380,97.960,2.040,5.72,224,0.900,bicubic +regnetx_004,89.460,10.540,97.770,2.230,5.16,224,0.875,bicubic +vgg16,89.360,10.640,97.520,2.480,138.36,224,0.875,bilinear +vit_tiny_r_s16_p8_224,89.340,10.660,97.700,2.300,6.34,224,0.900,bicubic +legacy_seresnet18,89.270,10.730,97.680,2.320,11.78,224,0.875,bicubic 
+vgg13_bn,89.200,10.800,97.530,2.470,133.05,224,0.875,bilinear +tf_mobilenetv3_large_minimal_100,89.180,10.820,97.320,2.680,3.92,224,0.875,bilinear +gluon_resnet18_v1b,88.660,11.340,97.100,2.900,11.69,224,0.875,bicubic +vgg11_bn,88.390,11.610,97.270,2.730,132.87,224,0.875,bilinear +regnety_002,88.200,11.800,97.430,2.570,3.16,224,0.875,bicubic +resnet18,88.150,11.850,97.120,2.880,11.69,224,0.875,bilinear +vgg13,87.570,12.430,97.120,2.880,133.05,224,0.875,bilinear +regnetx_002,87.380,12.620,96.990,3.010,2.68,224,0.875,bicubic +vgg11,87.340,12.660,97.110,2.890,132.86,224,0.875,bilinear +dla60x_c,87.110,12.890,97.140,2.860,1.32,224,0.875,bilinear +mixer_l16_224,86.970,13.030,94.060,5.940,208.20,224,0.875,bicubic +tf_mobilenetv3_small_100,85.960,14.040,96.400,3.600,2.54,224,0.875,bilinear +dla46x_c,85.480,14.520,96.440,3.560,1.07,224,0.875,bilinear +dla46_c,84.660,15.340,96.200,3.800,1.30,224,0.875,bilinear +tf_mobilenetv3_small_075,84.530,15.470,95.890,4.110,2.04,224,0.875,bilinear +tf_mobilenetv3_small_minimal_100,82.670,17.330,95.000,5.000,2.04,224,0.875,bilinear diff --git a/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-r.csv b/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-r.csv new file mode 100644 index 0000000000..bf922167be --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-r.csv @@ -0,0 +1,421 @@ +model,top1,top1_err,top5,top5_err,param_count,img_size,cropt_pct,interpolation,top1_diff,top5_diff,rank_diff +ig_resnext101_32x48d,79.650,20.350,89.393,10.607,828.41,224,0.875,bilinear,-17.320,-10.277,+9 +ig_resnext101_32x32d,79.457,20.543,89.183,10.817,468.53,224,0.875,bilinear,-17.323,-10.347,+19 +ig_resnext101_32x16d,78.837,21.163,88.480,11.520,194.03,224,0.875,bilinear,-17.603,-11.060,+37 +tf_efficientnet_l2_ns_475,76.480,23.520,88.653,11.347,480.31,475,0.936,bicubic,-21.270,-11.167,-2 +swsl_resnext101_32x16d,76.303,23.697,87.733,12.267,194.03,224,0.875,bilinear,-19.967,-11.767,+46 +ig_resnext101_32x8d,75.813,24.187,86.200,13.800,88.79,224,0.875,bilinear,-20.117,-13.180,+67 +swsl_resnext101_32x8d,75.590,24.410,86.937,13.063,88.79,224,0.875,bilinear,-20.650,-12.533,+46 +tf_efficientnet_l2_ns,74.650,25.350,87.543,12.457,480.31,800,0.960,bicubic,-23.130,-12.347,-7 +swsl_resnext101_32x4d,72.660,27.340,85.157,14.843,44.18,224,0.875,bilinear,-23.390,-14.373,+56 +swsl_resnext50_32x4d,68.977,31.023,82.810,17.190,25.03,224,0.875,bilinear,-26.643,-16.630,+79 +swsl_resnet50,68.297,31.703,83.313,16.687,25.56,224,0.875,bilinear,-26.903,-16.077,+102 +tf_efficientnet_b7_ns,67.510,32.490,81.383,18.617,66.35,600,0.949,bicubic,-29.690,-18.317,-8 +vit_large_patch16_384,67.053,32.947,78.707,21.293,304.72,384,1.000,bicubic,-30.367,-21.073,-10 +swin_large_patch4_window12_384,66.283,33.717,79.783,20.217,196.74,384,1.000,bicubic,-30.887,-19.897,-9 +tf_efficientnet_b6_ns,65.587,34.413,79.553,20.447,43.04,528,0.942,bicubic,-31.433,-20.157,-7 +vit_large_patch16_224,64.347,35.653,76.190,23.810,304.33,224,0.900,bicubic,-32.363,-23.460,+8 +vit_large_r50_s32_384,64.100,35.900,75.850,24.150,329.09,384,1.000,bicubic,-32.850,-23.860,-4 +swin_large_patch4_window7_224,63.870,36.130,78.180,21.820,196.53,224,0.900,bicubic,-33.080,-21.480,-6 +swin_base_patch4_window12_384,63.470,36.530,78.063,21.937,87.90,384,1.000,bicubic,-33.650,-21.717,-13 +tf_efficientnet_b5_ns,63.047,36.953,77.777,22.223,30.39,456,0.934,bicubic,-33.823,-21.863,-3 +tf_efficientnet_b4_ns,61.230,38.770,76.173,23.827,19.34,380,0.922,bicubic,-35.480,-23.467,+2 
+tf_efficientnetv2_l_in21ft1k,60.953,39.047,75.847,24.153,118.52,480,1.000,bicubic,-36.157,-23.863,-15 +vit_base_patch16_384,60.180,39.820,73.843,26.157,86.86,384,1.000,bicubic,-36.840,-25.867,-14 +swin_base_patch4_window7_224,59.537,40.463,74.247,25.753,87.77,224,0.900,bicubic,-37.143,-25.413,+2 +tf_efficientnetv2_m_in21ft1k,58.647,41.353,73.983,26.017,54.14,480,1.000,bicubic,-38.323,-25.627,-14 +vit_large_r50_s32_224,58.633,41.367,71.720,28.280,328.99,224,0.900,bicubic,-37.547,-27.810,+30 +tf_efficientnet_b8_ap,57.830,42.170,72.957,27.043,87.41,672,0.954,bicubic,-38.720,-26.583,+4 +cait_m48_448,57.470,42.530,71.860,28.140,356.46,448,1.000,bicubic,-39.410,-27.760,-13 +cait_m36_384,57.467,42.533,72.313,27.687,271.22,384,1.000,bicubic,-39.363,-27.347,-11 +tf_efficientnet_b3_ns,57.417,42.583,72.387,27.613,12.23,300,0.904,bicubic,-38.683,-27.093,+29 +vit_base_patch16_224,56.823,43.177,70.633,29.367,86.57,224,0.900,bicubic,-39.477,-28.927,+16 +vit_base_r50_s16_384,54.403,45.597,69.560,30.440,98.95,384,1.000,bicubic,-42.047,-30.100,+7 +resnetv2_152x4_bitm,54.320,45.680,70.167,29.833,936.53,480,1.000,bilinear,-42.550,-29.493,-17 +vit_small_r26_s32_384,54.197,45.803,68.757,31.243,36.47,384,1.000,bicubic,-41.863,-30.803,+30 +tf_efficientnet_b5_ap,53.870,46.130,69.160,30.840,30.39,456,0.934,bicubic,-42.210,-30.380,+25 +tf_efficientnet_b2_ns,53.600,46.400,70.270,29.730,9.11,260,0.890,bicubic,-41.920,-29.120,+57 +tf_efficientnet_b6_ap,53.560,46.440,68.550,31.450,43.04,528,0.942,bicubic,-42.810,-31.000,+6 +cait_s36_384,53.550,46.450,68.000,32.000,68.37,384,1.000,bicubic,-43.080,-31.600,-10 +tf_efficientnet_b8,53.410,46.590,69.090,30.910,87.41,672,0.954,bicubic,-43.290,-30.440,-14 +vit_base_patch32_384,53.307,46.693,68.047,31.953,88.30,384,1.000,bicubic,-42.593,-31.393,+34 +tf_efficientnet_b7_ap,53.260,46.740,68.873,31.127,66.35,600,0.949,bicubic,-43.090,-30.717,+4 +tf_efficientnetv2_s_in21ft1k,53.150,46.850,69.000,31.000,21.46,384,1.000,bicubic,-43.320,-30.570,-7 +tf_efficientnet_b4_ap,53.090,46.910,68.210,31.790,19.34,380,0.922,bicubic,-42.400,-31.180,+53 +dm_nfnet_f5,52.870,47.130,67.430,32.570,377.21,544,0.954,bicubic,-43.940,-32.240,-25 +dm_nfnet_f6,52.447,47.553,67.120,32.880,438.36,576,0.956,bicubic,-44.473,-32.600,-31 +tf_efficientnet_b7,52.393,47.607,68.233,31.767,66.35,600,0.949,bicubic,-44.187,-31.277,-17 +tf_efficientnetv2_l,52.377,47.623,67.237,32.763,118.52,480,1.000,bicubic,-44.273,-32.323,-20 +swsl_resnet18,52.327,47.673,70.480,29.520,11.69,224,0.875,bilinear,-38.763,-27.730,+334 +efficientnetv2_rw_m,52.323,47.677,67.210,32.790,53.24,416,1.000,bicubic,-43.947,-32.350,0 +deit_base_distilled_patch16_384,52.257,47.743,67.733,32.267,87.63,384,1.000,bicubic,-44.253,-31.857,-16 +dm_nfnet_f3,52.130,47.870,66.743,33.257,254.92,416,0.940,bicubic,-44.600,-32.887,-29 +resnetv2_152x2_bit_teacher_384,51.937,48.063,68.670,31.330,236.34,384,1.000,bicubic,-44.253,-30.830,+3 +resmlp_big_24_224_in22ft1k,51.903,48.097,68.463,31.537,129.14,224,0.875,bicubic,-44.447,-31.057,-9 +cait_s24_384,51.783,48.217,66.313,33.687,47.06,384,1.000,bicubic,-44.787,-33.237,-24 +resnetv2_152x2_bitm,51.753,48.247,69.250,30.750,236.34,448,1.000,bilinear,-44.767,-30.340,-22 +ecaresnet269d,51.670,48.330,66.047,33.953,102.09,352,1.000,bicubic,-44.790,-33.563,-19 +vit_base_patch16_224_miil,51.557,48.443,65.207,34.793,86.54,224,0.875,bilinear,-44.473,-34.143,+9 +pit_b_distilled_224,51.153,48.847,66.770,33.230,74.79,224,0.900,bicubic,-44.917,-32.610,+3 
+dm_nfnet_f4,50.900,49.100,65.557,34.443,316.07,512,0.951,bicubic,-45.880,-34.063,-39 +tf_efficientnet_b1_ns,50.883,49.117,67.910,32.090,7.79,240,0.882,bicubic,-43.977,-31.340,+75 +tf_efficientnetv2_m,50.557,49.443,66.010,33.990,54.14,480,1.000,bicubic,-45.993,-33.560,-29 +efficientnet_b4,50.510,49.490,65.703,34.297,19.34,384,1.000,bicubic,-45.010,-33.637,+32 +resnetv2_101x3_bitm,50.407,49.593,67.790,32.210,387.93,448,1.000,bilinear,-45.843,-31.800,-11 +ssl_resnext101_32x16d,50.257,49.743,66.033,33.967,194.03,224,0.875,bilinear,-45.153,-33.377,+36 +cait_s24_224,50.243,49.757,65.027,34.973,46.92,224,1.000,bicubic,-45.407,-34.363,+22 +eca_nfnet_l2,50.237,49.763,65.450,34.550,56.72,384,1.000,bicubic,-46.213,-34.170,-28 +vit_small_patch16_384,50.160,49.840,65.807,34.193,22.20,384,1.000,bicubic,-45.820,-33.783,+3 +resnest269e,50.153,49.847,64.670,35.330,110.93,416,0.928,bicubic,-45.967,-34.850,-11 +deit_base_distilled_patch16_224,50.063,49.937,66.227,33.773,87.34,224,0.900,bicubic,-45.687,-33.203,+11 +tf_efficientnet_b3_ap,50.057,49.943,65.210,34.790,12.23,300,0.904,bicubic,-44.913,-33.900,+55 +resnest200e,49.873,50.127,64.743,35.257,70.20,320,0.909,bicubic,-46.197,-34.737,-9 +cait_xs24_384,49.527,50.473,64.900,35.100,26.67,384,1.000,bicubic,-46.483,-34.530,-5 +tf_efficientnet_b5,49.510,50.490,65.657,34.343,30.39,456,0.934,bicubic,-46.470,-33.793,-4 +resnetv2_152x2_bit_teacher,49.480,50.520,65.617,34.383,236.34,224,0.875,bicubic,-46.270,-33.823,+5 +resnet200d,49.470,50.530,64.330,35.670,64.69,320,1.000,bicubic,-46.640,-35.130,-17 +resnest101e,49.367,50.633,65.587,34.413,48.28,256,0.875,bilinear,-46.203,-33.683,+15 +resnet152d,49.253,50.747,64.413,35.587,60.21,320,1.000,bicubic,-46.617,-35.017,0 +vit_base_patch32_224,49.253,50.747,64.340,35.660,88.22,224,0.900,bicubic,-45.137,-34.730,+92 +seresnet152d,49.247,50.753,64.170,35.830,66.84,320,1.000,bicubic,-47.063,-35.340,-33 +resmlp_big_24_distilled_224,49.097,50.903,65.470,34.530,129.14,224,0.875,bicubic,-46.773,-33.970,-4 +ssl_resnext101_32x8d,49.067,50.933,65.480,34.520,88.79,224,0.875,bilinear,-46.273,-33.840,+25 +repvgg_b3,48.917,51.083,64.887,35.113,123.09,224,0.875,bilinear,-45.633,-34.023,+80 +resnetrs420,48.857,51.143,63.427,36.573,191.89,416,1.000,bicubic,-47.543,-36.113,-42 +efficientnetv2_rw_s,48.603,51.397,63.840,36.160,23.94,384,1.000,bicubic,-47.107,-35.540,0 +efficientnet_b3,48.563,51.437,64.250,35.750,12.23,320,1.000,bicubic,-46.577,-34.960,+32 +ecaresnet101d,48.527,51.473,64.100,35.900,44.57,224,0.875,bicubic,-46.633,-35.130,+28 +dm_nfnet_f2,48.373,51.627,63.233,36.767,193.78,352,0.920,bicubic,-48.087,-36.307,-51 +vit_small_r26_s32_224,48.363,51.637,63.797,36.203,36.43,224,0.900,bicubic,-46.767,-35.423,+30 +repvgg_b3g4,48.310,51.690,64.800,35.200,83.83,224,0.875,bilinear,-46.180,-34.220,+75 +vit_large_patch32_384,48.250,51.750,61.830,38.170,306.63,384,1.000,bicubic,-46.990,-37.490,+19 +convit_base,48.217,51.783,63.000,37.000,86.54,224,0.875,bicubic,-46.883,-36.140,+29 +resnetrs350,48.050,51.950,62.653,37.347,163.96,384,1.000,bicubic,-48.190,-36.937,-38 +twins_svt_large,47.947,52.053,62.907,37.093,99.27,224,0.900,bicubic,-47.773,-36.583,-11 +mixer_b16_224_miil,47.790,52.210,63.400,36.600,59.88,224,0.875,bilinear,-47.090,-35.870,+39 +repvgg_b2g4,47.787,52.213,64.390,35.610,61.76,224,0.875,bilinear,-46.033,-34.540,+129 +eca_nfnet_l1,47.650,52.350,62.763,37.237,41.41,320,1.000,bicubic,-48.290,-36.727,-24 +resnetv2_50x3_bitm,47.593,52.407,65.603,34.397,217.32,448,1.000,bilinear,-48.677,-34.027,-47 
+pit_s_distilled_224,47.543,52.457,63.493,36.507,24.04,224,0.900,bicubic,-47.187,-35.697,+43 +resnest50d_4s2x40d,47.483,52.517,63.807,36.193,30.42,224,0.875,bicubic,-47.227,-35.323,+46 +efficientnet_b3_pruned,47.447,52.553,62.793,37.207,9.86,300,0.904,bicubic,-47.133,-36.387,+59 +tresnet_m,47.230,52.770,61.993,38.007,31.39,224,0.875,bilinear,-48.150,-37.157,+3 +tf_efficientnet_b6,47.213,52.787,63.110,36.890,43.04,528,0.942,bicubic,-49.077,-36.410,-54 +ssl_resnext101_32x4d,47.177,52.823,63.367,36.633,44.18,224,0.875,bilinear,-47.983,-35.933,+12 +resnetrs270,47.107,52.893,62.010,37.990,129.86,352,1.000,bicubic,-48.953,-37.480,-41 +tf_efficientnet_b4,47.083,52.917,62.867,37.133,19.34,380,0.922,bicubic,-48.507,-36.463,-15 +resnet101d,46.893,53.107,62.317,37.683,44.57,320,1.000,bicubic,-48.857,-36.963,-28 +resnetrs200,46.837,53.163,62.487,37.513,93.21,320,1.000,bicubic,-49.153,-36.953,-39 +gluon_seresnext101_64x4d,46.677,53.323,61.303,38.697,88.23,224,0.875,bicubic,-47.973,-37.677,+43 +twins_pcpvt_large,46.637,53.363,62.240,37.760,60.99,224,0.900,bicubic,-49.083,-37.050,-28 +dm_nfnet_f1,46.547,53.453,61.407,38.593,132.63,320,0.910,bicubic,-49.843,-38.063,-68 +tresnet_xl,46.283,53.717,61.943,38.057,78.44,224,0.875,bilinear,-48.777,-37.317,+12 +deit_small_distilled_patch16_224,46.160,53.840,62.417,37.583,22.44,224,0.900,bicubic,-48.430,-36.683,+43 +regnety_160,46.153,53.847,61.837,38.163,83.59,288,1.000,bicubic,-49.727,-37.723,-38 +gernet_m,46.150,53.850,62.700,37.300,21.14,224,0.875,bilinear,-48.400,-36.230,+47 +resnest50d_1s4x24d,46.083,53.917,62.377,37.623,25.68,224,0.875,bicubic,-48.307,-36.683,+56 +tf_efficientnet_b0_ns,46.047,53.953,63.253,36.747,5.29,224,0.875,bicubic,-47.693,-35.727,+119 +resnet51q,46.027,53.973,60.910,39.090,35.70,288,1.000,bilinear,-49.173,-38.370,-5 +vit_small_patch16_224,45.990,54.010,61.820,38.180,22.05,224,0.900,bicubic,-48.890,-37.260,+14 +resnest50d,45.937,54.063,62.623,37.377,27.48,224,0.875,bilinear,-48.683,-36.407,+33 +twins_pcpvt_base,45.893,54.107,61.337,38.663,43.83,224,0.900,bicubic,-49.567,-38.053,-23 +regnety_032,45.893,54.107,61.537,38.463,19.44,288,1.000,bicubic,-49.577,-37.783,-23 +levit_384,45.877,54.123,61.693,38.307,39.13,224,0.900,bicubic,-49.333,-37.467,-11 +twins_svt_base,45.877,54.123,60.967,39.033,56.07,224,0.900,bicubic,-49.693,-38.263,-31 +gluon_seresnext101_32x4d,45.590,54.410,61.143,38.857,48.96,224,0.875,bicubic,-48.860,-37.947,+42 +dm_nfnet_f0,45.483,54.517,60.983,39.017,71.49,256,0.900,bicubic,-50.207,-38.347,-39 +gluon_resnet152_v1d,45.430,54.570,60.077,39.923,60.21,224,0.875,bicubic,-49.010,-38.933,+41 +nfnet_l0,45.420,54.580,62.080,37.920,35.07,288,1.000,bicubic,-49.970,-37.340,-25 +ssl_resnext50_32x4d,45.407,54.593,62.047,37.953,25.03,224,0.875,bilinear,-49.293,-37.193,+18 +resnetv2_50x1_bit_distilled,45.393,54.607,62.303,37.697,25.55,224,0.875,bicubic,-49.997,-37.127,-26 +tresnet_xl_448,45.223,54.777,61.437,38.563,78.44,448,0.875,bilinear,-50.287,-37.903,-35 +nasnetalarge,45.210,54.790,57.883,42.117,88.75,331,0.911,bicubic,-49.940,-41.247,-15 +convit_small,45.203,54.797,60.510,39.490,27.78,224,0.875,bicubic,-49.717,-38.600,-3 +swin_small_patch4_window7_224,45.163,54.837,60.330,39.670,49.61,224,0.900,bicubic,-50.557,-39.040,-50 +tf_efficientnet_b3,45.107,54.893,60.650,39.350,12.23,300,0.904,bicubic,-49.803,-38.460,-4 +rexnet_200,45.047,54.953,62.317,37.683,16.37,224,0.875,bicubic,-49.613,-36.773,+14 +resnetrs152,44.943,55.057,59.713,40.287,86.62,320,1.000,bicubic,-51.017,-39.667,-65 
+ecaresnetlight,44.890,55.110,60.770,39.230,30.16,224,0.875,bicubic,-49.250,-38.180,+54 +deit_base_patch16_224,44.870,55.130,59.177,40.823,86.57,224,0.900,bicubic,-50.140,-39.803,-14 +deit_base_patch16_384,44.777,55.223,59.617,40.383,86.86,384,1.000,bicubic,-50.873,-39.623,-51 +cait_xxs36_384,44.773,55.227,59.380,40.620,17.37,384,1.000,bicubic,-50.447,-39.940,-30 +resmlp_36_distilled_224,44.757,55.243,61.073,38.927,44.69,224,0.875,bicubic,-49.813,-38.087,+19 +gernet_l,44.740,55.260,58.943,41.057,31.08,256,0.875,bilinear,-50.190,-40.257,-15 +resmlp_24_distilled_224,44.707,55.293,61.467,38.533,30.02,224,0.875,bicubic,-49.623,-37.623,+34 +tf_efficientnet_b2_ap,44.700,55.300,60.680,39.320,9.11,260,0.890,bicubic,-49.570,-38.270,+37 +ens_adv_inception_resnet_v2,44.393,55.607,58.117,41.883,55.84,299,0.897,bicubic,-49.737,-40.673,+48 +tresnet_l,44.363,55.637,59.953,40.047,55.99,224,0.875,bilinear,-50.537,-39.077,-15 +gluon_resnext101_32x4d,44.290,55.710,59.090,40.910,44.18,224,0.875,bicubic,-49.830,-39.840,+49 +wide_resnet50_2,44.177,55.823,59.727,40.273,68.88,224,0.875,bicubic,-50.493,-39.323,0 +cspresnext50,44.147,55.853,60.533,39.467,20.57,224,0.875,bilinear,-49.613,-38.307,+81 +resnetv2_101x1_bitm,44.127,55.873,61.983,38.017,44.54,448,1.000,bilinear,-51.193,-37.387,-43 +seresnext50_32x4d,44.127,55.873,59.490,40.510,27.56,224,0.875,bicubic,-50.693,-39.640,-15 +gluon_resnet152_v1s,44.073,55.927,58.703,41.297,60.32,224,0.875,bicubic,-50.647,-40.357,-9 +pit_b_224,44.070,55.930,58.017,41.983,73.76,224,0.900,bicubic,-50.720,-40.803,-16 +ssl_resnet50,44.010,55.990,61.887,38.113,25.56,224,0.875,bilinear,-50.300,-37.263,+24 +inception_resnet_v2,44.003,55.997,57.907,42.093,55.84,299,0.897,bicubic,-50.337,-40.893,+21 +pnasnet5large,43.950,56.050,56.730,43.270,86.06,331,0.911,bicubic,-51.410,-42.400,-51 +pit_s_224,43.890,56.110,58.627,41.373,23.46,224,0.900,bicubic,-50.700,-40.303,-1 +gluon_resnext101_64x4d,43.877,56.123,58.710,41.290,83.46,224,0.875,bicubic,-50.473,-40.170,+17 +coat_lite_small,43.823,56.177,57.147,42.853,19.84,224,0.900,bicubic,-51.257,-41.873,-38 +tnt_s_patch16_224,43.773,56.227,59.197,40.803,23.76,224,0.900,bicubic,-50.807,-39.873,-2 +cait_xxs36_224,43.760,56.240,58.720,41.280,17.30,224,1.000,bicubic,-50.180,-40.200,+51 +ecaresnet50d,43.750,56.250,60.387,39.613,25.58,224,0.875,bicubic,-50.440,-38.633,+25 +ecaresnet101d_pruned,43.737,56.263,59.607,40.393,24.88,224,0.875,bicubic,-50.713,-39.493,+2 +tf_efficientnetv2_s,43.710,56.290,58.597,41.403,21.46,384,1.000,bicubic,-52.000,-40.803,-79 +rexnet_150,43.690,56.310,60.897,39.103,9.73,224,0.875,bicubic,-50.580,-38.183,+15 +pit_xs_distilled_224,43.663,56.337,60.703,39.297,11.00,224,0.900,bicubic,-49.577,-38.147,+115 +gluon_resnet101_v1d,43.440,56.560,58.613,41.387,44.57,224,0.875,bicubic,-50.730,-40.327,+21 +ecaresnet50t,43.407,56.593,59.300,40.700,25.57,320,0.950,bicubic,-51.663,-39.990,-46 +gluon_resnet101_v1s,43.363,56.637,58.503,41.497,44.67,224,0.875,bicubic,-50.807,-40.507,+20 +cspdarknet53,43.357,56.643,59.430,40.570,27.64,256,0.887,bilinear,-50.733,-39.580,+28 +dpn68b,43.287,56.713,58.673,41.327,12.61,224,0.875,bicubic,-50.333,-40.277,+81 +visformer_small,43.253,56.747,57.993,42.007,40.22,224,0.900,bicubic,-51.707,-41.217,-46 +eca_nfnet_l0,43.233,56.767,59.913,40.087,24.14,288,1.000,bicubic,-52.217,-39.477,-74 +vit_small_patch32_384,43.143,56.857,59.293,40.707,22.92,384,1.000,bicubic,-51.447,-39.847,-17 +resnest26d,43.140,56.860,60.623,39.377,17.07,224,0.875,bilinear,-50.100,-38.127,+107 
+twins_pcpvt_small,43.090,56.910,58.873,41.127,24.11,224,0.900,bicubic,-51.510,-40.277,-22 +resmlp_36_224,43.050,56.950,59.310,40.690,44.69,224,0.875,bicubic,-50.600,-39.640,+69 +dpn131,43.047,56.953,57.440,42.560,79.25,224,0.875,bicubic,-50.713,-41.360,+53 +cspresnet50,43.030,56.970,59.153,40.847,21.62,256,0.887,bilinear,-50.830,-39.717,+40 +tf_efficientnet_lite4,42.967,57.033,57.620,42.380,13.01,380,0.920,bilinear,-51.903,-41.470,-46 +twins_svt_small,42.923,57.077,58.453,41.547,24.06,224,0.900,bicubic,-51.847,-40.627,-42 +gluon_resnet152_v1b,42.903,57.097,57.750,42.250,60.19,224,0.875,bicubic,-51.127,-40.990,+20 +dpn107,42.857,57.143,57.367,42.633,86.92,224,0.875,bicubic,-51.103,-41.473,+26 +levit_256,42.823,57.177,57.897,42.103,18.89,224,0.900,bicubic,-51.577,-41.163,-16 +tf_efficientnet_b1_ap,42.803,57.197,58.813,41.187,7.79,240,0.882,bicubic,-50.827,-39.987,+64 +gluon_resnet152_v1c,42.800,57.200,57.737,42.263,60.21,224,0.875,bicubic,-51.080,-41.063,+30 +gluon_xception65,42.793,57.207,58.820,41.180,39.92,299,0.903,bicubic,-51.217,-40.200,+18 +tresnet_l_448,42.753,57.247,58.947,41.053,55.99,448,0.875,bilinear,-52.657,-40.353,-87 +resnet50d,42.707,57.293,58.697,41.303,25.58,224,0.875,bicubic,-51.363,-40.223,+11 +gluon_seresnext50_32x4d,42.683,57.317,58.710,41.290,27.56,224,0.875,bicubic,-51.487,-40.200,0 +resnext101_32x8d,42.557,57.443,58.317,41.683,88.79,224,0.875,bilinear,-51.213,-40.633,+38 +nf_resnet50,42.510,57.490,59.520,40.480,25.56,288,0.940,bicubic,-51.890,-39.550,-23 +seresnet50,42.510,57.490,58.667,41.333,28.09,224,0.875,bicubic,-51.570,-40.303,+6 +resnetrs101,42.437,57.563,57.300,42.700,63.62,288,0.940,bicubic,-52.813,-41.910,-86 +tf_efficientnetv2_b3,42.313,57.687,57.940,42.060,14.36,300,0.904,bicubic,-52.807,-41.260,-76 +dpn98,42.280,57.720,56.880,43.120,61.57,224,0.875,bicubic,-51.660,-42.040,+17 +deit_small_patch16_224,42.263,57.737,58.020,41.980,22.05,224,0.900,bicubic,-51.737,-40.940,+10 +tf_efficientnet_cc_b1_8e,42.233,57.767,58.420,41.580,39.72,240,0.882,bicubic,-51.337,-40.270,+59 +legacy_senet154,42.207,57.793,56.597,43.403,115.09,224,0.875,bilinear,-52.523,-42.503,-59 +cait_xxs24_384,42.187,57.813,57.460,42.540,12.03,384,1.000,bicubic,-52.733,-41.680,-72 +tf_efficientnet_b2,42.120,57.880,58.197,41.803,9.11,260,0.890,bicubic,-52.090,-40.853,-17 +gluon_resnext50_32x4d,42.043,57.957,57.667,42.333,25.03,224,0.875,bicubic,-51.607,-41.023,+43 +resnet50,42.013,57.987,56.000,44.000,25.56,224,0.875,bicubic,-51.447,-42.600,+63 +ecaresnet50d_pruned,41.953,58.047,58.217,41.783,19.94,224,0.875,bicubic,-51.867,-40.783,+19 +efficientnet_b2,41.933,58.067,58.300,41.700,9.11,288,1.000,bicubic,-52.437,-40.750,-32 +dla102x2,41.647,58.353,57.967,42.033,41.28,224,0.875,bilinear,-52.353,-41.063,+2 +hrnet_w64,41.637,58.363,57.130,42.870,128.06,224,0.875,bilinear,-52.193,-41.800,+15 +gluon_senet154,41.627,58.373,56.373,43.627,115.09,224,0.875,bicubic,-53.083,-42.597,-64 +inception_v4,41.577,58.423,55.383,44.617,42.68,299,0.875,bicubic,-52.803,-43.437,-37 +efficientnet_el,41.497,58.503,58.303,41.697,10.59,300,0.904,bicubic,-53.173,-40.827,-63 +efficientnet_em,41.493,58.507,58.877,41.123,6.90,240,0.882,bicubic,-52.247,-40.053,+23 +tf_efficientnet_cc_b0_8e,41.487,58.513,57.377,42.623,24.01,224,0.875,bicubic,-51.383,-41.083,+97 +swin_tiny_patch4_window7_224,41.457,58.543,57.303,42.697,28.29,224,0.900,bicubic,-53.163,-41.817,-60 +resnext50_32x4d,41.443,58.557,56.997,43.003,25.03,224,0.875,bicubic,-52.397,-41.833,+7 
+cait_xxs24_224,41.383,58.617,57.527,42.473,11.96,224,1.000,bicubic,-52.107,-41.243,+49 +tv_resnet152,41.327,58.673,57.520,42.480,60.19,224,0.875,bilinear,-51.913,-41.300,+64 +xception71,41.270,58.730,55.873,44.127,42.34,299,0.903,bicubic,-52.620,-43.127,-3 +dpn92,41.267,58.733,56.333,43.667,37.67,224,0.875,bicubic,-52.923,-42.597,-32 +adv_inception_v3,41.263,58.737,56.317,43.683,23.83,299,0.875,bicubic,-51.747,-42.173,+75 +gernet_s,41.247,58.753,58.830,41.170,8.17,224,0.875,bilinear,-51.193,-39.670,+112 +resnetblur50,41.053,58.947,57.077,42.923,25.56,224,0.875,bicubic,-52.657,-41.723,+16 +nf_regnet_b1,41.013,58.987,58.120,41.880,10.22,288,0.900,bicubic,-52.867,-40.970,-4 +gluon_resnet50_v1d,40.970,59.030,57.137,42.863,25.58,224,0.875,bicubic,-52.560,-41.573,+37 +gluon_inception_v3,40.907,59.093,55.617,44.383,23.83,299,0.875,bicubic,-52.633,-43.213,+34 +ese_vovnet39b,40.867,59.133,56.947,43.053,24.57,224,0.875,bicubic,-52.983,-41.953,-5 +levit_192,40.847,59.153,56.687,43.313,10.95,224,0.900,bicubic,-52.863,-42.113,+14 +regnety_320,40.813,59.187,56.117,43.883,145.05,224,0.875,bicubic,-53.707,-43.053,-64 +resnet34d,40.810,59.190,56.530,43.470,21.82,224,0.875,bicubic,-51.830,-41.890,+93 +xception,40.763,59.237,56.387,43.613,22.86,299,0.897,bicubic,-52.877,-42.383,+18 +skresnext50_32x4d,40.700,59.300,56.023,43.977,27.48,224,0.875,bicubic,-53.250,-42.797,-20 +gluon_resnet101_v1b,40.680,59.320,56.117,43.883,44.55,224,0.875,bicubic,-53.080,-42.583,+1 +hrnet_w40,40.660,59.340,56.753,43.247,57.56,224,0.875,bilinear,-53.050,-42.067,+9 +resmlp_24_224,40.653,59.347,56.573,43.427,30.02,224,0.875,bicubic,-52.787,-42.237,+37 +repvgg_b1,40.593,59.407,57.837,42.163,57.42,224,0.875,bilinear,-52.817,-40.953,+39 +tf_efficientnet_lite3,40.563,59.437,56.477,43.523,8.20,300,0.904,bilinear,-53.567,-42.483,-40 +tresnet_m_448,40.530,59.470,56.700,43.300,31.39,448,0.875,bilinear,-54.130,-42.450,-86 +pit_xs_224,40.497,59.503,56.530,43.470,10.62,224,0.900,bicubic,-52.413,-42.250,+66 +dla169,40.493,59.507,57.263,42.737,53.39,224,0.875,bilinear,-53.307,-41.647,-11 +repvgg_b2,40.467,59.533,57.780,42.220,89.02,224,0.875,bilinear,-53.123,-41.290,+15 +regnetx_320,40.443,59.557,55.660,44.340,107.81,224,0.875,bicubic,-53.767,-43.370,-55 +coat_mini,40.420,59.580,55.167,44.833,10.34,224,0.900,bicubic,-54.350,-43.783,-103 +skresnet34,40.397,59.603,56.737,43.263,22.28,224,0.875,bicubic,-52.173,-41.783,+85 +efficientnet_el_pruned,40.390,59.610,56.903,43.097,10.59,300,0.904,bicubic,-53.700,-42.077,-46 +efficientnet_b2_pruned,40.383,59.617,56.537,43.463,8.31,260,0.890,bicubic,-53.417,-42.303,-18 +coat_lite_mini,40.360,59.640,55.717,44.283,11.01,224,0.900,bicubic,-53.090,-42.983,+23 +legacy_seresnext101_32x4d,40.360,59.640,54.817,45.183,48.96,224,0.875,bilinear,-53.770,-44.153,-52 +wide_resnet101_2,40.360,59.640,55.780,44.220,126.89,224,0.875,bilinear,-53.370,-43.030,-11 +tf_efficientnet_b0_ap,40.337,59.663,56.787,43.213,5.29,224,0.875,bicubic,-52.273,-41.583,+75 +xception65,40.273,59.727,55.283,44.717,39.92,299,0.903,bicubic,-53.487,-43.577,-16 +regnetx_160,40.270,59.730,56.050,43.950,54.28,224,0.875,bicubic,-53.610,-42.690,-33 +densenet201,40.267,59.733,56.710,43.290,20.01,224,0.875,bicubic,-52.423,-41.940,+65 +resnext50d_32x4d,40.170,59.830,55.487,44.513,25.05,224,0.875,bicubic,-53.640,-43.253,-27 +hrnet_w48,40.093,59.907,56.640,43.360,77.47,224,0.875,bilinear,-53.937,-42.400,-50 +legacy_seresnet152,40.043,59.957,55.820,44.180,66.82,224,0.875,bilinear,-53.397,-43.030,+15 
+hrnet_w30,40.030,59.970,57.093,42.907,37.71,224,0.875,bilinear,-53.340,-41.737,+20 +regnetx_080,40.000,60.000,55.977,44.023,39.57,224,0.875,bicubic,-53.790,-42.933,-28 +tf_efficientnet_b1,39.977,60.023,56.137,43.863,7.79,240,0.882,bicubic,-53.733,-42.673,-18 +gluon_resnet101_v1c,39.953,60.047,55.300,44.700,44.57,224,0.875,bicubic,-53.737,-43.460,-16 +resmlp_12_distilled_224,39.843,60.157,57.440,42.560,15.35,224,0.875,bicubic,-53.027,-41.190,+49 +tf_efficientnetv2_b0,39.787,60.213,56.283,43.717,7.14,224,0.875,bicubic,-53.273,-42.417,+28 +res2net101_26w_4s,39.717,60.283,54.550,45.450,45.21,224,0.875,bilinear,-53.803,-44.050,0 +regnetx_120,39.687,60.313,55.633,44.367,46.11,224,0.875,bicubic,-54.583,-43.557,-83 +hrnet_w44,39.677,60.323,55.333,44.667,67.06,224,0.875,bilinear,-53.943,-43.367,-13 +vit_small_patch32_224,39.667,60.333,55.253,44.747,22.88,224,0.900,bicubic,-52.483,-43.257,+80 +densenet161,39.620,60.380,56.133,43.867,28.68,224,0.875,bicubic,-53.280,-42.587,+41 +resmlp_big_24_224,39.620,60.380,54.817,45.183,129.14,224,0.875,bicubic,-54.640,-44.003,-84 +mixnet_xl,39.617,60.383,55.887,44.113,11.90,224,0.875,bicubic,-54.613,-42.933,-84 +xception41,39.610,60.390,55.037,44.963,26.97,299,0.903,bicubic,-53.870,-43.713,-3 +res2net50_26w_8s,39.603,60.397,54.550,45.450,48.40,224,0.875,bilinear,-53.847,-44.230,-2 +tf_efficientnetv2_b1,39.570,60.430,55.343,44.657,8.14,240,0.882,bicubic,-54.140,-43.447,-32 +dla102x,39.553,60.447,56.323,43.677,26.31,224,0.875,bilinear,-53.977,-42.527,-12 +rexnet_130,39.487,60.513,56.640,43.360,7.56,224,0.875,bicubic,-54.183,-42.070,-28 +hrnet_w32,39.463,60.537,56.123,43.877,41.23,224,0.875,bilinear,-53.487,-42.717,+27 +resnetv2_50x1_bitm,39.440,60.560,57.847,42.153,25.55,448,1.000,bilinear,-55.290,-41.333,-132 +levit_128,39.433,60.567,55.350,44.650,9.21,224,0.900,bicubic,-53.617,-43.340,+14 +regnety_064,39.403,60.597,55.773,44.227,30.58,224,0.875,bicubic,-54.737,-43.257,-84 +densenetblur121d,39.380,60.620,56.640,43.360,8.00,224,0.875,bicubic,-53.020,-41.770,+57 +regnety_120,39.347,60.653,55.277,44.723,51.82,224,0.875,bicubic,-54.663,-43.753,-72 +tv_resnet101,39.307,60.693,55.803,44.197,44.55,224,0.875,bilinear,-53.573,-42.857,+28 +tf_efficientnet_el,39.303,60.697,55.387,44.613,10.59,300,0.904,bicubic,-55.057,-43.713,-106 +tf_inception_v3,39.237,60.763,54.300,45.700,23.83,299,0.875,bicubic,-53.963,-44.180,+2 +gluon_resnet50_v1s,39.233,60.767,55.010,44.990,25.68,224,0.875,bicubic,-54.357,-43.830,-29 +tf_efficientnetv2_b2,39.180,60.820,54.570,45.430,10.10,260,0.890,bicubic,-54.890,-44.360,-82 +densenet169,39.167,60.833,55.843,44.157,14.15,224,0.875,bicubic,-53.133,-42.747,+54 +legacy_seresnet101,39.037,60.963,55.003,44.997,49.33,224,0.875,bilinear,-54.223,-43.737,-8 +efficientnet_b1_pruned,39.010,60.990,55.647,44.353,6.33,240,0.882,bicubic,-53.970,-42.883,+11 +repvgg_b1g4,38.990,61.010,56.350,43.650,39.97,224,0.875,bilinear,-54.040,-42.470,+5 +inception_v3,38.960,61.040,53.853,46.147,23.83,299,0.875,bicubic,-53.940,-44.477,+17 +dpn68,38.933,61.067,54.933,45.067,12.61,224,0.875,bicubic,-53.307,-43.677,+52 +regnety_080,38.917,61.083,55.213,44.787,39.18,224,0.875,bicubic,-54.973,-43.737,-75 +legacy_seresnext50_32x4d,38.877,61.123,54.593,45.407,27.56,224,0.875,bilinear,-54.553,-44.207,-20 +dla102,38.833,61.167,55.323,44.677,33.27,224,0.875,bilinear,-54.427,-43.457,-16 +regnety_040,38.820,61.180,55.557,44.443,20.65,224,0.875,bicubic,-54.800,-43.403,-42 +densenet121,38.783,61.217,56.273,43.727,7.98,224,0.875,bicubic,-53.157,-42.007,+56 
+res2net50_14w_8s,38.710,61.290,54.077,45.923,25.06,224,0.875,bilinear,-54.320,-44.633,-4 +regnetx_040,38.703,61.297,55.340,44.660,22.12,224,0.875,bicubic,-54.977,-43.600,-53 +res2net50_26w_6s,38.687,61.313,53.743,46.257,37.05,224,0.875,bilinear,-54.903,-45.007,-42 +regnetx_032,38.680,61.320,55.157,44.843,15.30,224,0.875,bicubic,-54.570,-43.573,-19 +selecsls60,38.623,61.377,55.630,44.370,30.67,224,0.875,bicubic,-54.387,-43.200,-4 +dla60x,38.617,61.383,55.383,44.617,17.35,224,0.875,bilinear,-54.573,-43.327,-16 +tf_efficientnet_b0,38.600,61.400,55.957,44.043,5.29,224,0.875,bicubic,-53.800,-42.513,+34 +dla60_res2net,38.590,61.410,54.560,45.440,20.85,224,0.875,bilinear,-54.790,-44.300,-28 +selecsls60b,38.573,61.427,55.307,44.693,32.77,224,0.875,bicubic,-54.927,-43.533,-40 +repvgg_a2,38.563,61.437,55.770,44.230,28.21,224,0.875,bilinear,-54.117,-42.510,+13 +hardcorenas_f,38.500,61.500,55.657,44.343,8.20,224,0.875,bilinear,-54.480,-42.963,-7 +dla60_res2next,38.450,61.550,54.950,45.050,17.03,224,0.875,bilinear,-55.120,-43.850,-50 +resmlp_12_224,38.443,61.557,56.327,43.673,15.35,224,0.875,bicubic,-53.677,-42.243,+39 +regnetx_064,38.430,61.570,54.990,45.010,26.21,224,0.875,bicubic,-55.200,-44.060,-60 +tf_efficientnet_cc_b0_4e,38.413,61.587,55.150,44.850,13.31,224,0.875,bicubic,-54.427,-43.290,+3 +gluon_resnet50_v1b,38.407,61.593,54.833,45.167,25.56,224,0.875,bicubic,-54.153,-43.717,+18 +hrnet_w18,38.277,61.723,55.643,44.357,21.30,224,0.875,bilinear,-54.483,-43.017,+4 +mixnet_l,38.160,61.840,54.757,45.243,7.33,224,0.875,bicubic,-55.100,-43.943,-34 +hardcorenas_e,38.137,61.863,55.173,44.827,8.07,224,0.875,bilinear,-54.813,-43.397,-14 +efficientnet_b1,38.087,61.913,54.010,45.990,7.79,256,1.000,bicubic,-54.943,-44.690,-21 +coat_lite_tiny,38.070,61.930,53.453,46.547,5.72,224,0.900,bicubic,-54.780,-45.187,-5 +gmixer_24_224,38.050,61.950,52.083,47.917,24.72,224,0.875,bicubic,-54.630,-46.437,+2 +resnetrs50,37.957,62.043,53.310,46.690,35.69,224,0.910,bicubic,-56.063,-45.540,-113 +hardcorenas_c,37.883,62.117,55.717,44.283,5.52,224,0.875,bilinear,-54.447,-42.623,+18 +gluon_resnet50_v1c,37.843,62.157,54.123,45.877,25.58,224,0.875,bicubic,-55.067,-44.587,-17 +res2net50_26w_4s,37.827,62.173,53.073,46.927,25.70,224,0.875,bilinear,-55.353,-45.597,-35 +efficientnet_es,37.770,62.230,54.967,45.033,5.44,224,0.875,bicubic,-55.140,-43.723,-20 +resnest14d,37.767,62.233,56.470,43.530,10.61,224,0.875,bilinear,-53.363,-41.860,+57 +tv_resnext50_32x4d,37.750,62.250,54.113,45.887,25.03,224,0.875,bilinear,-55.150,-44.697,-19 +ecaresnet26t,37.650,62.350,54.350,45.650,16.01,320,0.950,bicubic,-56.290,-44.540,-113 +hardcorenas_d,37.550,62.450,54.723,45.277,7.50,224,0.875,bilinear,-55.050,-43.707,-1 +res2next50,37.477,62.523,52.853,47.147,24.67,224,0.875,bilinear,-55.673,-45.807,-39 +resnet34,37.443,62.557,54.297,45.703,21.80,224,0.875,bilinear,-53.757,-43.943,+48 +pit_ti_distilled_224,37.337,62.663,55.137,44.863,5.10,224,0.900,bicubic,-53.563,-43.083,+56 +hardcorenas_b,37.243,62.757,55.073,44.927,5.18,224,0.875,bilinear,-54.697,-43.327,+22 +mobilenetv3_large_100_miil,37.210,62.790,53.513,46.487,5.48,224,0.875,bilinear,-55.040,-44.737,+10 +res2net50_48w_2s,37.117,62.883,53.333,46.667,25.29,224,0.875,bilinear,-55.673,-45.137,-17 +dla60,37.073,62.927,54.200,45.800,22.04,224,0.875,bilinear,-55.597,-44.430,-13 +rexnet_100,37.063,62.937,54.020,45.980,4.80,224,0.875,bicubic,-55.787,-44.600,-22 +regnety_016,37.017,62.983,54.093,45.907,11.20,224,0.875,bicubic,-55.983,-44.587,-38 
+tf_mixnet_l,36.987,63.013,52.583,47.417,7.33,224,0.875,bicubic,-56.053,-45.957,-45 +legacy_seresnet50,36.873,63.127,53.487,46.513,28.09,224,0.875,bilinear,-55.797,-45.163,-16 +tv_densenet121,36.810,63.190,54.033,45.967,7.98,224,0.875,bicubic,-54.590,-44.217,+31 +tf_efficientnet_lite2,36.807,63.193,53.320,46.680,6.09,260,0.890,bicubic,-55.783,-45.230,-13 +mobilenetv2_120d,36.780,63.220,54.047,45.953,5.83,224,0.875,bicubic,-55.830,-44.463,-17 +tf_efficientnet_lite1,36.737,63.263,53.590,46.410,5.42,240,0.882,bicubic,-55.573,-44.900,-3 +regnetx_016,36.683,63.317,53.297,46.703,9.19,224,0.875,bicubic,-55.857,-45.253,-12 +hardcorenas_a,36.640,63.360,54.910,45.090,5.26,224,0.875,bilinear,-54.980,-43.260,+18 +levit_128s,36.620,63.380,53.117,46.883,7.78,224,0.900,bicubic,-54.880,-45.283,+20 +efficientnet_b0,36.600,63.400,53.497,46.503,5.29,224,0.875,bicubic,-55.880,-44.943,-13 +tf_efficientnet_em,36.380,63.620,52.840,47.160,6.90,240,0.882,bicubic,-56.790,-45.830,-59 +skresnet18,36.320,63.680,54.197,45.803,11.96,224,0.875,bicubic,-53.840,-43.583,+49 +repvgg_b0,36.287,63.713,54.057,45.943,15.82,224,0.875,bilinear,-55.393,-44.183,+11 +tv_resnet50,36.177,63.823,52.803,47.197,25.56,224,0.875,bilinear,-55.963,-45.617,-3 +legacy_seresnet34,36.143,63.857,52.553,47.447,21.96,224,0.875,bilinear,-55.337,-45.647,+15 +coat_tiny,36.123,63.877,51.063,48.937,5.50,224,0.900,bicubic,-57.387,-47.627,-88 +tv_resnet34,36.087,63.913,53.533,46.467,21.80,224,0.875,bilinear,-54.203,-44.447,+43 +deit_tiny_distilled_patch16_224,36.023,63.977,54.240,45.760,5.91,224,0.900,bicubic,-55.077,-44.030,+28 +mobilenetv2_140,36.000,64.000,53.943,46.057,6.11,224,0.875,bicubic,-56.030,-44.307,-5 +tf_efficientnet_lite0,35.930,64.070,53.480,46.520,4.65,224,0.875,bicubic,-55.370,-44.610,+16 +selecsls42b,35.813,64.187,52.487,47.513,32.46,224,0.875,bicubic,-56.667,-46.193,-25 +gluon_resnet34_v1b,35.760,64.240,52.187,47.813,21.80,224,0.875,bicubic,-55.340,-45.993,+25 +dla34,35.643,64.357,52.783,47.217,15.74,224,0.875,bilinear,-55.597,-45.397,+16 +mixnet_m,35.640,64.360,52.430,47.570,5.01,224,0.875,bicubic,-56.630,-45.920,-19 +efficientnet_lite0,35.620,64.380,53.657,46.343,4.65,224,0.875,bicubic,-55.640,-44.593,+13 +ssl_resnet18,35.597,64.403,53.740,46.260,11.69,224,0.875,bilinear,-55.103,-44.280,+27 +mobilenetv3_rw,35.547,64.453,53.713,46.287,5.48,224,0.875,bicubic,-56.003,-44.557,+1 +efficientnet_es_pruned,35.390,64.610,52.850,47.150,5.44,224,0.875,bicubic,-56.310,-45.570,-6 +mobilenetv2_110d,35.293,64.707,52.830,47.170,4.52,224,0.875,bicubic,-56.057,-45.360,+6 +tf_mixnet_m,35.180,64.820,50.987,49.013,5.01,224,0.875,bicubic,-57.020,-47.433,-21 +hrnet_w18_small_v2,35.173,64.827,52.440,47.560,15.60,224,0.875,bilinear,-55.997,-45.900,+12 +resnet18d,35.127,64.873,52.890,47.110,11.71,224,0.875,bicubic,-54.863,-44.940,+30 +convit_tiny,35.047,64.953,51.787,48.213,5.71,224,0.875,bicubic,-55.483,-46.423,+23 +ese_vovnet19b_dw,34.840,65.160,52.030,47.970,6.54,224,0.875,bicubic,-57.170,-46.480,-19 +regnety_008,34.807,65.193,51.743,48.257,6.26,224,0.875,bicubic,-57.093,-46.677,-16 +pit_ti_224,34.670,65.330,52.170,47.830,4.85,224,0.900,bicubic,-55.750,-45.840,+22 +mobilenetv3_large_100,34.603,65.397,52.860,47.140,5.48,224,0.875,bicubic,-56.877,-45.460,-6 +seresnext26d_32x4d,34.543,65.457,51.543,48.457,16.81,224,0.875,bicubic,-57.897,-46.997,-39 +seresnext26t_32x4d,34.540,65.460,51.377,48.623,16.81,224,0.875,bicubic,-58.280,-47.183,-60 +mixer_b16_224,34.423,65.577,48.093,51.907,59.88,224,0.875,bicubic,-56.717,-49.307,+4 
+resnet26d,34.273,65.727,51.687,48.313,16.01,224,0.875,bicubic,-57.957,-46.763,-33 +tf_efficientnet_es,34.263,65.737,51.350,48.650,5.44,224,0.875,bicubic,-57.837,-47.090,-29 +fbnetc_100,34.253,65.747,51.180,48.820,5.57,224,0.875,bilinear,-57.017,-46.650,-6 +regnety_006,34.150,65.850,51.277,48.723,6.06,224,0.875,bicubic,-57.420,-47.153,-17 +tf_mobilenetv3_large_100,33.950,66.050,51.490,48.510,5.48,224,0.875,bilinear,-57.470,-46.770,-12 +regnetx_008,33.770,66.230,50.547,49.453,7.26,224,0.875,bicubic,-57.410,-47.833,-4 +mnasnet_100,33.763,66.237,51.170,48.830,4.38,224,0.875,bicubic,-57.437,-46.880,-7 +vit_tiny_r_s16_p8_384,33.650,66.350,50.683,49.317,6.36,384,1.000,bicubic,-58.080,-47.747,-27 +vit_tiny_patch16_384,33.550,66.450,51.077,48.923,5.79,384,1.000,bicubic,-59.870,-47.753,-111 +semnasnet_100,33.520,66.480,50.787,49.213,3.89,224,0.875,bicubic,-58.140,-47.483,-25 +resnet26,33.500,66.500,50.927,49.073,16.00,224,0.875,bicubic,-57.940,-47.353,-19 +mixnet_s,33.480,66.520,50.997,49.003,4.13,224,0.875,bicubic,-58.300,-47.303,-32 +spnasnet_100,33.477,66.523,51.267,48.733,4.42,224,0.875,bilinear,-57.133,-46.683,+1 +vgg19_bn,33.230,66.770,50.803,49.197,143.68,224,0.875,bilinear,-57.770,-47.307,-5 +ghostnet_100,33.207,66.793,51.163,48.837,5.18,224,0.875,bilinear,-57.233,-46.667,+2 +regnetx_006,33.157,66.843,50.250,49.750,6.20,224,0.875,bicubic,-57.603,-47.850,-4 +resnet18,33.067,66.933,51.170,48.830,11.69,224,0.875,bilinear,-55.083,-45.950,+19 +legacy_seresnext26_32x4d,32.757,67.243,49.237,50.763,16.79,224,0.875,bicubic,-59.813,-49.183,-66 +deit_tiny_patch16_224,32.667,67.333,50.273,49.727,5.72,224,0.900,bicubic,-56.953,-47.687,+7 +hrnet_w18_small,32.667,67.333,50.587,49.413,13.19,224,0.875,bilinear,-57.213,-47.313,+3 +legacy_seresnet18,32.600,67.400,50.340,49.660,11.78,224,0.875,bicubic,-56.670,-47.340,+9 +mobilenetv2_100,32.523,67.477,50.800,49.200,3.50,224,0.875,bicubic,-57.307,-47.030,+2 +regnetx_004,32.517,67.483,49.343,50.657,5.16,224,0.875,bicubic,-56.943,-48.427,+4 +gluon_resnet18_v1b,32.407,67.593,49.727,50.273,11.69,224,0.875,bicubic,-56.253,-47.373,+9 +regnety_004,32.333,67.667,49.453,50.547,4.34,224,0.875,bicubic,-58.447,-48.627,-14 +tf_mixnet_s,32.183,67.817,48.493,51.507,4.13,224,0.875,bicubic,-59.497,-49.957,-43 +vit_tiny_patch16_224,32.023,67.977,49.017,50.983,5.72,224,0.900,bicubic,-59.907,-49.323,-49 +tf_mobilenetv3_large_075,31.867,68.133,49.110,50.890,3.99,224,0.875,bilinear,-58.453,-48.760,-9 +tf_mobilenetv3_large_minimal_100,31.597,68.403,49.337,50.663,3.92,224,0.875,bilinear,-57.583,-47.983,+3 +vit_tiny_r_s16_p8_224,30.807,69.193,47.657,52.343,6.34,224,0.900,bicubic,-58.533,-50.043,-1 +vgg16_bn,30.357,69.643,47.260,52.740,138.37,224,0.875,bilinear,-60.183,-50.730,-16 +regnety_002,29.687,70.313,46.787,53.213,3.16,224,0.875,bicubic,-58.513,-50.643,+3 +vgg13_bn,28.883,71.117,46.737,53.263,133.05,224,0.875,bilinear,-60.317,-50.793,-2 +regnetx_002,28.860,71.140,45.420,54.580,2.68,224,0.875,bicubic,-58.520,-51.570,+4 +vgg19,28.580,71.420,45.170,54.830,143.67,224,0.875,bilinear,-61.100,-52.380,-10 +dla60x_c,28.447,71.553,46.193,53.807,1.32,224,0.875,bilinear,-58.663,-50.947,+4 +vgg11_bn,28.423,71.577,46.453,53.547,132.87,224,0.875,bilinear,-59.967,-50.817,-3 +vgg16,27.877,72.123,44.673,55.327,138.36,224,0.875,bilinear,-61.483,-52.847,-10 +tf_mobilenetv3_small_100,27.297,72.703,44.420,55.580,2.54,224,0.875,bilinear,-58.663,-51.980,+3 +mixer_l16_224,26.853,73.147,37.923,62.077,208.20,224,0.875,bicubic,-60.117,-56.137,+1 
+vgg11,26.533,73.467,43.460,56.540,132.86,224,0.875,bilinear,-60.807,-53.650,-2
+vgg13,26.267,73.733,43.370,56.630,133.05,224,0.875,bilinear,-61.303,-53.750,-5
+dla46x_c,26.217,73.783,43.780,56.220,1.07,224,0.875,bilinear,-59.263,-52.660,0
+tf_mobilenetv3_small_075,26.200,73.800,43.637,56.363,2.04,224,0.875,bilinear,-58.330,-52.253,+1
+dla46_c,25.490,74.510,43.800,56.200,1.30,224,0.875,bilinear,-59.170,-52.400,-1
+tf_mobilenetv3_small_minimal_100,25.087,74.913,42.930,57.070,2.04,224,0.875,bilinear,-57.583,-52.070,0
diff --git a/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-real.csv b/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-real.csv
new file mode 100644
index 0000000000..4433bd1070
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet-real.csv
@@ -0,0 +1,421 @@
+model,top1,top1_err,top5,top5_err,param_count,img_size,cropt_pct,interpolation,top1_diff,top5_diff,rank_diff
+tf_efficientnet_l2_ns,90.563,9.437,98.779,1.221,480.31,800,0.960,bicubic,+2.211,+0.129,0
+tf_efficientnet_l2_ns_475,90.537,9.463,98.710,1.290,480.31,475,0.936,bicubic,+2.303,+0.164,0
+cait_m48_448,90.196,9.804,98.484,1.516,356.46,448,1.000,bicubic,+3.712,+0.730,+3
+vit_large_patch16_384,90.196,9.804,98.661,1.339,304.72,384,1.000,bicubic,+3.116,+0.361,0
+tf_efficientnet_b7_ns,90.100,9.900,98.614,1.386,66.35,600,0.949,bicubic,+3.260,+0.520,0
+cait_m36_384,90.046,9.954,98.493,1.507,271.22,384,1.000,bicubic,+3.992,+0.763,+8
+dm_nfnet_f6,90.046,9.954,98.546,1.454,438.36,576,0.956,bicubic,+3.902,+0.816,+5
+swin_large_patch4_window12_384,90.027,9.973,98.657,1.343,196.74,384,1.000,bicubic,+2.879,+0.423,-5
+tf_efficientnetv2_l_in21ft1k,90.008,9.992,98.619,1.381,118.52,480,1.000,bicubic,+3.704,+0.641,+1
+swin_base_patch4_window12_384,89.995,10.005,98.695,1.304,87.90,384,1.000,bicubic,+3.563,+0.637,-2
+vit_base_patch16_384,89.989,10.011,98.678,1.322,86.86,384,1.000,bicubic,+3.983,+0.678,+4
+cait_s36_384,89.844,10.156,98.427,1.573,68.37,384,1.000,bicubic,+4.384,+0.947,+10
+swin_large_patch4_window7_224,89.796,10.204,98.640,1.360,196.53,224,0.900,bicubic,+3.477,+0.744,-4
+vit_large_r50_s32_384,89.794,10.206,98.514,1.486,329.09,384,1.000,bicubic,+3.610,+0.596,-3
+tf_efficientnet_b6_ns,89.782,10.218,98.510,1.490,43.04,528,0.942,bicubic,+3.330,+0.628,-8
+tf_efficientnetv2_m_in21ft1k,89.775,10.225,98.503,1.497,54.14,480,1.000,bicubic,+4.187,+0.751,+3
+tf_efficientnet_b5_ns,89.651,10.349,98.482,1.518,30.39,456,0.934,bicubic,+3.563,+0.730,-4
+tf_efficientnet_b8_ap,89.581,10.419,98.305,1.695,87.41,672,0.954,bicubic,+4.211,+1.011,+8
+dm_nfnet_f4,89.557,10.443,98.303,1.697,316.07,512,0.951,bicubic,+3.843,+0.783,-1
+cait_s24_384,89.502,10.498,98.362,1.638,47.06,384,1.000,bicubic,+4.456,+1.016,+12
+dm_nfnet_f3,89.485,10.515,98.399,1.601,254.92,416,0.940,bicubic,+3.963,+0.937,-1
+dm_nfnet_f5,89.461,10.539,98.324,1.676,377.21,544,0.954,bicubic,+3.647,+0.836,-5
+deit_base_distilled_patch16_384,89.429,10.571,98.441,1.559,87.63,384,1.000,bicubic,+4.007,+1.109,+1
+tf_efficientnet_b7_ap,89.429,10.571,98.347,1.653,66.35,600,0.949,bicubic,+4.309,+1.096,+5
+tf_efficientnetv2_l,89.367,10.633,98.275,1.725,118.52,480,1.000,bicubic,+3.877,+0.903,-4
+tf_efficientnet_b8,89.355,10.645,98.303,1.697,87.41,672,0.954,bicubic,+3.985,+0.913,-1
+tf_efficientnet_b6_ap,89.342,10.658,98.281,1.719,43.04,528,0.942,bicubic,+4.554,+1.143,+13
+vit_large_patch16_224,89.314,10.686,98.392,1.608,304.33,224,0.900,bicubic,+3.472,+0.568,-12
+tf_efficientnet_b4_ns,89.305,10.694,98.347,1.653,19.34,380,0.922,bicubic,+4.143,+0.877,-1 +tf_efficientnetv2_m,89.284,10.716,98.236,1.764,54.14,480,1.000,bicubic,+4.240,+0.958,+3 +swin_base_patch4_window7_224,89.145,10.855,98.429,1.571,87.77,224,0.900,bicubic,+3.893,+0.867,-4 +eca_nfnet_l2,89.141,10.859,98.315,1.685,56.72,384,1.000,bicubic,+4.443,+1.051,+10 +cait_xs24_384,89.139,10.861,98.290,1.710,26.67,384,1.000,bicubic,+5.077,+1.402,+27 +ig_resnext101_32x48d,89.120,10.880,98.130,1.870,828.41,224,0.875,bilinear,+3.692,+0.558,-11 +ig_resnext101_32x32d,89.111,10.889,98.181,1.819,468.53,224,0.875,bilinear,+4.017,+0.743,-5 +tf_efficientnet_b7,89.086,10.914,98.183,1.817,66.35,600,0.949,bicubic,+4.150,+0.979,+1 +ecaresnet269d,89.069,10.931,98.234,1.766,102.09,352,1.000,bicubic,+4.093,+1.008,-2 +resmlp_big_24_224_in22ft1k,89.011,10.989,98.215,1.785,129.14,224,0.875,bicubic,+4.617,+1.095,+12 +dm_nfnet_f2,89.009,10.991,98.189,1.810,193.78,352,0.920,bicubic,+3.945,+0.950,-8 +efficientnetv2_rw_m,88.987,11.013,98.213,1.787,53.24,416,1.000,bicubic,+4.179,+1.065,-1 +tf_efficientnet_b5_ap,88.938,11.062,98.164,1.836,30.39,456,0.934,bicubic,+4.686,+1.190,+14 +dm_nfnet_f1,88.925,11.075,98.115,1.885,132.63,320,0.910,bicubic,+4.299,+1.015,+1 +tf_efficientnetv2_s_in21ft1k,88.904,11.096,98.277,1.723,21.46,384,1.000,bicubic,+4.602,+1.025,+9 +vit_base_patch16_224,88.866,11.134,98.230,1.770,86.57,224,0.900,bicubic,+4.334,+0.936,0 +resnetrs420,88.840,11.160,98.034,1.966,191.89,416,1.000,bicubic,+3.832,+0.910,-11 +ig_resnext101_32x16d,88.834,11.166,98.049,1.951,194.03,224,0.875,bilinear,+4.664,+0.853,+10 +resnetrs270,88.834,11.166,98.136,1.864,129.86,352,1.000,bicubic,+4.400,+1.166,+1 +vit_small_r26_s32_384,88.819,11.181,98.337,1.663,36.47,384,1.000,bicubic,+4.773,+1.009,+14 +vit_base_r50_s16_384,88.808,11.192,98.232,1.768,98.95,384,1.000,bicubic,+3.836,+0.944,-13 +seresnet152d,88.795,11.205,98.172,1.828,66.84,320,1.000,bicubic,+4.433,+1.132,+1 +swsl_resnext101_32x8d,88.770,11.230,98.147,1.853,88.79,224,0.875,bilinear,+4.486,+0.971,+2 +tf_efficientnet_b6,88.761,11.239,98.064,1.937,43.04,528,0.942,bicubic,+4.651,+1.178,+6 +resnetrs350,88.759,11.241,98.029,1.971,163.96,384,1.000,bicubic,+4.039,+1.041,-12 +vit_base_patch16_224_miil,88.737,11.262,98.027,1.973,86.54,224,0.875,bilinear,+4.469,+1.225,0 +resnetv2_152x2_bitm,88.725,11.275,98.307,1.693,236.34,448,1.000,bilinear,+4.215,+0.875,-9 +regnety_160,88.697,11.303,98.068,1.932,83.59,288,1.000,bicubic,+5.011,+1.292,+17 +pit_b_distilled_224,88.676,11.324,98.093,1.907,74.79,224,0.900,bicubic,+4.532,+1.237,0 +vit_small_patch16_384,88.652,11.348,98.232,1.768,22.20,384,1.000,bicubic,+4.850,+1.130,+13 +eca_nfnet_l1,88.624,11.376,98.132,1.868,41.41,320,1.000,bicubic,+4.614,+1.104,+5 +resnetrs200,88.605,11.395,98.034,1.966,93.21,320,1.000,bicubic,+4.539,+1.160,-1 +resnetv2_152x4_bitm,88.545,11.455,98.192,1.808,936.53,480,1.000,bilinear,+3.629,+0.750,-23 +resnet200d,88.543,11.457,97.959,2.041,64.69,320,1.000,bicubic,+4.581,+1.135,+3 +resnest269e,88.522,11.478,98.027,1.973,110.93,416,0.928,bicubic,+4.004,+1.041,-18 +efficientnetv2_rw_s,88.473,11.527,97.974,2.026,23.94,384,1.000,bicubic,+4.665,+1.250,+6 +resnetv2_101x3_bitm,88.464,11.536,98.157,1.843,387.93,448,1.000,bilinear,+4.024,+0.775,-18 +cait_s24_224,88.447,11.553,97.957,2.043,46.92,224,1.000,bicubic,+4.995,+1.393,+11 +resnetv2_50x3_bitm,88.443,11.557,98.200,1.800,217.32,448,1.000,bilinear,+4.429,+1.076,-5 +resmlp_big_24_distilled_224,88.443,11.557,97.940,2.060,129.14,224,0.875,bicubic,+4.853,+1.292,+9 
+resnest200e,88.432,11.568,98.042,1.958,70.20,320,0.909,bicubic,+4.600,+1.148,-1 +tf_efficientnet_b3_ns,88.426,11.574,98.029,1.971,12.23,300,0.904,bicubic,+4.378,+1.119,-9 +vit_large_r50_s32_224,88.426,11.574,98.085,1.915,328.99,224,0.900,bicubic,+3.992,+0.921,-22 +tf_efficientnetv2_s,88.402,11.598,97.927,2.073,21.46,384,1.000,bicubic,+4.508,+1.229,-6 +efficientnet_b4,88.372,11.628,97.961,2.039,19.34,384,1.000,bicubic,+4.944,+1.365,+5 +resnet152d,88.355,11.645,97.935,2.065,60.21,320,1.000,bicubic,+4.675,+1.197,0 +tf_efficientnet_b4_ap,88.349,11.651,97.893,2.107,19.34,380,0.922,bicubic,+5.101,+1.501,+8 +tf_efficientnet_b5,88.321,11.679,97.912,2.088,30.39,456,0.934,bicubic,+4.509,+1.164,-7 +resnetrs152,88.251,11.749,97.737,2.263,86.62,320,1.000,bicubic,+4.539,+1.123,-5 +deit_base_distilled_patch16_224,88.214,11.786,97.914,2.086,87.34,224,0.900,bicubic,+4.826,+1.426,+1 +resnetv2_152x2_bit_teacher_384,88.150,11.850,98.051,1.949,236.34,384,1.000,bicubic,+4.306,+0.933,-12 +ig_resnext101_32x8d,88.146,11.854,97.856,2.144,88.79,224,0.875,bilinear,+5.458,+1.220,+20 +cait_xxs36_384,88.140,11.860,97.908,2.092,17.37,384,1.000,bicubic,+5.946,+1.760,+34 +dm_nfnet_f0,88.125,11.875,97.854,2.146,71.49,256,0.900,bicubic,+4.739,+1.282,-2 +swsl_resnext101_32x4d,88.099,11.901,97.967,2.033,44.18,224,0.875,bilinear,+4.869,+1.207,+1 +eca_nfnet_l0,87.980,12.020,97.871,2.129,24.14,288,1.000,bicubic,+5.400,+1.381,+19 +nfnet_l0,87.967,12.033,97.867,2.133,35.07,288,1.000,bicubic,+5.217,+1.351,+12 +tf_efficientnet_b4,87.963,12.037,97.739,2.261,19.34,380,0.922,bicubic,+4.941,+1.439,+6 +resnet101d,87.941,12.059,97.908,2.092,44.57,320,1.000,bicubic,+4.919,+1.462,+4 +regnety_032,87.937,12.063,97.891,2.109,19.44,288,1.000,bicubic,+5.213,+1.467,+10 +vit_base_patch32_384,87.909,12.091,98.012,1.988,88.30,384,1.000,bicubic,+4.559,+1.176,-8 +twins_svt_large,87.901,12.099,97.581,2.419,99.27,224,0.900,bicubic,+4.223,+0.987,-15 +twins_pcpvt_large,87.877,12.123,97.856,2.144,60.99,224,0.900,bicubic,+4.737,+1.258,-5 +deit_base_patch16_384,87.845,12.155,97.510,2.490,86.86,384,1.000,bicubic,+4.739,+1.138,-4 +tresnet_xl_448,87.796,12.204,97.459,2.541,78.44,448,0.875,bilinear,+4.746,+1.285,-3 +resnetv2_50x1_bit_distilled,87.787,12.213,97.899,2.101,25.55,224,0.875,bicubic,+4.969,+1.377,+1 +tresnet_m,87.736,12.264,97.523,2.477,31.39,224,0.875,bilinear,+4.656,+1.405,-6 +twins_pcpvt_base,87.736,12.264,97.726,2.274,43.83,224,0.900,bicubic,+5.028,+1.380,+3 +resnetv2_101x1_bitm,87.681,12.319,97.940,2.060,44.54,448,1.000,bilinear,+5.349,+1.422,+11 +swin_small_patch4_window7_224,87.664,12.336,97.566,2.434,49.61,224,0.900,bicubic,+4.452,+1.244,-13 +twins_svt_base,87.638,12.362,97.523,2.477,56.07,224,0.900,bicubic,+4.502,+1.105,-12 +pnasnet5large,87.636,12.364,97.485,2.515,86.06,331,0.911,bicubic,+4.854,+1.445,-4 +swsl_resnext101_32x16d,87.615,12.386,97.820,2.180,194.03,224,0.875,bilinear,+4.269,+0.974,-19 +swsl_resnext50_32x4d,87.600,12.400,97.651,2.349,25.03,224,0.875,bilinear,+5.418,+1.421,+14 +tf_efficientnet_b2_ns,87.557,12.443,97.628,2.372,9.11,260,0.890,bicubic,+5.177,+1.380,+2 +levit_384,87.553,12.447,97.545,2.455,39.13,224,0.900,bicubic,+4.967,+1.529,-2 +ecaresnet50t,87.538,12.462,97.643,2.357,25.57,320,0.950,bicubic,+5.192,+1.505,+2 +resnetv2_152x2_bit_teacher,87.493,12.507,97.812,2.188,236.34,224,0.875,bicubic,+4.631,+1.244,-12 +efficientnet_b3,87.435,12.565,97.681,2.319,12.23,320,1.000,bicubic,+5.193,+1.567,+7 +cait_xxs24_384,87.416,12.584,97.619,2.381,12.03,384,1.000,bicubic,+6.450,+1.973,+49 
+resnet51q,87.395,12.605,97.587,2.413,35.70,288,1.000,bilinear,+5.035,+1.407,-3 +coat_lite_small,87.380,12.620,97.365,2.635,19.84,224,0.900,bicubic,+5.072,+1.515,-1 +tresnet_l_448,87.377,12.623,97.485,2.515,55.99,448,0.875,bilinear,+5.109,+1.509,+2 +nasnetalarge,87.350,12.650,97.417,2.583,88.75,331,0.911,bicubic,+4.730,+1.371,-11 +ecaresnet101d,87.288,12.712,97.562,2.438,44.57,224,0.875,bicubic,+5.116,+1.516,+4 +resnest101e,87.284,12.716,97.560,2.440,48.28,256,0.875,bilinear,+4.394,+1.240,-21 +pit_s_distilled_224,87.277,12.723,97.500,2.500,24.04,224,0.900,bicubic,+5.281,+1.702,+6 +resnetrs101,87.247,12.753,97.457,2.543,63.62,288,0.940,bicubic,+4.959,+1.449,-4 +mixer_b16_224_miil,87.226,12.774,97.410,2.590,59.88,224,0.875,bilinear,+4.918,+1.694,-7 +tresnet_xl,87.224,12.776,97.400,2.600,78.44,224,0.875,bilinear,+5.170,+1.463,+1 +convit_base,87.200,12.800,97.286,2.714,86.54,224,0.875,bicubic,+4.910,+1.348,-8 +tf_efficientnet_b3_ap,87.192,12.808,97.380,2.620,12.23,300,0.904,bicubic,+5.370,+1.756,+5 +visformer_small,87.181,12.819,97.323,2.677,40.22,224,0.900,bicubic,+5.075,+1.451,-3 +convit_small,87.053,12.947,97.350,2.650,27.78,224,0.875,bicubic,+5.627,+1.606,+15 +tf_efficientnetv2_b3,87.032,12.968,97.303,2.697,14.36,300,0.904,bicubic,+5.062,+1.521,-1 +deit_small_distilled_patch16_224,86.993,13.007,97.316,2.684,22.44,224,0.900,bicubic,+5.793,+1.938,+23 +resmlp_36_distilled_224,86.993,13.007,97.278,2.722,44.69,224,0.875,bicubic,+5.833,+1.790,+24 +tnt_s_patch16_224,86.903,13.097,97.368,2.632,23.76,224,0.900,bicubic,+5.385,+1.620,+6 +vit_small_patch16_224,86.869,13.131,97.613,2.387,22.05,224,0.900,bicubic,+5.467,+1.479,+11 +vit_small_r26_s32_224,86.863,13.137,97.528,2.472,36.43,224,0.900,bicubic,+5.005,+1.506,-5 +ssl_resnext101_32x16d,86.856,13.143,97.517,2.483,194.03,224,0.875,bilinear,+5.013,+1.421,-5 +rexnet_200,86.846,13.154,97.276,2.724,16.37,224,0.875,bicubic,+5.214,+1.608,-1 +tf_efficientnet_b3,86.835,13.165,97.297,2.703,12.23,300,0.904,bicubic,+5.199,+1.579,-3 +deit_base_patch16_224,86.829,13.171,97.049,2.951,86.57,224,0.900,bicubic,+4.831,+1.315,-12 +tresnet_m_448,86.820,13.180,97.212,2.788,31.39,448,0.875,bilinear,+5.106,+1.640,-7 +ssl_resnext101_32x8d,86.807,13.193,97.466,2.534,88.79,224,0.875,bilinear,+5.191,+1.428,-4 +swsl_resnet50,86.807,13.193,97.498,2.502,25.56,224,0.875,bilinear,+5.641,+1.526,+13 +tf_efficientnet_lite4,86.803,13.197,97.263,2.737,13.01,380,0.920,bilinear,+5.267,+1.595,-5 +coat_mini,86.793,13.207,97.162,2.837,10.34,224,0.900,bicubic,+5.525,+1.770,+7 +tresnet_l,86.767,13.233,97.271,2.729,55.99,224,0.875,bilinear,+5.277,+1.647,-3 +twins_svt_small,86.756,13.244,97.175,2.825,24.06,224,0.900,bicubic,+5.074,+1.505,-12 +levit_256,86.728,13.272,97.259,2.741,18.89,224,0.900,bicubic,+5.218,+1.769,-7 +seresnext50_32x4d,86.699,13.301,97.214,2.786,27.56,224,0.875,bicubic,+5.433,+1.594,+4 +pit_b_224,86.686,13.314,96.898,3.102,73.76,224,0.900,bicubic,+4.240,+1.188,-38 +tf_efficientnet_b1_ns,86.669,13.331,97.378,2.622,7.79,240,0.882,bicubic,+5.281,+1.640,-4 +swin_tiny_patch4_window7_224,86.664,13.336,97.197,2.803,28.29,224,0.900,bicubic,+5.286,+1.657,-4 +gernet_l,86.654,13.346,97.186,2.814,31.08,256,0.875,bilinear,+5.300,+1.650,-4 +wide_resnet50_2,86.647,13.353,97.214,2.786,68.88,224,0.875,bicubic,+5.191,+1.682,-10 +efficientnet_el,86.635,13.366,97.175,2.825,10.59,300,0.904,bicubic,+5.319,+1.649,-5 +resmlp_24_distilled_224,86.622,13.378,97.135,2.865,30.02,224,0.875,bicubic,+5.856,+1.917,+16 +twins_pcpvt_small,86.620,13.380,97.340,2.660,24.11,224,0.900,bicubic,+5.532,+1.698,+3 
+nf_resnet50,86.609,13.391,97.293,2.707,25.56,288,0.940,bicubic,+5.949,+1.957,+17 +resnest50d_4s2x40d,86.592,13.408,97.269,2.731,30.42,224,0.875,bicubic,+5.484,+1.711,-1 +efficientnet_b3_pruned,86.581,13.419,97.190,2.810,9.86,300,0.904,bicubic,+5.723,+1.948,+9 +repvgg_b3,86.566,13.434,97.139,2.861,123.09,224,0.875,bilinear,+6.074,+1.879,+19 +ssl_resnext101_32x4d,86.479,13.521,97.468,2.532,44.18,224,0.875,bilinear,+5.555,+1.740,+4 +ecaresnet50d,86.470,13.530,97.186,2.814,25.58,224,0.875,bicubic,+5.878,+1.866,+15 +gluon_resnet152_v1s,86.468,13.532,97.109,2.891,60.32,224,0.875,bicubic,+5.452,+1.697,-2 +resnest50d_1s4x24d,86.447,13.553,97.148,2.852,25.68,224,0.875,bicubic,+5.459,+1.826,-2 +resnetv2_50x1_bitm,86.436,13.564,97.602,2.398,25.55,448,1.000,bilinear,+6.094,+1.918,+22 +repvgg_b3g4,86.361,13.639,97.054,2.946,83.83,224,0.875,bilinear,+6.149,+1.944,+32 +legacy_senet154,86.342,13.658,96.928,3.072,115.09,224,0.875,bilinear,+5.032,+1.432,-17 +cait_xxs36_224,86.340,13.660,97.111,2.889,17.30,224,1.000,bicubic,+6.590,+2.245,+54 +gernet_m,86.319,13.681,97.096,2.904,21.14,224,0.875,bilinear,+5.587,+1.912,+3 +pit_s_224,86.316,13.684,97.045,2.955,23.46,224,0.900,bicubic,+5.222,+1.713,-12 +vit_small_patch32_384,86.312,13.688,97.417,2.583,22.92,384,1.000,bicubic,+5.832,+1.819,+9 +efficientnet_b2,86.304,13.696,96.990,3.010,9.11,288,1.000,bicubic,+5.692,+1.672,+3 +gluon_senet154,86.278,13.722,96.949,3.051,115.09,224,0.875,bicubic,+5.044,+1.601,-20 +resnest50d,86.240,13.761,97.073,2.927,27.48,224,0.875,bilinear,+5.266,+1.695,-11 +ecaresnet101d_pruned,86.210,13.790,97.335,2.665,24.88,224,0.875,bicubic,+5.392,+1.707,-6 +efficientnet_el_pruned,86.192,13.807,97.026,2.974,10.59,300,0.904,bicubic,+5.892,+1.998,+17 +cspdarknet53,86.182,13.818,97.013,2.987,27.64,256,0.887,bilinear,+6.124,+1.929,+27 +inception_v4,86.169,13.831,96.919,3.081,42.68,299,0.875,bicubic,+6.001,+1.951,+22 +rexnet_150,86.154,13.846,97.058,2.942,9.73,224,0.875,bicubic,+5.844,+1.892,+11 +inception_resnet_v2,86.133,13.867,97.043,2.957,55.84,299,0.897,bicubic,+5.675,+1.737,+4 +ssl_resnext50_32x4d,86.086,13.914,97.212,2.788,25.03,224,0.875,bilinear,+5.768,+1.806,+8 +tf_efficientnet_el,86.084,13.916,96.964,3.036,10.59,300,0.904,bicubic,+5.834,+1.836,+13 +gluon_resnet101_v1s,86.054,13.946,97.022,2.978,44.67,224,0.875,bicubic,+5.752,+1.862,+8 +ecaresnetlight,86.052,13.948,97.069,2.931,30.16,224,0.875,bicubic,+5.590,+1.819,-1 +gluon_seresnext101_32x4d,86.032,13.968,96.977,3.023,48.96,224,0.875,bicubic,+5.128,+1.683,-19 +resnet50d,86.009,13.991,96.979,3.021,25.58,224,0.875,bicubic,+5.479,+1.819,-8 +ecaresnet26t,85.983,14.017,97.041,2.959,16.01,320,0.950,bicubic,+6.129,+1.957,+29 +tf_efficientnet_b2_ap,85.975,14.025,96.810,3.190,9.11,260,0.890,bicubic,+5.675,+1.592,+4 +gluon_seresnext101_64x4d,85.960,14.040,96.979,3.021,88.23,224,0.875,bicubic,+5.066,+1.671,-22 +vit_base_patch32_224,85.956,14.044,97.130,2.869,88.22,224,0.900,bicubic,+5.231,+1.562,-17 +gluon_resnet152_v1d,85.917,14.083,96.812,3.188,60.21,224,0.875,bicubic,+5.443,+1.606,-9 +vit_large_patch32_384,85.909,14.091,97.368,2.632,306.63,384,1.000,bicubic,+4.403,+1.276,-51 +tf_efficientnet_b2,85.902,14.098,96.862,3.139,9.11,260,0.890,bicubic,+5.816,+1.954,+10 +tf_efficientnetv2_b2,85.900,14.100,96.889,3.111,10.10,260,0.890,bicubic,+5.692,+1.847,+5 +seresnet50,85.857,14.143,97.004,2.995,28.09,224,0.875,bicubic,+5.583,+1.934,-1 +repvgg_b2g4,85.855,14.145,96.812,3.188,61.76,224,0.875,bilinear,+6.489,+2.124,+42 
+gluon_resnet101_v1d,85.849,14.151,96.663,3.337,44.57,224,0.875,bicubic,+5.435,+1.649,-12 +resnet50,85.804,14.196,96.712,3.288,25.56,224,0.875,bicubic,+6.766,+2.322,+63 +mixnet_xl,85.798,14.202,96.712,3.288,11.90,224,0.875,bicubic,+5.322,+1.776,-18 +ens_adv_inception_resnet_v2,85.781,14.220,96.759,3.241,55.84,299,0.897,bicubic,+5.799,+1.821,+7 +tf_efficientnet_lite3,85.755,14.245,96.887,3.113,8.20,300,0.904,bilinear,+5.935,+1.973,+18 +ese_vovnet39b,85.751,14.249,96.891,3.109,24.57,224,0.875,bicubic,+6.431,+2.179,+38 +gluon_resnext101_32x4d,85.746,14.254,96.635,3.365,44.18,224,0.875,bicubic,+5.412,+1.709,-15 +legacy_seresnext101_32x4d,85.746,14.254,96.757,3.243,48.96,224,0.875,bilinear,+5.518,+1.739,-7 +cspresnext50,85.740,14.260,96.840,3.160,20.57,224,0.875,bilinear,+5.700,+1.896,0 +regnety_320,85.727,14.273,96.725,3.275,145.05,224,0.875,bicubic,+4.915,+1.481,-36 +cspresnet50,85.721,14.279,96.795,3.205,21.62,256,0.887,bilinear,+6.147,+2.083,+23 +xception71,85.697,14.303,96.776,3.224,42.34,299,0.903,bicubic,+5.823,+1.854,+5 +resmlp_big_24_224,85.695,14.305,96.426,3.574,129.14,224,0.875,bicubic,+4.667,+1.404,-49 +gluon_resnext101_64x4d,85.693,14.307,96.644,3.356,83.46,224,0.875,bicubic,+5.089,+1.656,-34 +efficientnet_em,85.684,14.316,96.938,3.062,6.90,240,0.882,bicubic,+6.432,+2.144,+38 +deit_small_patch16_224,85.678,14.322,96.906,3.094,22.05,224,0.900,bicubic,+5.822,+1.854,+2 +pit_xs_distilled_224,85.657,14.343,96.667,3.333,11.00,224,0.900,bicubic,+6.351,+2.303,+31 +efficientnet_b2_pruned,85.642,14.358,96.746,3.254,8.31,260,0.890,bicubic,+5.726,+1.890,-5 +dpn107,85.640,14.360,96.729,3.271,86.92,224,0.875,bicubic,+5.484,+1.819,-14 +resmlp_36_224,85.620,14.380,96.795,3.205,44.69,224,0.875,bicubic,+5.850,+1.909,+4 +levit_192,85.580,14.420,96.740,3.260,10.95,224,0.900,bicubic,+5.738,+1.954,-2 +gluon_resnet152_v1c,85.580,14.420,96.646,3.354,60.21,224,0.875,bicubic,+5.670,+1.806,-8 +ecaresnet50d_pruned,85.580,14.420,96.936,3.064,19.94,224,0.875,bicubic,+5.864,+2.056,+7 +resnext50d_32x4d,85.569,14.431,96.748,3.252,25.05,224,0.875,bicubic,+5.893,+1.882,+7 +tf_efficientnetv2_b1,85.561,14.439,96.727,3.273,8.14,240,0.882,bicubic,+6.099,+2.005,+14 +regnety_120,85.543,14.457,96.785,3.215,51.82,224,0.875,bicubic,+5.177,+1.659,-36 +regnetx_320,85.524,14.476,96.669,3.331,107.81,224,0.875,bicubic,+5.278,+1.643,-27 +nf_regnet_b1,85.505,14.495,96.791,3.209,10.22,288,0.900,bicubic,+6.213,+2.043,+22 +dpn92,85.494,14.506,96.635,3.365,37.67,224,0.875,bicubic,+5.486,+1.799,-19 +gluon_resnet152_v1b,85.475,14.525,96.550,3.450,60.19,224,0.875,bicubic,+5.789,+1.814,0 +rexnet_130,85.473,14.527,96.684,3.316,7.56,224,0.875,bicubic,+5.973,+2.002,+6 +resnetrs50,85.462,14.538,96.736,3.264,35.69,224,0.910,bicubic,+5.570,+1.767,-17 +dpn131,85.398,14.602,96.639,3.361,79.25,224,0.875,bicubic,+5.576,+1.929,-11 +regnetx_160,85.390,14.610,96.637,3.363,54.28,224,0.875,bicubic,+5.534,+1.807,-15 +dla102x2,85.366,14.634,96.629,3.371,41.28,224,0.875,bilinear,+5.918,+1.989,+5 +gluon_seresnext50_32x4d,85.336,14.664,96.667,3.333,27.56,224,0.875,bicubic,+5.418,+1.845,-24 +xception65,85.315,14.685,96.637,3.363,39.92,299,0.903,bicubic,+5.763,+1.983,-2 +skresnext50_32x4d,85.313,14.687,96.390,3.610,27.48,224,0.875,bicubic,+5.157,+1.748,-32 +dpn98,85.311,14.689,96.469,3.531,61.57,224,0.875,bicubic,+5.669,+1.871,-7 +gluon_resnet101_v1c,85.304,14.696,96.405,3.595,44.57,224,0.875,bicubic,+5.770,+1.827,-4 +dpn68b,85.291,14.709,96.464,3.536,12.61,224,0.875,bicubic,+6.076,+2.050,+15 
+regnety_064,85.283,14.717,96.639,3.361,30.58,224,0.875,bicubic,+5.561,+1.871,-15 +resnetblur50,85.283,14.717,96.531,3.470,25.56,224,0.875,bicubic,+5.997,+1.892,+8 +resmlp_24_224,85.268,14.732,96.492,3.508,30.02,224,0.875,bicubic,+5.894,+1.946,-3 +coat_lite_mini,85.251,14.749,96.680,3.320,11.01,224,0.900,bicubic,+6.163,+2.076,+15 +regnety_080,85.245,14.755,96.633,3.367,39.18,224,0.875,bicubic,+5.369,+1.803,-30 +cait_xxs24_224,85.228,14.773,96.712,3.288,11.96,224,1.000,bicubic,+6.842,+2.402,+44 +resnext50_32x4d,85.221,14.779,96.526,3.474,25.03,224,0.875,bicubic,+5.453,+1.928,-23 +resnext101_32x8d,85.187,14.813,96.445,3.555,88.79,224,0.875,bilinear,+5.879,+1.927,-4 +gluon_inception_v3,85.183,14.817,96.526,3.474,23.83,299,0.875,bicubic,+6.377,+2.156,+21 +hrnet_w48,85.151,14.849,96.492,3.508,77.47,224,0.875,bilinear,+5.851,+1.980,-2 +gluon_xception65,85.148,14.851,96.597,3.403,39.92,299,0.903,bicubic,+5.433,+1.737,-23 +gluon_resnet101_v1b,85.142,14.858,96.366,3.634,44.55,224,0.875,bicubic,+5.836,+1.842,-6 +regnetx_120,85.131,14.869,96.477,3.523,46.11,224,0.875,bicubic,+5.535,+1.739,-21 +xception,85.129,14.871,96.471,3.529,22.86,299,0.897,bicubic,+6.077,+2.079,+9 +tf_efficientnet_b1_ap,85.127,14.873,96.405,3.595,7.79,240,0.882,bicubic,+5.847,+2.099,-4 +hrnet_w64,85.119,14.881,96.744,3.256,128.06,224,0.875,bilinear,+5.645,+2.092,-19 +ssl_resnet50,85.097,14.903,96.866,3.134,25.56,224,0.875,bilinear,+5.875,+2.034,-4 +res2net101_26w_4s,85.093,14.907,96.381,3.619,45.21,224,0.875,bilinear,+5.895,+1.949,-1 +tf_efficientnet_cc_b1_8e,85.063,14.937,96.422,3.578,39.72,240,0.882,bicubic,+5.755,+2.052,-14 +res2net50_26w_8s,85.029,14.971,96.419,3.580,48.40,224,0.875,bilinear,+5.831,+2.052,-4 +resnest26d,85.008,14.992,96.637,3.363,17.07,224,0.875,bilinear,+6.530,+2.339,+22 +gluon_resnext50_32x4d,84.995,15.005,96.426,3.574,25.03,224,0.875,bicubic,+5.641,+2.000,-20 +tf_efficientnet_b0_ns,84.984,15.016,96.503,3.497,5.29,224,0.875,bicubic,+6.326,+2.127,+14 +coat_tiny,84.976,15.024,96.409,3.591,5.50,224,0.900,bicubic,+6.542,+2.371,+23 +regnety_040,84.948,15.052,96.612,3.388,20.65,224,0.875,bicubic,+5.728,+1.956,-11 +dla169,84.920,15.080,96.535,3.465,53.39,224,0.875,bilinear,+6.232,+2.199,+9 +tf_efficientnet_b1,84.918,15.082,96.364,3.636,7.79,240,0.882,bicubic,+6.092,+2.166,+2 +legacy_seresnext50_32x4d,84.901,15.099,96.434,3.566,27.56,224,0.875,bilinear,+5.823,+1.998,-8 +hrnet_w44,84.884,15.116,96.434,3.566,67.06,224,0.875,bilinear,+5.988,+2.066,-2 +gluon_resnet50_v1s,84.862,15.138,96.443,3.557,25.68,224,0.875,bicubic,+6.150,+2.205,+4 +regnetx_080,84.862,15.138,96.434,3.566,39.57,224,0.875,bicubic,+5.668,+1.874,-13 +levit_128,84.843,15.157,96.360,3.640,9.21,224,0.900,bicubic,+6.357,+2.350,+9 +gluon_resnet50_v1d,84.832,15.168,96.398,3.602,25.58,224,0.875,bicubic,+5.758,+1.928,-12 +dla60_res2next,84.830,15.170,96.411,3.589,17.03,224,0.875,bilinear,+6.390,+2.259,+12 +vit_tiny_patch16_384,84.828,15.172,96.708,3.292,5.79,384,1.000,bicubic,+6.398,+2.166,+13 +mixnet_l,84.822,15.178,96.328,3.672,7.33,224,0.875,bicubic,+5.846,+2.146,-11 +tv_resnet152,84.815,15.185,96.225,3.775,60.19,224,0.875,bilinear,+6.503,+2.187,+17 +dla60_res2net,84.813,15.187,96.481,3.519,20.85,224,0.875,bilinear,+6.349,+2.275,+6 +dla102x,84.813,15.187,96.552,3.448,26.31,224,0.875,bilinear,+6.303,+2.324,+1 +pit_xs_224,84.792,15.208,96.492,3.508,10.62,224,0.900,bicubic,+6.610,+2.324,+19 +xception41,84.792,15.208,96.413,3.587,26.97,299,0.903,bicubic,+6.276,+2.135,-2 +regnetx_064,84.781,15.219,96.490,3.510,26.21,224,0.875,bicubic,+5.709,+2.032,-20 
+hrnet_w40,84.743,15.257,96.554,3.446,57.56,224,0.875,bilinear,+5.823,+2.084,-17 +res2net50_26w_6s,84.726,15.274,96.281,3.719,37.05,224,0.875,bilinear,+6.156,+2.157,-6 +repvgg_b2,84.724,15.276,96.469,3.531,89.02,224,0.875,bilinear,+5.932,+2.055,-13 +resmlp_12_distilled_224,84.713,15.287,96.225,3.775,15.35,224,0.875,bicubic,+6.769,+2.667,+25 +legacy_seresnet152,84.704,15.296,96.417,3.583,66.82,224,0.875,bilinear,+6.044,+2.047,-11 +selecsls60b,84.657,15.343,96.300,3.700,32.77,224,0.875,bicubic,+6.245,+2.126,+1 +hrnet_w32,84.651,15.349,96.407,3.593,41.23,224,0.875,bilinear,+6.201,+2.221,-4 +tf_efficientnetv2_b0,84.625,15.375,96.274,3.726,7.14,224,0.875,bicubic,+6.269,+2.250,+3 +efficientnet_b1,84.608,15.392,96.332,3.668,7.79,256,1.000,bicubic,+5.814,+1.990,-20 +regnetx_040,84.600,15.400,96.383,3.617,22.12,224,0.875,bicubic,+6.118,+2.139,-10 +efficientnet_es,84.591,15.409,96.311,3.689,5.44,224,0.875,bicubic,+6.525,+2.385,+11 +hrnet_w30,84.572,15.428,96.388,3.612,37.71,224,0.875,bilinear,+6.366,+2.166,+4 +tf_mixnet_l,84.564,15.437,96.244,3.756,7.33,224,0.875,bicubic,+5.790,+2.246,-22 +wide_resnet101_2,84.557,15.443,96.349,3.651,126.89,224,0.875,bilinear,+5.701,+2.067,-28 +dla60x,84.523,15.477,96.285,3.715,17.35,224,0.875,bilinear,+6.277,+2.267,-2 +legacy_seresnet101,84.504,15.496,96.330,3.670,49.33,224,0.875,bilinear,+6.122,+2.066,-7 +tf_efficientnet_em,84.450,15.550,96.180,3.820,6.90,240,0.882,bicubic,+6.320,+2.136,+2 +coat_lite_tiny,84.450,15.550,96.368,3.632,5.72,224,0.900,bicubic,+6.938,+2.452,+27 +repvgg_b1,84.416,15.584,96.221,3.779,57.42,224,0.875,bilinear,+6.050,+2.123,-9 +efficientnet_b1_pruned,84.393,15.607,96.140,3.860,6.33,240,0.882,bicubic,+6.157,+2.306,-5 +res2net50_26w_4s,84.365,15.635,96.082,3.918,25.70,224,0.875,bilinear,+6.401,+2.228,+7 +hardcorenas_f,84.326,15.674,96.025,3.975,8.20,224,0.875,bilinear,+6.222,+2.222,-1 +res2net50_14w_8s,84.309,15.691,96.072,3.929,25.06,224,0.875,bilinear,+6.159,+2.224,-4 +selecsls60,84.288,15.712,96.095,3.905,30.67,224,0.875,bicubic,+6.306,+2.267,+3 +regnetx_032,84.237,15.763,96.247,3.753,15.30,224,0.875,bicubic,+6.065,+2.159,-7 +res2next50,84.226,15.774,95.997,4.003,24.67,224,0.875,bilinear,+5.980,+2.105,-12 +gluon_resnet50_v1c,84.207,15.793,96.161,3.839,25.58,224,0.875,bicubic,+6.195,+2.173,-2 +dla102,84.190,15.810,96.206,3.794,33.27,224,0.875,bilinear,+6.158,+2.260,-4 +rexnet_100,84.162,15.838,96.255,3.745,4.80,224,0.875,bicubic,+6.304,+2.385,+5 +tf_inception_v3,84.132,15.868,95.920,4.080,23.83,299,0.875,bicubic,+6.270,+2.280,+3 +res2net50_48w_2s,84.126,15.874,95.965,4.035,25.29,224,0.875,bilinear,+6.604,+2.411,+12 +resnet34d,84.098,15.902,95.978,4.022,21.82,224,0.875,bicubic,+6.982,+2.596,+23 +tf_efficientnet_lite2,84.094,15.906,96.069,3.931,6.09,260,0.890,bicubic,+6.626,+2.315,+12 +efficientnet_b0,84.038,15.962,95.956,4.044,5.29,224,0.875,bicubic,+6.340,+2.424,+2 +gmixer_24_224,83.968,16.032,95.849,4.151,24.72,224,0.875,bicubic,+5.932,+2.185,-12 +hardcorenas_e,83.968,16.032,95.898,4.101,8.07,224,0.875,bilinear,+6.174,+2.204,-1 +tf_efficientnet_cc_b0_8e,83.966,16.034,96.065,3.935,24.01,224,0.875,bicubic,+6.058,+2.411,-6 +tv_resnext50_32x4d,83.959,16.041,95.960,4.040,25.03,224,0.875,bilinear,+6.339,+2.264,0 +regnety_016,83.955,16.045,96.005,3.995,11.20,224,0.875,bicubic,+6.093,+2.285,-7 +gluon_resnet50_v1b,83.940,16.060,96.012,3.988,25.56,224,0.875,bicubic,+6.360,+2.296,+2 +densenet161,83.906,16.094,96.010,3.990,28.68,224,0.875,bicubic,+6.548,+2.372,+8 +adv_inception_v3,83.902,16.098,95.935,4.065,23.83,299,0.875,bicubic,+6.320,+2.199,-1 
+mobilenetv2_120d,83.893,16.107,95.909,4.091,5.83,224,0.875,bicubic,+6.609,+2.417,+9 +seresnext26t_32x4d,83.878,16.122,95.931,4.069,16.81,224,0.875,bicubic,+5.892,+2.185,-18 +tv_resnet101,83.848,16.152,95.892,4.108,44.55,224,0.875,bilinear,+6.474,+2.352,+3 +inception_v3,83.761,16.239,95.879,4.121,23.83,299,0.875,bicubic,+6.323,+2.405,0 +hardcorenas_d,83.759,16.241,95.734,4.266,7.50,224,0.875,bilinear,+6.327,+2.250,0 +seresnext26d_32x4d,83.754,16.246,95.849,4.151,16.81,224,0.875,bicubic,+6.152,+2.241,-9 +dla60,83.729,16.271,95.933,4.067,22.04,224,0.875,bilinear,+6.697,+2.615,+10 +repvgg_b1g4,83.699,16.301,96.020,3.980,39.97,224,0.875,bilinear,+6.105,+2.194,-10 +legacy_seresnet50,83.662,16.337,95.973,4.027,28.09,224,0.875,bilinear,+6.032,+2.225,-14 +tf_efficientnet_b0_ap,83.650,16.350,95.779,4.221,5.29,224,0.875,bicubic,+6.564,+2.523,+5 +skresnet34,83.641,16.359,95.933,4.067,22.28,224,0.875,bicubic,+6.729,+2.611,+10 +tf_efficientnet_cc_b0_4e,83.639,16.361,95.740,4.260,13.31,224,0.875,bicubic,+6.333,+2.406,-4 +resmlp_12_224,83.571,16.429,95.760,4.240,15.35,224,0.875,bicubic,+6.917,+2.580,+13 +densenet201,83.556,16.444,95.811,4.189,20.01,224,0.875,bicubic,+6.270,+2.333,-5 +mobilenetv3_large_100_miil,83.556,16.444,95.452,4.548,5.48,224,0.875,bilinear,+5.640,+2.542,-27 +gernet_s,83.522,16.478,95.794,4.206,8.17,224,0.875,bilinear,+6.606,+2.662,+4 +legacy_seresnext26_32x4d,83.517,16.483,95.719,4.281,16.79,224,0.875,bicubic,+6.413,+2.403,-3 +mixnet_m,83.515,16.485,95.689,4.311,5.01,224,0.875,bicubic,+6.255,+2.265,-7 +tf_efficientnet_b0,83.515,16.485,95.719,4.281,5.29,224,0.875,bicubic,+6.667,+2.491,+3 +hrnet_w18,83.500,16.500,95.907,4.093,21.30,224,0.875,bilinear,+6.742,+2.463,+4 +densenetblur121d,83.472,16.527,95.822,4.178,8.00,224,0.875,bicubic,+6.885,+2.630,+9 +selecsls42b,83.457,16.543,95.745,4.255,32.46,224,0.875,bicubic,+6.283,+2.355,-10 +tf_efficientnet_lite1,83.344,16.656,95.642,4.358,5.42,240,0.882,bicubic,+6.702,+2.416,+4 +hardcorenas_c,83.342,16.658,95.706,4.294,5.52,224,0.875,bilinear,+6.288,+2.548,-8 +regnetx_016,83.195,16.805,95.740,4.260,9.19,224,0.875,bicubic,+6.245,+2.320,-7 +mobilenetv2_140,83.182,16.818,95.689,4.311,6.11,224,0.875,bicubic,+6.666,+2.693,+7 +dpn68,83.178,16.822,95.597,4.402,12.61,224,0.875,bicubic,+6.860,+2.620,+8 +tf_efficientnet_es,83.178,16.822,95.585,4.415,5.44,224,0.875,bicubic,+6.584,+2.383,+1 +tf_mixnet_m,83.176,16.824,95.461,4.539,5.01,224,0.875,bicubic,+6.234,+2.309,-10 +ese_vovnet19b_dw,83.109,16.890,95.779,4.221,6.54,224,0.875,bicubic,+6.311,+2.511,-7 +levit_128s,83.069,16.931,95.531,4.469,7.78,224,0.900,bicubic,+6.539,+2.665,+1 +resnet26d,83.050,16.950,95.604,4.396,16.01,224,0.875,bicubic,+6.354,+2.454,-7 +repvgg_a2,83.001,16.999,95.589,4.411,28.21,224,0.875,bilinear,+6.541,+2.585,+1 +tv_resnet50,82.958,17.042,95.467,4.533,25.56,224,0.875,bilinear,+6.820,+2.603,+3 +hardcorenas_b,82.873,17.128,95.392,4.607,5.18,224,0.875,bilinear,+6.335,+2.638,-4 +densenet121,82.823,17.177,95.585,4.415,7.98,224,0.875,bicubic,+7.245,+2.933,+10 +vit_tiny_r_s16_p8_384,82.691,17.309,95.845,4.155,6.36,384,1.000,bicubic,+6.739,+2.585,+3 +densenet169,82.683,17.317,95.600,4.400,14.15,224,0.875,bicubic,+6.776,+2.574,+4 +mixnet_s,82.525,17.476,95.356,4.644,4.13,224,0.875,bicubic,+6.532,+2.560,-1 +vit_small_patch32_224,82.514,17.486,95.670,4.330,22.88,224,0.900,bicubic,+6.524,+2.398,-1 +regnety_008,82.493,17.508,95.487,4.513,6.26,224,0.875,bicubic,+6.177,+2.421,-5 +efficientnet_lite0,82.382,17.619,95.279,4.721,4.65,224,0.875,bicubic,+6.898,+2.769,+7 
+resnest14d,82.349,17.651,95.339,4.661,10.61,224,0.875,bilinear,+6.843,+2.821,+5 +hardcorenas_a,82.313,17.687,95.294,4.706,5.26,224,0.875,bilinear,+6.397,+2.780,-3 +efficientnet_es_pruned,82.296,17.704,95.303,4.697,5.44,224,0.875,bicubic,+7.296,+2.855,+15 +mobilenetv3_rw,82.275,17.725,95.234,4.766,5.48,224,0.875,bicubic,+6.641,+2.526,-1 +semnasnet_100,82.251,17.749,95.230,4.770,3.89,224,0.875,bicubic,+6.803,+2.626,+4 +mobilenetv3_large_100,82.177,17.823,95.196,4.804,5.48,224,0.875,bicubic,+6.410,+2.654,-5 +resnet34,82.138,17.862,95.130,4.870,21.80,224,0.875,bilinear,+7.028,+2.846,+8 +mobilenetv2_110d,82.070,17.930,95.076,4.923,4.52,224,0.875,bicubic,+7.034,+2.890,+9 +vit_tiny_patch16_224,82.066,17.934,95.489,4.511,5.72,224,0.900,bicubic,+6.612,+2.641,-1 +tf_mixnet_s,82.038,17.962,95.121,4.879,4.13,224,0.875,bicubic,+6.388,+2.493,-8 +repvgg_b0,82.001,17.999,95.100,4.900,15.82,224,0.875,bilinear,+6.849,+2.682,+1 +deit_tiny_distilled_patch16_224,81.997,18.003,95.141,4.859,5.91,224,0.900,bicubic,+7.487,+3.251,+14 +mixer_b16_224,81.978,18.022,94.449,5.551,59.88,224,0.875,bicubic,+5.376,+2.221,-27 +pit_ti_distilled_224,81.967,18.033,95.145,4.855,5.10,224,0.900,bicubic,+7.437,+3.049,+11 +hrnet_w18_small_v2,81.961,18.039,95.164,4.836,15.60,224,0.875,bilinear,+6.847,+2.748,-1 +tf_efficientnet_lite0,81.952,18.048,95.168,4.832,4.65,224,0.875,bicubic,+7.122,+2.992,+3 +resnet26,81.944,18.056,95.241,4.759,16.00,224,0.875,bicubic,+6.652,+2.671,-7 +tf_mobilenetv3_large_100,81.848,18.152,95.070,4.930,5.48,224,0.875,bilinear,+6.330,+2.464,-13 +tv_densenet121,81.726,18.274,95.034,4.966,7.98,224,0.875,bicubic,+6.988,+2.884,+2 +regnety_006,81.700,18.300,95.115,4.885,6.06,224,0.875,bicubic,+6.454,+2.583,-9 +dla34,81.658,18.342,94.878,5.122,15.74,224,0.875,bilinear,+7.028,+2.800,+2 +fbnetc_100,81.559,18.441,94.970,5.030,5.57,224,0.875,bilinear,+6.436,+2.584,-9 +legacy_seresnet34,81.534,18.466,94.899,5.101,21.96,224,0.875,bilinear,+6.726,+2.775,-3 +gluon_resnet34_v1b,81.500,18.500,94.810,5.190,21.80,224,0.875,bicubic,+6.912,+2.820,0 +regnetx_008,81.485,18.515,95.059,4.941,7.26,224,0.875,bicubic,+6.447,+2.724,-9 +mnasnet_100,81.459,18.541,94.899,5.101,4.38,224,0.875,bicubic,+6.801,+2.785,-4 +vgg19_bn,81.444,18.556,94.763,5.237,143.68,224,0.875,bilinear,+7.230,+2.921,0 +convit_tiny,81.126,18.874,95.044,4.955,5.71,224,0.875,bicubic,+8.010,+3.331,+8 +spnasnet_100,80.878,19.122,94.526,5.474,4.42,224,0.875,bilinear,+6.794,+2.708,-1 +ghostnet_100,80.699,19.301,94.291,5.709,5.18,224,0.875,bilinear,+6.721,+2.835,0 +regnety_004,80.659,19.341,94.686,5.314,4.34,224,0.875,bicubic,+6.624,+2.934,-2 +skresnet18,80.637,19.363,94.378,5.622,11.96,224,0.875,bicubic,+7.599,+3.210,+5 +regnetx_006,80.629,19.371,94.524,5.476,6.20,224,0.875,bicubic,+6.777,+2.852,-2 +pit_ti_224,80.605,19.395,94.618,5.383,4.85,224,0.900,bicubic,+7.693,+3.216,+5 +swsl_resnet18,80.575,19.425,94.743,5.256,11.69,224,0.875,bilinear,+7.299,+3.010,0 +vgg16_bn,80.556,19.444,94.592,5.408,138.37,224,0.875,bilinear,+7.206,+3.086,-3 +tv_resnet34,80.389,19.611,94.436,5.564,21.80,224,0.875,bilinear,+7.077,+3.010,-3 +resnet18d,80.387,19.613,94.252,5.748,11.71,224,0.875,bicubic,+8.127,+3.556,+6 +mobilenetv2_100,80.257,19.743,94.195,5.805,3.50,224,0.875,bicubic,+7.287,+3.179,-1 +ssl_resnet18,80.101,19.899,94.590,5.410,11.69,224,0.875,bilinear,+7.491,+3.174,0 +tf_mobilenetv3_large_075,80.093,19.907,94.184,5.816,3.99,224,0.875,bilinear,+6.655,+2.834,-9 +deit_tiny_patch16_224,80.018,19.982,94.449,5.551,5.72,224,0.900,bicubic,+7.850,+3.331,+4 
+hrnet_w18_small,79.557,20.443,93.898,6.102,13.19,224,0.875,bilinear,+7.215,+3.220,0
+vgg19,79.480,20.520,93.870,6.130,143.67,224,0.875,bilinear,+7.112,+2.998,-2
+regnetx_004,79.435,20.565,93.853,6.147,5.16,224,0.875,bicubic,+7.039,+3.023,-4
+tf_mobilenetv3_large_minimal_100,79.222,20.778,93.706,6.294,3.92,224,0.875,bilinear,+6.974,+3.076,-1
+legacy_seresnet18,79.153,20.847,93.783,6.217,11.78,224,0.875,bicubic,+7.411,+3.449,+2
+vgg16,79.038,20.962,93.646,6.354,138.36,224,0.875,bilinear,+7.444,+3.264,+3
+vgg13_bn,79.006,20.994,93.655,6.345,133.05,224,0.875,bilinear,+7.412,+3.279,+1
+vit_tiny_r_s16_p8_224,78.991,21.009,93.902,6.098,6.34,224,0.900,bicubic,+7.203,+3.074,-2
+gluon_resnet18_v1b,78.372,21.628,93.138,6.862,11.69,224,0.875,bicubic,+7.536,+3.376,+1
+vgg11_bn,77.926,22.074,93.230,6.770,132.87,224,0.875,bilinear,+7.566,+3.428,+1
+regnety_002,77.405,22.595,92.914,7.086,3.16,224,0.875,bicubic,+7.153,+3.374,+1
+mixer_l16_224,77.285,22.715,90.582,9.418,208.20,224,0.875,bicubic,+5.227,+2.914,-7
+resnet18,77.276,22.724,92.756,7.244,11.69,224,0.875,bilinear,+7.528,+3.678,+1
+vgg13,77.230,22.770,92.689,7.311,133.05,224,0.875,bilinear,+7.303,+3.444,-1
+vgg11,76.384,23.616,92.154,7.846,132.86,224,0.875,bilinear,+7.360,+3.526,0
+regnetx_002,76.124,23.876,92.211,7.789,2.68,224,0.875,bicubic,+7.362,+3.655,0
+dla60x_c,75.637,24.363,92.177,7.823,1.32,224,0.875,bilinear,+7.745,+3.751,+1
+tf_mobilenetv3_small_100,74.717,25.283,91.257,8.743,2.54,224,0.875,bilinear,+6.795,+3.593,-1
+dla46x_c,73.647,26.353,91.095,8.905,1.07,224,0.875,bilinear,+7.677,+4.115,0
+tf_mobilenetv3_small_075,72.812,27.188,90.036,9.964,2.04,224,0.875,bilinear,+7.096,+3.906,0
+dla46_c,72.601,27.399,90.499,9.501,1.30,224,0.875,bilinear,+7.735,+4.207,0
+tf_mobilenetv3_small_minimal_100,70.111,29.889,88.505,11.495,2.04,224,0.875,bilinear,+7.205,+4.275,0
diff --git a/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet.csv b/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet.csv
new file mode 100644
index 0000000000..a5081bf0b0
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/results/results-imagenet.csv
@@ -0,0 +1,421 @@
+model,top1,top1_err,top5,top5_err,param_count,img_size,cropt_pct,interpolation
+tf_efficientnet_l2_ns,88.352,11.648,98.650,1.350,480.31,800,0.960,bicubic
+tf_efficientnet_l2_ns_475,88.234,11.766,98.546,1.454,480.31,475,0.936,bicubic
+swin_large_patch4_window12_384,87.148,12.852,98.234,1.766,196.74,384,1.000,bicubic
+vit_large_patch16_384,87.080,12.920,98.300,1.700,304.72,384,1.000,bicubic
+tf_efficientnet_b7_ns,86.840,13.160,98.094,1.906,66.35,600,0.949,bicubic
+cait_m48_448,86.484,13.516,97.754,2.246,356.46,448,1.000,bicubic
+tf_efficientnet_b6_ns,86.452,13.548,97.882,2.118,43.04,528,0.942,bicubic
+swin_base_patch4_window12_384,86.432,13.568,98.058,1.942,87.90,384,1.000,bicubic
+swin_large_patch4_window7_224,86.320,13.680,97.896,2.104,196.53,224,0.900,bicubic
+tf_efficientnetv2_l_in21ft1k,86.304,13.696,97.978,2.022,118.52,480,1.000,bicubic
+vit_large_r50_s32_384,86.184,13.816,97.918,2.082,329.09,384,1.000,bicubic
+dm_nfnet_f6,86.144,13.856,97.730,2.270,438.36,576,0.956,bicubic
+tf_efficientnet_b5_ns,86.088,13.912,97.752,2.248,30.39,456,0.934,bicubic
+cait_m36_384,86.054,13.946,97.730,2.270,271.22,384,1.000,bicubic
+vit_base_patch16_384,86.006,13.994,98.000,2.000,86.86,384,1.000,bicubic
+vit_large_patch16_224,85.842,14.158,97.824,2.176,304.33,224,0.900,bicubic
+dm_nfnet_f5,85.814,14.186,97.488,2.512,377.21,544,0.954,bicubic
+dm_nfnet_f4,85.714,14.286,97.520,2.480,316.07,512,0.951,bicubic +tf_efficientnetv2_m_in21ft1k,85.588,14.412,97.752,2.248,54.14,480,1.000,bicubic +dm_nfnet_f3,85.522,14.478,97.462,2.538,254.92,416,0.940,bicubic +tf_efficientnetv2_l,85.490,14.510,97.372,2.628,118.52,480,1.000,bicubic +cait_s36_384,85.460,14.540,97.480,2.520,68.37,384,1.000,bicubic +ig_resnext101_32x48d,85.428,14.572,97.572,2.428,828.41,224,0.875,bilinear +deit_base_distilled_patch16_384,85.422,14.578,97.332,2.668,87.63,384,1.000,bicubic +tf_efficientnet_b8,85.370,14.630,97.390,2.610,87.41,672,0.954,bicubic +tf_efficientnet_b8_ap,85.370,14.630,97.294,2.706,87.41,672,0.954,bicubic +swin_base_patch4_window7_224,85.252,14.748,97.562,2.438,87.77,224,0.900,bicubic +tf_efficientnet_b4_ns,85.162,14.838,97.470,2.530,19.34,380,0.922,bicubic +tf_efficientnet_b7_ap,85.120,14.880,97.252,2.748,66.35,600,0.949,bicubic +ig_resnext101_32x32d,85.094,14.906,97.438,2.562,468.53,224,0.875,bilinear +dm_nfnet_f2,85.064,14.936,97.240,2.760,193.78,352,0.920,bicubic +cait_s24_384,85.046,14.954,97.346,2.654,47.06,384,1.000,bicubic +tf_efficientnetv2_m,85.044,14.956,97.278,2.722,54.14,480,1.000,bicubic +resnetrs420,85.008,14.992,97.124,2.876,191.89,416,1.000,bicubic +ecaresnet269d,84.976,15.024,97.226,2.774,102.09,352,1.000,bicubic +vit_base_r50_s16_384,84.972,15.028,97.288,2.712,98.95,384,1.000,bicubic +tf_efficientnet_b7,84.936,15.064,97.204,2.796,66.35,600,0.949,bicubic +resnetv2_152x4_bitm,84.916,15.084,97.442,2.558,936.53,480,1.000,bilinear +efficientnetv2_rw_m,84.808,15.192,97.148,2.852,53.24,416,1.000,bicubic +tf_efficientnet_b6_ap,84.788,15.212,97.138,2.862,43.04,528,0.942,bicubic +resnetrs350,84.720,15.280,96.988,3.012,163.96,384,1.000,bicubic +eca_nfnet_l2,84.698,15.302,97.264,2.736,56.72,384,1.000,bicubic +dm_nfnet_f1,84.626,15.374,97.100,2.900,132.63,320,0.910,bicubic +vit_base_patch16_224,84.532,15.468,97.294,2.706,86.57,224,0.900,bicubic +resnest269e,84.518,15.482,96.986,3.014,110.93,416,0.928,bicubic +resnetv2_152x2_bitm,84.510,15.490,97.432,2.568,236.34,448,1.000,bilinear +resnetv2_101x3_bitm,84.440,15.560,97.382,2.618,387.93,448,1.000,bilinear +resnetrs270,84.434,15.566,96.970,3.030,129.86,352,1.000,bicubic +vit_large_r50_s32_224,84.434,15.566,97.164,2.836,328.99,224,0.900,bicubic +resmlp_big_24_224_in22ft1k,84.394,15.606,97.120,2.880,129.14,224,0.875,bicubic +seresnet152d,84.362,15.638,97.040,2.960,66.84,320,1.000,bicubic +tf_efficientnetv2_s_in21ft1k,84.302,15.698,97.252,2.748,21.46,384,1.000,bicubic +swsl_resnext101_32x8d,84.284,15.716,97.176,2.824,88.79,224,0.875,bilinear +vit_base_patch16_224_miil,84.268,15.732,96.802,3.198,86.54,224,0.875,bilinear +tf_efficientnet_b5_ap,84.252,15.748,96.974,3.026,30.39,456,0.934,bicubic +ig_resnext101_32x16d,84.170,15.830,97.196,2.804,194.03,224,0.875,bilinear +pit_b_distilled_224,84.144,15.856,96.856,3.144,74.79,224,0.900,bicubic +tf_efficientnet_b6,84.110,15.890,96.886,3.114,43.04,528,0.942,bicubic +resnetrs200,84.066,15.934,96.874,3.126,93.21,320,1.000,bicubic +cait_xs24_384,84.062,15.938,96.888,3.112,26.67,384,1.000,bicubic +tf_efficientnet_b3_ns,84.048,15.952,96.910,3.090,12.23,300,0.904,bicubic +vit_small_r26_s32_384,84.046,15.954,97.328,2.672,36.47,384,1.000,bicubic +resnetv2_50x3_bitm,84.014,15.986,97.124,2.876,217.32,448,1.000,bilinear +eca_nfnet_l1,84.010,15.990,97.028,2.972,41.41,320,1.000,bicubic +resnet200d,83.962,16.038,96.824,3.176,64.69,320,1.000,bicubic +tf_efficientnetv2_s,83.894,16.106,96.698,3.302,21.46,384,1.000,bicubic 
+resnetv2_152x2_bit_teacher_384,83.844,16.156,97.118,2.882,236.34,384,1.000,bicubic +resnest200e,83.832,16.168,96.894,3.106,70.20,320,0.909,bicubic +tf_efficientnet_b5,83.812,16.188,96.748,3.252,30.39,456,0.934,bicubic +efficientnetv2_rw_s,83.808,16.192,96.724,3.276,23.94,384,1.000,bicubic +vit_small_patch16_384,83.802,16.198,97.102,2.898,22.20,384,1.000,bicubic +resnetrs152,83.712,16.288,96.614,3.386,86.62,320,1.000,bicubic +regnety_160,83.686,16.314,96.776,3.224,83.59,288,1.000,bicubic +resnet152d,83.680,16.320,96.738,3.262,60.21,320,1.000,bicubic +twins_svt_large,83.678,16.322,96.594,3.406,99.27,224,0.900,bicubic +resmlp_big_24_distilled_224,83.590,16.410,96.648,3.352,129.14,224,0.875,bicubic +cait_s24_224,83.452,16.548,96.564,3.436,46.92,224,1.000,bicubic +efficientnet_b4,83.428,16.572,96.596,3.404,19.34,384,1.000,bicubic +deit_base_distilled_patch16_224,83.388,16.612,96.488,3.512,87.34,224,0.900,bicubic +dm_nfnet_f0,83.386,16.614,96.572,3.428,71.49,256,0.900,bicubic +vit_base_patch32_384,83.350,16.650,96.836,3.164,88.30,384,1.000,bicubic +swsl_resnext101_32x16d,83.346,16.654,96.846,3.154,194.03,224,0.875,bilinear +tf_efficientnet_b4_ap,83.248,16.752,96.392,3.608,19.34,380,0.922,bicubic +swsl_resnext101_32x4d,83.230,16.770,96.760,3.240,44.18,224,0.875,bilinear +swin_small_patch4_window7_224,83.212,16.788,96.322,3.678,49.61,224,0.900,bicubic +twins_pcpvt_large,83.140,16.860,96.598,3.402,60.99,224,0.900,bicubic +twins_svt_base,83.136,16.864,96.418,3.582,56.07,224,0.900,bicubic +deit_base_patch16_384,83.106,16.894,96.372,3.628,86.86,384,1.000,bicubic +tresnet_m,83.080,16.920,96.118,3.882,31.39,224,0.875,bilinear +tresnet_xl_448,83.050,16.950,96.174,3.826,78.44,448,0.875,bilinear +resnet101d,83.022,16.978,96.446,3.554,44.57,320,1.000,bicubic +tf_efficientnet_b4,83.022,16.978,96.300,3.700,19.34,380,0.922,bicubic +resnest101e,82.890,17.110,96.320,3.680,48.28,256,0.875,bilinear +resnetv2_152x2_bit_teacher,82.862,17.138,96.568,3.432,236.34,224,0.875,bicubic +resnetv2_50x1_bit_distilled,82.818,17.182,96.522,3.478,25.55,224,0.875,bicubic +pnasnet5large,82.782,17.218,96.040,3.960,86.06,331,0.911,bicubic +nfnet_l0,82.750,17.250,96.516,3.484,35.07,288,1.000,bicubic +regnety_032,82.724,17.276,96.424,3.576,19.44,288,1.000,bicubic +twins_pcpvt_base,82.708,17.292,96.346,3.654,43.83,224,0.900,bicubic +ig_resnext101_32x8d,82.688,17.312,96.636,3.364,88.79,224,0.875,bilinear +nasnetalarge,82.620,17.380,96.046,3.954,88.75,331,0.911,bicubic +levit_384,82.586,17.414,96.016,3.984,39.13,224,0.900,bicubic +eca_nfnet_l0,82.580,17.420,96.490,3.510,24.14,288,1.000,bicubic +pit_b_224,82.446,17.554,95.710,4.290,73.76,224,0.900,bicubic +tf_efficientnet_b2_ns,82.380,17.620,96.248,3.752,9.11,260,0.890,bicubic +resnet51q,82.360,17.640,96.180,3.820,35.70,288,1.000,bilinear +ecaresnet50t,82.346,17.654,96.138,3.862,25.57,320,0.950,bicubic +resnetv2_101x1_bitm,82.332,17.668,96.518,3.482,44.54,448,1.000,bilinear +coat_lite_small,82.308,17.692,95.850,4.150,19.84,224,0.900,bicubic +mixer_b16_224_miil,82.308,17.692,95.716,4.284,59.88,224,0.875,bilinear +convit_base,82.290,17.710,95.938,4.062,86.54,224,0.875,bicubic +resnetrs101,82.288,17.712,96.008,3.992,63.62,288,0.940,bicubic +tresnet_l_448,82.268,17.732,95.976,4.024,55.99,448,0.875,bilinear +efficientnet_b3,82.242,17.758,96.114,3.886,12.23,320,1.000,bicubic +cait_xxs36_384,82.194,17.806,96.148,3.852,17.37,384,1.000,bicubic +swsl_resnext50_32x4d,82.182,17.818,96.230,3.770,25.03,224,0.875,bilinear +ecaresnet101d,82.172,17.828,96.046,3.954,44.57,224,0.875,bicubic 
+visformer_small,82.106,17.894,95.872,4.128,40.22,224,0.900,bicubic +tresnet_xl,82.054,17.946,95.936,4.064,78.44,224,0.875,bilinear +deit_base_patch16_224,81.998,18.002,95.734,4.266,86.57,224,0.900,bicubic +pit_s_distilled_224,81.996,18.004,95.798,4.202,24.04,224,0.900,bicubic +tf_efficientnetv2_b3,81.970,18.030,95.782,4.218,14.36,300,0.904,bicubic +vit_small_r26_s32_224,81.858,18.142,96.022,3.978,36.43,224,0.900,bicubic +ssl_resnext101_32x16d,81.844,18.156,96.096,3.904,194.03,224,0.875,bilinear +tf_efficientnet_b3_ap,81.822,18.178,95.624,4.376,12.23,300,0.904,bicubic +tresnet_m_448,81.714,18.286,95.572,4.428,31.39,448,0.875,bilinear +twins_svt_small,81.682,18.318,95.670,4.330,24.06,224,0.900,bicubic +tf_efficientnet_b3,81.636,18.364,95.718,4.282,12.23,300,0.904,bicubic +rexnet_200,81.632,18.368,95.668,4.332,16.37,224,0.875,bicubic +ssl_resnext101_32x8d,81.616,18.384,96.038,3.962,88.79,224,0.875,bilinear +tf_efficientnet_lite4,81.536,18.464,95.668,4.332,13.01,380,0.920,bilinear +tnt_s_patch16_224,81.518,18.482,95.748,4.252,23.76,224,0.900,bicubic +levit_256,81.510,18.490,95.490,4.510,18.89,224,0.900,bicubic +vit_large_patch32_384,81.506,18.494,96.092,3.908,306.63,384,1.000,bicubic +tresnet_l,81.490,18.510,95.624,4.376,55.99,224,0.875,bilinear +wide_resnet50_2,81.456,18.544,95.532,4.468,68.88,224,0.875,bicubic +convit_small,81.426,18.574,95.744,4.256,27.78,224,0.875,bicubic +vit_small_patch16_224,81.402,18.598,96.134,3.866,22.05,224,0.900,bicubic +tf_efficientnet_b1_ns,81.388,18.612,95.738,4.262,7.79,240,0.882,bicubic +swin_tiny_patch4_window7_224,81.378,18.622,95.540,4.460,28.29,224,0.900,bicubic +gernet_l,81.354,18.646,95.536,4.464,31.08,256,0.875,bilinear +efficientnet_el,81.316,18.684,95.526,4.474,10.59,300,0.904,bicubic +legacy_senet154,81.310,18.690,95.496,4.504,115.09,224,0.875,bilinear +coat_mini,81.268,18.732,95.392,4.608,10.34,224,0.900,bicubic +seresnext50_32x4d,81.266,18.734,95.620,4.380,27.56,224,0.875,bicubic +gluon_senet154,81.234,18.766,95.348,4.652,115.09,224,0.875,bicubic +deit_small_distilled_patch16_224,81.200,18.800,95.378,4.622,22.44,224,0.900,bicubic +swsl_resnet50,81.166,18.834,95.972,4.028,25.56,224,0.875,bilinear +resmlp_36_distilled_224,81.160,18.840,95.488,4.512,44.69,224,0.875,bicubic +resnest50d_4s2x40d,81.108,18.892,95.558,4.442,30.42,224,0.875,bicubic +pit_s_224,81.094,18.906,95.332,4.668,23.46,224,0.900,bicubic +twins_pcpvt_small,81.088,18.912,95.642,4.358,24.11,224,0.900,bicubic +resmlp_big_24_224,81.028,18.972,95.022,4.978,129.14,224,0.875,bicubic +gluon_resnet152_v1s,81.016,18.984,95.412,4.588,60.32,224,0.875,bicubic +resnest50d_1s4x24d,80.988,19.012,95.322,4.678,25.68,224,0.875,bicubic +resnest50d,80.974,19.026,95.378,4.622,27.48,224,0.875,bilinear +cait_xxs24_384,80.966,19.034,95.646,4.354,12.03,384,1.000,bicubic +ssl_resnext101_32x4d,80.924,19.076,95.728,4.272,44.18,224,0.875,bilinear +gluon_seresnext101_32x4d,80.904,19.096,95.294,4.706,48.96,224,0.875,bicubic +gluon_seresnext101_64x4d,80.894,19.106,95.308,4.692,88.23,224,0.875,bicubic +efficientnet_b3_pruned,80.858,19.142,95.242,4.758,9.86,300,0.904,bicubic +ecaresnet101d_pruned,80.818,19.182,95.628,4.372,24.88,224,0.875,bicubic +regnety_320,80.812,19.188,95.244,4.756,145.05,224,0.875,bicubic +resmlp_24_distilled_224,80.766,19.234,95.218,4.782,30.02,224,0.875,bicubic +gernet_m,80.732,19.268,95.184,4.816,21.14,224,0.875,bilinear +vit_base_patch32_224,80.724,19.276,95.568,4.432,88.22,224,0.900,bicubic +nf_resnet50,80.660,19.340,95.336,4.664,25.56,288,0.940,bicubic 
+efficientnet_b2,80.612,19.388,95.318,4.682,9.11,288,1.000,bicubic +gluon_resnext101_64x4d,80.604,19.396,94.988,5.012,83.46,224,0.875,bicubic +ecaresnet50d,80.592,19.408,95.320,4.680,25.58,224,0.875,bicubic +resnet50d,80.530,19.470,95.160,4.840,25.58,224,0.875,bicubic +repvgg_b3,80.492,19.508,95.260,4.740,123.09,224,0.875,bilinear +vit_small_patch32_384,80.480,19.520,95.598,4.402,22.92,384,1.000,bicubic +mixnet_xl,80.476,19.524,94.936,5.064,11.90,224,0.875,bicubic +gluon_resnet152_v1d,80.474,19.526,95.206,4.794,60.21,224,0.875,bicubic +ecaresnetlight,80.462,19.538,95.250,4.750,30.16,224,0.875,bicubic +inception_resnet_v2,80.458,19.542,95.306,4.694,55.84,299,0.897,bicubic +gluon_resnet101_v1d,80.414,19.586,95.014,4.986,44.57,224,0.875,bicubic +regnety_120,80.366,19.634,95.126,4.874,51.82,224,0.875,bicubic +resnetv2_50x1_bitm,80.342,19.658,95.684,4.316,25.55,448,1.000,bilinear +gluon_resnext101_32x4d,80.334,19.666,94.926,5.074,44.18,224,0.875,bicubic +ssl_resnext50_32x4d,80.318,19.682,95.406,4.594,25.03,224,0.875,bilinear +rexnet_150,80.310,19.690,95.166,4.834,9.73,224,0.875,bicubic +gluon_resnet101_v1s,80.302,19.698,95.160,4.840,44.67,224,0.875,bicubic +tf_efficientnet_b2_ap,80.300,19.700,95.028,4.972,9.11,260,0.890,bicubic +efficientnet_el_pruned,80.300,19.700,95.218,4.782,10.59,300,0.904,bicubic +seresnet50,80.274,19.726,95.070,4.930,28.09,224,0.875,bicubic +tf_efficientnet_el,80.250,19.750,95.128,4.872,10.59,300,0.904,bicubic +regnetx_320,80.246,19.754,95.026,4.974,107.81,224,0.875,bicubic +legacy_seresnext101_32x4d,80.228,19.772,95.018,4.982,48.96,224,0.875,bilinear +repvgg_b3g4,80.212,19.788,95.110,4.890,83.83,224,0.875,bilinear +tf_efficientnetv2_b2,80.208,19.792,95.042,4.958,10.10,260,0.890,bicubic +inception_v4,80.168,19.832,94.968,5.032,42.68,299,0.875,bicubic +dpn107,80.156,19.844,94.910,5.090,86.92,224,0.875,bicubic +skresnext50_32x4d,80.156,19.844,94.642,5.358,27.48,224,0.875,bicubic +tf_efficientnet_b2,80.086,19.914,94.908,5.092,9.11,260,0.890,bicubic +cspdarknet53,80.058,19.942,95.084,4.916,27.64,256,0.887,bilinear +cspresnext50,80.040,19.960,94.944,5.056,20.57,224,0.875,bilinear +dpn92,80.008,19.992,94.836,5.164,37.67,224,0.875,bicubic +ens_adv_inception_resnet_v2,79.982,20.018,94.938,5.062,55.84,299,0.897,bicubic +gluon_seresnext50_32x4d,79.918,20.082,94.822,5.178,27.56,224,0.875,bicubic +efficientnet_b2_pruned,79.916,20.084,94.856,5.144,8.31,260,0.890,bicubic +gluon_resnet152_v1c,79.910,20.090,94.840,5.160,60.21,224,0.875,bicubic +resnetrs50,79.892,20.108,94.968,5.032,35.69,224,0.910,bicubic +regnety_080,79.876,20.124,94.830,5.170,39.18,224,0.875,bicubic +xception71,79.874,20.126,94.922,5.078,42.34,299,0.903,bicubic +deit_small_patch16_224,79.856,20.144,95.052,4.948,22.05,224,0.900,bicubic +regnetx_160,79.856,20.144,94.830,5.170,54.28,224,0.875,bicubic +ecaresnet26t,79.854,20.146,95.084,4.916,16.01,320,0.950,bicubic +levit_192,79.842,20.158,94.786,5.214,10.95,224,0.900,bicubic +dpn131,79.822,20.178,94.710,5.290,79.25,224,0.875,bicubic +tf_efficientnet_lite3,79.820,20.180,94.914,5.086,8.20,300,0.904,bilinear +resmlp_36_224,79.770,20.230,94.886,5.114,44.69,224,0.875,bicubic +resnext50_32x4d,79.768,20.232,94.598,5.402,25.03,224,0.875,bicubic +cait_xxs36_224,79.750,20.250,94.866,5.134,17.30,224,1.000,bicubic +regnety_064,79.722,20.278,94.768,5.232,30.58,224,0.875,bicubic +ecaresnet50d_pruned,79.716,20.284,94.880,5.120,19.94,224,0.875,bicubic +gluon_xception65,79.716,20.284,94.860,5.140,39.92,299,0.903,bicubic 
+gluon_resnet152_v1b,79.686,20.314,94.736,5.264,60.19,224,0.875,bicubic +resnext50d_32x4d,79.676,20.324,94.866,5.134,25.05,224,0.875,bicubic +dpn98,79.642,20.358,94.598,5.402,61.57,224,0.875,bicubic +regnetx_120,79.596,20.404,94.738,5.262,46.11,224,0.875,bicubic +cspresnet50,79.574,20.426,94.712,5.288,21.62,256,0.887,bilinear +xception65,79.552,20.448,94.654,5.346,39.92,299,0.903,bicubic +gluon_resnet101_v1c,79.534,20.466,94.578,5.422,44.57,224,0.875,bicubic +rexnet_130,79.500,20.500,94.682,5.318,7.56,224,0.875,bicubic +hrnet_w64,79.474,20.526,94.652,5.348,128.06,224,0.875,bilinear +tf_efficientnetv2_b1,79.462,20.538,94.722,5.278,8.14,240,0.882,bicubic +dla102x2,79.448,20.552,94.640,5.360,41.28,224,0.875,bilinear +resmlp_24_224,79.374,20.626,94.546,5.454,30.02,224,0.875,bicubic +repvgg_b2g4,79.366,20.634,94.688,5.312,61.76,224,0.875,bilinear +gluon_resnext50_32x4d,79.354,20.646,94.426,5.574,25.03,224,0.875,bicubic +ese_vovnet39b,79.320,20.680,94.712,5.288,24.57,224,0.875,bicubic +resnext101_32x8d,79.308,20.692,94.518,5.482,88.79,224,0.875,bilinear +tf_efficientnet_cc_b1_8e,79.308,20.692,94.370,5.630,39.72,240,0.882,bicubic +gluon_resnet101_v1b,79.306,20.694,94.524,5.476,44.55,224,0.875,bicubic +pit_xs_distilled_224,79.306,20.694,94.364,5.636,11.00,224,0.900,bicubic +hrnet_w48,79.300,20.700,94.512,5.488,77.47,224,0.875,bilinear +nf_regnet_b1,79.292,20.708,94.748,5.252,10.22,288,0.900,bicubic +resnetblur50,79.286,20.714,94.638,5.362,25.56,224,0.875,bicubic +tf_efficientnet_b1_ap,79.280,20.720,94.306,5.694,7.79,240,0.882,bicubic +efficientnet_em,79.252,20.748,94.794,5.206,6.90,240,0.882,bicubic +ssl_resnet50,79.222,20.778,94.832,5.168,25.56,224,0.875,bilinear +regnety_040,79.220,20.780,94.656,5.344,20.65,224,0.875,bicubic +dpn68b,79.216,20.784,94.414,5.586,12.61,224,0.875,bicubic +res2net50_26w_8s,79.198,20.802,94.368,5.632,48.40,224,0.875,bilinear +res2net101_26w_4s,79.198,20.802,94.432,5.568,45.21,224,0.875,bilinear +regnetx_080,79.194,20.806,94.560,5.440,39.57,224,0.875,bicubic +coat_lite_mini,79.088,20.912,94.604,5.396,11.01,224,0.900,bicubic +legacy_seresnext50_32x4d,79.078,20.922,94.436,5.564,27.56,224,0.875,bilinear +gluon_resnet50_v1d,79.074,20.926,94.470,5.530,25.58,224,0.875,bicubic +regnetx_064,79.072,20.928,94.458,5.542,26.21,224,0.875,bicubic +xception,79.052,20.948,94.392,5.608,22.86,299,0.897,bicubic +resnet50,79.038,20.962,94.390,5.610,25.56,224,0.875,bicubic +mixnet_l,78.976,21.024,94.182,5.818,7.33,224,0.875,bicubic +hrnet_w40,78.920,21.080,94.470,5.530,57.56,224,0.875,bilinear +hrnet_w44,78.896,21.104,94.368,5.632,67.06,224,0.875,bilinear +wide_resnet101_2,78.856,21.144,94.282,5.718,126.89,224,0.875,bilinear +tf_efficientnet_b1,78.826,21.174,94.198,5.802,7.79,240,0.882,bicubic +gluon_inception_v3,78.806,21.194,94.370,5.630,23.83,299,0.875,bicubic +efficientnet_b1,78.794,21.206,94.342,5.658,7.79,256,1.000,bicubic +repvgg_b2,78.792,21.208,94.414,5.586,89.02,224,0.875,bilinear +tf_mixnet_l,78.774,21.226,93.998,6.002,7.33,224,0.875,bicubic +gluon_resnet50_v1s,78.712,21.288,94.238,5.762,25.68,224,0.875,bicubic +dla169,78.688,21.312,94.336,5.664,53.39,224,0.875,bilinear +legacy_seresnet152,78.660,21.340,94.370,5.630,66.82,224,0.875,bilinear +tf_efficientnet_b0_ns,78.658,21.342,94.376,5.624,5.29,224,0.875,bicubic +res2net50_26w_6s,78.570,21.430,94.124,5.876,37.05,224,0.875,bilinear +xception41,78.516,21.484,94.278,5.722,26.97,299,0.903,bicubic +dla102x,78.510,21.490,94.228,5.772,26.31,224,0.875,bilinear +levit_128,78.486,21.514,94.010,5.990,9.21,224,0.900,bicubic 
+regnetx_040,78.482,21.518,94.244,5.756,22.12,224,0.875,bicubic +resnest26d,78.478,21.522,94.298,5.702,17.07,224,0.875,bilinear +dla60_res2net,78.464,21.536,94.206,5.794,20.85,224,0.875,bilinear +hrnet_w32,78.450,21.550,94.186,5.814,41.23,224,0.875,bilinear +dla60_res2next,78.440,21.560,94.152,5.848,17.03,224,0.875,bilinear +coat_tiny,78.434,21.566,94.038,5.962,5.50,224,0.900,bicubic +vit_tiny_patch16_384,78.430,21.570,94.542,5.458,5.79,384,1.000,bicubic +selecsls60b,78.412,21.588,94.174,5.826,32.77,224,0.875,bicubic +cait_xxs24_224,78.386,21.614,94.310,5.690,11.96,224,1.000,bicubic +legacy_seresnet101,78.382,21.618,94.264,5.736,49.33,224,0.875,bilinear +repvgg_b1,78.366,21.634,94.098,5.902,57.42,224,0.875,bilinear +tf_efficientnetv2_b0,78.356,21.644,94.024,5.976,7.14,224,0.875,bicubic +tv_resnet152,78.312,21.688,94.038,5.962,60.19,224,0.875,bilinear +dla60x,78.246,21.754,94.018,5.982,17.35,224,0.875,bilinear +res2next50,78.246,21.754,93.892,6.108,24.67,224,0.875,bilinear +efficientnet_b1_pruned,78.236,21.764,93.834,6.166,6.33,240,0.882,bicubic +hrnet_w30,78.206,21.794,94.222,5.778,37.71,224,0.875,bilinear +pit_xs_224,78.182,21.818,94.168,5.832,10.62,224,0.900,bicubic +regnetx_032,78.172,21.828,94.088,5.912,15.30,224,0.875,bicubic +res2net50_14w_8s,78.150,21.850,93.848,6.152,25.06,224,0.875,bilinear +tf_efficientnet_em,78.130,21.870,94.044,5.956,6.90,240,0.882,bicubic +hardcorenas_f,78.104,21.896,93.802,6.198,8.20,224,0.875,bilinear +efficientnet_es,78.066,21.934,93.926,6.074,5.44,224,0.875,bicubic +gmixer_24_224,78.036,21.964,93.664,6.336,24.72,224,0.875,bicubic +dla102,78.032,21.968,93.946,6.054,33.27,224,0.875,bilinear +gluon_resnet50_v1c,78.012,21.988,93.988,6.012,25.58,224,0.875,bicubic +seresnext26t_32x4d,77.986,22.014,93.746,6.254,16.81,224,0.875,bicubic +selecsls60,77.982,22.018,93.828,6.172,30.67,224,0.875,bicubic +res2net50_26w_4s,77.964,22.036,93.854,6.146,25.70,224,0.875,bilinear +resmlp_12_distilled_224,77.944,22.056,93.558,6.442,15.35,224,0.875,bicubic +mobilenetv3_large_100_miil,77.916,22.084,92.910,7.090,5.48,224,0.875,bilinear +tf_efficientnet_cc_b0_8e,77.908,22.092,93.654,6.346,24.01,224,0.875,bicubic +regnety_016,77.862,22.138,93.720,6.280,11.20,224,0.875,bicubic +tf_inception_v3,77.862,22.138,93.640,6.360,23.83,299,0.875,bicubic +rexnet_100,77.858,22.142,93.870,6.130,4.80,224,0.875,bicubic +hardcorenas_e,77.794,22.206,93.694,6.306,8.07,224,0.875,bilinear +efficientnet_b0,77.698,22.302,93.532,6.468,5.29,224,0.875,bicubic +legacy_seresnet50,77.630,22.370,93.748,6.252,28.09,224,0.875,bilinear +tv_resnext50_32x4d,77.620,22.380,93.696,6.304,25.03,224,0.875,bilinear +seresnext26d_32x4d,77.602,22.398,93.608,6.392,16.81,224,0.875,bicubic +repvgg_b1g4,77.594,22.406,93.826,6.174,39.97,224,0.875,bilinear +adv_inception_v3,77.582,22.418,93.736,6.264,23.83,299,0.875,bicubic +gluon_resnet50_v1b,77.580,22.420,93.716,6.284,25.56,224,0.875,bicubic +res2net50_48w_2s,77.522,22.478,93.554,6.446,25.29,224,0.875,bilinear +coat_lite_tiny,77.512,22.488,93.916,6.084,5.72,224,0.900,bicubic +tf_efficientnet_lite2,77.468,22.532,93.754,6.246,6.09,260,0.890,bicubic +inception_v3,77.438,22.562,93.474,6.526,23.83,299,0.875,bicubic +hardcorenas_d,77.432,22.568,93.484,6.516,7.50,224,0.875,bilinear +tv_resnet101,77.374,22.626,93.540,6.460,44.55,224,0.875,bilinear +densenet161,77.358,22.642,93.638,6.362,28.68,224,0.875,bicubic +tf_efficientnet_cc_b0_4e,77.306,22.694,93.334,6.666,13.31,224,0.875,bicubic +densenet201,77.286,22.714,93.478,6.522,20.01,224,0.875,bicubic 
+mobilenetv2_120d,77.284,22.716,93.492,6.508,5.83,224,0.875,bicubic +mixnet_m,77.260,22.740,93.424,6.576,5.01,224,0.875,bicubic +selecsls42b,77.174,22.826,93.390,6.610,32.46,224,0.875,bicubic +resnet34d,77.116,22.884,93.382,6.618,21.82,224,0.875,bicubic +legacy_seresnext26_32x4d,77.104,22.896,93.316,6.684,16.79,224,0.875,bicubic +tf_efficientnet_b0_ap,77.086,22.914,93.256,6.744,5.29,224,0.875,bicubic +hardcorenas_c,77.054,22.946,93.158,6.842,5.52,224,0.875,bilinear +dla60,77.032,22.968,93.318,6.682,22.04,224,0.875,bilinear +regnetx_016,76.950,23.050,93.420,6.580,9.19,224,0.875,bicubic +tf_mixnet_m,76.942,23.058,93.152,6.848,5.01,224,0.875,bicubic +gernet_s,76.916,23.084,93.132,6.868,8.17,224,0.875,bilinear +skresnet34,76.912,23.088,93.322,6.678,22.28,224,0.875,bicubic +tf_efficientnet_b0,76.848,23.152,93.228,6.772,5.29,224,0.875,bicubic +ese_vovnet19b_dw,76.798,23.202,93.268,6.732,6.54,224,0.875,bicubic +hrnet_w18,76.758,23.242,93.444,6.556,21.30,224,0.875,bilinear +resnet26d,76.696,23.304,93.150,6.850,16.01,224,0.875,bicubic +resmlp_12_224,76.654,23.346,93.180,6.820,15.35,224,0.875,bicubic +tf_efficientnet_lite1,76.642,23.358,93.226,6.774,5.42,240,0.882,bicubic +mixer_b16_224,76.602,23.398,92.228,7.772,59.88,224,0.875,bicubic +tf_efficientnet_es,76.594,23.406,93.202,6.798,5.44,224,0.875,bicubic +densenetblur121d,76.588,23.412,93.192,6.808,8.00,224,0.875,bicubic +hardcorenas_b,76.538,23.462,92.754,7.246,5.18,224,0.875,bilinear +levit_128s,76.530,23.470,92.866,7.134,7.78,224,0.900,bicubic +mobilenetv2_140,76.516,23.484,92.996,7.004,6.11,224,0.875,bicubic +repvgg_a2,76.460,23.540,93.004,6.996,28.21,224,0.875,bilinear +dpn68,76.318,23.682,92.978,7.022,12.61,224,0.875,bicubic +regnety_008,76.316,23.684,93.066,6.934,6.26,224,0.875,bicubic +tv_resnet50,76.138,23.862,92.864,7.136,25.56,224,0.875,bilinear +mixnet_s,75.992,24.008,92.796,7.204,4.13,224,0.875,bicubic +vit_small_patch32_224,75.990,24.010,93.272,6.728,22.88,224,0.900,bicubic +vit_tiny_r_s16_p8_384,75.952,24.048,93.260,6.740,6.36,384,1.000,bicubic +hardcorenas_a,75.916,24.084,92.514,7.486,5.26,224,0.875,bilinear +densenet169,75.906,24.094,93.026,6.974,14.15,224,0.875,bicubic +mobilenetv3_large_100,75.766,24.234,92.542,7.458,5.48,224,0.875,bicubic +tf_mixnet_s,75.650,24.350,92.628,7.372,4.13,224,0.875,bicubic +mobilenetv3_rw,75.634,24.366,92.708,7.292,5.48,224,0.875,bicubic +densenet121,75.578,24.422,92.652,7.348,7.98,224,0.875,bicubic +tf_mobilenetv3_large_100,75.518,24.482,92.606,7.394,5.48,224,0.875,bilinear +resnest14d,75.506,24.494,92.518,7.482,10.61,224,0.875,bilinear +efficientnet_lite0,75.484,24.516,92.510,7.490,4.65,224,0.875,bicubic +vit_tiny_patch16_224,75.454,24.546,92.848,7.152,5.72,224,0.900,bicubic +semnasnet_100,75.448,24.552,92.604,7.396,3.89,224,0.875,bicubic +resnet26,75.292,24.708,92.570,7.430,16.00,224,0.875,bicubic +regnety_006,75.246,24.754,92.532,7.468,6.06,224,0.875,bicubic +repvgg_b0,75.152,24.848,92.418,7.582,15.82,224,0.875,bilinear +fbnetc_100,75.124,24.876,92.386,7.614,5.57,224,0.875,bilinear +hrnet_w18_small_v2,75.114,24.886,92.416,7.584,15.60,224,0.875,bilinear +resnet34,75.110,24.890,92.284,7.716,21.80,224,0.875,bilinear +regnetx_008,75.038,24.962,92.336,7.664,7.26,224,0.875,bicubic +mobilenetv2_110d,75.036,24.964,92.186,7.814,4.52,224,0.875,bicubic +efficientnet_es_pruned,75.000,25.000,92.448,7.552,5.44,224,0.875,bicubic +tf_efficientnet_lite0,74.830,25.170,92.176,7.824,4.65,224,0.875,bicubic +legacy_seresnet34,74.808,25.192,92.124,7.876,21.96,224,0.875,bilinear 
+tv_densenet121,74.738,25.262,92.150,7.850,7.98,224,0.875,bicubic +mnasnet_100,74.658,25.342,92.114,7.886,4.38,224,0.875,bicubic +dla34,74.630,25.370,92.078,7.922,15.74,224,0.875,bilinear +gluon_resnet34_v1b,74.588,25.412,91.990,8.010,21.80,224,0.875,bicubic +pit_ti_distilled_224,74.530,25.470,92.096,7.904,5.10,224,0.900,bicubic +deit_tiny_distilled_patch16_224,74.510,25.490,91.890,8.110,5.91,224,0.900,bicubic +vgg19_bn,74.214,25.786,91.842,8.158,143.68,224,0.875,bilinear +spnasnet_100,74.084,25.916,91.818,8.182,4.42,224,0.875,bilinear +regnety_004,74.034,25.966,91.752,8.248,4.34,224,0.875,bicubic +ghostnet_100,73.978,26.022,91.456,8.544,5.18,224,0.875,bilinear +regnetx_006,73.852,26.148,91.672,8.328,6.20,224,0.875,bicubic +tf_mobilenetv3_large_075,73.438,26.562,91.350,8.650,3.99,224,0.875,bilinear +vgg16_bn,73.350,26.650,91.506,8.494,138.37,224,0.875,bilinear +tv_resnet34,73.312,26.688,91.426,8.574,21.80,224,0.875,bilinear +swsl_resnet18,73.276,26.724,91.734,8.266,11.69,224,0.875,bilinear +convit_tiny,73.116,26.884,91.714,8.286,5.71,224,0.875,bicubic +skresnet18,73.038,26.962,91.168,8.832,11.96,224,0.875,bicubic +mobilenetv2_100,72.970,27.030,91.016,8.984,3.50,224,0.875,bicubic +pit_ti_224,72.912,27.088,91.402,8.598,4.85,224,0.900,bicubic +ssl_resnet18,72.610,27.390,91.416,8.584,11.69,224,0.875,bilinear +regnetx_004,72.396,27.604,90.830,9.170,5.16,224,0.875,bicubic +vgg19,72.368,27.632,90.872,9.128,143.67,224,0.875,bilinear +hrnet_w18_small,72.342,27.658,90.678,9.322,13.19,224,0.875,bilinear +resnet18d,72.260,27.740,90.696,9.304,11.71,224,0.875,bicubic +tf_mobilenetv3_large_minimal_100,72.248,27.752,90.630,9.370,3.92,224,0.875,bilinear +deit_tiny_patch16_224,72.168,27.832,91.118,8.882,5.72,224,0.900,bicubic +mixer_l16_224,72.058,27.942,87.668,12.332,208.20,224,0.875,bicubic +vit_tiny_r_s16_p8_224,71.788,28.212,90.828,9.172,6.34,224,0.900,bicubic +legacy_seresnet18,71.742,28.258,90.334,9.666,11.78,224,0.875,bicubic +vgg13_bn,71.594,28.406,90.376,9.624,133.05,224,0.875,bilinear +vgg16,71.594,28.406,90.382,9.618,138.36,224,0.875,bilinear +gluon_resnet18_v1b,70.836,29.164,89.762,10.238,11.69,224,0.875,bicubic +vgg11_bn,70.360,29.640,89.802,10.198,132.87,224,0.875,bilinear +regnety_002,70.252,29.748,89.540,10.460,3.16,224,0.875,bicubic +vgg13,69.926,30.074,89.246,10.754,133.05,224,0.875,bilinear +resnet18,69.748,30.252,89.078,10.922,11.69,224,0.875,bilinear +vgg11,69.024,30.976,88.628,11.372,132.86,224,0.875,bilinear +regnetx_002,68.762,31.238,88.556,11.444,2.68,224,0.875,bicubic +tf_mobilenetv3_small_100,67.922,32.078,87.664,12.336,2.54,224,0.875,bilinear +dla60x_c,67.892,32.108,88.426,11.574,1.32,224,0.875,bilinear +dla46x_c,65.970,34.030,86.980,13.020,1.07,224,0.875,bilinear +tf_mobilenetv3_small_075,65.716,34.284,86.130,13.870,2.04,224,0.875,bilinear +dla46_c,64.866,35.134,86.292,13.708,1.30,224,0.875,bilinear +tf_mobilenetv3_small_minimal_100,62.906,37.094,84.230,15.770,2.04,224,0.875,bilinear diff --git a/PyTorch/contrib/cv/classification/convmixer/results/results-imagenetv2-matched-frequency.csv b/PyTorch/contrib/cv/classification/convmixer/results/results-imagenetv2-matched-frequency.csv new file mode 100644 index 0000000000..2edc6816f5 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/results-imagenetv2-matched-frequency.csv @@ -0,0 +1,421 @@ +model,top1,top1_err,top5,top5_err,param_count,img_size,cropt_pct,interpolation,top1_diff,top5_diff,rank_diff +tf_efficientnet_l2_ns_475,80.460,19.540,95.730,4.270,480.31,475,0.936,bicubic,-7.774,-2.816,+1 
+tf_efficientnet_l2_ns,80.250,19.750,95.840,4.160,480.31,800,0.960,bicubic,-8.102,-2.810,-1 +tf_efficientnet_b7_ns,78.510,21.490,94.380,5.620,66.35,600,0.949,bicubic,-8.330,-3.714,+2 +vit_large_patch16_384,77.940,22.060,94.450,5.550,304.72,384,1.000,bicubic,-9.140,-3.850,0 +tf_efficientnet_b6_ns,77.280,22.720,93.890,6.110,43.04,528,0.942,bicubic,-9.172,-3.992,+2 +vit_large_r50_s32_384,77.060,22.940,93.720,6.280,329.09,384,1.000,bicubic,-9.124,-4.198,+5 +swin_large_patch4_window12_384,77.040,22.960,93.750,6.250,196.74,384,1.000,bicubic,-10.108,-4.484,-4 +tf_efficientnetv2_l_in21ft1k,76.940,23.060,93.950,6.050,118.52,480,1.000,bicubic,-9.364,-4.028,+2 +cait_m48_448,76.870,23.130,93.370,6.630,356.46,448,1.000,bicubic,-9.614,-4.384,-3 +ig_resnext101_32x48d,76.870,23.130,93.310,6.690,828.41,224,0.875,bilinear,-8.558,-4.262,+13 +ig_resnext101_32x32d,76.840,23.160,93.200,6.800,468.53,224,0.875,bilinear,-8.254,-4.238,+19 +tf_efficientnet_b5_ns,76.810,23.190,93.580,6.420,30.39,456,0.934,bicubic,-9.278,-4.172,+1 +vit_base_patch16_384,76.500,23.500,93.750,6.250,86.86,384,1.000,bicubic,-9.506,-4.250,+2 +cait_m36_384,76.320,23.680,93.050,6.950,271.22,384,1.000,bicubic,-9.734,-4.680,0 +vit_large_patch16_224,76.290,23.710,93.600,6.400,304.33,224,0.900,bicubic,-9.552,-4.224,+1 +swin_base_patch4_window12_384,76.280,23.720,93.320,6.680,87.90,384,1.000,bicubic,-10.152,-4.738,-8 +tf_efficientnetv2_l,76.280,23.720,92.970,7.030,118.52,480,1.000,bicubic,-9.210,-4.402,+4 +swin_large_patch4_window7_224,76.270,23.730,93.420,6.580,196.53,224,0.900,bicubic,-10.050,-4.476,-9 +cait_s36_384,76.210,23.790,92.970,7.030,68.37,384,1.000,bicubic,-9.250,-4.510,+3 +dm_nfnet_f6,76.130,23.870,93.110,6.890,438.36,576,0.956,bicubic,-10.014,-4.620,-8 +tf_efficientnet_b7_ap,76.090,23.910,92.970,7.030,66.35,600,0.949,bicubic,-9.030,-4.282,+8 +tf_efficientnet_b8_ap,76.090,23.910,92.730,7.270,87.41,672,0.954,bicubic,-9.280,-4.564,+4 +tf_efficientnetv2_m_in21ft1k,75.920,24.080,93.280,6.720,54.14,480,1.000,bicubic,-9.668,-4.472,-4 +dm_nfnet_f4,75.850,24.150,92.950,7.050,316.07,512,0.951,bicubic,-9.864,-4.570,-6 +ig_resnext101_32x16d,75.720,24.280,92.910,7.090,194.03,224,0.875,bilinear,-8.450,-4.286,+31 +tf_efficientnet_b4_ns,75.670,24.330,93.050,6.950,19.34,380,0.922,bicubic,-9.492,-4.420,+2 +vit_base_r50_s16_384,75.590,24.410,92.790,7.210,98.95,384,1.000,bicubic,-9.382,-4.498,+9 +deit_base_distilled_patch16_384,75.550,24.450,92.500,7.500,87.63,384,1.000,bicubic,-9.872,-4.832,-4 +tf_efficientnetv2_m,75.520,24.480,92.620,7.380,54.14,480,1.000,bicubic,-9.524,-4.658,+4 +cait_s24_384,75.480,24.520,92.600,7.400,47.06,384,1.000,bicubic,-9.566,-4.746,+2 +swsl_resnext101_32x8d,75.430,24.570,92.760,7.240,88.79,224,0.875,bilinear,-8.854,-4.416,+22 +tf_efficientnet_b6_ap,75.380,24.620,92.440,7.560,43.04,528,0.942,bicubic,-9.408,-4.698,+8 +dm_nfnet_f3,75.210,24.790,92.940,7.060,254.92,416,0.940,bicubic,-10.312,-4.522,-13 +efficientnetv2_rw_m,75.170,24.830,92.570,7.430,53.24,416,1.000,bicubic,-9.638,-4.578,+5 +ecaresnet269d,75.120,24.880,92.840,7.160,102.09,352,1.000,bicubic,-9.856,-4.386,0 +dm_nfnet_f5,75.000,25.000,92.600,7.400,377.21,544,0.954,bicubic,-10.814,-4.888,-19 +tf_efficientnet_b8,74.940,25.060,92.310,7.690,87.41,672,0.954,bicubic,-10.430,-5.080,-12 +eca_nfnet_l2,74.830,25.170,92.650,7.350,56.72,384,1.000,bicubic,-9.868,-4.614,+4 +tf_efficientnet_b7,74.720,25.280,92.220,7.780,66.35,600,0.949,bicubic,-10.216,-4.984,-2 +dm_nfnet_f2,74.620,25.380,92.260,7.740,193.78,352,0.920,bicubic,-10.444,-4.980,-9 
+tf_efficientnet_b5_ap,74.600,25.400,91.990,8.010,30.39,456,0.934,bicubic,-9.652,-4.984,+14 +dm_nfnet_f1,74.570,25.430,92.260,7.740,132.63,320,0.910,bicubic,-10.056,-4.840,+1 +swin_base_patch4_window7_224,74.570,25.430,92.560,7.440,87.77,224,0.900,bicubic,-10.682,-5.002,-16 +seresnet152d,74.510,25.490,92.080,7.920,66.84,320,1.000,bicubic,-9.852,-4.960,+7 +resnest200e,74.480,25.520,91.860,8.140,70.20,320,0.909,bicubic,-9.352,-5.034,+23 +tf_efficientnetv2_s_in21ft1k,74.450,25.550,92.510,7.490,21.46,384,1.000,bicubic,-9.852,-4.742,+6 +efficientnetv2_rw_s,74.170,25.830,91.710,8.290,23.94,384,1.000,bicubic,-9.638,-5.014,+23 +resnest269e,74.170,25.830,91.950,8.050,110.93,416,0.928,bicubic,-10.348,-5.036,-3 +cait_xs24_384,74.160,25.840,91.910,8.090,26.67,384,1.000,bicubic,-9.902,-4.978,+11 +pit_b_distilled_224,74.160,25.840,91.680,8.320,74.79,224,0.900,bicubic,-9.984,-5.176,+7 +swsl_resnext101_32x4d,74.140,25.860,91.990,8.010,44.18,224,0.875,bilinear,-9.090,-4.770,+33 +eca_nfnet_l1,74.120,25.880,92.070,7.930,41.41,320,1.000,bicubic,-9.890,-4.958,+12 +vit_large_r50_s32_224,74.100,25.900,92.390,7.610,328.99,224,0.900,bicubic,-10.334,-4.774,-4 +vit_base_patch16_224_miil,74.040,25.960,91.700,8.300,86.54,224,0.875,bilinear,-10.228,-5.102,0 +swsl_resnext101_32x16d,74.020,25.980,92.160,7.840,194.03,224,0.875,bilinear,-9.326,-4.686,+27 +resnetv2_152x4_bitm,74.010,25.990,92.340,7.660,936.53,480,1.000,bilinear,-10.906,-5.102,-18 +tf_efficientnetv2_s,74.000,26.000,91.530,8.470,21.46,384,1.000,bicubic,-9.894,-5.168,+9 +vit_base_patch16_224,74.000,26.000,92.470,7.530,86.57,224,0.900,bicubic,-10.532,-4.824,-14 +resnetrs420,73.920,26.080,91.760,8.240,191.89,416,1.000,bicubic,-11.088,-5.364,-25 +resnetv2_152x2_bitm,73.920,26.080,92.670,7.330,236.34,448,1.000,bilinear,-10.590,-4.762,-14 +tf_efficientnet_b6,73.900,26.100,91.750,8.250,43.04,528,0.942,bicubic,-10.210,-5.136,-3 +tf_efficientnet_b3_ns,73.890,26.110,91.870,8.130,12.23,300,0.904,bicubic,-10.158,-5.040,-1 +resmlp_big_24_224_in22ft1k,73.880,26.120,91.750,8.250,129.14,224,0.875,bicubic,-10.514,-5.370,-13 +vit_small_r26_s32_384,73.800,26.200,92.300,7.700,36.47,384,1.000,bicubic,-10.246,-5.028,-2 +resnetrs270,73.710,26.290,91.580,8.420,129.86,352,1.000,bicubic,-10.724,-5.390,-17 +resnet200d,73.680,26.320,91.570,8.430,64.69,320,1.000,bicubic,-10.282,-5.254,-1 +resnetv2_101x3_bitm,73.670,26.330,92.470,7.530,387.93,448,1.000,bilinear,-10.770,-4.912,-20 +ig_resnext101_32x8d,73.650,26.350,92.190,7.810,88.79,224,0.875,bilinear,-9.038,-4.446,+32 +tf_efficientnet_b5,73.550,26.450,91.460,8.540,30.39,456,0.934,bicubic,-10.262,-5.288,0 +resnet152d,73.520,26.480,91.230,8.770,60.21,320,1.000,bicubic,-10.160,-5.508,+4 +resnetrs200,73.500,26.500,91.250,8.750,93.21,320,1.000,bicubic,-10.566,-5.624,-12 +resnetrs350,73.400,26.600,91.310,8.690,163.96,384,1.000,bicubic,-11.320,-5.678,-31 +twins_svt_large,73.390,26.610,90.910,9.090,99.27,224,0.900,bicubic,-10.288,-5.684,+2 +regnety_160,73.360,26.640,91.690,8.310,83.59,288,1.000,bicubic,-10.326,-5.086,-1 +efficientnet_b4,73.320,26.680,91.280,8.720,19.34,384,1.000,bicubic,-10.108,-5.316,+3 +resmlp_big_24_distilled_224,73.290,26.710,91.160,8.840,129.14,224,0.875,bicubic,-10.300,-5.488,0 +vit_small_patch16_384,73.290,26.710,91.990,8.010,22.20,384,1.000,bicubic,-10.512,-5.112,-6 +deit_base_distilled_patch16_224,73.240,26.760,91.000,9.000,87.34,224,0.900,bicubic,-10.148,-5.488,+1 +resnetrs152,73.200,26.800,91.260,8.740,86.62,320,1.000,bicubic,-10.512,-5.354,-7 
+vit_base_patch32_384,73.130,26.870,91.240,8.760,88.30,384,1.000,bicubic,-10.220,-5.596,+1 +cait_s24_224,73.070,26.930,91.130,8.870,46.92,224,1.000,bicubic,-10.382,-5.434,-4 +resnetv2_152x2_bit_teacher_384,72.890,27.110,91.550,8.450,236.34,384,1.000,bicubic,-10.954,-5.568,-15 +tf_efficientnet_b4_ap,72.890,27.110,90.980,9.020,19.34,380,0.922,bicubic,-10.358,-5.412,0 +dm_nfnet_f0,72.880,27.120,91.080,8.920,71.49,256,0.900,bicubic,-10.506,-5.492,-4 +regnety_032,72.770,27.230,90.950,9.050,19.44,288,1.000,bicubic,-9.954,-5.474,+13 +nfnet_l0,72.610,27.390,91.010,8.990,35.07,288,1.000,bicubic,-10.140,-5.506,+11 +pnasnet5large,72.610,27.390,90.510,9.490,86.06,331,0.911,bicubic,-10.172,-5.530,+9 +twins_pcpvt_large,72.580,27.420,90.700,9.300,60.99,224,0.900,bicubic,-10.560,-5.898,-2 +resnest101e,72.570,27.430,90.820,9.180,48.28,256,0.875,bilinear,-10.320,-5.500,+4 +swsl_resnext50_32x4d,72.560,27.440,90.870,9.130,25.03,224,0.875,bilinear,-9.622,-5.360,+26 +twins_svt_base,72.550,27.450,90.460,9.540,56.07,224,0.900,bicubic,-10.586,-5.958,-5 +tresnet_xl_448,72.550,27.450,90.310,9.690,78.44,448,0.875,bilinear,-10.500,-5.864,-1 +deit_base_patch16_384,72.530,27.470,90.250,9.750,86.86,384,1.000,bicubic,-10.576,-6.122,-5 +resnetv2_50x3_bitm,72.530,27.470,91.760,8.240,217.32,448,1.000,bilinear,-11.484,-5.364,-31 +resnet101d,72.410,27.590,90.650,9.350,44.57,320,1.000,bicubic,-10.612,-5.796,-4 +tf_efficientnet_b4,72.290,27.710,90.590,9.410,19.34,380,0.922,bicubic,-10.732,-5.710,-4 +tf_efficientnet_b2_ns,72.280,27.720,91.090,8.910,9.11,260,0.890,bicubic,-10.100,-5.158,+8 +tresnet_m,72.270,27.730,90.240,9.760,31.39,224,0.875,bilinear,-10.810,-5.878,-9 +resnetv2_50x1_bit_distilled,72.260,27.740,91.010,8.990,25.55,224,0.875,bicubic,-10.558,-5.512,-4 +nasnetalarge,72.230,27.770,90.470,9.530,88.75,331,0.911,bicubic,-10.390,-5.576,+1 +cait_xxs36_384,72.190,27.810,90.840,9.160,17.37,384,1.000,bicubic,-10.004,-5.308,+14 +twins_pcpvt_base,72.180,27.820,90.510,9.490,43.83,224,0.900,bicubic,-10.528,-5.836,-3 +eca_nfnet_l0,71.840,28.160,91.110,8.890,24.14,288,1.000,bicubic,-10.740,-5.380,0 +swin_small_patch4_window7_224,71.740,28.260,90.240,9.760,49.61,224,0.900,bicubic,-11.472,-6.082,-19 +swsl_resnet50,71.700,28.300,90.500,9.500,25.56,224,0.875,bilinear,-9.466,-5.472,+42 +pit_b_224,71.700,28.300,89.250,10.750,73.76,224,0.900,bicubic,-10.746,-6.460,-1 +tresnet_xl,71.660,28.340,89.630,10.370,78.44,224,0.875,bilinear,-10.394,-6.306,+12 +convit_base,71.600,28.400,90.150,9.850,86.54,224,0.875,bicubic,-10.690,-5.788,+3 +tresnet_l_448,71.600,28.400,90.050,9.950,55.99,448,0.875,bilinear,-10.668,-5.926,+4 +ssl_resnext101_32x8d,71.500,28.500,90.460,9.540,88.79,224,0.875,bilinear,-10.116,-5.578,+20 +ecaresnet101d,71.490,28.510,90.330,9.670,44.57,224,0.875,bicubic,-10.682,-5.716,+6 +efficientnet_b3,71.480,28.520,90.060,9.940,12.23,320,1.000,bicubic,-10.762,-6.054,+2 +resnet51q,71.430,28.570,90.180,9.820,35.70,288,1.000,bilinear,-10.930,-6.000,-7 +ssl_resnext101_32x16d,71.410,28.590,90.560,9.440,194.03,224,0.875,bilinear,-10.434,-5.536,+10 +pit_s_distilled_224,71.380,28.620,89.780,10.220,24.04,224,0.900,bicubic,-10.616,-6.018,+6 +mixer_b16_224_miil,71.300,28.700,89.650,10.350,59.88,224,0.875,bilinear,-11.008,-6.066,-6 +resnetv2_152x2_bit_teacher,71.290,28.710,90.430,9.570,236.34,224,0.875,bicubic,-11.572,-6.138,-23 +ecaresnet50t,71.280,28.720,90.420,9.580,25.57,320,0.950,bicubic,-11.066,-5.718,-11 +deit_base_patch16_224,71.170,28.830,89.200,10.800,86.57,224,0.900,bicubic,-10.828,-6.534,+1 
+resnetv2_101x1_bitm,71.010,28.990,91.090,8.910,44.54,448,1.000,bilinear,-11.322,-5.428,-12 +visformer_small,71.010,28.990,89.460,10.540,40.22,224,0.900,bicubic,-11.096,-6.412,-3 +tresnet_m_448,70.990,29.010,88.680,11.320,31.39,448,0.875,bilinear,-10.724,-6.892,+4 +resnest50d_4s2x40d,70.950,29.050,89.710,10.290,30.42,224,0.875,bicubic,-10.158,-5.848,+27 +wide_resnet50_2,70.950,29.050,89.230,10.770,68.88,224,0.875,bicubic,-10.506,-6.302,+12 +tnt_s_patch16_224,70.930,29.070,89.600,10.400,23.76,224,0.900,bicubic,-10.588,-6.148,+7 +vit_small_patch16_224,70.930,29.070,90.140,9.860,22.05,224,0.900,bicubic,-10.472,-5.994,+12 +tf_efficientnet_b3_ap,70.920,29.080,89.430,10.570,12.23,300,0.904,bicubic,-10.902,-6.194,-2 +tf_efficientnet_b1_ns,70.870,29.130,90.120,9.880,7.79,240,0.882,bicubic,-10.518,-5.618,+11 +vit_large_patch32_384,70.860,29.140,90.570,9.430,306.63,384,1.000,bicubic,-10.646,-5.522,+5 +rexnet_200,70.840,29.160,89.700,10.300,16.37,224,0.875,bicubic,-10.792,-5.968,-2 +tresnet_l,70.840,29.160,89.630,10.370,55.99,224,0.875,bilinear,-10.650,-5.994,+3 +resnetrs101,70.840,29.160,89.830,10.170,63.62,288,0.940,bicubic,-11.448,-6.178,-18 +tf_efficientnetv2_b3,70.830,29.170,89.500,10.500,14.36,300,0.904,bicubic,-11.140,-6.282,-11 +coat_lite_small,70.800,29.200,89.570,10.430,19.84,224,0.900,bicubic,-11.508,-6.280,-25 +levit_384,70.750,29.250,89.300,10.700,39.13,224,0.900,bicubic,-11.836,-6.716,-33 +tf_efficientnet_b3,70.640,29.360,89.440,10.560,12.23,300,0.904,bicubic,-10.996,-6.278,-8 +cait_xxs24_384,70.600,29.400,89.720,10.280,12.03,384,1.000,bicubic,-10.366,-5.926,+20 +gluon_senet154,70.600,29.400,88.920,11.080,115.09,224,0.875,bicubic,-10.634,-6.428,+8 +convit_small,70.580,29.420,89.580,10.420,27.78,224,0.875,bicubic,-10.846,-6.164,-2 +twins_pcpvt_small,70.550,29.450,89.070,10.930,24.11,224,0.900,bicubic,-10.538,-6.572,+12 +ssl_resnext101_32x4d,70.530,29.470,89.760,10.240,44.18,224,0.875,bilinear,-10.394,-5.968,+17 +deit_small_distilled_patch16_224,70.520,29.480,89.470,10.530,22.44,224,0.900,bicubic,-10.680,-5.908,+5 +vit_small_r26_s32_224,70.520,29.480,90.110,9.890,36.43,224,0.900,bicubic,-11.338,-5.912,-20 +legacy_senet154,70.500,29.500,89.010,10.990,115.09,224,0.875,bilinear,-10.810,-6.486,-1 +twins_svt_small,70.440,29.560,89.360,10.640,24.06,224,0.900,bicubic,-11.242,-6.310,-18 +gluon_seresnext101_64x4d,70.430,29.570,89.350,10.650,88.23,224,0.875,bicubic,-10.464,-5.958,+14 +tf_efficientnet_lite4,70.430,29.570,89.110,10.890,13.01,380,0.920,bilinear,-11.106,-6.558,-16 +resnest50d,70.410,29.590,88.760,11.240,27.48,224,0.875,bilinear,-10.564,-6.618,+8 +resnest50d_1s4x24d,70.400,29.600,89.220,10.780,25.68,224,0.875,bicubic,-10.588,-6.102,+6 +seresnext50_32x4d,70.400,29.600,89.110,10.890,27.56,224,0.875,bicubic,-10.866,-6.510,-5 +gernet_l,70.350,29.650,88.980,11.020,31.08,256,0.875,bilinear,-11.004,-6.556,-10 +gluon_resnet152_v1s,70.290,29.710,88.850,11.150,60.32,224,0.875,bicubic,-10.726,-6.562,+2 +repvgg_b3,70.250,29.750,88.730,11.270,123.09,224,0.875,bilinear,-10.242,-6.530,+19 +coat_mini,70.220,29.780,89.440,10.560,10.34,224,0.900,bicubic,-11.048,-5.952,-10 +ecaresnet101d_pruned,70.130,29.870,89.590,10.410,24.88,224,0.875,bicubic,-10.688,-6.038,+7 +efficientnet_el,70.120,29.880,89.290,10.710,10.59,300,0.904,bicubic,-11.196,-6.236,-14 +inception_resnet_v2,70.120,29.880,88.700,11.300,55.84,299,0.897,bicubic,-10.338,-6.606,+20 +resmlp_36_distilled_224,70.090,29.910,89.100,10.900,44.69,224,0.875,bicubic,-11.070,-6.388,-9 
+gluon_seresnext101_32x4d,70.010,29.990,88.900,11.100,48.96,224,0.875,bicubic,-10.894,-6.394,0 +regnety_320,70.000,30.000,88.890,11.110,145.05,224,0.875,bicubic,-10.812,-6.354,+3 +levit_256,69.970,30.030,89.250,10.750,18.89,224,0.900,bicubic,-11.540,-6.240,-28 +gluon_resnet152_v1d,69.960,30.040,88.490,11.510,60.21,224,0.875,bicubic,-10.514,-6.716,+13 +pit_s_224,69.890,30.110,88.930,11.070,23.46,224,0.900,bicubic,-11.204,-6.402,-12 +ecaresnet50d,69.840,30.160,89.400,10.600,25.58,224,0.875,bicubic,-10.752,-5.920,+6 +ssl_resnext50_32x4d,69.710,30.290,89.440,10.560,25.03,224,0.875,bilinear,-10.608,-5.966,+17 +gluon_resnext101_64x4d,69.680,30.320,88.270,11.730,83.46,224,0.875,bicubic,-10.924,-6.718,+3 +resmlp_24_distilled_224,69.680,30.320,89.050,10.950,30.02,224,0.875,bicubic,-11.086,-6.168,-3 +efficientnet_b3_pruned,69.580,30.420,88.980,11.020,9.86,300,0.904,bicubic,-11.278,-6.262,-7 +nf_resnet50,69.540,30.460,88.730,11.270,25.56,288,0.940,bicubic,-11.120,-6.606,-2 +gernet_m,69.530,30.470,88.690,11.310,21.14,224,0.875,bilinear,-11.202,-6.494,-5 +repvgg_b3g4,69.520,30.480,88.450,11.550,83.83,224,0.875,bilinear,-10.692,-6.660,+18 +ens_adv_inception_resnet_v2,69.520,30.480,88.510,11.490,55.84,299,0.897,bicubic,-10.462,-6.428,+28 +efficientnet_el_pruned,69.520,30.480,88.930,11.070,10.59,300,0.904,bicubic,-10.780,-6.098,+15 +efficientnet_b2,69.500,30.500,88.680,11.320,9.11,288,1.000,bicubic,-11.112,-6.638,-6 +rexnet_150,69.470,30.530,88.980,11.020,9.73,224,0.875,bicubic,-10.840,-6.186,+8 +swin_tiny_patch4_window7_224,69.450,30.550,89.020,10.980,28.29,224,0.900,bicubic,-11.928,-6.520,-36 +regnetx_320,69.440,30.560,88.270,11.730,107.81,224,0.875,bicubic,-10.806,-6.756,+12 +vit_base_patch32_224,69.410,30.590,89.420,10.580,88.22,224,0.900,bicubic,-11.314,-6.148,-12 +inception_v4,69.360,30.640,88.780,11.220,42.68,299,0.875,bicubic,-10.808,-6.188,+14 +legacy_seresnext101_32x4d,69.360,30.640,88.070,11.930,48.96,224,0.875,bilinear,-10.868,-6.948,+10 +ecaresnetlight,69.340,30.660,89.220,10.780,30.16,224,0.875,bicubic,-11.122,-6.030,-5 +resnet50d,69.330,30.670,88.220,11.780,25.58,224,0.875,bicubic,-11.200,-6.940,-11 +xception71,69.320,30.680,88.260,11.740,42.34,299,0.903,bicubic,-10.554,-6.662,+23 +vit_small_patch32_384,69.290,30.710,89.820,10.180,22.92,384,1.000,bicubic,-11.190,-5.778,-11 +gluon_xception65,69.160,30.840,88.090,11.910,39.92,299,0.903,bicubic,-10.556,-6.770,+33 +gluon_resnet152_v1c,69.140,30.860,87.870,12.130,60.21,224,0.875,bicubic,-10.770,-6.970,+17 +mixnet_xl,69.100,30.900,88.310,11.690,11.90,224,0.875,bicubic,-11.376,-6.626,-13 +tf_efficientnetv2_b2,69.090,30.910,88.220,11.780,10.10,260,0.890,bicubic,-11.118,-6.822,+4 +gluon_resnet101_v1d,69.010,30.990,88.100,11.900,44.57,224,0.875,bicubic,-11.404,-6.914,-11 +repvgg_b2g4,69.000,31.000,88.360,11.640,61.76,224,0.875,bilinear,-10.366,-6.328,+41 +seresnet50,68.980,31.020,88.710,11.290,28.09,224,0.875,bicubic,-11.294,-6.360,-4 +xception65,68.980,31.020,88.480,11.520,39.92,299,0.903,bicubic,-10.572,-6.174,+32 +gluon_resnext101_32x4d,68.960,31.040,88.360,11.640,44.18,224,0.875,bicubic,-11.374,-6.566,-12 +tf_efficientnet_b2_ap,68.920,31.080,88.350,11.650,9.11,260,0.890,bicubic,-11.380,-6.868,-9 +cspdarknet53,68.890,31.110,88.600,11.400,27.64,256,0.887,bilinear,-11.168,-6.484,+2 +regnety_120,68.850,31.150,88.330,11.670,51.82,224,0.875,bicubic,-11.516,-6.796,-17 +gluon_resnet152_v1b,68.820,31.180,87.710,12.290,60.19,224,0.875,bicubic,-10.866,-7.026,+22 +dpn131,68.770,31.230,87.470,12.530,79.25,224,0.875,bicubic,-11.052,-7.240,+13 
+cspresnext50,68.760,31.240,87.950,12.050,20.57,224,0.875,bilinear,-11.280,-6.994,-1 +tf_efficientnet_b2,68.750,31.250,87.990,12.010,9.11,260,0.890,bicubic,-11.336,-6.918,-4 +resnext50d_32x4d,68.740,31.260,88.300,11.700,25.05,224,0.875,bicubic,-10.936,-6.566,+19 +deit_small_patch16_224,68.720,31.280,88.200,11.800,22.05,224,0.900,bicubic,-11.136,-6.852,+5 +gluon_resnet101_v1s,68.710,31.290,87.910,12.090,44.67,224,0.875,bicubic,-11.592,-7.250,-19 +regnety_080,68.700,31.300,87.970,12.030,39.18,224,0.875,bicubic,-11.176,-6.860,+1 +dpn107,68.690,31.310,88.130,11.870,86.92,224,0.875,bicubic,-11.466,-6.780,-11 +gluon_seresnext50_32x4d,68.670,31.330,88.310,11.690,27.56,224,0.875,bicubic,-11.248,-6.512,-5 +hrnet_w64,68.640,31.360,88.050,11.950,128.06,224,0.875,bilinear,-10.834,-6.602,+20 +resnext50_32x4d,68.640,31.360,87.570,12.430,25.03,224,0.875,bicubic,-11.128,-7.028,+6 +dpn98,68.590,31.410,87.680,12.320,61.57,224,0.875,bicubic,-11.052,-6.918,+12 +regnetx_160,68.530,31.470,88.450,11.550,54.28,224,0.875,bicubic,-11.326,-6.380,-2 +cspresnet50,68.460,31.540,88.010,11.990,21.62,256,0.887,bilinear,-11.114,-6.702,+12 +rexnet_130,68.450,31.550,88.040,11.960,7.56,224,0.875,bicubic,-11.050,-6.642,+14 +tf_efficientnet_el,68.420,31.580,88.210,11.790,10.59,300,0.904,bicubic,-11.830,-6.918,-27 +ecaresnet50d_pruned,68.420,31.580,88.370,11.630,19.94,224,0.875,bicubic,-11.296,-6.510,+4 +regnety_064,68.420,31.580,88.080,11.920,30.58,224,0.875,bicubic,-11.302,-6.688,+2 +ssl_resnet50,68.410,31.590,88.560,11.440,25.56,224,0.875,bilinear,-10.812,-6.272,+26 +cait_xxs36_224,68.410,31.590,88.630,11.370,17.30,224,1.000,bicubic,-11.340,-6.236,-1 +skresnext50_32x4d,68.350,31.650,87.570,12.430,27.48,224,0.875,bicubic,-11.806,-7.072,-23 +dla102x2,68.330,31.670,87.890,12.110,41.28,224,0.875,bilinear,-11.118,-6.750,+10 +efficientnet_b2_pruned,68.320,31.680,88.100,11.900,8.31,260,0.890,bicubic,-11.596,-6.756,-18 +resmlp_big_24_224,68.320,31.680,87.520,12.480,129.14,224,0.875,bicubic,-12.708,-7.502,-68 +gluon_resnext50_32x4d,68.310,31.690,87.300,12.700,25.03,224,0.875,bicubic,-11.044,-7.126,+10 +ecaresnet26t,68.230,31.770,88.790,11.210,16.01,320,0.950,bicubic,-11.624,-6.294,-14 +tf_efficientnet_lite3,68.230,31.770,87.740,12.260,8.20,300,0.904,bilinear,-11.590,-7.174,-12 +ese_vovnet39b,68.210,31.790,88.250,11.750,24.57,224,0.875,bicubic,-11.110,-6.462,+8 +regnetx_120,68.150,31.850,87.660,12.340,46.11,224,0.875,bicubic,-11.446,-7.078,-4 +resmlp_36_224,68.080,31.920,88.190,11.810,44.69,224,0.875,bicubic,-11.690,-6.696,-14 +resnetrs50,68.030,31.970,87.710,12.290,35.69,224,0.910,bicubic,-11.862,-7.258,-24 +pit_xs_distilled_224,68.020,31.980,87.720,12.280,11.00,224,0.900,bicubic,-11.286,-6.644,+8 +dpn92,67.990,32.010,87.580,12.420,37.67,224,0.875,bicubic,-12.018,-7.256,-31 +nf_regnet_b1,67.960,32.040,88.200,11.800,10.22,288,0.900,bicubic,-11.332,-6.548,+8 +gluon_resnet50_v1d,67.940,32.060,87.130,12.870,25.58,224,0.875,bicubic,-11.134,-7.340,+19 +resnetv2_50x1_bitm,67.920,32.080,89.300,10.700,25.55,448,1.000,bilinear,-12.422,-6.384,-53 +levit_192,67.900,32.100,87.890,12.110,10.95,224,0.900,bicubic,-11.942,-6.896,-24 +tf_efficientnetv2_b1,67.890,32.110,87.800,12.200,8.14,240,0.882,bicubic,-11.572,-6.922,-7 +regnetx_080,67.880,32.120,86.990,13.010,39.57,224,0.875,bicubic,-11.314,-7.570,+12 +resnext101_32x8d,67.860,32.140,87.490,12.510,88.79,224,0.875,bilinear,-11.448,-7.028,-3 +legacy_seresnext50_32x4d,67.840,32.160,87.620,12.380,27.56,224,0.875,bilinear,-11.238,-6.816,+11 
+efficientnet_em,67.840,32.160,88.120,11.880,6.90,240,0.882,bicubic,-11.412,-6.674,+4 +resmlp_24_224,67.810,32.190,87.610,12.390,30.02,224,0.875,bicubic,-11.564,-6.936,-10 +hrnet_w48,67.770,32.230,87.420,12.580,77.47,224,0.875,bilinear,-11.530,-7.092,-3 +hrnet_w44,67.740,32.260,87.560,12.440,67.06,224,0.875,bilinear,-11.156,-6.808,+15 +coat_lite_mini,67.720,32.280,87.700,12.300,11.01,224,0.900,bicubic,-11.368,-6.904,+6 +tf_efficientnet_b0_ns,67.710,32.290,88.070,11.930,5.29,224,0.875,bicubic,-10.948,-6.306,+23 +regnetx_064,67.680,32.320,87.520,12.480,26.21,224,0.875,bicubic,-11.392,-6.938,+7 +xception,67.650,32.350,87.570,12.430,22.86,299,0.897,bicubic,-11.402,-6.822,+7 +dpn68b,67.630,32.370,87.660,12.340,12.61,224,0.875,bicubic,-11.586,-6.754,-2 +dla169,67.610,32.390,87.590,12.410,53.39,224,0.875,bilinear,-11.078,-6.746,+17 +gluon_inception_v3,67.590,32.410,87.470,12.530,23.83,299,0.875,bicubic,-11.216,-6.900,+11 +gluon_resnet101_v1c,67.580,32.420,87.180,12.820,44.57,224,0.875,bicubic,-11.954,-7.398,-25 +regnety_040,67.580,32.420,87.510,12.490,20.65,224,0.875,bicubic,-11.640,-7.146,-7 +res2net50_26w_8s,67.570,32.430,87.280,12.720,48.40,224,0.875,bilinear,-11.628,-7.088,-6 +hrnet_w40,67.560,32.440,87.140,12.860,57.56,224,0.875,bilinear,-11.360,-7.330,+3 +legacy_seresnet152,67.520,32.480,87.390,12.610,66.82,224,0.875,bilinear,-11.140,-6.980,+12 +tf_efficientnet_b1_ap,67.520,32.480,87.760,12.240,7.79,240,0.882,bicubic,-11.760,-6.546,-14 +efficientnet_b1,67.470,32.530,87.510,12.490,7.79,256,1.000,bicubic,-11.324,-6.832,+5 +gluon_resnet101_v1b,67.460,32.540,87.240,12.760,44.55,224,0.875,bicubic,-11.846,-7.284,-21 +tf_efficientnet_cc_b1_8e,67.450,32.550,87.310,12.690,39.72,240,0.882,bicubic,-11.858,-7.060,-23 +res2net101_26w_4s,67.440,32.560,87.010,12.990,45.21,224,0.875,bilinear,-11.758,-7.422,-12 +resnet50,67.440,32.560,87.420,12.580,25.56,224,0.875,bicubic,-11.598,-6.970,-6 +resnetblur50,67.430,32.570,87.440,12.560,25.56,224,0.875,bicubic,-11.856,-7.198,-21 +cait_xxs24_224,67.330,32.670,87.510,12.490,11.96,224,1.000,bicubic,-11.056,-6.800,+18 +regnetx_032,67.290,32.710,87.000,13.000,15.30,224,0.875,bicubic,-10.882,-7.088,+27 +xception41,67.250,32.750,87.200,12.800,26.97,299,0.903,bicubic,-11.266,-7.078,+4 +coat_tiny,67.250,32.750,87.340,12.660,5.50,224,0.900,bicubic,-11.184,-6.698,+13 +resnest26d,67.200,32.800,87.170,12.830,17.07,224,0.875,bilinear,-11.278,-7.128,+7 +repvgg_b2,67.160,32.840,87.330,12.670,89.02,224,0.875,bilinear,-11.632,-7.084,-6 +legacy_seresnet101,67.160,32.840,87.060,12.940,49.33,224,0.875,bilinear,-11.222,-7.204,+14 +dla60x,67.100,32.900,87.190,12.810,17.35,224,0.875,bilinear,-11.146,-6.828,+16 +gluon_resnet50_v1s,67.060,32.940,86.860,13.140,25.68,224,0.875,bicubic,-11.652,-7.378,-6 +tv_resnet152,67.050,32.950,87.550,12.450,60.19,224,0.875,bilinear,-11.262,-6.488,+13 +dla60_res2net,67.020,32.980,87.160,12.840,20.85,224,0.875,bilinear,-11.444,-7.046,+2 +dla102x,67.010,32.990,86.770,13.230,26.31,224,0.875,bilinear,-11.500,-7.458,-3 +mixnet_l,66.940,33.060,86.910,13.090,7.33,224,0.875,bicubic,-12.036,-7.272,-19 +pit_xs_224,66.920,33.080,87.280,12.720,10.62,224,0.900,bicubic,-11.262,-6.888,+14 +res2net50_26w_6s,66.910,33.090,86.860,13.140,37.05,224,0.875,bilinear,-11.660,-7.264,-8 +repvgg_b1,66.900,33.100,86.780,13.220,57.42,224,0.875,bilinear,-11.466,-7.318,+5 +tf_efficientnet_b1,66.880,33.120,87.010,12.990,7.79,240,0.882,bicubic,-11.946,-7.188,-20 +efficientnet_es,66.880,33.120,86.730,13.270,5.44,224,0.875,bicubic,-11.186,-7.196,+16 
+regnetx_040,66.840,33.160,86.730,13.270,22.12,224,0.875,bicubic,-11.642,-7.514,-8 +hrnet_w30,66.780,33.220,86.800,13.200,37.71,224,0.875,bilinear,-11.426,-7.422,+7 +tf_mixnet_l,66.780,33.220,86.470,13.530,7.33,224,0.875,bicubic,-11.994,-7.528,-19 +selecsls60b,66.760,33.240,86.530,13.470,32.77,224,0.875,bicubic,-11.652,-7.644,-4 +hrnet_w32,66.750,33.250,87.300,12.700,41.23,224,0.875,bilinear,-11.700,-6.886,-9 +wide_resnet101_2,66.730,33.270,87.030,12.970,126.89,224,0.875,bilinear,-12.126,-7.252,-27 +tf_efficientnetv2_b0,66.700,33.300,86.710,13.290,7.14,224,0.875,bicubic,-11.656,-7.314,-3 +adv_inception_v3,66.650,33.350,86.540,13.460,23.83,299,0.875,bicubic,-10.932,-7.196,+26 +dla60_res2next,66.640,33.360,87.030,12.970,17.03,224,0.875,bilinear,-11.800,-7.122,-12 +vit_tiny_patch16_384,66.610,33.390,87.260,12.740,5.79,384,1.000,bicubic,-11.820,-7.282,-11 +gluon_resnet50_v1c,66.560,33.440,86.180,13.820,25.58,224,0.875,bicubic,-11.452,-7.808,+7 +levit_128,66.550,33.450,86.750,13.250,9.21,224,0.900,bicubic,-11.936,-7.260,-20 +dla102,66.540,33.460,86.910,13.090,33.27,224,0.875,bilinear,-11.492,-7.036,+4 +gmixer_24_224,66.420,33.580,86.150,13.850,24.72,224,0.875,bicubic,-11.616,-7.514,+2 +tf_inception_v3,66.410,33.590,86.660,13.340,23.83,299,0.875,bicubic,-11.452,-6.980,+11 +hardcorenas_f,66.370,33.630,86.200,13.800,8.20,224,0.875,bilinear,-11.734,-7.602,-2 +coat_lite_tiny,66.290,33.710,86.980,13.020,5.72,224,0.900,bicubic,-11.222,-6.936,+20 +efficientnet_b0,66.290,33.710,85.960,14.040,5.29,224,0.875,bicubic,-11.408,-7.572,+11 +legacy_seresnet50,66.250,33.750,86.330,13.670,28.09,224,0.875,bilinear,-11.380,-7.418,+11 +selecsls60,66.210,33.790,86.340,13.660,30.67,224,0.875,bicubic,-11.772,-7.488,0 +tf_efficientnet_em,66.180,33.820,86.360,13.640,6.90,240,0.882,bicubic,-11.950,-7.684,-8 +tv_resnext50_32x4d,66.180,33.820,86.040,13.960,25.03,224,0.875,bilinear,-11.440,-7.656,+9 +tf_efficientnet_cc_b0_8e,66.170,33.830,86.240,13.760,24.01,224,0.875,bicubic,-11.738,-7.414,+1 +inception_v3,66.160,33.840,86.320,13.680,23.83,299,0.875,bicubic,-11.278,-7.154,+15 +res2net50_26w_4s,66.140,33.860,86.600,13.400,25.70,224,0.875,bilinear,-11.824,-7.254,-4 +resmlp_12_distilled_224,66.130,33.870,86.630,13.370,15.35,224,0.875,bicubic,-11.814,-6.928,-4 +efficientnet_b1_pruned,66.090,33.910,86.570,13.430,6.33,240,0.882,bicubic,-12.146,-7.264,-19 +gluon_resnet50_v1b,66.070,33.930,86.260,13.740,25.56,224,0.875,bicubic,-11.510,-7.456,+7 +rexnet_100,66.070,33.930,86.490,13.510,4.80,224,0.875,bicubic,-11.788,-7.380,-2 +regnety_016,66.060,33.940,86.380,13.620,11.20,224,0.875,bicubic,-11.802,-7.340,-5 +res2net50_14w_8s,66.020,33.980,86.250,13.750,25.06,224,0.875,bilinear,-12.130,-7.598,-19 +seresnext26t_32x4d,65.880,34.120,85.680,14.320,16.81,224,0.875,bicubic,-12.106,-8.066,-13 +repvgg_b1g4,65.850,34.150,86.120,13.880,39.97,224,0.875,bilinear,-11.744,-7.706,0 +res2next50,65.850,34.150,85.840,14.160,24.67,224,0.875,bilinear,-12.396,-8.052,-27 +densenet161,65.840,34.160,86.450,13.550,28.68,224,0.875,bicubic,-11.518,-7.188,+7 +hardcorenas_e,65.840,34.160,85.980,14.020,8.07,224,0.875,bilinear,-11.954,-7.714,-8 +resnet34d,65.780,34.220,86.710,13.290,21.82,224,0.875,bicubic,-11.336,-6.672,+11 +mobilenetv3_large_100_miil,65.760,34.240,85.200,14.800,5.48,224,0.875,bilinear,-12.156,-7.710,-15 +skresnet34,65.750,34.250,85.960,14.040,22.28,224,0.875,bicubic,-11.162,-7.362,+17 +tv_resnet101,65.690,34.310,85.980,14.020,44.55,224,0.875,bilinear,-11.684,-7.560,+1 
+hardcorenas_d,65.630,34.370,85.460,14.540,7.50,224,0.875,bilinear,-11.802,-8.024,-1 +selecsls42b,65.610,34.390,85.810,14.190,32.46,224,0.875,bicubic,-11.564,-7.580,+5 +tf_efficientnet_b0_ap,65.490,34.510,85.580,14.420,5.29,224,0.875,bicubic,-11.596,-7.676,+7 +seresnext26d_32x4d,65.410,34.590,85.970,14.030,16.81,224,0.875,bicubic,-12.192,-7.638,-12 +tf_efficientnet_lite2,65.380,34.620,85.990,14.010,6.09,260,0.890,bicubic,-12.088,-7.764,-7 +res2net50_48w_2s,65.350,34.650,85.960,14.040,25.29,224,0.875,bilinear,-12.172,-7.594,-10 +densenet201,65.290,34.710,85.690,14.310,20.01,224,0.875,bicubic,-11.996,-7.788,-3 +densenetblur121d,65.280,34.720,85.710,14.290,8.00,224,0.875,bicubic,-11.308,-7.482,+17 +dla60,65.200,34.800,85.760,14.240,22.04,224,0.875,bilinear,-11.832,-7.558,+3 +ese_vovnet19b_dw,65.190,34.810,85.470,14.530,6.54,224,0.875,bicubic,-11.608,-7.798,+8 +tf_efficientnet_cc_b0_4e,65.150,34.850,85.160,14.840,13.31,224,0.875,bicubic,-12.156,-8.174,-8 +gernet_s,65.120,34.880,85.510,14.490,8.17,224,0.875,bilinear,-11.796,-7.622,+3 +legacy_seresnext26_32x4d,65.050,34.950,85.660,14.340,16.79,224,0.875,bicubic,-12.054,-7.656,-4 +mobilenetv2_120d,65.030,34.970,85.960,14.040,5.83,224,0.875,bicubic,-12.254,-7.532,-9 +hrnet_w18,64.920,35.080,85.740,14.260,21.30,224,0.875,bilinear,-11.838,-7.704,+4 +hardcorenas_c,64.860,35.140,85.250,14.750,5.52,224,0.875,bilinear,-12.194,-7.908,-5 +densenet169,64.760,35.240,85.240,14.760,14.15,224,0.875,bicubic,-11.146,-7.786,+20 +mixnet_m,64.700,35.300,85.450,14.550,5.01,224,0.875,bicubic,-12.560,-7.974,-12 +resnet26d,64.680,35.320,85.120,14.880,16.01,224,0.875,bicubic,-12.016,-8.030,+1 +levit_128s,64.610,35.390,84.730,15.270,7.78,224,0.900,bicubic,-11.920,-8.136,+7 +repvgg_a2,64.450,35.550,85.130,14.870,28.21,224,0.875,bilinear,-12.010,-7.874,+8 +hardcorenas_b,64.420,35.580,84.870,15.130,5.18,224,0.875,bilinear,-12.118,-7.884,+4 +tf_efficientnet_lite1,64.380,35.620,85.470,14.530,5.42,240,0.882,bicubic,-12.262,-7.756,-2 +regnetx_016,64.380,35.620,85.470,14.530,9.19,224,0.875,bicubic,-12.570,-7.950,-10 +resmlp_12_224,64.350,35.650,85.580,14.420,15.35,224,0.875,bicubic,-12.304,-7.600,-4 +tf_efficientnet_b0,64.310,35.690,85.280,14.720,5.29,224,0.875,bicubic,-12.538,-7.948,-9 +tf_mixnet_m,64.270,35.730,85.090,14.910,5.01,224,0.875,bicubic,-12.672,-8.062,-13 +dpn68,64.230,35.770,85.180,14.820,12.61,224,0.875,bicubic,-12.088,-7.798,+2 +tf_efficientnet_es,64.230,35.770,84.740,15.260,5.44,224,0.875,bicubic,-12.364,-8.462,-5 +regnety_008,64.160,35.840,85.270,14.730,6.26,224,0.875,bicubic,-12.156,-7.796,+1 +vit_small_patch32_224,64.070,35.930,85.560,14.440,22.88,224,0.900,bicubic,-11.920,-7.712,+3 +mobilenetv2_140,64.060,35.940,85.040,14.960,6.11,224,0.875,bicubic,-12.456,-7.956,-4 +densenet121,63.750,36.250,84.590,15.410,7.98,224,0.875,bicubic,-11.828,-8.062,+8 +hardcorenas_a,63.710,36.290,84.400,15.600,5.26,224,0.875,bilinear,-12.206,-8.114,+2 +resnest14d,63.590,36.410,84.250,15.750,10.61,224,0.875,bilinear,-11.916,-8.268,+8 +tf_mixnet_s,63.560,36.440,84.270,15.730,4.13,224,0.875,bicubic,-12.090,-8.358,+3 +resnet26,63.470,36.530,84.260,15.740,16.00,224,0.875,bicubic,-11.822,-8.310,+10 +mixnet_s,63.390,36.610,84.740,15.260,4.13,224,0.875,bicubic,-12.602,-8.056,-5 +mobilenetv3_large_100,63.360,36.640,84.090,15.910,5.48,224,0.875,bicubic,-12.406,-8.452,-1 +vit_tiny_r_s16_p8_384,63.340,36.660,85.280,14.720,6.36,384,1.000,bicubic,-12.612,-7.980,-5 +efficientnet_es_pruned,63.330,36.670,84.950,15.050,5.44,224,0.875,bicubic,-11.670,-7.498,+14 
+tv_resnet50,63.330,36.670,84.640,15.360,25.56,224,0.875,bilinear,-12.808,-8.224,-10 +mixer_b16_224,63.280,36.720,83.310,16.690,59.88,224,0.875,bicubic,-13.322,-8.918,-20 +efficientnet_lite0,63.240,36.760,84.440,15.560,4.65,224,0.875,bicubic,-12.244,-8.070,0 +mobilenetv3_rw,63.220,36.780,84.510,15.490,5.48,224,0.875,bicubic,-12.414,-8.198,-5 +pit_ti_distilled_224,63.150,36.850,83.960,16.040,5.10,224,0.900,bicubic,-11.380,-8.136,+16 +semnasnet_100,63.150,36.850,84.520,15.480,3.89,224,0.875,bicubic,-12.298,-8.084,-1 +regnety_006,63.110,36.890,84.250,15.750,6.06,224,0.875,bicubic,-12.136,-8.282,0 +vit_tiny_patch16_224,63.110,36.890,84.850,15.150,5.72,224,0.900,bicubic,-12.344,-7.998,-4 +tv_densenet121,62.940,37.060,84.250,15.750,7.98,224,0.875,bicubic,-11.798,-7.900,+8 +resnet34,62.870,37.130,84.140,15.860,21.80,224,0.875,bilinear,-12.240,-8.144,+1 +legacy_seresnet34,62.850,37.150,84.210,15.790,21.96,224,0.875,bilinear,-11.958,-7.914,+5 +mobilenetv2_110d,62.830,37.170,84.500,15.500,4.52,224,0.875,bicubic,-12.206,-7.686,+1 +deit_tiny_distilled_patch16_224,62.810,37.190,83.930,16.070,5.91,224,0.900,bicubic,-11.700,-7.960,+9 +hrnet_w18_small_v2,62.800,37.200,83.980,16.020,15.60,224,0.875,bilinear,-12.314,-8.436,-4 +swsl_resnet18,62.760,37.240,84.300,15.700,11.69,224,0.875,bilinear,-10.516,-7.434,+16 +repvgg_b0,62.720,37.280,83.860,16.140,15.82,224,0.875,bilinear,-12.432,-8.558,-8 +gluon_resnet34_v1b,62.570,37.430,83.990,16.010,21.80,224,0.875,bicubic,-12.018,-8.000,+3 +tf_efficientnet_lite0,62.550,37.450,84.220,15.780,4.65,224,0.875,bicubic,-12.280,-7.956,-3 +regnetx_008,62.490,37.510,84.020,15.980,7.26,224,0.875,bicubic,-12.548,-8.316,-7 +dla34,62.480,37.520,83.910,16.090,15.74,224,0.875,bilinear,-12.150,-8.168,-1 +tf_mobilenetv3_large_100,62.460,37.540,83.970,16.030,5.48,224,0.875,bilinear,-13.058,-8.636,-20 +fbnetc_100,62.440,37.560,83.380,16.620,5.57,224,0.875,bilinear,-12.684,-9.006,-13 +mnasnet_100,61.900,38.100,83.710,16.290,4.38,224,0.875,bicubic,-12.758,-8.404,-5 +regnety_004,61.870,38.130,83.430,16.570,4.34,224,0.875,bicubic,-12.164,-8.322,+1 +vgg19_bn,61.860,38.140,83.450,16.550,143.68,224,0.875,bilinear,-12.354,-8.392,-2 +convit_tiny,61.590,38.410,84.120,15.880,5.71,224,0.875,bicubic,-11.526,-7.594,+6 +ssl_resnet18,61.480,38.520,83.300,16.700,11.69,224,0.875,bilinear,-11.130,-8.116,+9 +regnetx_006,61.350,38.650,83.450,16.550,6.20,224,0.875,bicubic,-12.502,-8.222,-1 +spnasnet_100,61.220,38.780,82.790,17.210,4.42,224,0.875,bilinear,-12.864,-9.028,-5 +tv_resnet34,61.190,38.810,82.710,17.290,21.80,224,0.875,bilinear,-12.122,-8.716,0 +pit_ti_224,60.980,39.020,83.860,16.140,4.85,224,0.900,bicubic,-11.932,-7.542,+4 +skresnet18,60.860,39.140,82.880,17.120,11.96,224,0.875,bicubic,-12.178,-8.288,+1 +ghostnet_100,60.830,39.170,82.360,17.640,5.18,224,0.875,bilinear,-13.148,-9.096,-7 +vgg16_bn,60.760,39.240,82.950,17.050,138.37,224,0.875,bilinear,-12.590,-8.556,-5 +tf_mobilenetv3_large_075,60.400,39.600,81.950,18.050,3.99,224,0.875,bilinear,-13.038,-9.400,-7 +mobilenetv2_100,60.190,39.810,82.240,17.760,3.50,224,0.875,bicubic,-12.780,-8.776,-2 +resnet18d,60.160,39.840,82.300,17.700,11.71,224,0.875,bicubic,-12.100,-8.396,+3 +deit_tiny_patch16_224,59.830,40.170,82.670,17.330,5.72,224,0.900,bicubic,-12.338,-8.448,+4 +legacy_seresnet18,59.800,40.200,81.690,18.310,11.78,224,0.875,bicubic,-11.942,-8.644,+6 +vgg19,59.710,40.290,81.450,18.550,143.67,224,0.875,bilinear,-12.658,-9.422,-2 +regnetx_004,59.410,40.590,81.690,18.310,5.16,224,0.875,bicubic,-12.986,-9.140,-4 
+tf_mobilenetv3_large_minimal_100,59.070,40.930,81.150,18.850,3.92,224,0.875,bilinear,-13.178,-9.480,-1 +vit_tiny_r_s16_p8_224,59.070,40.930,81.760,18.240,6.34,224,0.900,bicubic,-12.718,-9.068,+1 +vgg13_bn,59.000,41.000,81.070,18.930,133.05,224,0.875,bilinear,-12.594,-9.306,+2 +hrnet_w18_small,58.950,41.050,81.340,18.660,13.19,224,0.875,bilinear,-13.392,-9.338,-6 +vgg16,58.830,41.170,81.660,18.340,138.36,224,0.875,bilinear,-12.764,-8.722,+1 +gluon_resnet18_v1b,58.340,41.660,80.970,19.030,11.69,224,0.875,bicubic,-12.496,-8.792,+1 +vgg11_bn,57.410,42.590,80.020,19.980,132.87,224,0.875,bilinear,-12.950,-9.782,+1 +resnet18,57.170,42.830,80.200,19.800,11.69,224,0.875,bilinear,-12.578,-8.878,+3 +vgg13,57.150,42.850,79.540,20.460,133.05,224,0.875,bilinear,-12.776,-9.706,+1 +regnety_002,57.000,43.000,79.840,20.160,3.16,224,0.875,bicubic,-13.252,-9.700,-1 +mixer_l16_224,56.690,43.310,75.990,24.010,208.20,224,0.875,bicubic,-15.368,-11.678,-9 +regnetx_002,56.050,43.950,79.210,20.790,2.68,224,0.875,bicubic,-12.712,-9.346,+1 +dla60x_c,56.000,44.000,78.930,21.070,1.32,224,0.875,bilinear,-11.892,-9.496,+2 +vgg11,55.800,44.200,78.830,21.170,132.86,224,0.875,bilinear,-13.224,-9.798,-2 +tf_mobilenetv3_small_100,54.530,45.470,77.060,22.940,2.54,224,0.875,bilinear,-13.392,-10.604,-1 +dla46x_c,53.050,46.950,76.870,23.130,1.07,224,0.875,bilinear,-12.920,-10.110,0 +tf_mobilenetv3_small_075,52.160,47.840,75.470,24.530,2.04,224,0.875,bilinear,-13.556,-10.660,0 +dla46_c,52.130,47.870,75.690,24.310,1.30,224,0.875,bilinear,-12.736,-10.602,0 +tf_mobilenetv3_small_minimal_100,49.500,50.500,73.050,26.950,2.04,224,0.875,bilinear,-13.406,-11.180,0 diff --git a/PyTorch/contrib/cv/classification/convmixer/results/results-sketch.csv b/PyTorch/contrib/cv/classification/convmixer/results/results-sketch.csv new file mode 100644 index 0000000000..de2d85d0c7 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/results/results-sketch.csv @@ -0,0 +1,421 @@ +model,top1,top1_err,top5,top5_err,param_count,img_size,cropt_pct,interpolation,top1_diff,top5_diff,rank_diff +ig_resnext101_32x48d,58.810,41.190,81.076,18.924,828.41,224,0.875,bilinear,-26.618,-16.496,+22 +ig_resnext101_32x32d,58.386,41.614,80.381,19.619,468.53,224,0.875,bilinear,-26.708,-17.057,+28 +ig_resnext101_32x16d,57.690,42.310,79.905,20.095,194.03,224,0.875,bilinear,-26.480,-17.291,+53 +swsl_resnext101_32x16d,57.458,42.542,80.385,19.615,194.03,224,0.875,bilinear,-25.888,-16.461,+78 +swsl_resnext101_32x8d,56.438,43.562,78.944,21.056,88.79,224,0.875,bilinear,-27.846,-18.232,+48 +ig_resnext101_32x8d,54.918,45.082,77.534,22.466,88.79,224,0.875,bilinear,-27.770,-19.102,+94 +swsl_resnext101_32x4d,53.603,46.397,76.347,23.653,44.18,224,0.875,bilinear,-29.627,-20.413,+77 +vit_large_patch16_384,52.754,47.246,74.696,25.304,304.72,384,1.000,bicubic,-34.326,-23.604,-4 +vit_large_r50_s32_384,52.039,47.961,73.558,26.442,329.09,384,1.000,bicubic,-34.145,-24.360,+2 +vit_large_patch16_224,51.832,48.168,73.694,26.306,304.33,224,0.900,bicubic,-34.010,-24.130,+6 +tf_efficientnet_l2_ns_475,51.494,48.506,73.928,26.072,480.31,475,0.936,bicubic,-36.740,-24.618,-9 +swsl_resnext50_32x4d,50.437,49.563,73.368,26.633,25.03,224,0.875,bilinear,-31.745,-22.862,+104 +swin_large_patch4_window12_384,50.404,49.596,72.564,27.436,196.74,384,1.000,bicubic,-36.744,-25.670,-10 +swsl_resnet50,49.541,50.459,72.334,27.666,25.56,224,0.875,bilinear,-31.625,-23.638,+134 +swin_large_patch4_window7_224,48.991,51.009,71.391,28.609,196.53,224,0.900,bicubic,-37.329,-26.505,-6 
+swin_base_patch4_window12_384,48.553,51.447,71.813,28.187,87.90,384,1.000,bicubic,-37.879,-26.245,-8 +vit_large_r50_s32_224,48.203,51.797,70.868,29.132,328.99,224,0.900,bicubic,-36.231,-26.296,+32 +tf_efficientnet_b7_ns,47.800,52.200,69.640,30.360,66.35,600,0.949,bicubic,-39.040,-28.454,-13 +tf_efficientnet_b6_ns,47.761,52.239,69.968,30.032,43.04,528,0.942,bicubic,-38.691,-27.914,-12 +tf_efficientnet_l2_ns,47.570,52.430,70.019,29.981,480.31,800,0.960,bicubic,-40.782,-28.631,-19 +tf_efficientnetv2_l_in21ft1k,46.939,53.061,70.310,29.690,118.52,480,1.000,bicubic,-39.365,-27.668,-11 +vit_base_patch16_384,45.894,54.106,68.557,31.443,86.86,384,1.000,bicubic,-40.112,-29.443,-7 +tf_efficientnet_b8_ap,45.774,54.226,67.911,32.089,87.41,672,0.954,bicubic,-39.596,-29.383,+3 +tf_efficientnet_b5_ns,45.615,54.385,67.842,32.158,30.39,456,0.934,bicubic,-40.473,-29.910,-11 +tf_efficientnetv2_m_in21ft1k,45.582,54.418,69.150,30.849,54.14,480,1.000,bicubic,-40.006,-28.602,-6 +swin_base_patch4_window7_224,45.560,54.440,68.512,31.488,87.77,224,0.900,bicubic,-39.692,-29.050,+1 +cait_m48_448,44.245,55.755,64.653,35.347,356.46,448,1.000,bicubic,-42.239,-33.102,-21 +vit_base_r50_s16_384,43.512,56.488,66.785,33.215,98.95,384,1.000,bicubic,-41.460,-30.503,+8 +tf_efficientnet_b4_ns,43.450,56.550,65.519,34.481,19.34,380,0.922,bicubic,-41.713,-31.951,-1 +vit_base_patch16_224,43.220,56.780,65.708,34.292,86.57,224,0.900,bicubic,-41.312,-31.586,+14 +tf_efficientnet_b8,42.508,57.492,64.857,35.143,87.41,672,0.954,bicubic,-42.862,-32.533,-6 +cait_m36_384,42.398,57.602,63.324,36.676,271.22,384,1.000,bicubic,-43.656,-34.406,-18 +tf_efficientnet_b7,41.431,58.569,63.017,36.983,66.35,600,0.949,bicubic,-43.505,-34.186,+4 +tf_efficientnet_b7_ap,41.429,58.571,62.874,37.126,66.35,600,0.949,bicubic,-43.691,-34.378,-5 +tf_efficientnet_b5_ap,41.418,58.582,62.084,37.916,30.39,456,0.934,bicubic,-42.834,-34.890,+20 +resnetv2_152x4_bitm,41.302,58.698,64.307,35.693,936.53,480,1.000,bilinear,-43.614,-33.135,+2 +tf_efficientnet_b6_ap,41.099,58.901,62.355,37.645,43.04,528,0.942,bicubic,-43.689,-34.783,+3 +tf_efficientnetv2_s_in21ft1k,40.950,59.050,63.849,36.151,21.46,384,1.000,bicubic,-43.352,-33.403,+14 +tf_efficientnet_b4_ap,40.484,59.516,61.723,38.277,19.34,380,0.922,bicubic,-42.764,-34.669,+44 +vit_small_r26_s32_384,40.476,59.524,62.736,37.264,36.47,384,1.000,bicubic,-43.570,-34.592,+22 +vit_base_patch16_224_miil,40.168,59.832,60.887,39.113,86.54,224,0.875,bilinear,-44.100,-35.915,+13 +tf_efficientnetv2_l,39.830,60.170,60.801,39.199,118.52,480,1.000,bicubic,-45.660,-36.571,-21 +dm_nfnet_f3,39.818,60.182,60.610,39.390,254.92,416,0.940,bicubic,-45.704,-36.852,-23 +cait_s36_384,39.765,60.235,60.475,39.525,68.37,384,1.000,bicubic,-45.695,-37.005,-22 +efficientnetv2_rw_m,39.667,60.333,59.687,40.313,53.24,416,1.000,bicubic,-45.141,-37.461,-6 +ecaresnet269d,39.594,60.406,60.343,39.657,102.09,352,1.000,bicubic,-45.382,-36.883,-11 +tf_efficientnet_b3_ns,39.584,60.416,61.453,38.547,12.23,300,0.904,bicubic,-44.464,-35.457,+14 +dm_nfnet_f6,39.578,60.422,60.911,39.089,438.36,576,0.956,bicubic,-46.566,-36.819,-36 +dm_nfnet_f5,39.508,60.492,60.227,39.773,377.21,544,0.954,bicubic,-46.306,-37.261,-32 +efficientnet_b4,39.079,60.921,59.608,40.392,19.34,384,1.000,bicubic,-44.349,-36.988,+28 +resnetv2_152x2_bit_teacher_384,38.979,61.021,62.440,37.560,236.34,384,1.000,bicubic,-44.865,-34.678,+16 +vit_base_patch32_384,38.794,61.206,60.329,39.671,88.30,384,1.000,bicubic,-44.556,-36.507,+29 
+eca_nfnet_l2,38.664,61.336,59.445,40.555,56.72,384,1.000,bicubic,-46.033,-37.819,-11 +tf_efficientnet_b5,38.356,61.644,59.913,40.087,30.39,456,0.934,bicubic,-45.456,-36.835,+15 +deit_base_distilled_patch16_384,38.260,61.740,57.783,42.217,87.63,384,1.000,bicubic,-47.162,-39.549,-31 +dm_nfnet_f4,38.224,61.776,58.626,41.374,316.07,512,0.951,bicubic,-47.490,-38.894,-38 +resnetv2_152x2_bitm,37.985,62.015,61.135,38.865,236.34,448,1.000,bilinear,-46.525,-36.297,-11 +cait_s24_384,37.873,62.127,58.079,41.921,47.06,384,1.000,bicubic,-47.173,-39.267,-26 +resnet152d,37.857,62.143,58.356,41.644,60.21,320,1.000,bicubic,-45.823,-38.382,+15 +tf_efficientnetv2_m,37.824,62.176,58.710,41.290,54.14,480,1.000,bicubic,-47.220,-38.568,-27 +resnetrs420,37.747,62.253,58.215,41.785,191.89,416,1.000,bicubic,-47.261,-38.909,-27 +resnetrs350,37.676,62.324,58.083,41.917,163.96,384,1.000,bicubic,-47.044,-38.905,-21 +pit_b_distilled_224,37.590,62.410,57.238,42.762,74.79,224,0.900,bicubic,-46.554,-39.618,-6 +resnet200d,37.505,62.495,58.297,41.703,64.69,320,1.000,bicubic,-46.457,-38.526,+1 +resnetv2_152x2_bit_teacher,37.324,62.676,59.390,40.610,236.34,224,0.875,bicubic,-45.538,-37.178,+29 +resnest269e,37.315,62.685,57.468,42.532,110.93,416,0.928,bicubic,-47.203,-39.518,-21 +resmlp_big_24_224_in22ft1k,37.244,62.756,58.184,41.816,129.14,224,0.875,bicubic,-47.150,-38.937,-17 +vit_small_r26_s32_224,37.234,62.766,59.060,40.940,36.43,224,0.900,bicubic,-44.624,-36.962,+55 +cait_s24_224,37.153,62.847,56.724,43.276,46.92,224,1.000,bicubic,-46.299,-39.840,+8 +vit_base_patch32_224,37.077,62.923,59.294,40.706,88.22,224,0.900,bicubic,-43.647,-36.274,+96 +tf_efficientnet_b3_ap,37.055,62.945,57.240,42.760,12.23,300,0.904,bicubic,-44.767,-38.384,+54 +efficientnetv2_rw_s,37.049,62.951,56.814,43.186,23.94,384,1.000,bicubic,-46.759,-39.910,-2 +seresnet152d,36.790,63.210,56.718,43.282,66.84,320,1.000,bicubic,-47.572,-40.322,-22 +resnetrs200,36.639,63.361,56.828,43.172,93.21,320,1.000,bicubic,-47.427,-40.046,-15 +efficientnet_b3,36.420,63.580,56.845,43.155,12.23,320,1.000,bicubic,-45.822,-39.269,+39 +cait_xs24_384,36.416,63.584,56.944,43.056,26.67,384,1.000,bicubic,-47.645,-39.945,-16 +deit_base_distilled_patch16_224,36.397,63.603,56.617,43.383,87.34,224,0.900,bicubic,-46.991,-39.871,+2 +resnetv2_101x3_bitm,36.381,63.619,59.070,40.930,387.93,448,1.000,bilinear,-48.059,-38.312,-31 +resnetrs270,36.320,63.680,56.562,43.438,129.86,352,1.000,bicubic,-48.114,-40.408,-31 +tresnet_m,36.285,63.715,55.796,44.204,31.39,224,0.875,bilinear,-46.795,-40.322,+9 +mixer_b16_224_miil,36.269,63.731,55.965,44.035,59.88,224,0.875,bilinear,-46.039,-39.751,+29 +tf_efficientnet_b2_ns,36.183,63.817,57.551,42.449,9.11,260,0.890,bicubic,-46.197,-38.697,+23 +dm_nfnet_f2,36.004,63.996,55.456,44.544,193.78,352,0.920,bicubic,-49.060,-41.784,-52 +ecaresnet101d,36.004,63.996,56.165,43.835,44.57,224,0.875,bicubic,-46.168,-39.881,+33 +resnest200e,35.931,64.069,55.849,44.151,70.20,320,0.909,bicubic,-47.901,-41.045,-17 +swsl_resnet18,35.858,64.142,58.455,41.545,11.69,224,0.875,bilinear,-37.418,-33.279,+305 +eca_nfnet_l1,35.823,64.177,55.957,44.043,41.41,320,1.000,bicubic,-48.187,-41.071,-23 +vit_small_patch16_384,35.479,64.521,57.549,42.451,22.20,384,1.000,bicubic,-48.323,-39.553,-17 +resnest101e,35.373,64.627,55.780,44.220,48.28,256,0.875,bilinear,-47.517,-40.540,+4 +convit_base,35.314,64.686,54.927,45.073,86.54,224,0.875,bicubic,-46.976,-41.011,+21 +twins_svt_large,35.086,64.914,54.721,45.279,99.27,224,0.900,bicubic,-48.592,-41.873,-16 
+repvgg_b3g4,35.043,64.957,54.772,45.228,83.83,224,0.875,bilinear,-45.169,-40.338,+98 +repvgg_b3,35.043,64.957,54.542,45.458,123.09,224,0.875,bilinear,-45.449,-40.718,+80 +dm_nfnet_f1,34.990,65.010,54.108,45.892,132.63,320,0.910,bicubic,-49.636,-42.992,-51 +resnet101d,34.872,65.128,54.202,45.798,44.57,320,1.000,bicubic,-48.150,-42.244,-4 +resmlp_big_24_distilled_224,34.788,65.213,54.637,45.363,129.14,224,0.875,bicubic,-48.803,-42.011,-20 +vit_large_patch32_384,34.673,65.326,55.729,44.271,306.63,384,1.000,bicubic,-46.833,-40.363,+37 +dm_nfnet_f0,34.618,65.382,54.672,45.328,71.49,256,0.900,bicubic,-48.767,-41.900,-18 +ssl_resnext101_32x16d,34.605,65.395,55.931,44.069,194.03,224,0.875,bilinear,-47.239,-40.165,+25 +repvgg_b2g4,34.587,65.413,54.782,45.218,61.76,224,0.875,bilinear,-44.779,-39.906,+131 +resnest50d_4s2x40d,34.355,65.645,54.725,45.275,30.42,224,0.875,bicubic,-46.753,-40.833,+49 +resnetrs152,34.355,65.645,53.562,46.438,86.62,320,1.000,bicubic,-49.357,-43.052,-30 +tf_efficientnet_b1_ns,34.157,65.843,55.489,44.511,7.79,240,0.882,bicubic,-47.231,-40.249,+36 +twins_pcpvt_large,34.111,65.888,54.128,45.872,60.99,224,0.900,bicubic,-49.029,-42.470,-18 +tf_efficientnet_b4,34.064,65.936,54.198,45.802,19.34,380,0.922,bicubic,-48.958,-42.102,-13 +ssl_resnext101_32x8d,34.017,65.983,55.601,44.399,88.79,224,0.875,bilinear,-47.599,-40.437,+24 +nfnet_l0,34.002,65.999,54.365,45.635,35.07,288,1.000,bicubic,-48.748,-42.151,-10 +tf_efficientnet_b6,33.998,66.002,54.544,45.456,43.04,528,0.942,bicubic,-50.112,-42.342,-50 +efficientnet_b3_pruned,33.996,66.004,54.108,45.892,9.86,300,0.904,bicubic,-46.862,-41.134,+52 +regnety_160,33.976,66.024,53.546,46.454,83.59,288,1.000,bicubic,-49.710,-43.230,-37 +pit_s_distilled_224,33.939,66.061,53.265,46.735,24.04,224,0.900,bicubic,-48.057,-42.533,+10 +resnetv2_50x3_bitm,33.658,66.342,55.882,44.118,217.32,448,1.000,bilinear,-50.356,-41.242,-49 +resnet51q,33.563,66.437,53.021,46.979,35.70,288,1.000,bilinear,-48.797,-43.159,-7 +regnety_032,33.412,66.588,52.754,47.246,19.44,288,1.000,bicubic,-49.312,-43.670,-16 +gernet_l,33.357,66.643,51.901,48.099,31.08,256,0.875,bilinear,-47.997,-43.635,+26 +tresnet_xl,33.257,66.743,52.294,47.706,78.44,224,0.875,bilinear,-48.797,-43.642,+3 +resnest50d_1s4x24d,33.147,66.853,52.839,47.161,25.68,224,0.875,bicubic,-47.841,-42.483,+38 +twins_pcpvt_base,33.021,66.979,52.485,47.515,43.83,224,0.900,bicubic,-49.687,-43.861,-19 +rexnet_200,32.987,67.013,52.939,47.061,16.37,224,0.875,bicubic,-48.645,-42.729,+10 +resnest50d,32.972,67.028,52.713,47.287,27.48,224,0.875,bilinear,-48.002,-42.665,+36 +tf_efficientnetv2_s,32.915,67.085,51.726,48.274,21.46,384,1.000,bicubic,-50.979,-44.972,-55 +convit_small,32.913,67.087,52.123,47.877,27.78,224,0.875,bicubic,-48.513,-43.621,+15 +vit_small_patch16_224,32.885,67.115,53.923,46.077,22.05,224,0.900,bicubic,-48.517,-42.211,+15 +tf_efficientnet_b3,32.860,67.140,52.950,47.050,12.23,300,0.904,bicubic,-48.776,-42.768,+4 +pnasnet5large,32.848,67.152,50.500,49.500,86.06,331,0.911,bicubic,-49.934,-45.540,-29 +twins_svt_base,32.836,67.164,51.559,48.441,56.07,224,0.900,bicubic,-50.300,-44.859,-39 +nasnetalarge,32.775,67.225,50.141,49.859,88.75,331,0.911,bicubic,-49.845,-45.906,-26 +gernet_m,32.740,67.260,51.913,48.087,21.14,224,0.875,bilinear,-47.992,-43.271,+37 +inception_resnet_v2,32.738,67.262,50.648,49.352,55.84,299,0.897,bicubic,-47.720,-44.658,+48 +gluon_resnet152_v1d,32.734,67.266,51.088,48.912,60.21,224,0.875,bicubic,-47.740,-44.118,+45 
+pit_b_224,32.718,67.282,49.852,50.148,73.76,224,0.900,bicubic,-49.728,-45.858,-27 +tf_efficientnet_b2_ap,32.681,67.319,52.239,47.761,9.11,260,0.890,bicubic,-47.619,-42.979,+53 +tresnet_l,32.559,67.441,51.139,48.861,55.99,224,0.875,bilinear,-48.931,-44.485,+2 +cait_xxs36_384,32.549,67.451,52.233,47.767,17.37,384,1.000,bicubic,-49.645,-43.915,-19 +wide_resnet50_2,32.439,67.561,51.459,48.541,68.88,224,0.875,bicubic,-49.017,-44.073,+1 +ens_adv_inception_resnet_v2,32.370,67.629,50.427,49.573,55.84,299,0.897,bicubic,-47.611,-44.511,+64 +deit_base_patch16_224,32.363,67.637,51.011,48.989,86.57,224,0.900,bicubic,-49.635,-44.723,-17 +swin_small_patch4_window7_224,32.341,67.659,50.905,49.095,49.61,224,0.900,bicubic,-50.871,-45.417,-53 +gluon_resnet152_v1s,32.331,67.669,50.526,49.474,60.32,224,0.875,bicubic,-48.685,-44.886,+15 +deit_small_distilled_patch16_224,32.284,67.716,52.102,47.898,22.44,224,0.900,bicubic,-48.916,-43.276,+7 +gluon_seresnext101_64x4d,32.205,67.795,50.319,49.681,88.23,224,0.875,bicubic,-48.689,-44.989,+19 +coat_lite_small,32.127,67.873,49.934,50.066,19.84,224,0.900,bicubic,-50.181,-45.916,-33 +gluon_seresnext101_32x4d,32.107,67.893,51.237,48.763,48.96,224,0.875,bicubic,-48.797,-44.057,+16 +deit_base_patch16_384,31.989,68.011,50.547,49.453,86.86,384,1.000,bicubic,-51.117,-45.825,-56 +seresnext50_32x4d,31.985,68.015,51.231,48.769,27.56,224,0.875,bicubic,-49.281,-44.389,0 +levit_384,31.877,68.123,50.598,49.402,39.13,224,0.900,bicubic,-50.709,-45.418,-44 +resnetrs101,31.858,68.142,51.017,48.983,63.62,288,0.940,bicubic,-50.430,-44.991,-35 +cspresnext50,31.822,68.178,51.602,48.398,20.57,224,0.875,bilinear,-48.218,-43.342,+50 +tnt_s_patch16_224,31.643,68.357,51.143,48.857,23.76,224,0.900,bicubic,-49.875,-44.605,-17 +eca_nfnet_l0,31.612,68.388,51.612,48.388,24.14,288,1.000,bicubic,-50.968,-44.878,-47 +resnetv2_50x1_bit_distilled,31.584,68.416,51.263,48.737,25.55,224,0.875,bicubic,-51.234,-45.259,-56 +resnet50,31.547,68.453,50.170,49.830,25.56,224,0.875,bicubic,-47.491,-44.220,+102 +ssl_resnext101_32x4d,31.423,68.577,52.121,47.879,44.18,224,0.875,bilinear,-49.501,-43.607,+5 +inception_v4,31.378,68.622,49.244,50.756,42.68,299,0.875,bicubic,-48.790,-45.724,+39 +rexnet_150,31.366,68.634,51.288,48.712,9.73,224,0.875,bicubic,-48.944,-43.878,+28 +pit_s_224,31.333,68.667,49.661,50.339,23.46,224,0.900,bicubic,-49.761,-45.671,-5 +cait_xxs36_224,31.278,68.722,50.616,49.384,17.30,224,1.000,bicubic,-48.472,-44.250,+58 +cspresnet50,31.270,68.730,51.223,48.777,21.62,256,0.887,bilinear,-48.304,-43.489,+65 +coat_mini,31.203,68.797,49.773,50.227,10.34,224,0.900,bicubic,-50.065,-45.619,-15 +ecaresnetlight,31.121,68.879,50.243,49.757,30.16,224,0.875,bicubic,-49.341,-45.007,+16 +gluon_resnet101_v1s,31.115,68.885,49.793,50.207,44.67,224,0.875,bicubic,-49.187,-45.367,+23 +tf_efficientnet_cc_b0_8e,31.087,68.913,50.761,49.239,24.01,224,0.875,bicubic,-46.821,-42.892,+141 +resmlp_36_distilled_224,31.070,68.930,49.683,50.317,44.69,224,0.875,bicubic,-50.090,-45.805,-14 +ecaresnet50d,31.058,68.942,50.848,49.152,25.58,224,0.875,bicubic,-49.534,-44.472,+6 +ecaresnet50t,31.058,68.942,50.577,49.423,25.57,320,0.950,bicubic,-51.288,-45.561,-58 +resnet50d,31.020,68.980,49.808,50.192,25.58,224,0.875,bicubic,-49.510,-45.352,+5 +cspdarknet53,31.018,68.981,50.390,49.610,27.64,256,0.887,bilinear,-49.040,-44.694,+30 +gluon_resnet152_v1c,30.991,69.009,48.924,51.076,60.21,224,0.875,bicubic,-48.919,-45.916,+35 +gluon_resnext101_64x4d,30.987,69.013,48.549,51.451,83.46,224,0.875,bicubic,-49.617,-46.439,0 
+twins_svt_small,30.985,69.015,49.223,50.777,24.06,224,0.900,bicubic,-50.697,-46.447,-43 +resmlp_24_distilled_224,30.901,69.099,50.178,49.822,30.02,224,0.875,bicubic,-49.865,-45.040,-7 +tf_efficientnet_cc_b1_8e,30.899,69.101,50.080,49.920,39.72,240,0.882,bicubic,-48.409,-44.290,+63 +ecaresnet101d_pruned,30.897,69.103,50.013,49.987,24.88,224,0.875,bicubic,-49.921,-45.615,-11 +gluon_resnext101_32x4d,30.877,69.123,48.537,51.463,44.18,224,0.875,bicubic,-49.457,-46.389,+7 +tf_efficientnetv2_b3,30.861,69.139,49.814,50.186,14.36,300,0.904,bicubic,-51.109,-45.968,-53 +tf_efficientnet_lite4,30.830,69.170,50.386,49.614,13.01,380,0.920,bilinear,-50.706,-45.282,-45 +nf_resnet50,30.702,69.298,49.958,50.042,25.56,288,0.940,bicubic,-49.958,-45.378,-10 +dpn107,30.678,69.322,48.810,51.190,86.92,224,0.875,bicubic,-49.478,-46.100,+16 +ese_vovnet39b,30.657,69.343,49.875,50.125,24.57,224,0.875,bicubic,-48.663,-44.837,+54 +gluon_resnet152_v1b,30.623,69.376,48.521,51.479,60.19,224,0.875,bicubic,-49.063,-46.215,+39 +tresnet_xl_448,30.614,69.386,49.069,50.931,78.44,448,0.875,bilinear,-52.436,-47.105,-91 +ssl_resnext50_32x4d,30.594,69.406,50.657,49.343,25.03,224,0.875,bilinear,-49.724,-44.749,0 +gluon_resnet101_v1d,30.523,69.477,47.950,52.050,44.57,224,0.875,bicubic,-49.891,-47.064,-5 +dpn68b,30.517,69.483,49.158,50.842,12.61,224,0.875,bicubic,-48.699,-45.256,+61 +resnest26d,30.490,69.510,50.677,49.323,17.07,224,0.875,bilinear,-47.988,-43.621,+88 +efficientnet_b2,30.435,69.565,49.698,50.302,9.11,288,1.000,bicubic,-50.177,-45.620,-18 +tf_efficientnet_b1_ap,30.421,69.579,49.553,50.447,7.79,240,0.882,bicubic,-48.859,-44.753,+54 +twins_pcpvt_small,30.382,69.618,49.386,50.614,24.11,224,0.900,bicubic,-50.706,-46.256,-36 +visformer_small,30.329,69.671,48.285,51.715,40.22,224,0.900,bicubic,-51.777,-47.587,-71 +pit_xs_distilled_224,30.278,69.722,49.836,50.164,11.00,224,0.900,bicubic,-49.028,-44.528,+47 +seresnet50,30.077,69.923,49.292,50.708,28.09,224,0.875,bicubic,-50.197,-45.778,-4 +dpn98,30.067,69.933,48.244,51.756,61.57,224,0.875,bicubic,-49.575,-46.354,+29 +tf_efficientnet_b2,30.026,69.974,49.581,50.419,9.11,260,0.890,bicubic,-50.060,-45.328,+3 +dpn131,30.024,69.976,48.146,51.854,79.25,224,0.875,bicubic,-49.798,-46.564,+17 +efficientnet_el,30.018,69.982,48.834,51.166,10.59,300,0.904,bicubic,-51.298,-46.692,-53 +legacy_senet154,30.001,69.999,48.034,51.966,115.09,224,0.875,bilinear,-51.309,-47.462,-53 +dpn92,29.953,70.047,49.162,50.838,37.67,224,0.875,bicubic,-50.055,-45.674,+2 +resnetv2_101x1_bitm,29.898,70.102,51.121,48.879,44.54,448,1.000,bilinear,-52.434,-45.397,-90 +gluon_senet154,29.877,70.123,47.894,52.106,115.09,224,0.875,bicubic,-51.357,-47.454,-53 +xception,29.865,70.135,48.686,51.314,22.86,299,0.897,bicubic,-49.187,-45.706,+53 +adv_inception_v3,29.816,70.184,47.847,52.153,23.83,299,0.875,bicubic,-47.766,-45.889,+112 +gluon_xception65,29.784,70.216,47.755,52.245,39.92,299,0.903,bicubic,-49.932,-47.105,+16 +resmlp_36_224,29.692,70.308,48.969,51.031,44.69,224,0.875,bicubic,-50.078,-45.917,+10 +resnetblur50,29.625,70.375,48.248,51.752,25.56,224,0.875,bicubic,-49.661,-46.390,+36 +efficientnet_em,29.486,70.514,48.946,51.054,6.90,240,0.882,bicubic,-49.766,-45.848,+37 +resnext101_32x8d,29.439,70.561,48.486,51.514,88.79,224,0.875,bilinear,-49.869,-46.032,+28 +coat_lite_mini,29.433,70.567,47.724,52.276,11.01,224,0.900,bicubic,-49.655,-46.880,+42 +ssl_resnet50,29.423,70.577,49.781,50.219,25.56,224,0.875,bilinear,-49.799,-45.051,+35 
+deit_small_patch16_224,29.421,70.579,48.256,51.744,22.05,224,0.900,bicubic,-50.435,-46.796,-2 +nf_regnet_b1,29.390,70.611,49.425,50.575,10.22,288,0.900,bicubic,-49.903,-45.323,+29 +cait_xxs24_384,29.387,70.612,48.753,51.247,12.03,384,1.000,bicubic,-51.578,-46.893,-54 +swin_tiny_patch4_window7_224,29.334,70.666,47.602,52.398,28.29,224,0.900,bicubic,-52.044,-47.938,-72 +resnext50_32x4d,29.331,70.669,47.397,52.603,25.03,224,0.875,bicubic,-50.438,-47.201,+1 +resnet34d,29.328,70.671,48.409,51.591,21.82,224,0.875,bicubic,-47.788,-44.973,+113 +cait_xxs24_224,29.303,70.697,48.535,51.465,11.96,224,1.000,bicubic,-49.083,-45.775,+65 +ecaresnet50d_pruned,29.215,70.785,48.453,51.547,19.94,224,0.875,bicubic,-50.501,-46.427,+1 +tresnet_l_448,29.165,70.835,47.232,52.768,55.99,448,0.875,bilinear,-53.103,-48.744,-104 +gluon_inception_v3,29.122,70.878,46.957,53.043,23.83,299,0.875,bicubic,-49.684,-47.413,+42 +xception71,29.047,70.953,47.405,52.595,42.34,299,0.903,bicubic,-50.826,-47.517,-13 +hrnet_w64,28.989,71.011,47.142,52.858,128.06,224,0.875,bilinear,-50.485,-47.510,+7 +tf_efficientnet_b0_ns,28.902,71.098,49.011,50.989,5.29,224,0.875,bicubic,-49.756,-45.365,+46 +xception65,28.896,71.104,47.167,52.833,39.92,299,0.903,bicubic,-50.656,-47.487,+2 +tf_efficientnet_b1,28.886,71.114,47.503,52.497,7.79,240,0.882,bicubic,-49.940,-46.695,+36 +gluon_resnet101_v1b,28.878,71.121,46.389,53.611,44.55,224,0.875,bicubic,-50.427,-48.135,+12 +vit_small_patch32_384,28.871,71.129,48.887,51.113,22.92,384,1.000,bicubic,-51.609,-46.711,-52 +skresnext50_32x4d,28.818,71.182,46.497,53.503,27.48,224,0.875,bicubic,-51.338,-48.145,-31 +levit_256,28.745,71.255,46.723,53.277,18.89,224,0.900,bicubic,-52.765,-48.767,-94 +tf_efficientnet_lite3,28.660,71.340,47.354,52.646,8.20,300,0.904,bilinear,-51.160,-47.560,-16 +gluon_seresnext50_32x4d,28.651,71.349,46.436,53.564,27.56,224,0.875,bicubic,-51.267,-48.386,-28 +skresnet34,28.645,71.355,47.953,52.047,22.28,224,0.875,bicubic,-48.267,-45.369,+105 +hrnet_w40,28.641,71.359,47.454,52.546,57.56,224,0.875,bilinear,-50.279,-47.016,+25 +tf_efficientnetv2_b0,28.566,71.434,47.079,52.921,7.14,224,0.875,bicubic,-49.790,-46.945,+51 +tv_resnet152,28.533,71.467,47.118,52.882,60.19,224,0.875,bilinear,-49.779,-46.920,+51 +repvgg_b2,28.427,71.573,47.038,52.962,89.02,224,0.875,bilinear,-50.365,-47.376,+28 +hrnet_w48,28.413,71.587,47.586,52.414,77.47,224,0.875,bilinear,-50.887,-46.926,+3 +gluon_resnext50_32x4d,28.375,71.624,45.328,54.672,25.03,224,0.875,bicubic,-50.978,-49.098,-4 +efficientnet_b2_pruned,28.362,71.638,47.051,52.949,8.31,260,0.890,bicubic,-51.554,-47.805,-35 +tf_efficientnet_b0_ap,28.346,71.654,47.531,52.469,5.29,224,0.875,bicubic,-48.740,-45.725,+91 +tf_efficientnet_cc_b0_4e,28.315,71.685,47.364,52.636,13.31,224,0.875,bicubic,-48.991,-45.970,+83 +dla102x2,28.313,71.687,46.761,53.239,41.28,224,0.875,bilinear,-51.135,-47.879,-11 +dla169,28.313,71.687,47.391,52.609,53.39,224,0.875,bilinear,-50.375,-46.945,+24 +mixnet_xl,28.287,71.713,46.702,53.298,11.90,224,0.875,bicubic,-52.189,-48.234,-68 +gluon_resnet50_v1d,28.246,71.754,45.878,54.122,25.58,224,0.875,bicubic,-50.828,-48.592,+8 +wide_resnet101_2,28.108,71.892,46.401,53.599,126.89,224,0.875,bilinear,-50.748,-47.881,+14 +gluon_resnet101_v1c,28.104,71.896,45.961,54.039,44.57,224,0.875,bicubic,-51.430,-48.617,-20 +regnetx_320,28.093,71.907,45.126,54.874,107.81,224,0.875,bicubic,-52.153,-49.900,-57 +densenet161,28.081,71.919,46.641,53.359,28.68,224,0.875,bicubic,-49.277,-46.997,+74 
+regnety_320,28.059,71.941,45.444,54.556,145.05,224,0.875,bicubic,-52.753,-49.800,-85 +gernet_s,28.022,71.978,46.723,53.277,8.17,224,0.875,bilinear,-48.894,-46.409,+85 +levit_192,28.016,71.984,45.880,54.120,10.95,224,0.900,bicubic,-51.826,-48.906,-41 +efficientnet_el_pruned,28.016,71.984,46.790,53.210,10.59,300,0.904,bicubic,-52.284,-48.238,-64 +xception41,27.888,72.112,45.890,54.110,26.97,299,0.903,bicubic,-50.628,-48.388,+17 +regnetx_160,27.817,72.183,45.617,54.383,54.28,224,0.875,bicubic,-52.039,-49.213,-45 +tf_inception_v3,27.780,72.220,45.721,54.279,23.83,299,0.875,bicubic,-50.082,-47.919,+51 +res2net101_26w_4s,27.768,72.232,45.179,54.821,45.21,224,0.875,bilinear,-51.430,-49.253,-8 +tf_efficientnetv2_b1,27.760,72.240,46.578,53.422,8.14,240,0.882,bicubic,-51.702,-48.144,-28 +repvgg_b1,27.656,72.344,46.531,53.469,57.42,224,0.875,bilinear,-50.710,-47.567,+25 +hrnet_w44,27.621,72.379,45.837,54.163,67.06,224,0.875,bilinear,-51.275,-48.531,-1 +inception_v3,27.556,72.444,45.263,54.737,23.83,299,0.875,bicubic,-49.882,-48.211,+59 +resmlp_24_224,27.521,72.479,45.696,54.304,30.02,224,0.875,bicubic,-51.853,-48.851,-30 +pit_xs_224,27.491,72.509,45.900,54.100,10.62,224,0.900,bicubic,-50.691,-48.268,+28 +regnetx_080,27.405,72.595,45.002,54.998,39.57,224,0.875,bicubic,-51.789,-49.558,-14 +hrnet_w30,27.381,72.619,46.554,53.446,37.71,224,0.875,bilinear,-50.825,-47.668,+25 +hrnet_w32,27.369,72.631,45.994,54.006,41.23,224,0.875,bilinear,-51.081,-48.192,+11 +gluon_resnet50_v1s,27.326,72.674,45.222,54.778,25.68,224,0.875,bicubic,-51.386,-49.016,-1 +densenet201,27.265,72.735,46.222,53.778,20.01,224,0.875,bicubic,-50.021,-47.256,+57 +densenetblur121d,27.228,72.772,46.299,53.701,8.00,224,0.875,bicubic,-49.360,-46.893,+77 +regnety_064,27.220,72.780,44.847,55.153,30.58,224,0.875,bicubic,-52.502,-49.921,-52 +efficientnet_b1_pruned,27.181,72.819,45.872,54.128,6.33,240,0.882,bicubic,-51.055,-47.962,+18 +tf_efficientnetv2_b2,27.163,72.837,44.570,55.430,10.10,260,0.890,bicubic,-53.045,-50.472,-78 +resnetrs50,27.110,72.890,45.029,54.971,35.69,224,0.910,bicubic,-52.782,-49.939,-67 +rexnet_130,27.094,72.906,45.933,54.067,7.56,224,0.875,bicubic,-52.406,-48.749,-46 +res2net50_26w_8s,27.078,72.921,44.428,55.572,48.40,224,0.875,bilinear,-52.119,-49.940,-27 +dla102x,27.061,72.939,45.475,54.525,26.31,224,0.875,bilinear,-51.449,-48.753,-4 +gmixer_24_224,27.027,72.972,44.361,55.639,24.72,224,0.875,bicubic,-51.008,-49.303,+20 +tv_resnet101,26.963,73.037,45.234,54.766,44.55,224,0.875,bilinear,-50.411,-48.306,+44 +resnext50d_32x4d,26.876,73.124,44.436,55.564,25.05,224,0.875,bicubic,-52.800,-50.430,-57 +regnetx_120,26.868,73.132,44.682,55.318,46.11,224,0.875,bicubic,-52.728,-50.056,-56 +rexnet_100,26.831,73.169,45.369,54.631,4.80,224,0.875,bicubic,-51.027,-48.501,+27 +densenet169,26.829,73.171,45.373,54.627,14.15,224,0.875,bicubic,-49.077,-47.653,+76 +legacy_seresnext101_32x4d,26.811,73.189,43.497,56.503,48.96,224,0.875,bilinear,-53.417,-51.521,-91 +regnety_120,26.788,73.212,44.454,55.546,51.82,224,0.875,bicubic,-53.578,-50.672,-103 +regnetx_064,26.784,73.216,44.927,55.073,26.21,224,0.875,bicubic,-52.288,-49.531,-31 +regnetx_032,26.703,73.297,45.236,54.764,15.30,224,0.875,bicubic,-51.469,-48.852,+6 +legacy_seresnet152,26.676,73.324,43.947,56.053,66.82,224,0.875,bilinear,-51.984,-50.423,-19 +densenet121,26.664,73.336,45.900,54.100,7.98,224,0.875,bicubic,-48.914,-46.752,+74 +efficientnet_es,26.621,73.379,45.112,54.888,5.44,224,0.875,bicubic,-51.445,-48.814,+7 
+res2net50_26w_6s,26.595,73.405,43.990,56.010,37.05,224,0.875,bilinear,-51.975,-50.134,-20 +repvgg_b1g4,26.579,73.421,45.084,54.916,39.97,224,0.875,bilinear,-51.015,-48.742,+23 +dla60x,26.552,73.448,45.023,54.977,17.35,224,0.875,bilinear,-51.694,-48.995,-5 +regnety_080,26.524,73.476,44.359,55.641,39.18,224,0.875,bicubic,-53.352,-50.471,-86 +coat_lite_tiny,26.507,73.493,44.644,55.356,5.72,224,0.900,bicubic,-51.005,-49.272,+24 +tf_efficientnet_b0,26.485,73.515,45.646,54.354,5.29,224,0.875,bicubic,-50.363,-47.582,+43 +res2net50_14w_8s,26.483,73.517,44.371,55.629,25.06,224,0.875,bilinear,-51.667,-49.477,-3 +mobilenetv3_large_100_miil,26.481,73.519,44.473,55.527,5.48,224,0.875,bilinear,-51.435,-48.437,+7 +gluon_resnet50_v1b,26.436,73.564,44.035,55.965,25.56,224,0.875,bicubic,-51.144,-49.681,+18 +tf_efficientnet_el,26.357,73.643,44.175,55.825,10.59,300,0.904,bicubic,-53.893,-50.953,-109 +levit_128,26.332,73.668,44.096,55.904,9.21,224,0.900,bicubic,-52.154,-49.914,-27 +resmlp_big_24_224,26.318,73.682,43.556,56.444,129.14,224,0.875,bicubic,-54.710,-51.466,-146 +resmlp_12_distilled_224,26.314,73.686,44.874,55.126,15.35,224,0.875,bicubic,-51.630,-48.684,+1 +regnetx_040,26.243,73.757,44.438,55.562,22.12,224,0.875,bicubic,-52.239,-49.806,-29 +vit_small_patch32_224,26.151,73.849,45.104,54.896,22.88,224,0.900,bicubic,-49.839,-48.168,+51 +dpn68,26.129,73.871,44.228,55.772,12.61,224,0.875,bicubic,-50.189,-48.750,+46 +efficientnet_b1,26.061,73.939,44.080,55.920,7.79,256,1.000,bicubic,-52.733,-50.262,-43 +hrnet_w18,25.986,74.014,44.813,55.187,21.30,224,0.875,bilinear,-50.772,-48.631,+33 +hardcorenas_f,25.951,74.049,44.220,55.780,8.20,224,0.875,bilinear,-52.153,-49.582,-13 +regnety_040,25.923,74.077,43.848,56.152,20.65,224,0.875,bicubic,-53.297,-50.808,-63 +resnet34,25.888,74.112,43.982,56.018,21.80,224,0.875,bilinear,-49.222,-48.302,+63 +res2net50_26w_4s,25.866,74.134,43.155,56.845,25.70,224,0.875,bilinear,-52.098,-50.699,-9 +tresnet_m_448,25.852,74.148,42.874,57.126,31.39,448,0.875,bilinear,-55.862,-52.698,-184 +coat_tiny,25.843,74.157,43.276,56.724,5.50,224,0.900,bicubic,-52.591,-50.761,-34 +hardcorenas_c,25.815,74.185,44.772,55.228,5.52,224,0.875,bilinear,-51.239,-48.386,+18 +gluon_resnet50_v1c,25.784,74.216,43.031,56.969,25.58,224,0.875,bicubic,-52.228,-50.957,-16 +selecsls60,25.729,74.272,44.065,55.935,30.67,224,0.875,bicubic,-52.254,-49.764,-15 +hardcorenas_e,25.662,74.338,43.412,56.588,8.07,224,0.875,bilinear,-52.132,-50.282,-8 +dla60_res2net,25.652,74.348,43.599,56.401,20.85,224,0.875,bilinear,-52.812,-50.607,-42 +dla60_res2next,25.640,74.360,43.670,56.330,17.03,224,0.875,bilinear,-52.800,-50.482,-41 +ecaresnet26t,25.538,74.462,43.660,56.340,16.01,320,0.950,bicubic,-54.316,-51.424,-109 +resmlp_12_224,25.518,74.482,44.324,55.676,15.35,224,0.875,bicubic,-51.136,-48.856,+21 +mixnet_l,25.512,74.488,43.455,56.545,7.33,224,0.875,bicubic,-53.464,-50.727,-65 +tf_efficientnet_lite1,25.499,74.501,43.585,56.415,5.42,240,0.882,bicubic,-51.143,-49.641,+20 +tv_resnext50_32x4d,25.455,74.545,42.787,57.213,25.03,224,0.875,bilinear,-52.165,-50.909,-12 +repvgg_a2,25.436,74.564,43.939,56.061,28.21,224,0.875,bilinear,-51.024,-49.065,+25 +tf_mixnet_l,25.422,74.578,42.534,57.466,7.33,224,0.875,bicubic,-53.352,-51.464,-61 +hardcorenas_b,25.402,74.598,44.190,55.810,5.18,224,0.875,bilinear,-51.136,-48.564,+20 +res2next50,25.389,74.611,42.508,57.492,24.67,224,0.875,bilinear,-52.857,-51.384,-40 +legacy_seresnet101,25.334,74.666,42.825,57.175,49.33,224,0.875,bilinear,-53.048,-51.439,-46 
+selecsls60b,25.332,74.668,43.559,56.441,32.77,224,0.875,bicubic,-53.080,-50.615,-49 +resnetv2_50x1_bitm,25.324,74.676,45.359,54.641,25.55,448,1.000,bilinear,-55.018,-50.325,-149 +dla102,25.316,74.684,43.827,56.173,33.27,224,0.875,bilinear,-52.716,-50.119,-34 +hardcorenas_d,25.300,74.700,43.121,56.879,7.50,224,0.875,bilinear,-52.132,-50.363,-12 +resnest14d,25.284,74.716,44.114,55.886,10.61,224,0.875,bilinear,-50.222,-48.404,+30 +legacy_seresnext50_32x4d,25.210,74.790,41.936,58.064,27.56,224,0.875,bilinear,-53.868,-52.500,-83 +mixer_b16_224,25.121,74.879,41.227,58.773,59.88,224,0.875,bicubic,-51.481,-51.001,+8 +res2net50_48w_2s,25.027,74.973,42.208,57.792,25.29,224,0.875,bilinear,-52.495,-51.346,-20 +efficientnet_b0,25.015,74.985,42.787,57.213,5.29,224,0.875,bicubic,-52.683,-50.745,-28 +gluon_resnet34_v1b,24.939,75.061,42.243,57.757,21.80,224,0.875,bicubic,-49.649,-49.747,+43 +mobilenetv2_120d,24.937,75.063,43.058,56.942,5.83,224,0.875,bicubic,-52.347,-50.434,-14 +dla60,24.933,75.067,43.296,56.704,22.04,224,0.875,bilinear,-52.099,-50.022,-8 +regnety_016,24.811,75.189,42.616,57.384,11.20,224,0.875,bicubic,-53.051,-51.104,-36 +tf_efficientnet_lite2,24.530,75.470,42.280,57.720,6.09,260,0.890,bicubic,-52.938,-51.474,-24 +skresnet18,24.483,75.517,42.536,57.464,11.96,224,0.875,bicubic,-48.555,-48.632,+51 +regnetx_016,24.473,75.527,42.514,57.486,9.19,224,0.875,bicubic,-52.477,-50.906,-11 +pit_ti_distilled_224,24.406,75.594,42.730,57.270,5.10,224,0.900,bicubic,-50.124,-49.366,+37 +tf_efficientnet_lite0,24.373,75.627,42.487,57.513,4.65,224,0.875,bicubic,-50.457,-49.689,+30 +hardcorenas_a,24.369,75.631,43.284,56.716,5.26,224,0.875,bilinear,-51.547,-49.230,+9 +tv_resnet50,24.070,75.930,41.313,58.687,25.56,224,0.875,bilinear,-52.068,-51.551,+4 +levit_128s,24.058,75.942,41.007,58.993,7.78,224,0.900,bicubic,-52.472,-51.859,-2 +legacy_seresnet34,24.027,75.973,41.909,58.091,21.96,224,0.875,bilinear,-50.781,-50.215,+27 +resnet18d,23.929,76.071,42.300,57.700,11.71,224,0.875,bicubic,-48.331,-48.396,+50 +efficientnet_lite0,23.909,76.091,42.088,57.912,4.65,224,0.875,bicubic,-51.575,-50.422,+12 +tv_densenet121,23.844,76.156,41.925,58.075,7.98,224,0.875,bicubic,-50.894,-50.225,+25 +efficientnet_es_pruned,23.828,76.172,41.995,58.005,5.44,224,0.875,bicubic,-51.172,-50.453,+21 +mobilenetv2_140,23.712,76.288,41.477,58.523,6.11,224,0.875,bicubic,-52.804,-51.519,-7 +mixnet_m,23.710,76.290,41.141,58.859,5.01,224,0.875,bicubic,-53.550,-52.284,-30 +dla34,23.669,76.331,41.551,58.449,15.74,224,0.875,bilinear,-50.961,-50.527,+23 +legacy_seresnet50,23.651,76.349,40.091,59.909,28.09,224,0.875,bilinear,-53.978,-53.657,-48 +ese_vovnet19b_dw,23.535,76.465,41.288,58.712,6.54,224,0.875,bicubic,-53.263,-51.980,-21 +tf_mixnet_m,23.484,76.516,40.989,59.011,5.01,224,0.875,bicubic,-53.458,-52.163,-26 +tv_resnet34,23.473,76.527,41.367,58.633,21.80,224,0.875,bilinear,-49.839,-50.059,+30 +tf_efficientnet_em,23.359,76.641,40.404,59.596,6.90,240,0.882,bicubic,-54.771,-53.640,-69 +selecsls42b,23.357,76.643,40.677,59.323,32.46,224,0.875,bicubic,-53.817,-52.713,-36 +repvgg_b0,23.316,76.684,41.182,58.818,15.82,224,0.875,bilinear,-51.837,-51.236,+5 +mobilenetv2_110d,23.066,76.934,40.716,59.284,4.52,224,0.875,bicubic,-51.970,-51.470,+9 +deit_tiny_distilled_patch16_224,22.718,77.282,40.771,59.229,5.91,224,0.900,bicubic,-51.792,-51.119,+17 +mobilenetv3_large_100,22.655,77.345,40.781,59.219,5.48,224,0.875,bicubic,-53.111,-51.761,-9 +mobilenetv3_rw,22.630,77.370,40.374,59.626,5.48,224,0.875,bicubic,-53.004,-52.334,-8 
+tf_mobilenetv3_large_100,22.569,77.431,39.767,60.233,5.48,224,0.875,bilinear,-52.949,-52.839,-7 +tf_efficientnet_es,22.413,77.587,39.095,60.905,5.44,224,0.875,bicubic,-54.180,-54.107,-26 +hrnet_w18_small_v2,22.337,77.663,39.861,60.139,15.60,224,0.875,bilinear,-52.777,-52.555,0 +convit_tiny,22.282,77.718,39.669,60.331,5.71,224,0.875,bicubic,-50.834,-52.045,+21 +regnety_008,22.119,77.881,38.900,61.100,6.26,224,0.875,bicubic,-54.197,-54.166,-22 +seresnext26t_32x4d,21.991,78.009,38.482,61.518,16.81,224,0.875,bicubic,-55.995,-55.264,-75 +regnety_006,21.971,78.029,38.955,61.045,6.06,224,0.875,bicubic,-53.275,-53.577,-7 +vit_tiny_r_s16_p8_384,21.954,78.046,39.405,60.595,6.36,384,1.000,bicubic,-53.998,-53.855,-21 +regnetx_008,21.940,78.060,38.928,61.072,7.26,224,0.875,bicubic,-53.098,-53.408,-4 +resnet26d,21.907,78.094,38.619,61.381,16.01,224,0.875,bicubic,-54.789,-54.531,-38 +semnasnet_100,21.903,78.097,38.600,61.400,3.89,224,0.875,bicubic,-53.545,-54.004,-13 +pit_ti_224,21.875,78.125,39.541,60.459,4.85,224,0.900,bicubic,-51.037,-51.861,+16 +regnetx_006,21.738,78.263,38.904,61.096,6.20,224,0.875,bicubic,-52.115,-52.768,+7 +vit_tiny_patch16_384,21.708,78.292,39.329,60.671,5.79,384,1.000,bicubic,-56.722,-55.213,-103 +vgg19_bn,21.628,78.373,39.283,60.717,143.68,224,0.875,bilinear,-52.587,-52.559,+1 +ghostnet_100,21.620,78.380,38.692,61.308,5.18,224,0.875,bilinear,-52.358,-52.764,+3 +gluon_resnet18_v1b,21.549,78.451,38.869,61.131,11.69,224,0.875,bicubic,-49.287,-50.893,+24 +fbnetc_100,21.484,78.516,38.161,61.839,5.57,224,0.875,bilinear,-53.640,-54.224,-16 +mnasnet_100,21.350,78.650,37.719,62.281,4.38,224,0.875,bicubic,-53.308,-54.395,-8 +resnet26,21.295,78.705,38.018,61.982,16.00,224,0.875,bicubic,-53.997,-54.552,-21 +ssl_resnet18,21.278,78.722,39.113,60.887,11.69,224,0.875,bilinear,-51.332,-52.303,+8 +mixnet_s,21.254,78.746,38.187,61.813,4.13,224,0.875,bicubic,-54.738,-54.609,-37 +seresnext26d_32x4d,21.252,78.748,37.311,62.689,16.81,224,0.875,bicubic,-56.350,-56.297,-79 +legacy_seresnext26_32x4d,21.093,78.907,37.633,62.367,16.79,224,0.875,bicubic,-56.011,-55.683,-63 +regnetx_004,20.898,79.102,37.566,62.434,5.16,224,0.875,bicubic,-51.498,-53.264,+5 +spnasnet_100,20.863,79.137,37.896,62.104,4.42,224,0.875,bilinear,-53.221,-53.922,-9 +legacy_seresnet18,20.837,79.162,37.619,62.381,11.78,224,0.875,bicubic,-50.905,-52.715,+11 +mobilenetv2_100,20.773,79.227,37.759,62.241,3.50,224,0.875,bicubic,-52.197,-53.257,-1 +tf_mixnet_s,20.470,79.530,36.607,63.393,4.13,224,0.875,bicubic,-55.180,-56.021,-38 +vit_tiny_patch16_224,20.458,79.542,37.597,62.403,5.72,224,0.900,bicubic,-54.996,-55.251,-33 +regnety_004,20.415,79.585,37.002,62.998,4.34,224,0.875,bicubic,-53.619,-54.750,-13 +hrnet_w18_small,20.368,79.632,37.093,62.907,13.19,224,0.875,bilinear,-51.974,-53.585,0 +tf_mobilenetv3_large_075,20.366,79.634,36.764,63.236,3.99,224,0.875,bilinear,-53.072,-54.586,-12 +resnet18,20.228,79.772,37.261,62.739,11.69,224,0.875,bilinear,-49.520,-51.817,+11 +mixer_l16_224,20.171,79.829,32.956,67.044,208.20,224,0.875,bicubic,-51.887,-54.712,+1 +deit_tiny_patch16_224,20.162,79.838,37.546,62.454,5.72,224,0.900,bicubic,-52.007,-53.572,-1 +tf_mobilenetv3_large_minimal_100,20.122,79.878,36.908,63.092,3.92,224,0.875,bilinear,-52.126,-53.722,-3 +vgg16_bn,19.959,80.041,36.301,63.699,138.37,224,0.875,bilinear,-53.391,-55.205,-16 +vit_tiny_r_s16_p8_224,19.334,80.666,36.047,63.953,6.34,224,0.900,bicubic,-52.454,-54.781,-2 +vgg19,17.929,82.071,33.054,66.946,143.67,224,0.875,bilinear,-54.439,-57.818,-9 
+vgg13_bn,17.802,82.198,34.039,65.961,133.05,224,0.875,bilinear,-53.792,-56.337,-2 +vgg16,17.540,82.460,32.773,67.227,138.36,224,0.875,bilinear,-54.054,-57.609,-2 +regnety_002,17.450,82.550,32.431,67.569,3.16,224,0.875,bicubic,-52.802,-57.109,0 +vgg11_bn,17.403,82.597,33.011,66.989,132.87,224,0.875,bilinear,-52.957,-56.791,-2 +regnetx_002,16.962,83.038,32.225,67.775,2.68,224,0.875,bicubic,-51.800,-56.331,+2 +dla60x_c,16.310,83.690,31.761,68.239,1.32,224,0.875,bilinear,-51.582,-56.665,+3 +tf_mobilenetv3_small_100,16.227,83.772,31.223,68.777,2.54,224,0.875,bilinear,-51.694,-56.441,+1 +vgg13,16.100,83.900,30.985,69.015,133.05,224,0.875,bilinear,-53.826,-58.261,-4 +vgg11,15.728,84.272,30.453,69.547,132.86,224,0.875,bilinear,-53.296,-58.175,-3 +tf_mobilenetv3_small_075,14.944,85.056,29.572,70.428,2.04,224,0.875,bilinear,-50.772,-56.558,+1 +dla46_c,14.657,85.343,29.380,70.620,1.30,224,0.875,bilinear,-50.209,-56.912,+1 +dla46x_c,14.382,85.618,29.191,70.809,1.07,224,0.875,bilinear,-51.588,-57.789,-2 +tf_mobilenetv3_small_minimal_100,13.964,86.036,27.988,72.012,2.04,224,0.875,bilinear,-48.942,-56.242,0 diff --git a/PyTorch/contrib/cv/classification/convmixer/setup.cfg b/PyTorch/contrib/cv/classification/convmixer/setup.cfg new file mode 100644 index 0000000000..6289c6c3a1 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/setup.cfg @@ -0,0 +1,5 @@ +[dist_conda] + +conda_name_differences = 'torch:pytorch' +channels = pytorch +noarch = True diff --git a/PyTorch/contrib/cv/classification/convmixer/setup.py b/PyTorch/contrib/cv/classification/convmixer/setup.py new file mode 100644 index 0000000000..882ed467a3 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/setup.py @@ -0,0 +1,48 @@ +""" Setup +""" +from setuptools import setup, find_packages +from codecs import open +from os import path + +here = path.abspath(path.dirname(__file__)) + +# Get the long description from the README file +with open(path.join(here, 'README.md'), encoding='utf-8') as f: + long_description = f.read() + +exec(open('timm/version.py').read()) +setup( + name='timm', + version=__version__, + description='(Unofficial) PyTorch Image Models', + long_description=long_description, + long_description_content_type='text/markdown', + url='https://github.com/rwightman/pytorch-image-models', + author='Ross Wightman', + author_email='hello@rwightman.com', + classifiers=[ + # How mature is this project? Common values are + # 3 - Alpha + # 4 - Beta + # 5 - Production/Stable + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Education', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: Apache Software License', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Scientific/Engineering', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Software Development', + 'Topic :: Software Development :: Libraries', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + + # Note that this is a string of words separated by whitespace, not a list. 
+ keywords='pytorch pretrained models efficientnet mobilenetv3 mnasnet', + packages=find_packages(exclude=['convert', 'tests', 'results']), + include_package_data=True, + install_requires=['torch >= 1.4', 'torchvision'], + python_requires='>=3.6', +) diff --git a/PyTorch/contrib/cv/classification/convmixer/test/env_npu.sh b/PyTorch/contrib/cv/classification/convmixer/test/env_npu.sh new file mode 100644 index 0000000000..1699c787e2 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/test/env_npu.sh @@ -0,0 +1,76 @@ +#!/bin/bash +export install_path=/usr/local/Ascend +export ASCEND_HOME_PATH=${install_path}/ascend-toolkit/latest +export TOOLCHAIN_HOME=${install_path}/ascend-toolkit/latest/toolkit + +# export LD_LIBRARY_PATH=${install_path}/driver/lib64:${install_path}/driver/lib64/common:${install_path}/driver/lib64/driver:$LD_LIBRARY_PATH +# export LD_LIBRARY_PATH=${install_path}/ascend-toolkit/latest/lib64:${install_path}/ascend-toolkit/latest/compiler/lib64/plugin/opskernel:${install_path}/ascend-toolkit/latest/compiler/lib64/plugin/nnengine:$LD_LIBRARY_PATH +# export PYTHONPATH=${install_path}/ascend-toolkit/latest/python/site-packages:${install_path}/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe:$PYTHONPATH +# export PATH=${install_path}/ascend-toolkit/latest/bin:${install_path}/ascend-toolkit/latest/compiler/ccec_compiler/bin:$PATH + + + +if [ -d ${install_path}/toolkit ]; then + export LD_LIBRARY_PATH=${install_path}/fwkacllib/lib64/:/usr/include/hdf5/lib/:/usr/local/:/usr/local/lib/:/usr/lib/:${install_path}/driver/lib64/common/:${install_path}/driver/lib64/driver/:${install_path}/add-ons:${path_lib}:${LD_LIBRARY_PATH} + export PATH=${install_path}/fwkacllib/ccec_compiler/bin:${install_path}/fwkacllib/bin:$PATH + export PYTHONPATH=${install_path}/fwkacllib/python/site-packages:${install_path}/tfplugin/python/site-packages:${install_path}/toolkit/python/site-packages:$PYTHONPATH + export PYTHONPATH=/usr/local/python3.7.5/lib/python3.7/site-packages:$PYTHONPATH + export ASCEND_OPP_PATH=${install_path}/opp +else + export LD_LIBRARY_PATH=${install_path}/ascend-toolkit/latest/fwkacllib/lib64/:/usr/local/:/usr/local/lib/:/usr/lib64/:/usr/lib/:/usr/local/python3.7.5/lib/:/usr/local/openblas/lib:${install_path}/driver/lib64/common/:${install_path}/driver/lib64/driver/:${install_path}/add-ons/:/usr/lib/aarch64-linux-gnu:$LD_LIBRARY_PATH + export PATH=$PATH:${install_path}/ascend-toolkit/latest/fwkacllib/ccec_compiler/bin/:${install_path}/ascend-toolkit/latest/toolkit/tools/ide_daemon/bin/ + export ASCEND_OPP_PATH=${install_path}/ascend-toolkit/latest/opp/ + export OPTION_EXEC_EXTERN_PLUGIN_PATH=${install_path}/ascend-toolkit/latest/fwkacllib/lib64/plugin/opskernel/libfe.so:${install_path}/ascend-toolkit/latest/fwkacllib/lib64/plugin/opskernel/libaicpu_engine.so:${install_path}/ascend-toolkit/latest/fwkacllib/lib64/plugin/opskernel/libge_local_engine.so + export PYTHONPATH=${install_path}/ascend-toolkit/latest/fwkacllib/python/site-packages/:${install_path}/ascend-toolkit/latest/fwkacllib/python/site-packages/auto_tune.egg/auto_tune:${install_path}/ascend-toolkit/latest/fwkacllib/python/site-packages/schedule_search.egg:$PYTHONPATH + export ASCEND_AICPU_PATH=${install_path}/ascend-toolkit/latest +fi + +${install_path}/driver/tools/msnpureport -g error -d 0 +${install_path}/driver/tools/msnpureport -g error -d 1 +${install_path}/driver/tools/msnpureport -g error -d 2 +${install_path}/driver/tools/msnpureport -g error -d 3 +${install_path}/driver/tools/msnpureport -g error 
-d 4
+${install_path}/driver/tools/msnpureport -g error -d 5
+${install_path}/driver/tools/msnpureport -g error -d 6
+${install_path}/driver/tools/msnpureport -g error -d 7
+
+# Output host logs to stdout, 0-off/1-on
+export ASCEND_SLOG_PRINT_TO_STDOUT=0
+# Default log level, 0-debug/1-info/2-warning/3-error
+export ASCEND_GLOBAL_LOG_LEVEL=3
+# Enable event logging, 0-off/1-on
+export ASCEND_GLOBAL_EVENT_ENABLE=0
+# Enable the task queue, 0-off/1-on
+export TASK_QUEUE_ENABLE=0
+# Enable PTCopy, 0-off/1-on
+export PTCOPY_ENABLE=1
+# Enable the combined flag, 0-off/1-on
+export COMBINED_ENABLE=1
+export TRI_COMBINED_ENABLE=1
+# Whether recompilation is needed in special scenarios, no modification needed
+export DYNAMIC_OP="ADD#MUL"
+# HCCL whitelist switch, 1-off/0-on
+export HCCL_WHITELIST_DISABLE=1
+
+ulimit -SHn 512000
+
+path_lib=$(python3.7 -c """
+import sys
+import re
+result=''
+for index in range(len(sys.path)):
+    match_sit = re.search('-packages', sys.path[index])
+    if match_sit is not None:
+        match_lib = re.search('lib', sys.path[index])
+
+        if match_lib is not None:
+            end=match_lib.span()[1]
+            result += sys.path[index][0:end] + ':'
+
+        result+=sys.path[index] + '/torch/lib:'
+print(result)"""
+)
+
+echo ${path_lib}
+
+export LD_LIBRARY_PATH=/usr/local/python3.7.5/lib/:${path_lib}:$LD_LIBRARY_PATH
diff --git a/PyTorch/contrib/cv/classification/convmixer/test/train_eval_1p.sh b/PyTorch/contrib/cv/classification/convmixer/test/train_eval_1p.sh
new file mode 100644
index 0000000000..37562e14df
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/test/train_eval_1p.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+################## Basic configuration parameters, adjust per model ##################
+# Specify the parameter --data_path=XXX
+# Network name, same as the directory name
+Network="convmixer_1536_20"
+# Selected model
+model="convmixer_1536_20"
+# Training batch_size
+batch_size=64
+# Number of NPUs used for training
+RANK_SIZE=1
+# Number of dataset classes
+nb_classes=1000
+# Dataset path, kept empty here, no modification needed
+data_path=""
+# Checkpoint file path
+checkpoint=""
+
+# Help message, no modification needed
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage: ./train_eval_1p.sh "
+    echo " "
+    echo "parameter explain:
+    --model                  choose the training model
+    --checkpoint             model checkpoint file path
+    --nb_classes             numbers of data classes
+    --data_path              source val data
+    -h/--help                show help message
+    "
+    exit 1
+fi
+
+# Parameter parsing, no modification needed
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --batch_size* ]];then
+        batch_size=`echo ${para#*=}`
+    elif [[ $para == --nb_classes* ]];then
+        nb_classes=`echo ${para#*=}`
+    elif [[ $para == --checkpoint* ]];then
+        checkpoint=`echo ${para#*=}`
+    fi
+done
+
+# Check that data_path was passed in, no modification needed
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+if [[ $checkpoint == "" ]];then
+    echo "[Error] para \"checkpoint\" must be configured"
+    exit 1
+fi
+
+echo "data_path: $data_path"
+echo "checkpoint: $checkpoint"
+
+################## Set the execution path of the training script ##################
+# cd to the directory at the same level as test/ before running, for better compatibility; test_path_dir is the path that contains the test folder
+cur_path=`pwd`
+cur_path_last_dirname=${cur_path##*/}
+if [ x"${cur_path_last_dirname}" == x"test" ]; then
+    test_path_dir=${cur_path}
+    cd ..
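+    # cur_path is refreshed on the next line so that it points at the model root directory rather than test/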
+    cur_path=`pwd`
+else
+    test_path_dir=${cur_path}/test
+fi
+echo "cur_path: ${cur_path}"
+################## Create the log output directory, adjust per model ##################
+# This model launches multi-card training in a non-loop way, so the log output directory is created as below; models that launch multi-card training in a loop create the log directory inside the loop (see the CRNN model for reference)
+# In the non-loop case, ASCEND_DEVICE_ID in the 8-card training log path defaults to 0; it is only a folder name and does not affect training
+ASCEND_DEVICE_ID=0
+if [ -d ${test_path_dir}/output/$ASCEND_DEVICE_ID ];then
+    rm -rf ${test_path_dir}/output/$ASCEND_DEVICE_ID
+    mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
+fi
+echo "test_path_dir: ${test_path_dir}"
+################## Launch the training script ##################
+# Training start time, no modification needed
+start_time=$(date +%s)
+# Source environment variables
+source ./test/env_npu.sh
+python validate_npu.py \
+--model ${model} \
+--b ${batch_size} \
+--num-classes ${nb_classes} \
+--checkpoint ${checkpoint} \
+${data_path} > ${test_path_dir}/output/${ASCEND_DEVICE_ID}/eval_${ASCEND_DEVICE_ID}.log 2>&1 &
+wait
+
+################## Collect training results ##################
+# Training end time, no modification needed
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Print results, no modification needed
+echo "------------------ Final result ------------------"
+
+# Extract test accuracy, adjust per model
+# Test: [ 97/97] Time: 0.659 (0.298) Loss: 0.9513 (1.0809) Acc@1: 80.0595 (80.2100) Acc@5: 97.0238 (95.1520)
+eval_accuracy_acc1=`grep -a 'Test: ' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/eval_${ASCEND_DEVICE_ID}.log|awk -F "(" '{print $4}'|awk -F ")" '{print $1}'|awk 'END {print}'`
+eval_accuracy_acc5=`grep -a 'Test: ' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/eval_${ASCEND_DEVICE_ID}.log|awk -F "(" '{print $5}'|awk -F ")" '{print $1}'|awk 'END {print}'`
+# Print, no modification needed
+echo "Final Eval Accuracy : Acc@1: ${eval_accuracy_acc1} % , Acc@5: ${eval_accuracy_acc5}"
+echo "E2E Training Duration sec : $e2e_time"
+
diff --git a/PyTorch/contrib/cv/classification/convmixer/test/train_full_8p.sh b/PyTorch/contrib/cv/classification/convmixer/test/train_full_8p.sh
new file mode 100644
index 0000000000..f7c722de0f
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/test/train_full_8p.sh
@@ -0,0 +1,154 @@
+#!/bin/bash
+
+################## Basic configuration parameters, adjust per model ##################
+# Specify the parameter --data_path=XXX
+# Network name, same as the directory name
+Network="convmixer_1536_20"
+# Selected model
+model="convmixer_1536_20"
+# Training batch_size
+batch_size=64
+# Number of NPUs used for training
+RANK_SIZE=8
+# Number of dataset classes
+nb_classes=1000
+# Dataset path, kept empty here, no modification needed
+data_path=""
+epochs=150
+
+# Help message, no modification needed
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage: ./train_full_8p.sh "
+    echo " "
+    echo "parameter explain:
+    --model                  choose the training model
+    --nb_classes             numbers of data classes
+    --data_path              source data
+    -h/--help                show help message
+    "
+    exit 1
+fi
+
+# Parameter parsing, no modification needed
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --batch_size* ]];then
+        batch_size=`echo ${para#*=}`
+    elif [[ $para == --nb_classes* ]];then
+        nb_classes=`echo ${para#*=}`
+    fi
+done
+
+# Check that data_path was passed in, no modification needed
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+echo "data_path: $data_path"
+
+################## Set the execution path of the training script ##################
+# cd to the directory at the same level as test/ before running, for better compatibility; test_path_dir is the path that contains the test folder
+cur_path=`pwd`
+cur_path_last_dirname=${cur_path##*/}
+if [ x"${cur_path_last_dirname}" == x"test" ]; then
+    test_path_dir=${cur_path}
+    cd ..
+    cur_path=`pwd`
+else
+    test_path_dir=${cur_path}/test
+fi
+echo "cur_path: ${cur_path}"
+################## Create the log output directory, adjust per model ##################
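+# Illustrative note (assumed layout, for reference only): with ASCEND_DEVICE_ID=0 the training log
+# ends up at ${test_path_dir}/output/0/train_0.log and the summary at ${test_path_dir}/output/0/${CaseName}.log.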
+# This model launches multi-card training in a non-loop way, so the log output directory is created as below; models that launch multi-card training in a loop create the log directory inside the loop (see the CRNN model for reference)
+# In the non-loop case, ASCEND_DEVICE_ID in the 8-card training log path defaults to 0; it is only a folder name and does not affect training
+ASCEND_DEVICE_ID=0
+if [ -d ${test_path_dir}/output/$ASCEND_DEVICE_ID ];then
+    rm -rf ${test_path_dir}/output/$ASCEND_DEVICE_ID
+    mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
+fi
+echo "test_path_dir: ${test_path_dir}"
+################## Launch the training script ##################
+# Training start time, no modification needed
+start_time=$(date +%s)
+# Source environment variables
+source ./test/env_npu.sh
+OMP_NUM_THREADS=1 python -m torch.distributed.launch \
+--nproc_per_node=${RANK_SIZE} \
+--master_port=54866 \
+train_npu.py \
+${data_path} \
+--model convmixer_1536_20 \
+-b ${batch_size} \
+-j 10 \
+--opt adamw \
+--epochs ${epochs} \
+--sched onecycle \
+--amp \
+--input-size 3 224 224 \
+--lr 0.01 \
+--aa rand-m9-mstd0.5-inc1 \
+--cutmix 0.5 \
+--mixup 0.5 \
+--reprob 0.25 \
+--remode pixel \
+--num-classes ${nb_classes} \
+--warmup-epochs 0 \
+--opt-eps=1e-3 \
+--clip-grad 1.0 \
+--device npu > ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+wait
+
+################## Collect training results ##################
+# Training end time, no modification needed
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Print results, no modification needed
+echo "------------------ Final result ------------------"
+# Extract throughput (FPS), adjust per model
+# Train: 149 [2501/2502 (100%)] Loss: 2.638 (2.98) Time: 2.998s, 170.77/s (1.258s, 406.96/s) LR: 0.000e+00 Data: 1.744 (0.017)
+FPS=`grep -a 'Train: ' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk -F "(" '{print $4}'|awk -F ")" '{print $1}'|awk -F ", " '{print $2}'|awk 'END {print}'`
+
+# Print, no modification needed
+echo "Final Performance image/sec : $FPS"
+
+# Extract training accuracy, adjust per model
+# *** Best metric: 80.29799995361329 (epoch 131)
+train_accuracy=`grep -a '*** Best metric:' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk -F " " '{print $4}'|awk 'END {print}'`
+# Print, no modification needed
+echo "Final Train Accuracy : ${train_accuracy} %"
+echo "E2E Training Duration sec : $e2e_time"
+
+# Summary of performance monitoring results
+# Training case information, no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+echo "CaseName: $CaseName"
+# Collect performance data
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+TrainingTime=`grep -a 'Training time: ' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk -F ":" '{print $2}'`
+echo "TrainingTime: $TrainingTime"
+# Extract the loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt, adjust per model
+# Train: 149 [2501/2502 (100%)] Loss: 2.638 (2.98) Time: 2.998s, 170.77/s (1.258s, 406.96/s) LR: 0.000e+00 Data: 1.744 (0.017)
+grep 'Train:' ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk -F "]" '{print $2}' |awk -F " " '{print $2}' >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration
+ActualLoss=`awk 'END {print}' ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt | awk -F ":" '{print $2}'`
+# Print key information into ${CaseName}.log, no modification needed
+echo "Network = ${Network}" > ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >>
+echo "TrainingTime = ${TrainingTime}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/PyTorch/contrib/cv/classification/convmixer/test/train_performance_1p.sh b/PyTorch/contrib/cv/classification/convmixer/test/train_performance_1p.sh
new file mode 100644
index 0000000000..a1131eb830
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/test/train_performance_1p.sh
@@ -0,0 +1,155 @@
+#!/bin/bash
+
+################## Base configuration parameters; review and modify per model ##################
+# Specify the parameter --data_path=XXX
+# Network name, same as the directory name
+Network="convmixer_1536_20"
+# Selected model
+model="convmixer_1536_20"
+# Training batch_size
+batch_size=64
+# Number of NPUs used for training
+RANK_SIZE=1
+# Number of dataset classes
+nb_classes=1000
+# Dataset path; keep empty, no modification needed
+data_path=""
+epochs=1
+
+# Help message, no modification needed
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage: ./train_performance_1p.sh "
+    echo " "
+    echo "parameter explanation:
+    --model              choose the training model
+    --nb_classes         number of data classes
+    --data_path          source data
+    -h/--help            show help message
+    "
+    exit 1
+fi
+
+# Parameter validation, no modification needed
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --batch_size* ]];then
+        batch_size=`echo ${para#*=}`
+    elif [[ $para == --nb_classes* ]];then
+        nb_classes=`echo ${para#*=}`
+    fi
+done
+
+# Check that data_path was passed in, no modification needed
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+echo "data_path: $data_path"
+
+################## Set the training script execution path ##################
+# cd to the directory at the same level as the test folder before running the script, for better compatibility; test_path_dir is the path containing the test folder
+cur_path=`pwd`
+cur_path_last_dirname=${cur_path##*/}
+if [ x"${cur_path_last_dirname}" == x"test" ]; then
+    test_path_dir=${cur_path}
+    cd ..
+    cur_path=`pwd`
+else
+    test_path_dir=${cur_path}/test
+fi
+echo "cur_path: ${cur_path}"
+################## Create the log output directory; review per model ##################
+# This model launches multi-card training without a loop, so the log output directory is created as below; models that launch multi-card training in a loop create the directory inside the loop, see the CRNN model for reference
+# In the non-loop case, the ASCEND_DEVICE_ID in the 8-card training log path defaults to 0; it is only a folder name and does not affect training
+ASCEND_DEVICE_ID=0
+if [ -d ${test_path_dir}/output/$ASCEND_DEVICE_ID ];then
+    rm -rf ${test_path_dir}/output/$ASCEND_DEVICE_ID
+    mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
+fi
+echo "test_path_dir: ${test_path_dir}"
+################## Launch the training script ##################
+# Training start time, no modification needed
+start_time=$(date +%s)
+# source environment variables
+source ./test/env_npu.sh
+OMP_NUM_THREADS=1 python3 -m torch.distributed.launch \
+--nproc_per_node=${RANK_SIZE} \
+--master_port=54866 \
+train_npu.py \
+${data_path} \
+--model convmixer_1536_20 \
+-b ${batch_size} \
+-j 10 \
+--opt adamw \
+--epochs ${epochs} \
+--sched onecycle \
+--amp \
+--input-size 3 224 224 \
+--lr 0.01 \
+--aa rand-m9-mstd0.5-inc1 \
+--cutmix 0.5 \
+--mixup 0.5 \
+--reprob 0.25 \
+--remode pixel \
+--num-classes ${nb_classes} \
+--warmup-epochs 0 \
+--opt-eps=1e-3 \
+--clip-grad 1.0 \
+--device npu \
+--performance_1p > ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+wait
+
+################## Collect training results ##################
+# Training end time, no modification needed
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Print results, no modification needed
+echo "------------------ Final result ------------------"
+# Output performance (FPS); review and modify per model
+# Train: 149 [2501/2502 (100%)] Loss: 2.638 (2.98) Time: 2.998s, 170.77/s (1.258s, 406.96/s) LR: 0.000e+00 Data: 1.744 (0.017)
+FPS=`grep -a 'Train: ' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk -F "(" '{print $4}'|awk -F ")" '{print $1}'|awk -F ", " '{print $2}'|awk 'END {print}'`
+
+# Print, no modification needed
+echo "Final Performance image/sec : $FPS"
+
+# Output training accuracy; review and modify per model
+# *** Best metric: 80.29799995361329 (epoch 131)
+train_accuracy=`grep -a '*** Best metric:' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk -F " " '{print $4}'|awk 'END {print}'`
+# Print, no modification needed
+echo "Final Train Accuracy : ${train_accuracy} %"
+echo "E2E Training Duration sec : $e2e_time"
+
+# Performance monitoring result summary
+# Training case information, no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+echo "CaseName: $CaseName"
+# Collect performance data
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+TrainingTime=`grep -a 'Training time: ' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk -F ":" '{print $2}'`
+echo "TrainingTime: $TrainingTime"
+# Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+# Train: 149 [2501/2502 (100%)] Loss: 2.638 (2.98) Time: 2.998s, 170.77/s (1.258s, 406.96/s) LR: 0.000e+00 Data: 1.744 (0.017)
+grep 'Train:' ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk -F "]" '{print $2}' |awk -F " " '{print $2}' >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss value of the last iteration
+ActualLoss=`awk 'END {print}' ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt | awk -F ":" '{print $2}'`
+# Print key information to ${CaseName}.log, no modification needed
+echo "Network = ${Network}" > ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/PyTorch/contrib/cv/classification/convmixer/test/train_performance_8p.sh b/PyTorch/contrib/cv/classification/convmixer/test/train_performance_8p.sh
new file mode 100644
index 0000000000..22131ead9f
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/test/train_performance_8p.sh
@@ -0,0 +1,154 @@
+#!/bin/bash
+
+################## Base configuration parameters; review and modify per model ##################
+# Specify the parameter --data_path=XXX
+# Network name, same as the directory name
+Network="convmixer_1536_20"
+# Selected model
+model="convmixer_1536_20"
+# Training batch_size
+batch_size=64
+# Number of NPUs used for training
+RANK_SIZE=8
+# Number of dataset classes
+nb_classes=1000
+# Dataset path; keep empty, no modification needed
+data_path=""
+epochs=3
+
+# Help message, no modification needed
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage: ./train_performance_8p.sh "
+    echo " "
+    echo "parameter explanation:
+    --model              choose the training model
+    --nb_classes         number of data classes
+    --data_path          source data
+    -h/--help            show help message
+    "
+    exit 1
+fi
+
+# Parameter validation, no modification needed
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --batch_size* ]];then
+        batch_size=`echo ${para#*=}`
+    elif [[ $para == --nb_classes* ]];then
+        nb_classes=`echo ${para#*=}`
+    fi
+done
+
+# Check that data_path was passed in, no modification needed
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+echo "data_path: $data_path"
+
+################## Set the training script execution path ##################
+# cd to the directory at the same level as the test folder before running the script, for better compatibility; test_path_dir is the path containing the test folder
+cur_path=`pwd`
+cur_path_last_dirname=${cur_path##*/}
+if [ x"${cur_path_last_dirname}" == x"test" ]; then
+    test_path_dir=${cur_path}
+    cd ..
+    cur_path=`pwd`
+else
+    test_path_dir=${cur_path}/test
+fi
+echo "cur_path: ${cur_path}"
+################## Create the log output directory; review per model ##################
+# This model launches multi-card training without a loop, so the log output directory is created as below; models that launch multi-card training in a loop create the directory inside the loop, see the CRNN model for reference
+# In the non-loop case, the ASCEND_DEVICE_ID in the 8-card training log path defaults to 0; it is only a folder name and does not affect training
+ASCEND_DEVICE_ID=0
+if [ -d ${test_path_dir}/output/$ASCEND_DEVICE_ID ];then
+    rm -rf ${test_path_dir}/output/$ASCEND_DEVICE_ID
+    mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p ${test_path_dir}/output/$ASCEND_DEVICE_ID
+fi
+echo "test_path_dir: ${test_path_dir}"
+################## Launch the training script ##################
+# Training start time, no modification needed
+start_time=$(date +%s)
+# source environment variables
+source ./test/env_npu.sh
+OMP_NUM_THREADS=1 python -m torch.distributed.launch \
+--nproc_per_node=${RANK_SIZE} \
+--master_port=54866 \
+train_npu.py \
+${data_path} \
+--model convmixer_1536_20 \
+-b ${batch_size} \
+-j 10 \
+--opt adamw \
+--epochs ${epochs} \
+--sched onecycle \
+--amp \
+--input-size 3 224 224 \
+--lr 0.01 \
+--aa rand-m9-mstd0.5-inc1 \
+--cutmix 0.5 \
+--mixup 0.5 \
+--reprob 0.25 \
+--remode pixel \
+--num-classes ${nb_classes} \
+--warmup-epochs 0 \
+--opt-eps=1e-3 \
+--clip-grad 1.0 \
+--device npu > ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+wait
+
+################## Collect training results ##################
+# Training end time, no modification needed
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Print results, no modification needed
+echo "------------------ Final result ------------------"
+# Output performance (FPS); review and modify per model
+# Train: 149 [2501/2502 (100%)] Loss: 2.638 (2.98) Time: 2.998s, 170.77/s (1.258s, 406.96/s) LR: 0.000e+00 Data: 1.744 (0.017)
+FPS=`grep -a 'Train: ' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk -F "(" '{print $4}'|awk -F ")" '{print $1}'|awk -F ", " '{print $2}'|awk 'END {print}'`
+
+# Print, no modification needed
+echo "Final Performance image/sec : $FPS"
+
+# Output training accuracy; review and modify per model
+# *** Best metric: 80.29799995361329 (epoch 131)
+train_accuracy=`grep -a '*** Best metric:' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk -F " " '{print $4}'|awk 'END {print}'`
+# Print, no modification needed
+echo "Final Train Accuracy : ${train_accuracy} %"
+echo "E2E Training Duration sec : $e2e_time"
+
+# Performance monitoring result summary
+# Training case information, no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+echo "CaseName: $CaseName"
+# Collect performance data
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+TrainingTime=`grep -a 'Training time: ' ${test_path_dir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk -F ":" '{print $2}'`
+echo "TrainingTime: $TrainingTime"
+# Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+# Train: 149 [2501/2502 (100%)] Loss: 2.638 (2.98) Time: 2.998s, 170.77/s (1.258s, 406.96/s) LR: 0.000e+00 Data: 1.744 (0.017)
+grep 'Train:' ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk -F "]" '{print $2}' |awk -F " " '{print $2}' >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss value of the last iteration
+ActualLoss=`awk 'END {print}' ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt | awk -F ":" '{print $2}'`
+# Print key information to ${CaseName}.log, no modification needed
+echo "Network = ${Network}" > ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> 
${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> ${test_path_dir}/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/PyTorch/contrib/cv/classification/convmixer/tests/__init__.py b/PyTorch/contrib/cv/classification/convmixer/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/PyTorch/contrib/cv/classification/convmixer/tests/test_layers.py b/PyTorch/contrib/cv/classification/convmixer/tests/test_layers.py new file mode 100644 index 0000000000..508a6aae67 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/tests/test_layers.py @@ -0,0 +1,71 @@ +import pytest +import torch +import torch.nn as nn +import platform +import os + +from timm.models.layers import create_act_layer, get_act_layer, set_layer_config + + +class MLP(nn.Module): + def __init__(self, act_layer="relu", inplace=True): + super(MLP, self).__init__() + self.fc1 = nn.Linear(1000, 100) + self.act = create_act_layer(act_layer, inplace=inplace) + self.fc2 = nn.Linear(100, 10) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + return x + + +def _run_act_layer_grad(act_type, inplace=True): + x = torch.rand(10, 1000) * 10 + m = MLP(act_layer=act_type, inplace=inplace) + + def _run(x, act_layer=''): + if act_layer: + # replace act layer if set + m.act = create_act_layer(act_layer, inplace=inplace) + out = m(x) + l = (out - 0).pow(2).sum() + return l + + out_me = _run(x) + + with set_layer_config(scriptable=True): + out_jit = _run(x, act_type) + + assert torch.isclose(out_jit, out_me) + + with set_layer_config(no_jit=True): + out_basic = _run(x, act_type) + + assert torch.isclose(out_basic, out_jit) + + +def test_swish_grad(): + for _ in range(100): + _run_act_layer_grad('swish') + + +def test_mish_grad(): + for _ in range(100): + _run_act_layer_grad('mish') + + +def test_hard_sigmoid_grad(): + for _ in range(100): + _run_act_layer_grad('hard_sigmoid', inplace=None) + + +def test_hard_swish_grad(): + for _ in range(100): + _run_act_layer_grad('hard_swish') + + +def test_hard_mish_grad(): + for _ in range(100): + _run_act_layer_grad('hard_mish') diff --git a/PyTorch/contrib/cv/classification/convmixer/tests/test_models.py b/PyTorch/contrib/cv/classification/convmixer/tests/test_models.py new file mode 100644 index 0000000000..c0d0e9013a --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/tests/test_models.py @@ -0,0 +1,299 @@ +import pytest +import torch +import platform +import os +import fnmatch + +import timm +from timm import list_models, create_model, set_scriptable, has_model_default_key, is_model_default_key, \ + get_model_default_value + +if hasattr(torch._C, '_jit_set_profiling_executor'): + # legacy executor is too slow to compile large models for unit tests + # no need for the fusion performance here + torch._C._jit_set_profiling_executor(True) + torch._C._jit_set_profiling_mode(False) + +# transformer models don't support many of the spatial / feature based model functionalities +NON_STD_FILTERS = [ + 'vit_*', 'tnt_*', 'pit_*', 'swin_*', 'coat_*', 'cait_*', '*mixer_*', 'gmlp_*', 'resmlp_*', 'twins_*', + 'convit_*', 'levit*', 'visformer*', 'deit*', 'jx_nest_*', 'nest_*', 'xcit_*', 'crossvit_*', 
'beit_*'] +NUM_NON_STD = len(NON_STD_FILTERS) + +# exclude models that cause specific test failures +if 'GITHUB_ACTIONS' in os.environ: # and 'Linux' in platform.system(): + # GitHub Linux runner is slower and hits memory limits sooner than MacOS, exclude bigger models + EXCLUDE_FILTERS = [ + '*efficientnet_l2*', '*resnext101_32x48d', '*in21k', '*152x4_bitm', '*101x3_bitm', '*50x3_bitm', + '*nfnet_f3*', '*nfnet_f4*', '*nfnet_f5*', '*nfnet_f6*', '*nfnet_f7*', '*efficientnetv2_xl*', + '*resnetrs350*', '*resnetrs420*', 'xcit_large_24_p8*'] +else: + EXCLUDE_FILTERS = [] + +TARGET_FWD_SIZE = MAX_FWD_SIZE = 384 +TARGET_BWD_SIZE = 128 +MAX_BWD_SIZE = 320 +MAX_FWD_OUT_SIZE = 448 +TARGET_JIT_SIZE = 128 +MAX_JIT_SIZE = 320 +TARGET_FFEAT_SIZE = 96 +MAX_FFEAT_SIZE = 256 + + +def _get_input_size(model=None, model_name='', target=None): + if model is None: + assert model_name, "One of model or model_name must be provided" + input_size = get_model_default_value(model_name, 'input_size') + fixed_input_size = get_model_default_value(model_name, 'fixed_input_size') + min_input_size = get_model_default_value(model_name, 'min_input_size') + else: + default_cfg = model.default_cfg + input_size = default_cfg['input_size'] + fixed_input_size = default_cfg.get('fixed_input_size', None) + min_input_size = default_cfg.get('min_input_size', None) + assert input_size is not None + + if fixed_input_size: + return input_size + + if min_input_size: + if target and max(input_size) > target: + input_size = min_input_size + else: + if target and max(input_size) > target: + input_size = tuple([min(x, target) for x in input_size]) + return input_size + + +@pytest.mark.timeout(120) +@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS)) +@pytest.mark.parametrize('batch_size', [1]) +def test_model_forward(model_name, batch_size): + """Run a single forward pass with each model""" + model = create_model(model_name, pretrained=False) + model.eval() + + input_size = _get_input_size(model=model, target=TARGET_FWD_SIZE) + if max(input_size) > MAX_FWD_SIZE: + pytest.skip("Fixed input size model > limit.") + inputs = torch.randn((batch_size, *input_size)) + outputs = model(inputs) + + assert outputs.shape[0] == batch_size + assert not torch.isnan(outputs).any(), 'Output included NaNs' + + +@pytest.mark.timeout(120) +@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS, name_matches_cfg=True)) +@pytest.mark.parametrize('batch_size', [2]) +def test_model_backward(model_name, batch_size): + """Run a single forward pass with each model""" + input_size = _get_input_size(model_name=model_name, target=TARGET_BWD_SIZE) + if max(input_size) > MAX_BWD_SIZE: + pytest.skip("Fixed input size model > limit.") + + model = create_model(model_name, pretrained=False, num_classes=42) + num_params = sum([x.numel() for x in model.parameters()]) + model.train() + + inputs = torch.randn((batch_size, *input_size)) + outputs = model(inputs) + if isinstance(outputs, tuple): + outputs = torch.cat(outputs) + outputs.mean().backward() + for n, x in model.named_parameters(): + assert x.grad is not None, f'No gradient for {n}' + num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None]) + + assert outputs.shape[-1] == 42 + assert num_params == num_grad, 'Some parameters are missing gradients' + assert not torch.isnan(outputs).any(), 'Output included NaNs' + + +@pytest.mark.timeout(300) +@pytest.mark.parametrize('model_name', list_models(exclude_filters=NON_STD_FILTERS)) 
+@pytest.mark.parametrize('batch_size', [1]) +def test_model_default_cfgs(model_name, batch_size): + """Run a single forward pass with each model""" + model = create_model(model_name, pretrained=False) + model.eval() + state_dict = model.state_dict() + cfg = model.default_cfg + + pool_size = cfg['pool_size'] + input_size = model.default_cfg['input_size'] + + if all([x <= MAX_FWD_OUT_SIZE for x in input_size]) and \ + not any([fnmatch.fnmatch(model_name, x) for x in EXCLUDE_FILTERS]): + # output sizes only checked if default res <= 448 * 448 to keep resource down + input_size = tuple([min(x, MAX_FWD_OUT_SIZE) for x in input_size]) + input_tensor = torch.randn((batch_size, *input_size)) + + # test forward_features (always unpooled) + outputs = model.forward_features(input_tensor) + assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2] + + # test forward after deleting the classifier, output should be poooled, size(-1) == model.num_features + model.reset_classifier(0) + outputs = model.forward(input_tensor) + assert len(outputs.shape) == 2 + assert outputs.shape[-1] == model.num_features + + # test model forward without pooling and classifier + model.reset_classifier(0, '') # reset classifier and set global pooling to pass-through + outputs = model.forward(input_tensor) + assert len(outputs.shape) == 4 + if not isinstance(model, timm.models.MobileNetV3) and not isinstance(model, timm.models.GhostNet): + # FIXME mobilenetv3/ghostnet forward_features vs removed pooling differ + assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2] + + if 'pruned' not in model_name: # FIXME better pruned model handling + # test classifier + global pool deletion via __init__ + model = create_model(model_name, pretrained=False, num_classes=0, global_pool='').eval() + outputs = model.forward(input_tensor) + assert len(outputs.shape) == 4 + if not isinstance(model, timm.models.MobileNetV3) and not isinstance(model, timm.models.GhostNet): + # FIXME mobilenetv3/ghostnet forward_features vs removed pooling differ + assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2] + + # check classifier name matches default_cfg + classifier = cfg['classifier'] + if not isinstance(classifier, (tuple, list)): + classifier = classifier, + for c in classifier: + assert c + ".weight" in state_dict.keys(), f'{c} not in model params' + + # check first conv(s) names match default_cfg + first_conv = cfg['first_conv'] + if isinstance(first_conv, str): + first_conv = (first_conv,) + assert isinstance(first_conv, (tuple, list)) + for fc in first_conv: + assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params' + + +@pytest.mark.timeout(300) +@pytest.mark.parametrize('model_name', list_models(filter=NON_STD_FILTERS)) +@pytest.mark.parametrize('batch_size', [1]) +def test_model_default_cfgs_non_std(model_name, batch_size): + """Run a single forward pass with each model""" + model = create_model(model_name, pretrained=False) + model.eval() + state_dict = model.state_dict() + cfg = model.default_cfg + + input_size = _get_input_size(model=model) + if max(input_size) > 320: # FIXME const + pytest.skip("Fixed input size model > limit.") + + input_tensor = torch.randn((batch_size, *input_size)) + + outputs = model.forward_features(input_tensor) + if isinstance(outputs, (tuple, list)): + outputs = outputs[0] + assert outputs.shape[1] == model.num_features + + # test forward after deleting the classifier, output should be poooled, size(-1) == 
model.num_features + model.reset_classifier(0) + outputs = model.forward(input_tensor) + if isinstance(outputs, (tuple, list)): + outputs = outputs[0] + assert len(outputs.shape) == 2 + assert outputs.shape[1] == model.num_features + + model = create_model(model_name, pretrained=False, num_classes=0).eval() + outputs = model.forward(input_tensor) + if isinstance(outputs, (tuple, list)): + outputs = outputs[0] + assert len(outputs.shape) == 2 + assert outputs.shape[1] == model.num_features + + # check classifier name matches default_cfg + classifier = cfg['classifier'] + if not isinstance(classifier, (tuple, list)): + classifier = classifier, + for c in classifier: + assert c + ".weight" in state_dict.keys(), f'{c} not in model params' + + # check first conv(s) names match default_cfg + first_conv = cfg['first_conv'] + if isinstance(first_conv, str): + first_conv = (first_conv,) + assert isinstance(first_conv, (tuple, list)) + for fc in first_conv: + assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params' + + +if 'GITHUB_ACTIONS' not in os.environ: + @pytest.mark.timeout(120) + @pytest.mark.parametrize('model_name', list_models(pretrained=True)) + @pytest.mark.parametrize('batch_size', [1]) + def test_model_load_pretrained(model_name, batch_size): + """Create that pretrained weights load, verify support for in_chans != 3 while doing so.""" + in_chans = 3 if 'pruned' in model_name else 1 # pruning not currently supported with in_chans change + create_model(model_name, pretrained=True, in_chans=in_chans, num_classes=5) + create_model(model_name, pretrained=True, in_chans=in_chans, num_classes=0) + + @pytest.mark.timeout(120) + @pytest.mark.parametrize('model_name', list_models(pretrained=True, exclude_filters=NON_STD_FILTERS)) + @pytest.mark.parametrize('batch_size', [1]) + def test_model_features_pretrained(model_name, batch_size): + """Create that pretrained weights load when features_only==True.""" + create_model(model_name, pretrained=True, features_only=True) + +EXCLUDE_JIT_FILTERS = [ + '*iabn*', 'tresnet*', # models using inplace abn unlikely to ever be scriptable + 'dla*', 'hrnet*', 'ghostnet*', # hopefully fix at some point + 'vit_large_*', 'vit_huge_*', +] + + +@pytest.mark.timeout(120) +@pytest.mark.parametrize( + 'model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS, name_matches_cfg=True)) +@pytest.mark.parametrize('batch_size', [1]) +def test_model_forward_torchscript(model_name, batch_size): + """Run a single forward pass with each model""" + input_size = _get_input_size(model_name=model_name, target=TARGET_JIT_SIZE) + if max(input_size) > MAX_JIT_SIZE: + pytest.skip("Fixed input size model > limit.") + + with set_scriptable(True): + model = create_model(model_name, pretrained=False) + model.eval() + + model = torch.jit.script(model) + outputs = model(torch.randn((batch_size, *input_size))) + + assert outputs.shape[0] == batch_size + assert not torch.isnan(outputs).any(), 'Output included NaNs' + + +EXCLUDE_FEAT_FILTERS = [ + '*pruned*', # hopefully fix at some point +] + NON_STD_FILTERS +if 'GITHUB_ACTIONS' in os.environ: # and 'Linux' in platform.system(): + # GitHub Linux runner is slower and hits memory limits sooner than MacOS, exclude bigger models + EXCLUDE_FEAT_FILTERS += ['*resnext101_32x32d', '*resnext101_32x16d'] + + +@pytest.mark.timeout(120) +@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FEAT_FILTERS)) +@pytest.mark.parametrize('batch_size', [1]) +def 
test_model_forward_features(model_name, batch_size): + """Run a single forward pass with each model in feature extraction mode""" + model = create_model(model_name, pretrained=False, features_only=True) + model.eval() + expected_channels = model.feature_info.channels() + assert len(expected_channels) >= 4 # all models here should have at least 4 feature levels by default, some 5 or 6 + + input_size = _get_input_size(model=model, target=TARGET_FFEAT_SIZE) + if max(input_size) > MAX_FFEAT_SIZE: + pytest.skip("Fixed input size model > limit.") + + outputs = model(torch.randn((batch_size, *input_size))) + assert len(expected_channels) == len(outputs) + for e, o in zip(expected_channels, outputs): + assert e == o.shape[1] + assert o.shape[0] == batch_size + assert not torch.isnan(o).any() diff --git a/PyTorch/contrib/cv/classification/convmixer/tests/test_optim.py b/PyTorch/contrib/cv/classification/convmixer/tests/test_optim.py new file mode 100644 index 0000000000..737674e5cf --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/tests/test_optim.py @@ -0,0 +1,733 @@ +""" Optimzier Tests + +These tests were adapted from PyTorch' optimizer tests. + +""" +import math +import pytest +import functools +from copy import deepcopy + +import torch +from torch.testing._internal.common_utils import TestCase +from torch.autograd import Variable +from timm.scheduler import PlateauLRScheduler + +from timm.optim import create_optimizer_v2 + + +# HACK relying on internal PyTorch test functionality for comparisons that I don't want to write +torch_tc = TestCase() + + +def _test_basic_cases_template(weight, bias, input, constructor, scheduler_constructors): + weight = Variable(weight, requires_grad=True) + bias = Variable(bias, requires_grad=True) + input = Variable(input) + optimizer = constructor(weight, bias) + schedulers = [] + for scheduler_constructor in scheduler_constructors: + schedulers.append(scheduler_constructor(optimizer)) + + # to check if the optimizer can be printed as a string + optimizer.__repr__() + + def fn(): + optimizer.zero_grad() + y = weight.mv(input) + if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device(): + y = y.cuda(bias.get_device()) + loss = (y + bias).pow(2).sum() + loss.backward() + return loss + + initial_value = fn().item() + for _i in range(200): + for scheduler in schedulers: + if isinstance(scheduler, PlateauLRScheduler): + val_loss = fn() + scheduler.step(val_loss) + else: + scheduler.step() + optimizer.step(fn) + + assert fn().item() < initial_value + + +def _test_state_dict(weight, bias, input, constructor): + weight = Variable(weight, requires_grad=True) + bias = Variable(bias, requires_grad=True) + input = Variable(input) + + def fn_base(optimizer, weight, bias): + optimizer.zero_grad() + i = input_cuda if weight.is_cuda else input + loss = (weight.mv(i) + bias).pow(2).sum() + loss.backward() + return loss + + optimizer = constructor(weight, bias) + fn = functools.partial(fn_base, optimizer, weight, bias) + + # Prime the optimizer + for _i in range(20): + optimizer.step(fn) + # Clone the weights and construct new optimizer for them + weight_c = Variable(weight.data.clone(), requires_grad=True) + bias_c = Variable(bias.data.clone(), requires_grad=True) + optimizer_c = constructor(weight_c, bias_c) + fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c) + # Load state dict + state_dict = deepcopy(optimizer.state_dict()) + state_dict_c = deepcopy(optimizer.state_dict()) + optimizer_c.load_state_dict(state_dict_c) + + # Run both 
optimizations in parallel + for _i in range(20): + optimizer.step(fn) + optimizer_c.step(fn_c) + #assert torch.equal(weight, weight_c) + #assert torch.equal(bias, bias_c) + torch_tc.assertEqual(weight, weight_c) + torch_tc.assertEqual(bias, bias_c) + # Make sure state dict wasn't modified + torch_tc.assertEqual(state_dict, state_dict_c) + # Make sure state dict is deterministic with equal but not identical parameters + torch_tc.assertEqual(optimizer.state_dict(), optimizer_c.state_dict()) + # Make sure repeated parameters have identical representation in state dict + optimizer_c.param_groups.extend(optimizer_c.param_groups) + torch_tc.assertEqual(optimizer.state_dict()['param_groups'][-1], optimizer_c.state_dict()['param_groups'][-1]) + + # Check that state dict can be loaded even when we cast parameters + # to a different type and move to a different device. + if not torch.cuda.is_available(): + return + + input_cuda = Variable(input.data.float().cuda()) + weight_cuda = Variable(weight.data.float().cuda(), requires_grad=True) + bias_cuda = Variable(bias.data.float().cuda(), requires_grad=True) + optimizer_cuda = constructor(weight_cuda, bias_cuda) + fn_cuda = functools.partial(fn_base, optimizer_cuda, weight_cuda, bias_cuda) + + state_dict = deepcopy(optimizer.state_dict()) + state_dict_c = deepcopy(optimizer.state_dict()) + optimizer_cuda.load_state_dict(state_dict_c) + + # Make sure state dict wasn't modified + torch_tc.assertEqual(state_dict, state_dict_c) + + for _i in range(20): + optimizer.step(fn) + optimizer_cuda.step(fn_cuda) + torch_tc.assertEqual(weight, weight_cuda) + torch_tc.assertEqual(bias, bias_cuda) + + # validate deepcopy() copies all public attributes + def getPublicAttr(obj): + return set(k for k in obj.__dict__ if not k.startswith('_')) + + assert getPublicAttr(optimizer) == getPublicAttr(deepcopy(optimizer)) + + +def _test_basic_cases(constructor, scheduler_constructors=None): + if scheduler_constructors is None: + scheduler_constructors = [] + _test_state_dict( + torch.randn(10, 5), + torch.randn(10), + torch.randn(5), + constructor + ) + _test_basic_cases_template( + torch.randn(10, 5), + torch.randn(10), + torch.randn(5), + constructor, + scheduler_constructors + ) + # non-contiguous parameters + _test_basic_cases_template( + torch.randn(10, 5, 2)[..., 0], + torch.randn(10, 2)[..., 0], + torch.randn(5), + constructor, + scheduler_constructors + ) + # CUDA + if not torch.cuda.is_available(): + return + _test_basic_cases_template( + torch.randn(10, 5).cuda(), + torch.randn(10).cuda(), + torch.randn(5).cuda(), + constructor, + scheduler_constructors + ) + + +def _test_model(optimizer, params, device=torch.device('cpu')): + weight = torch.tensor( + [[-0.2109, -0.4976], [-0.1413, -0.3420], [-0.2524, 0.6976]], + device=device, requires_grad=True) + bias = torch.tensor([-0.1085, -0.2979, 0.6892], device=device, requires_grad=True) + weight2 = torch.tensor([[-0.0508, -0.3941, -0.2843]], device=device, requires_grad=True) + bias2 = torch.tensor([-0.0711], device=device, requires_grad=True) + input = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], device=device).reshape(3, 2) + + model = torch.nn.Sequential(torch.nn.Linear(2, 3), + torch.nn.Sigmoid(), + torch.nn.Linear(3, 1), + torch.nn.Sigmoid()) + model.to(device) + + pretrained_dict = model.state_dict() + pretrained_dict['0.weight'] = weight + pretrained_dict['0.bias'] = bias + pretrained_dict['2.weight'] = weight2 + pretrained_dict['2.bias'] = bias2 + model.load_state_dict(pretrained_dict) + + optimizer = 
create_optimizer_v2(model, opt=optimizer, **params) + + prev_loss = float('inf') + for i in range(20): + optimizer.zero_grad() + output = model(input) + loss = output.sum() + loss.backward() + loss = loss.item() + assert loss < prev_loss + prev_loss = loss + optimizer.step() + + +def rosenbrock(tensor): + x, y = tensor + return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2 + + +def drosenbrock(tensor): + x, y = tensor + return torch.tensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2))) + + +def _test_rosenbrock(constructor, scheduler_constructors=None): + if scheduler_constructors is None: + scheduler_constructors = [] + params_t = torch.tensor([1.5, 1.5]) + + params = Variable(params_t, requires_grad=True) + optimizer = constructor([params]) + schedulers = [] + for scheduler_constructor in scheduler_constructors: + schedulers.append(scheduler_constructor(optimizer)) + + solution = torch.tensor([1, 1]) + initial_dist = params.data.dist(solution) + + def eval(params, w): + # Depending on w, provide only the x or y gradient + optimizer.zero_grad() + loss = rosenbrock(params) + loss.backward() + grad = drosenbrock(params.data) + # NB: We torture test the optimizer by returning an + # uncoalesced sparse tensor + if w: + i = torch.LongTensor([[0, 0]]) + x = grad[0] + v = torch.tensor([x / 4., x - x / 4.]) + else: + i = torch.LongTensor([[1, 1]]) + y = grad[1] + v = torch.tensor([y - y / 4., y / 4.]) + x = torch.sparse.DoubleTensor(i, v, torch.Size([2])).to(dtype=v.dtype) + with torch.no_grad(): + params.grad = x.to_dense() + return loss + + for i in range(2000): + # Do cyclic coordinate descent + w = i % 2 + optimizer.step(functools.partial(eval, params, w)) + for scheduler in schedulers: + if isinstance(scheduler, PlateauLRScheduler): + scheduler.step(rosenbrock(params)) + else: + scheduler.step() + + torch_tc.assertLessEqual(params.data.dist(solution), initial_dist) + + +def _build_params_dict(weight, bias, **kwargs): + return [{'params': [weight]}, dict(params=[bias], **kwargs)] + + +def _build_params_dict_single(weight, bias, **kwargs): + return [dict(params=bias, **kwargs)] + + +#@pytest.mark.parametrize('optimizer', ['sgd', 'momentum']) +# FIXME momentum variant frequently fails in GitHub runner, but never local after many attempts +@pytest.mark.parametrize('optimizer', ['sgd']) +def test_sgd(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=1e-2), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=1e-2), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=1e-2), optimizer) + ) + # _test_basic_cases( + # lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3), + # [lambda opt: StepLR(opt, gamma=0.9, step_size=10)] + # ) + # _test_basic_cases( + # lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3), + # [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="linear")] + # ) + # _test_basic_cases( + # lambda weight, bias: optimizer([weight, bias], lr=1e-3), + # [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="constant")] + # ) + # _test_basic_cases( + # lambda weight, bias: optimizer([weight, bias], lr=1e-3), + # [lambda opt: StepLR(opt, gamma=0.9, 
step_size=10), + # lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4)] + # ) + # _test_basic_cases( + # lambda weight, bias: optimizer([weight, bias], lr=1e-3), + # [lambda opt: StepLR(opt, gamma=0.9, step_size=10), + # lambda opt: ReduceLROnPlateau(opt)] + # ) + # _test_basic_cases( + # lambda weight, bias: optimizer([weight, bias], lr=1e-3), + # [lambda opt: StepLR(opt, gamma=0.99, step_size=10), + # lambda opt: ExponentialLR(opt, gamma=0.99), + # lambda opt: ReduceLROnPlateau(opt)] + # ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1, weight_decay=.1) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) + ) + _test_model(optimizer, dict(lr=1e-3)) + + +@pytest.mark.parametrize('optimizer', ['adamw', 'adam', 'nadam', 'adamax']) +def test_adam(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) + ) + _test_model(optimizer, dict(lr=5e-2)) + + +@pytest.mark.parametrize('optimizer', ['adabelief']) +def test_adabelief(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) + ) + _test_model(optimizer, dict(lr=5e-2)) + + +@pytest.mark.parametrize('optimizer', ['radam', 'radabelief']) +def test_rectified(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) + ) + _test_model(optimizer, dict(lr=1e-3)) + + +@pytest.mark.parametrize('optimizer', ['adadelta', 'adagrad']) +def test_adaother(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + 
_build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-1) + ) + _test_model(optimizer, dict(lr=5e-2)) + + +@pytest.mark.parametrize('optimizer', ['adafactor']) +def test_adafactor(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2(_build_params_dict_single(weight, bias), optimizer) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) + ) + _test_model(optimizer, dict(lr=5e-2)) + + +@pytest.mark.parametrize('optimizer', ['lamb', 'lambc']) +def test_lamb(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=1e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=1e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=1e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) + ) + _test_model(optimizer, dict(lr=1e-3)) + + +@pytest.mark.parametrize('optimizer', ['lars', 'larc', 'nlars', 'nlarc']) +def test_lars(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=1e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=1e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=1e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) + ) + _test_model(optimizer, dict(lr=1e-3)) + + +@pytest.mark.parametrize('optimizer', ['madgrad', 'madgradw']) +def test_madgrad(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-2) + ) + _test_model(optimizer, dict(lr=1e-2)) + + +@pytest.mark.parametrize('optimizer', ['novograd']) +def test_novograd(optimizer): + _test_basic_cases( + lambda weight, bias: 
create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) + ) + _test_model(optimizer, dict(lr=1e-3)) + + +@pytest.mark.parametrize('optimizer', ['rmsprop', 'rmsproptf']) +def test_rmsprop(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-2) + ) + _test_model(optimizer, dict(lr=1e-2)) + + +@pytest.mark.parametrize('optimizer', ['adamp']) +def test_adamp(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) + ) + _test_model(optimizer, dict(lr=5e-2)) + + +@pytest.mark.parametrize('optimizer', ['sgdp']) +def test_sgdp(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) + ) + _test_model(optimizer, dict(lr=1e-3)) + + +@pytest.mark.parametrize('optimizer', ['lookahead_sgd', 'lookahead_momentum']) +def test_lookahead_sgd(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-3) + ) + + 
+@pytest.mark.parametrize('optimizer', ['lookahead_adamw', 'lookahead_adam']) +def test_lookahead_adam(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=5e-2) + ) + + +@pytest.mark.parametrize('optimizer', ['lookahead_radam']) +def test_lookahead_radam(optimizer): + _test_basic_cases( + lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), + optimizer, + lr=1e-3) + ) + _test_basic_cases( + lambda weight, bias: create_optimizer_v2( + _build_params_dict_single(weight, bias, lr=3e-3), optimizer) + ) + _test_rosenbrock( + lambda params: create_optimizer_v2(params, optimizer, lr=1e-4) + ) + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/__init__.py b/PyTorch/contrib/cv/classification/convmixer/timm/__init__.py new file mode 100644 index 0000000000..04ec7e51b8 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/__init__.py @@ -0,0 +1,4 @@ +from .version import __version__ +from .models import create_model, list_models, is_model, list_modules, model_entrypoint, \ + is_scriptable, is_exportable, set_scriptable, set_exportable, has_model_default_key, is_model_default_key, \ + get_model_default_value, is_model_pretrained diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/__init__.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/__init__.py new file mode 100644 index 0000000000..7d3cb2b4d7 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/__init__.py @@ -0,0 +1,12 @@ +from .auto_augment import RandAugment, AutoAugment, rand_augment_ops, auto_augment_policy,\ + rand_augment_transform, auto_augment_transform +from .config import resolve_data_config +from .constants import * +from .dataset import ImageDataset, IterableImageDataset, AugMixDataset +from .dataset_factory import create_dataset +from .loader import create_loader +from .mixup import Mixup, FastCollateMixup +from .parsers import create_parser +from .real_labels import RealLabelsImagenet +from .transforms import * +from .transforms_factory import create_transform \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/auto_augment.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/auto_augment.py new file mode 100644 index 0000000000..7d80d702e5 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/auto_augment.py @@ -0,0 +1,833 @@ +""" AutoAugment, RandAugment, and AugMix for PyTorch + +This code implements the searched ImageNet policies with various tweaks and improvements and +does not include any of the search code. 
+ +AA and RA Implementation adapted from: + https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py + +AugMix adapted from: + https://github.com/google-research/augmix + +Papers: + AutoAugment: Learning Augmentation Policies from Data - https://arxiv.org/abs/1805.09501 + Learning Data Augmentation Strategies for Object Detection - https://arxiv.org/abs/1906.11172 + RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719 + AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - https://arxiv.org/abs/1912.02781 + +Hacked together by / Copyright 2020 Ross Wightman +""" +import random +import math +import re +from PIL import Image, ImageOps, ImageEnhance, ImageChops +import PIL +import numpy as np + + +_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]]) + +_FILL = (128, 128, 128) + +_LEVEL_DENOM = 10. # denominator for conversion from 'Mx' magnitude scale to fractional aug level for op arguments + +_HPARAMS_DEFAULT = dict( + translate_const=250, + img_mean=_FILL, +) + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +def _interpolation(kwargs): + interpolation = kwargs.pop('resample', Image.BILINEAR) + if isinstance(interpolation, (list, tuple)): + return random.choice(interpolation) + else: + return interpolation + + +def _check_args_tf(kwargs): + if 'fillcolor' in kwargs and _PIL_VER < (5, 0): + kwargs.pop('fillcolor') + kwargs['resample'] = _interpolation(kwargs) + + +def shear_x(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs) + + +def shear_y(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs) + + +def translate_x_rel(img, pct, **kwargs): + pixels = pct * img.size[0] + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) + + +def translate_y_rel(img, pct, **kwargs): + pixels = pct * img.size[1] + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs) + + +def translate_x_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs) + + +def translate_y_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs) + + +def rotate(img, degrees, **kwargs): + _check_args_tf(kwargs) + if _PIL_VER >= (5, 2): + return img.rotate(degrees, **kwargs) + elif _PIL_VER >= (5, 0): + w, h = img.size + post_trans = (0, 0) + rotn_center = (w / 2.0, h / 2.0) + angle = -math.radians(degrees) + matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + matrix[2], matrix[5] = transform( + -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix + ) + matrix[2] += rotn_center[0] + matrix[5] += rotn_center[1] + return img.transform(img.size, Image.AFFINE, matrix, **kwargs) + else: + return img.rotate(degrees, resample=kwargs['resample']) + + +def auto_contrast(img, **__): + return ImageOps.autocontrast(img) + + +def invert(img, **__): + return ImageOps.invert(img) + + +def equalize(img, **__): + return ImageOps.equalize(img) + + +def solarize(img, thresh, **__): + return 
ImageOps.solarize(img, thresh) + + +def solarize_add(img, add, thresh=128, **__): + lut = [] + for i in range(256): + if i < thresh: + lut.append(min(255, i + add)) + else: + lut.append(i) + if img.mode in ("L", "RGB"): + if img.mode == "RGB" and len(lut) == 256: + lut = lut + lut + lut + return img.point(lut) + else: + return img + + +def posterize(img, bits_to_keep, **__): + if bits_to_keep >= 8: + return img + return ImageOps.posterize(img, bits_to_keep) + + +def contrast(img, factor, **__): + return ImageEnhance.Contrast(img).enhance(factor) + + +def color(img, factor, **__): + return ImageEnhance.Color(img).enhance(factor) + + +def brightness(img, factor, **__): + return ImageEnhance.Brightness(img).enhance(factor) + + +def sharpness(img, factor, **__): + return ImageEnhance.Sharpness(img).enhance(factor) + + +def _randomly_negate(v): + """With 50% prob, negate the value""" + return -v if random.random() > 0.5 else v + + +def _rotate_level_to_arg(level, _hparams): + # range [-30, 30] + level = (level / _LEVEL_DENOM) * 30. + level = _randomly_negate(level) + return level, + + +def _enhance_level_to_arg(level, _hparams): + # range [0.1, 1.9] + return (level / _LEVEL_DENOM) * 1.8 + 0.1, + + +def _enhance_increasing_level_to_arg(level, _hparams): + # the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend + # range [0.1, 1.9] if level <= _LEVEL_DENOM + level = (level / _LEVEL_DENOM) * .9 + level = max(0.1, 1.0 + _randomly_negate(level)) # keep it >= 0.1 + return level, + + +def _shear_level_to_arg(level, _hparams): + # range [-0.3, 0.3] + level = (level / _LEVEL_DENOM) * 0.3 + level = _randomly_negate(level) + return level, + + +def _translate_abs_level_to_arg(level, hparams): + translate_const = hparams['translate_const'] + level = (level / _LEVEL_DENOM) * float(translate_const) + level = _randomly_negate(level) + return level, + + +def _translate_rel_level_to_arg(level, hparams): + # default range [-0.45, 0.45] + translate_pct = hparams.get('translate_pct', 0.45) + level = (level / _LEVEL_DENOM) * translate_pct + level = _randomly_negate(level) + return level, + + +def _posterize_level_to_arg(level, _hparams): + # As per Tensorflow TPU EfficientNet impl + # range [0, 4], 'keep 0 up to 4 MSB of original image' + # intensity/severity of augmentation decreases with level + return int((level / _LEVEL_DENOM) * 4), + + +def _posterize_increasing_level_to_arg(level, hparams): + # As per Tensorflow models research and UDA impl + # range [4, 0], 'keep 4 down to 0 MSB of original image', + # intensity/severity of augmentation increases with level + return 4 - _posterize_level_to_arg(level, hparams)[0], + + +def _posterize_original_level_to_arg(level, _hparams): + # As per original AutoAugment paper description + # range [4, 8], 'keep 4 up to 8 MSB of image' + # intensity/severity of augmentation decreases with level + return int((level / _LEVEL_DENOM) * 4) + 4, + + +def _solarize_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation decreases with level + return int((level / _LEVEL_DENOM) * 256), + + +def _solarize_increasing_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation increases with level + return 256 - _solarize_level_to_arg(level, _hparams)[0], + + +def _solarize_add_level_to_arg(level, _hparams): + # range [0, 110] + return int((level / _LEVEL_DENOM) * 110), + + +LEVEL_TO_ARG = { + 'AutoContrast': None, + 'Equalize': None, + 'Invert': None, + 'Rotate': 
_rotate_level_to_arg, + # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers + 'Posterize': _posterize_level_to_arg, + 'PosterizeIncreasing': _posterize_increasing_level_to_arg, + 'PosterizeOriginal': _posterize_original_level_to_arg, + 'Solarize': _solarize_level_to_arg, + 'SolarizeIncreasing': _solarize_increasing_level_to_arg, + 'SolarizeAdd': _solarize_add_level_to_arg, + 'Color': _enhance_level_to_arg, + 'ColorIncreasing': _enhance_increasing_level_to_arg, + 'Contrast': _enhance_level_to_arg, + 'ContrastIncreasing': _enhance_increasing_level_to_arg, + 'Brightness': _enhance_level_to_arg, + 'BrightnessIncreasing': _enhance_increasing_level_to_arg, + 'Sharpness': _enhance_level_to_arg, + 'SharpnessIncreasing': _enhance_increasing_level_to_arg, + 'ShearX': _shear_level_to_arg, + 'ShearY': _shear_level_to_arg, + 'TranslateX': _translate_abs_level_to_arg, + 'TranslateY': _translate_abs_level_to_arg, + 'TranslateXRel': _translate_rel_level_to_arg, + 'TranslateYRel': _translate_rel_level_to_arg, +} + + +NAME_TO_OP = { + 'AutoContrast': auto_contrast, + 'Equalize': equalize, + 'Invert': invert, + 'Rotate': rotate, + 'Posterize': posterize, + 'PosterizeIncreasing': posterize, + 'PosterizeOriginal': posterize, + 'Solarize': solarize, + 'SolarizeIncreasing': solarize, + 'SolarizeAdd': solarize_add, + 'Color': color, + 'ColorIncreasing': color, + 'Contrast': contrast, + 'ContrastIncreasing': contrast, + 'Brightness': brightness, + 'BrightnessIncreasing': brightness, + 'Sharpness': sharpness, + 'SharpnessIncreasing': sharpness, + 'ShearX': shear_x, + 'ShearY': shear_y, + 'TranslateX': translate_x_abs, + 'TranslateY': translate_y_abs, + 'TranslateXRel': translate_x_rel, + 'TranslateYRel': translate_y_rel, +} + + +class AugmentOp: + + def __init__(self, name, prob=0.5, magnitude=10, hparams=None): + hparams = hparams or _HPARAMS_DEFAULT + self.aug_fn = NAME_TO_OP[name] + self.level_fn = LEVEL_TO_ARG[name] + self.prob = prob + self.magnitude = magnitude + self.hparams = hparams.copy() + self.kwargs = dict( + fillcolor=hparams['img_mean'] if 'img_mean' in hparams else _FILL, + resample=hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION, + ) + + # If magnitude_std is > 0, we introduce some randomness + # in the usually fixed policy and sample magnitude from a normal distribution + # with mean `magnitude` and std-dev of `magnitude_std`. + # NOTE This is my own hack, being tested, not in papers or reference impls. 
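+        # In short (a sketch of __call__ below): for 0 < magnitude_std < inf the op samples
+        # magnitude ~ Normal(magnitude, magnitude_std), then clamps the result to
+        # [0, magnitude_max or _LEVEL_DENOM] before converting it to op arguments via level_fn.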
+ # If magnitude_std is inf, we sample magnitude from a uniform distribution + self.magnitude_std = self.hparams.get('magnitude_std', 0) + self.magnitude_max = self.hparams.get('magnitude_max', None) + + def __call__(self, img): + if self.prob < 1.0 and random.random() > self.prob: + return img + magnitude = self.magnitude + if self.magnitude_std > 0: + # magnitude randomization enabled + if self.magnitude_std == float('inf'): + magnitude = random.uniform(0, magnitude) + elif self.magnitude_std > 0: + magnitude = random.gauss(magnitude, self.magnitude_std) + # default upper_bound for the timm RA impl is _LEVEL_DENOM (10) + # setting magnitude_max overrides this to allow M > 10 (behaviour closer to Google TF RA impl) + upper_bound = self.magnitude_max or _LEVEL_DENOM + magnitude = max(0., min(magnitude, upper_bound)) + level_args = self.level_fn(magnitude, self.hparams) if self.level_fn is not None else tuple() + return self.aug_fn(img, *level_args, **self.kwargs) + + +def auto_augment_policy_v0(hparams): + # ImageNet v0 policy from TPU EfficientNet impl, cannot find a paper reference. + policy = [ + [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Color', 0.4, 1), ('Rotate', 0.6, 8)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], + [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], + [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], + [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], + [('ShearY', 0.8, 0), ('Color', 0.6, 4)], + [('Color', 1.0, 0), ('Rotate', 0.6, 2)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], + [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], # This results in black image with Tpu posterize + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + [('Color', 0.8, 6), ('Rotate', 0.4, 5)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_v0r(hparams): + # ImageNet v0 policy from TPU EfficientNet impl, with variation of Posterize used + # in Google research implementation (number of bits discarded increases with magnitude) + policy = [ + [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Color', 0.4, 1), ('Rotate', 0.6, 8)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], + [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('PosterizeIncreasing', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], + [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)], + [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], + [('ShearY', 0.8, 0), ('Color', 0.6, 4)], + [('Color', 1.0, 0), ('Rotate', 0.6, 2)], + [('Equalize', 
0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], + [('PosterizeIncreasing', 0.8, 2), ('Solarize', 0.6, 10)], + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + [('Color', 0.8, 6), ('Rotate', 0.4, 5)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_original(hparams): + # ImageNet policy from https://arxiv.org/abs/1805.09501 + policy = [ + [('PosterizeOriginal', 0.4, 8), ('Rotate', 0.6, 9)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + [('PosterizeOriginal', 0.6, 7), ('PosterizeOriginal', 0.6, 6)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], + [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], + [('PosterizeOriginal', 0.8, 5), ('Equalize', 1.0, 2)], + [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], + [('Equalize', 0.6, 8), ('PosterizeOriginal', 0.4, 6)], + [('Rotate', 0.8, 8), ('Color', 0.4, 0)], + [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], + [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Rotate', 0.8, 8), ('Color', 1.0, 2)], + [('Color', 0.8, 8), ('Solarize', 0.8, 7)], + [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], + [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], + [('Color', 0.4, 0), ('Equalize', 0.6, 3)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy_originalr(hparams): + # ImageNet policy from https://arxiv.org/abs/1805.09501 with research posterize variation + policy = [ + [('PosterizeIncreasing', 0.4, 8), ('Rotate', 0.6, 9)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + [('PosterizeIncreasing', 0.6, 7), ('PosterizeIncreasing', 0.6, 6)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], + [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], + [('PosterizeIncreasing', 0.8, 5), ('Equalize', 1.0, 2)], + [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], + [('Equalize', 0.6, 8), ('PosterizeIncreasing', 0.4, 6)], + [('Rotate', 0.8, 8), ('Color', 0.4, 0)], + [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], + [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Rotate', 0.8, 8), ('Color', 1.0, 2)], + [('Color', 0.8, 8), ('Solarize', 0.8, 7)], + [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], + [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], + [('Color', 0.4, 0), ('Equalize', 0.6, 3)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + ] + pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy] + return pc + + +def auto_augment_policy(name='v0', hparams=None): + hparams = hparams or _HPARAMS_DEFAULT + if name == 'original': + return auto_augment_policy_original(hparams) + elif name == 'originalr': + return auto_augment_policy_originalr(hparams) + elif name == 'v0': + return 
auto_augment_policy_v0(hparams) + elif name == 'v0r': + return auto_augment_policy_v0r(hparams) + else: + assert False, 'Unknown AA policy (%s)' % name + + +class AutoAugment: + + def __init__(self, policy): + self.policy = policy + + def __call__(self, img): + sub_policy = random.choice(self.policy) + for op in sub_policy: + img = op(img) + return img + + +def auto_augment_transform(config_str, hparams): + """ + Create a AutoAugment transform + + :param config_str: String defining configuration of auto augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the AutoAugment policy (one of 'v0', 'v0r', 'original', 'originalr'). + The remaining sections, not order sepecific determine + 'mstd' - float std deviation of magnitude noise applied + Ex 'original-mstd0.5' results in AutoAugment with original policy, magnitude_std 0.5 + + :param hparams: Other hparams (kwargs) for the AutoAugmentation scheme + + :return: A PyTorch compatible Transform + """ + config = config_str.split('-') + policy_name = config[0] + config = config[1:] + for c in config: + cs = re.split(r'(\d.*)', c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == 'mstd': + # noise param injected via hparams for now + hparams.setdefault('magnitude_std', float(val)) + else: + assert False, 'Unknown AutoAugment config section' + aa_policy = auto_augment_policy(policy_name, hparams=hparams) + return AutoAugment(aa_policy) + + +_RAND_TRANSFORMS = [ + 'AutoContrast', + 'Equalize', + 'Invert', + 'Rotate', + 'Posterize', + 'Solarize', + 'SolarizeAdd', + 'Color', + 'Contrast', + 'Brightness', + 'Sharpness', + 'ShearX', + 'ShearY', + 'TranslateXRel', + 'TranslateYRel', + #'Cutout' # NOTE I've implement this as random erasing separately +] + + +_RAND_INCREASING_TRANSFORMS = [ + 'AutoContrast', + 'Equalize', + 'Invert', + 'Rotate', + 'PosterizeIncreasing', + 'SolarizeIncreasing', + 'SolarizeAdd', + 'ColorIncreasing', + 'ContrastIncreasing', + 'BrightnessIncreasing', + 'SharpnessIncreasing', + 'ShearX', + 'ShearY', + 'TranslateXRel', + 'TranslateYRel', + #'Cutout' # NOTE I've implement this as random erasing separately +] + + + +# These experimental weights are based loosely on the relative improvements mentioned in paper. +# They may not result in increased performance, but could likely be tuned to so. 
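+# These weights are consumed by _select_rand_weights() below, which normalizes them into a probability
+# vector for np.random.choice in RandAugment.__call__. Illustrative usage (names as defined in this file):
+#   probs = _select_rand_weights(0)                        # normalized over _RAND_TRANSFORMS
+#   ops = np.random.choice(ra_ops, num_layers, replace=False, p=probs)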
+_RAND_CHOICE_WEIGHTS_0 = { + 'Rotate': 0.3, + 'ShearX': 0.2, + 'ShearY': 0.2, + 'TranslateXRel': 0.1, + 'TranslateYRel': 0.1, + 'Color': .025, + 'Sharpness': 0.025, + 'AutoContrast': 0.025, + 'Solarize': .005, + 'SolarizeAdd': .005, + 'Contrast': .005, + 'Brightness': .005, + 'Equalize': .005, + 'Posterize': 0, + 'Invert': 0, +} + + +def _select_rand_weights(weight_idx=0, transforms=None): + transforms = transforms or _RAND_TRANSFORMS + assert weight_idx == 0 # only one set of weights currently + rand_weights = _RAND_CHOICE_WEIGHTS_0 + probs = [rand_weights[k] for k in transforms] + probs /= np.sum(probs) + return probs + + +def rand_augment_ops(magnitude=10, hparams=None, transforms=None): + hparams = hparams or _HPARAMS_DEFAULT + transforms = transforms or _RAND_TRANSFORMS + return [AugmentOp( + name, prob=0.5, magnitude=magnitude, hparams=hparams) for name in transforms] + + +class RandAugment: + def __init__(self, ops, num_layers=2, choice_weights=None): + self.ops = ops + self.num_layers = num_layers + self.choice_weights = choice_weights + + def __call__(self, img): + # no replacement when using weighted choice + ops = np.random.choice( + self.ops, self.num_layers, replace=self.choice_weights is None, p=self.choice_weights) + for op in ops: + img = op(img) + return img + + +def rand_augment_transform(config_str, hparams): + """ + Create a RandAugment transform + + :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining + sections, not order sepecific determine + 'm' - integer magnitude of rand augment + 'n' - integer num layers (number of transform ops selected per image) + 'w' - integer probabiliy weight index (index of a set of weights to influence choice of op) + 'mstd' - float std deviation of magnitude noise applied, or uniform sampling if infinity (or > 100) + 'mmax' - set upper bound for magnitude to something other than default of _LEVEL_DENOM (10) + 'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0) + Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5 + 'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2 + + :param hparams: Other hparams (kwargs) for the RandAugmentation scheme + + :return: A PyTorch compatible Transform + """ + magnitude = _LEVEL_DENOM # default to _LEVEL_DENOM for magnitude (currently 10) + num_layers = 2 # default to 2 ops per image + weight_idx = None # default to no probability weights for op choice + transforms = _RAND_TRANSFORMS + config = config_str.split('-') + assert config[0] == 'rand' + config = config[1:] + for c in config: + cs = re.split(r'(\d.*)', c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == 'mstd': + # noise param / randomization of magnitude values + mstd = float(val) + if mstd > 100: + # use uniform sampling in 0 to magnitude if mstd is > 100 + mstd = float('inf') + hparams.setdefault('magnitude_std', mstd) + elif key == 'mmax': + # clip magnitude between [0, mmax] instead of default [0, _LEVEL_DENOM] + hparams.setdefault('magnitude_max', int(val)) + elif key == 'inc': + if bool(val): + transforms = _RAND_INCREASING_TRANSFORMS + elif key == 'm': + magnitude = int(val) + elif key == 'n': + num_layers = int(val) + elif key == 'w': + weight_idx = int(val) + else: + assert False, 'Unknown RandAugment config 
section' + ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams, transforms=transforms) + choice_weights = None if weight_idx is None else _select_rand_weights(weight_idx) + return RandAugment(ra_ops, num_layers, choice_weights=choice_weights) + + +_AUGMIX_TRANSFORMS = [ + 'AutoContrast', + 'ColorIncreasing', # not in paper + 'ContrastIncreasing', # not in paper + 'BrightnessIncreasing', # not in paper + 'SharpnessIncreasing', # not in paper + 'Equalize', + 'Rotate', + 'PosterizeIncreasing', + 'SolarizeIncreasing', + 'ShearX', + 'ShearY', + 'TranslateXRel', + 'TranslateYRel', +] + + +def augmix_ops(magnitude=10, hparams=None, transforms=None): + hparams = hparams or _HPARAMS_DEFAULT + transforms = transforms or _AUGMIX_TRANSFORMS + return [AugmentOp( + name, prob=1.0, magnitude=magnitude, hparams=hparams) for name in transforms] + + +class AugMixAugment: + """ AugMix Transform + Adapted and improved from impl here: https://github.com/google-research/augmix/blob/master/imagenet.py + From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - + https://arxiv.org/abs/1912.02781 + """ + def __init__(self, ops, alpha=1., width=3, depth=-1, blended=False): + self.ops = ops + self.alpha = alpha + self.width = width + self.depth = depth + self.blended = blended # blended mode is faster but not well tested + + def _calc_blended_weights(self, ws, m): + ws = ws * m + cump = 1. + rws = [] + for w in ws[::-1]: + alpha = w / cump + cump *= (1 - alpha) + rws.append(alpha) + return np.array(rws[::-1], dtype=np.float32) + + def _apply_blended(self, img, mixing_weights, m): + # This is my first crack and implementing a slightly faster mixed augmentation. Instead + # of accumulating the mix for each chain in a Numpy array and then blending with original, + # it recomputes the blending coefficients and applies one PIL image blend per chain. + # TODO the results appear in the right ballpark but they differ by more than rounding. + img_orig = img.copy() + ws = self._calc_blended_weights(mixing_weights, m) + for w in ws: + depth = self.depth if self.depth > 0 else np.random.randint(1, 4) + ops = np.random.choice(self.ops, depth, replace=True) + img_aug = img_orig # no ops are in-place, deep copy not necessary + for op in ops: + img_aug = op(img_aug) + img = Image.blend(img, img_aug, w) + return img + + def _apply_basic(self, img, mixing_weights, m): + # This is a literal adaptation of the paper/official implementation without normalizations and + # PIL <-> Numpy conversions between every op. It is still quite CPU compute heavy compared to the + # typical augmentation transforms, could use a GPU / Kornia implementation. 
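+        # Roughly, with w ~ Dirichlet([alpha] * width) and m ~ Beta(alpha, alpha) (see __call__ below):
+        #   mixed  = sum_i w_i * chain_i(img)     # each chain applies `depth` randomly chosen ops
+        #   result = (1 - m) * img + m * mixed    # the final Image.blend(img, mixed, m)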
+ img_shape = img.size[0], img.size[1], len(img.getbands()) + mixed = np.zeros(img_shape, dtype=np.float32) + for mw in mixing_weights: + depth = self.depth if self.depth > 0 else np.random.randint(1, 4) + ops = np.random.choice(self.ops, depth, replace=True) + img_aug = img # no ops are in-place, deep copy not necessary + for op in ops: + img_aug = op(img_aug) + mixed += mw * np.asarray(img_aug, dtype=np.float32) + np.clip(mixed, 0, 255., out=mixed) + mixed = Image.fromarray(mixed.astype(np.uint8)) + return Image.blend(img, mixed, m) + + def __call__(self, img): + mixing_weights = np.float32(np.random.dirichlet([self.alpha] * self.width)) + m = np.float32(np.random.beta(self.alpha, self.alpha)) + if self.blended: + mixed = self._apply_blended(img, mixing_weights, m) + else: + mixed = self._apply_basic(img, mixing_weights, m) + return mixed + + +def augment_and_mix_transform(config_str, hparams): + """ Create AugMix PyTorch transform + + :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining + sections, not order sepecific determine + 'm' - integer magnitude (severity) of augmentation mix (default: 3) + 'w' - integer width of augmentation chain (default: 3) + 'd' - integer depth of augmentation chain (-1 is random [1, 3], default: -1) + 'b' - integer (bool), blend each branch of chain into end result without a final blend, less CPU (default: 0) + 'mstd' - float std deviation of magnitude noise applied (default: 0) + Ex 'augmix-m5-w4-d2' results in AugMix with severity 5, chain width 4, chain depth 2 + + :param hparams: Other hparams (kwargs) for the Augmentation transforms + + :return: A PyTorch compatible Transform + """ + magnitude = 3 + width = 3 + depth = -1 + alpha = 1. 
+ blended = False + config = config_str.split('-') + assert config[0] == 'augmix' + config = config[1:] + for c in config: + cs = re.split(r'(\d.*)', c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == 'mstd': + # noise param injected via hparams for now + hparams.setdefault('magnitude_std', float(val)) + elif key == 'm': + magnitude = int(val) + elif key == 'w': + width = int(val) + elif key == 'd': + depth = int(val) + elif key == 'a': + alpha = float(val) + elif key == 'b': + blended = bool(val) + else: + assert False, 'Unknown AugMix config section' + hparams.setdefault('magnitude_std', float('inf')) # default to uniform sampling (if not set via mstd arg) + ops = augmix_ops(magnitude=magnitude, hparams=hparams) + return AugMixAugment(ops, alpha=alpha, width=width, depth=depth, blended=blended) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/config.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/config.py new file mode 100644 index 0000000000..38f5689a70 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/config.py @@ -0,0 +1,78 @@ +import logging +from .constants import * + + +_logger = logging.getLogger(__name__) + + +def resolve_data_config(args, default_cfg={}, model=None, use_test_size=False, verbose=False): + new_config = {} + default_cfg = default_cfg + if not default_cfg and model is not None and hasattr(model, 'default_cfg'): + default_cfg = model.default_cfg + + # Resolve input/image size + in_chans = 3 + if 'chans' in args and args['chans'] is not None: + in_chans = args['chans'] + + input_size = (in_chans, 224, 224) + if 'input_size' in args and args['input_size'] is not None: + assert isinstance(args['input_size'], (tuple, list)) + assert len(args['input_size']) == 3 + input_size = tuple(args['input_size']) + in_chans = input_size[0] # input_size overrides in_chans + elif 'img_size' in args and args['img_size'] is not None: + assert isinstance(args['img_size'], int) + input_size = (in_chans, args['img_size'], args['img_size']) + else: + if use_test_size and 'test_input_size' in default_cfg: + input_size = default_cfg['test_input_size'] + elif 'input_size' in default_cfg: + input_size = default_cfg['input_size'] + new_config['input_size'] = input_size + + # resolve interpolation method + new_config['interpolation'] = 'bicubic' + if 'interpolation' in args and args['interpolation']: + new_config['interpolation'] = args['interpolation'] + elif 'interpolation' in default_cfg: + new_config['interpolation'] = default_cfg['interpolation'] + + # resolve dataset + model mean for normalization + new_config['mean'] = IMAGENET_DEFAULT_MEAN + if 'mean' in args and args['mean'] is not None: + mean = tuple(args['mean']) + if len(mean) == 1: + mean = tuple(list(mean) * in_chans) + else: + assert len(mean) == in_chans + new_config['mean'] = mean + elif 'mean' in default_cfg: + new_config['mean'] = default_cfg['mean'] + + # resolve dataset + model std deviation for normalization + new_config['std'] = IMAGENET_DEFAULT_STD + if 'std' in args and args['std'] is not None: + std = tuple(args['std']) + if len(std) == 1: + std = tuple(list(std) * in_chans) + else: + assert len(std) == in_chans + new_config['std'] = std + elif 'std' in default_cfg: + new_config['std'] = default_cfg['std'] + + # resolve default crop percentage + new_config['crop_pct'] = DEFAULT_CROP_PCT + if 'crop_pct' in args and args['crop_pct'] is not None: + new_config['crop_pct'] = args['crop_pct'] + elif 'crop_pct' in default_cfg: + new_config['crop_pct'] = 
default_cfg['crop_pct'] + + if verbose: + _logger.info('Data processing configuration for current model + dataset:') + for n, v in new_config.items(): + _logger.info('\t%s: %s' % (n, str(v))) + + return new_config diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/constants.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/constants.py new file mode 100644 index 0000000000..d6d4a01b03 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/constants.py @@ -0,0 +1,7 @@ +DEFAULT_CROP_PCT = 0.875 +IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) +IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) +IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5) +IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5) +IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255) +IMAGENET_DPN_STD = tuple([1 / (.0167 * 255)] * 3) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/dataset.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/dataset.py new file mode 100644 index 0000000000..e719f3f6d7 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/dataset.py @@ -0,0 +1,146 @@ +""" Quick n Simple Image Folder, Tarfile based DataSet + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch.utils.data as data +import os +import torch +import logging + +from PIL import Image + +from .parsers import create_parser + +_logger = logging.getLogger(__name__) + + +_ERROR_RETRY = 50 + + +class ImageDataset(data.Dataset): + + def __init__( + self, + root, + parser=None, + class_map='', + load_bytes=False, + transform=None, + ): + if parser is None or isinstance(parser, str): + parser = create_parser(parser or '', root=root, class_map=class_map) + self.parser = parser + self.load_bytes = load_bytes + self.transform = transform + self._consecutive_errors = 0 + + def __getitem__(self, index): + img, target = self.parser[index] + try: + img = img.read() if self.load_bytes else Image.open(img).convert('RGB') + except Exception as e: + _logger.warning(f'Skipped sample (index {index}, file {self.parser.filename(index)}). 
{str(e)}') + self._consecutive_errors += 1 + if self._consecutive_errors < _ERROR_RETRY: + return self.__getitem__((index + 1) % len(self.parser)) + else: + raise e + self._consecutive_errors = 0 + if self.transform is not None: + img = self.transform(img) + if target is None: + target = torch.tensor(-1, dtype=torch.long) + return img, target + + def __len__(self): + return len(self.parser) + + def filename(self, index, basename=False, absolute=False): + return self.parser.filename(index, basename, absolute) + + def filenames(self, basename=False, absolute=False): + return self.parser.filenames(basename, absolute) + + +class IterableImageDataset(data.IterableDataset): + + def __init__( + self, + root, + parser=None, + split='train', + is_training=False, + batch_size=None, + class_map='', + load_bytes=False, + repeats=0, + transform=None, + ): + assert parser is not None + if isinstance(parser, str): + self.parser = create_parser( + parser, root=root, split=split, is_training=is_training, batch_size=batch_size, repeats=repeats) + else: + self.parser = parser + self.transform = transform + self._consecutive_errors = 0 + + def __iter__(self): + for img, target in self.parser: + if self.transform is not None: + img = self.transform(img) + if target is None: + target = torch.tensor(-1, dtype=torch.long) + yield img, target + + def __len__(self): + if hasattr(self.parser, '__len__'): + return len(self.parser) + else: + return 0 + + def filename(self, index, basename=False, absolute=False): + assert False, 'Filename lookup by index not supported, use filenames().' + + def filenames(self, basename=False, absolute=False): + return self.parser.filenames(basename, absolute) + + +class AugMixDataset(torch.utils.data.Dataset): + """Dataset wrapper to perform AugMix or other clean/augmentation mixes""" + + def __init__(self, dataset, num_splits=2): + self.augmentation = None + self.normalize = None + self.dataset = dataset + if self.dataset.transform is not None: + self._set_transforms(self.dataset.transform) + self.num_splits = num_splits + + def _set_transforms(self, x): + assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms' + self.dataset.transform = x[0] + self.augmentation = x[1] + self.normalize = x[2] + + @property + def transform(self): + return self.dataset.transform + + @transform.setter + def transform(self, x): + self._set_transforms(x) + + def _normalize(self, x): + return x if self.normalize is None else self.normalize(x) + + def __getitem__(self, i): + x, y = self.dataset[i] # all splits share the same dataset base transform + x_list = [self._normalize(x)] # first split only normalizes (this is the 'clean' split) + # run the full augmentation on the remaining splits + for _ in range(self.num_splits - 1): + x_list.append(self._normalize(self.augmentation(x))) + return tuple(x_list), y + + def __len__(self): + return len(self.dataset) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/dataset_factory.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/dataset_factory.py new file mode 100644 index 0000000000..ccc99d5c2c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/dataset_factory.py @@ -0,0 +1,30 @@ +import os + +from .dataset import IterableImageDataset, ImageDataset + + +def _search_split(root, split): + # look for sub-folder with name of split in root and use that if it exists + split_name = split.split('[')[0] + try_root = os.path.join(root, split_name) + if os.path.exists(try_root): + return 
try_root + if split_name == 'validation': + try_root = os.path.join(root, 'val') + if os.path.exists(try_root): + return try_root + return root + + +def create_dataset(name, root, split='validation', search_split=True, is_training=False, batch_size=None, **kwargs): + name = name.lower() + if name.startswith('tfds'): + ds = IterableImageDataset( + root, parser=name, split=split, is_training=is_training, batch_size=batch_size, **kwargs) + else: + # FIXME support more advance split cfg for ImageFolder/Tar datasets in the future + kwargs.pop('repeats', 0) # FIXME currently only Iterable dataset support the repeat multiplier + if search_split and os.path.isdir(root): + root = _search_split(root, split) + ds = ImageDataset(root, parser=name, **kwargs) + return ds diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/distributed_sampler.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/distributed_sampler.py new file mode 100644 index 0000000000..fa403d0acc --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/distributed_sampler.py @@ -0,0 +1,128 @@ +import math +import torch +from torch.utils.data import Sampler +import torch.distributed as dist + + +class OrderedDistributedSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset. + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + .. note:: + Dataset is assumed to be of constant size. + Arguments: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. + """ + + def __init__(self, dataset, num_replicas=None, rank=None): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + + def __iter__(self): + indices = list(range(len(self.dataset))) + + # add extra samples to make it evenly divisible + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + +class RepeatAugSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset for distributed, + with repeated augmentation. + It ensures that different each augmented version of a sample will be visible to a + different process (GPU). Heavily based on torch.utils.data.DistributedSampler + + This sampler was taken from https://github.com/facebookresearch/deit/blob/0c4b8f60/samplers.py + Used in + Copyright (c) 2015-present, Facebook, Inc. 
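+
+    With the defaults (num_repeats=3, selected_round=256, selected_ratio=0 -> num_replicas), each rank
+    iterates over roughly len(dataset) // num_replicas indices per epoch, drawn from the repeated,
+    shuffled index list.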
+ """ + + def __init__( + self, + dataset, + num_replicas=None, + rank=None, + shuffle=True, + num_repeats=3, + selected_round=256, + selected_ratio=0, + ): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.shuffle = shuffle + self.num_repeats = num_repeats + self.epoch = 0 + self.num_samples = int(math.ceil(len(self.dataset) * num_repeats / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + # Determine the number of samples to select per epoch for each rank. + # num_selected logic defaults to be the same as original RASampler impl, but this one can be tweaked + # via selected_ratio and selected_round args. + selected_ratio = selected_ratio or num_replicas # ratio to reduce selected samples by, num_replicas if 0 + if selected_round: + self.num_selected_samples = int(math.floor( + len(self.dataset) // selected_round * selected_round / selected_ratio)) + else: + self.num_selected_samples = int(math.ceil(len(self.dataset) / selected_ratio)) + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + if self.shuffle: + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = list(range(len(self.dataset))) + + # produce repeats e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2....] + indices = [x for x in indices for _ in range(self.num_repeats)] + # add extra samples to make it evenly divisible + padding_size = self.total_size - len(indices) + indices += indices[:padding_size] + assert len(indices) == self.total_size + + # subsample per rank + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + # return up to num selected samples + return iter(indices[:self.num_selected_samples]) + + def __len__(self): + return self.num_selected_samples + + def set_epoch(self, epoch): + self.epoch = epoch \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/loader.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/loader.py new file mode 100644 index 0000000000..7bc66bdb57 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/loader.py @@ -0,0 +1,298 @@ +""" Loader Factory, Fast Collate, CUDA Prefetcher + +Prefetcher and Fast Collate inspired by NVIDIA APEX example at +https://github.com/NVIDIA/apex/commit/d5e2bb4bdeedd27b1dfaf5bb2b24d6c000dee9be#diff-cf86c282ff7fba81fad27a559379d5bf + +Hacked together by / Copyright 2021 Ross Wightman +""" +import random +from functools import partial +from typing import Callable + +import torch.utils.data +import numpy as np + +from .transforms_factory import create_transform +from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .distributed_sampler import OrderedDistributedSampler, RepeatAugSampler +from .random_erasing import RandomErasing +from .mixup import FastCollateMixup + + +def fast_collate(batch): + """ A fast collation function optimized for uint8 images (np array or torch) and int64 targets (labels)""" + assert isinstance(batch[0], tuple) + batch_size = len(batch) + if isinstance(batch[0][0], tuple): + # This branch 'deinterleaves' and flattens tuples of input tensors into one 
tensor ordered by position + # such that all tuple of position n will end up in a torch.split(tensor, batch_size) in nth position + inner_tuple_size = len(batch[0][0]) + flattened_batch_size = batch_size * inner_tuple_size + targets = torch.zeros(flattened_batch_size, dtype=torch.int64) + tensor = torch.zeros((flattened_batch_size, *batch[0][0][0].shape), dtype=torch.uint8) + for i in range(batch_size): + assert len(batch[i][0]) == inner_tuple_size # all input tensor tuples must be same length + for j in range(inner_tuple_size): + targets[i + j * batch_size] = batch[i][1] + tensor[i + j * batch_size] += torch.from_numpy(batch[i][0][j]) + return tensor, targets + elif isinstance(batch[0][0], np.ndarray): + targets = torch.tensor([b[1] for b in batch], dtype=torch.int64) + assert len(targets) == batch_size + tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) + for i in range(batch_size): + tensor[i] += torch.from_numpy(batch[i][0]) + return tensor, targets + elif isinstance(batch[0][0], torch.Tensor): + targets = torch.tensor([b[1] for b in batch], dtype=torch.int64) + assert len(targets) == batch_size + tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) + for i in range(batch_size): + tensor[i].copy_(batch[i][0]) + return tensor, targets + else: + assert False + + +class PrefetchLoader: + + def __init__(self, + loader, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + fp16=False, + re_prob=0., + re_mode='const', + re_count=1, + re_num_splits=0): + self.loader = loader + # self.mean = torch.tensor([x * 255 for x in mean]).cuda().view(1, 3, 1, 1) + # self.std = torch.tensor([x * 255 for x in std]).cuda().view(1, 3, 1, 1) + self.mean = torch.tensor([x * 255 for x in mean]).npu().view(1, 3, 1, 1) + self.std = torch.tensor([x * 255 for x in std]).npu().view(1, 3, 1, 1) + + self.fp16 = fp16 + if fp16: + self.mean = self.mean.half() + self.std = self.std.half() + if re_prob > 0.: + self.random_erasing = RandomErasing( + probability=re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='npu') # ==== + else: + self.random_erasing = None + + def __iter__(self): + # stream = torch.cuda.Stream() + stream = torch.npu.Stream() + + first = True + + for next_input, next_target in self.loader: + # with torch.cuda.stream(stream): + with torch.npu.stream(stream): + # next_input = next_input.cuda(non_blocking=True) + # next_target = next_target.cuda(non_blocking=True) + next_input = next_input.npu(non_blocking=True) + next_target = next_target.npu(non_blocking=True) + if self.fp16: + next_input = next_input.half().sub_(self.mean).div_(self.std) + else: + next_input = next_input.float().sub_(self.mean).div_(self.std) + if self.random_erasing is not None: + next_input = self.random_erasing(next_input) + + if not first: + yield input, target + else: + first = False + + # torch.cuda.current_stream().wait_stream(stream) + torch.npu.current_stream().wait_stream(stream) + input = next_input + target = next_target + + yield input, target + + def __len__(self): + return len(self.loader) + + @property + def sampler(self): + return self.loader.sampler + + @property + def dataset(self): + return self.loader.dataset + + @property + def mixup_enabled(self): + if isinstance(self.loader.collate_fn, FastCollateMixup): + return self.loader.collate_fn.mixup_enabled + else: + return False + + @mixup_enabled.setter + def mixup_enabled(self, x): + if isinstance(self.loader.collate_fn, FastCollateMixup): + self.loader.collate_fn.mixup_enabled = x + + +def 
_worker_init(worker_id, worker_seeding='all'): + worker_info = torch.utils.data.get_worker_info() + assert worker_info.id == worker_id + if isinstance(worker_seeding, Callable): + seed = worker_seeding(worker_info) + random.seed(seed) + torch.manual_seed(seed) + np.random.seed(seed % (2 ** 32 - 1)) + else: + assert worker_seeding in ('all', 'part') + # random / torch seed already called in dataloader iter class w/ worker_info.seed + # to reproduce some old results (same seed + hparam combo), partial seeding is required (skip numpy re-seed) + if worker_seeding == 'all': + np.random.seed(worker_info.seed % (2 ** 32 - 1)) + + +def create_loader( + dataset, + input_size, + batch_size, + is_training=False, + use_prefetcher=True, + no_aug=False, + re_prob=0., + re_mode='const', + re_count=1, + re_split=False, + scale=None, + ratio=None, + hflip=0.5, + vflip=0., + color_jitter=0.4, + auto_augment=None, + num_aug_repeats=0, + num_aug_splits=0, + interpolation='bilinear', + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + num_workers=1, + distributed=False, + crop_pct=None, + collate_fn=None, + pin_memory=False, + fp16=False, + tf_preprocessing=False, + use_multi_epochs_loader=False, + persistent_workers=True, + worker_seeding='all', +): + re_num_splits = 0 + if re_split: + # apply RE to second half of batch if no aug split otherwise line up with aug split + re_num_splits = num_aug_splits or 2 + dataset.transform = create_transform( + input_size, + is_training=is_training, + use_prefetcher=use_prefetcher, + no_aug=no_aug, + scale=scale, + ratio=ratio, + hflip=hflip, + vflip=vflip, + color_jitter=color_jitter, + auto_augment=auto_augment, + interpolation=interpolation, + mean=mean, + std=std, + crop_pct=crop_pct, + tf_preprocessing=tf_preprocessing, + re_prob=re_prob, + re_mode=re_mode, + re_count=re_count, + re_num_splits=re_num_splits, + separate=num_aug_splits > 0, + ) + + sampler = None + if distributed and not isinstance(dataset, torch.utils.data.IterableDataset): + if is_training: + if num_aug_repeats: + sampler = RepeatAugSampler(dataset, num_repeats=num_aug_repeats) + else: + sampler = torch.utils.data.distributed.DistributedSampler(dataset) + else: + # This will add extra duplicate entries to result in equal num + # of samples per-process, will slightly alter validation results + sampler = OrderedDistributedSampler(dataset) + else: + assert num_aug_repeats == 0, "RepeatAugment not currently supported in non-distributed or IterableDataset use" + + if collate_fn is None: + collate_fn = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate + + loader_class = torch.utils.data.DataLoader + if use_multi_epochs_loader: + loader_class = MultiEpochsDataLoader + + loader_args = dict( + batch_size=batch_size, + shuffle=not isinstance(dataset, torch.utils.data.IterableDataset) and sampler is None and is_training, + num_workers=num_workers, + sampler=sampler, + collate_fn=collate_fn, + pin_memory=pin_memory, + drop_last=is_training, + worker_init_fn=partial(_worker_init, worker_seeding=worker_seeding), + persistent_workers=persistent_workers + ) + try: + loader = loader_class(dataset, **loader_args) + except TypeError as e: + loader_args.pop('persistent_workers') # only in Pytorch 1.7+ + loader = loader_class(dataset, **loader_args) + if use_prefetcher: + prefetch_re_prob = re_prob if is_training and not no_aug else 0. 
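+        # NOTE with the prefetcher enabled, mean/std normalization and random erasing run on-device in
+        # PrefetchLoader (on the uint8 batches produced by fast_collate) rather than in the transform above.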
+ loader = PrefetchLoader( + loader, + mean=mean, + std=std, + fp16=fp16, + re_prob=prefetch_re_prob, + re_mode=re_mode, + re_count=re_count, + re_num_splits=re_num_splits + ) + + return loader + + +class MultiEpochsDataLoader(torch.utils.data.DataLoader): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._DataLoader__initialized = False + self.batch_sampler = _RepeatSampler(self.batch_sampler) + self._DataLoader__initialized = True + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for i in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler(object): + """ Sampler that repeats forever. + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/mixup.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/mixup.py new file mode 100644 index 0000000000..38477548a0 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/mixup.py @@ -0,0 +1,316 @@ +""" Mixup and Cutmix + +Papers: +mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412) + +CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899) + +Code Reference: +CutMix: https://github.com/clovaai/CutMix-PyTorch + +Hacked together by / Copyright 2020 Ross Wightman +""" +import numpy as np +import torch + + +def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'): + x = x.long().view(-1, 1) + return torch.full((x.size()[0], num_classes), off_value, device=device).scatter_(1, x, on_value) + + +def mixup_target(target, num_classes, lam=1., smoothing=0.0, device='cuda'): + off_value = smoothing / num_classes + on_value = 1. - smoothing + off_value + y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value, device=device) + y2 = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value, device=device) + return y1 * lam + y2 * (1. - lam) + + +def rand_bbox(img_shape, lam, margin=0., count=None): + """ Standard CutMix bounding-box + Generates a random square bbox based on lambda value. This impl includes + support for enforcing a border margin as percent of bbox dimensions. + + Args: + img_shape (tuple): Image shape as tuple + lam (float): Cutmix lambda value + margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image) + count (int): Number of bbox to generate + """ + ratio = np.sqrt(1 - lam) + img_h, img_w = img_shape[-2:] + cut_h, cut_w = int(img_h * ratio), int(img_w * ratio) + margin_y, margin_x = int(margin * cut_h), int(margin * cut_w) + cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count) + cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count) + yl = np.clip(cy - cut_h // 2, 0, img_h) + yh = np.clip(cy + cut_h // 2, 0, img_h) + xl = np.clip(cx - cut_w // 2, 0, img_w) + xh = np.clip(cx + cut_w // 2, 0, img_w) + return yl, yh, xl, xh + + +def rand_bbox_minmax(img_shape, minmax, count=None): + """ Min-Max CutMix bounding-box + Inspired by Darknet cutmix impl, generates a random rectangular bbox + based on min/max percent values applied to each dimension of the input image. + + Typical defaults for minmax are usually in the .2-.3 for min and .8-.9 range for max. 
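+    The cut height and width are sampled independently, so the effective mixing lambda is implied by the
+    resulting box area (recomputed in cutmix_bbox_and_lam below) rather than drawn directly.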
+ + Args: + img_shape (tuple): Image shape as tuple + minmax (tuple or list): Min and max bbox ratios (as percent of image size) + count (int): Number of bbox to generate + """ + assert len(minmax) == 2 + img_h, img_w = img_shape[-2:] + cut_h = np.random.randint(int(img_h * minmax[0]), int(img_h * minmax[1]), size=count) + cut_w = np.random.randint(int(img_w * minmax[0]), int(img_w * minmax[1]), size=count) + yl = np.random.randint(0, img_h - cut_h, size=count) + xl = np.random.randint(0, img_w - cut_w, size=count) + yu = yl + cut_h + xu = xl + cut_w + return yl, yu, xl, xu + + +def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None): + """ Generate bbox and apply lambda correction. + """ + if ratio_minmax is not None: + yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count) + else: + yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count) + if correct_lam or ratio_minmax is not None: + bbox_area = (yu - yl) * (xu - xl) + lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1]) + return (yl, yu, xl, xu), lam + + +class Mixup: + """ Mixup/Cutmix that applies different params to each element or whole batch + + Args: + mixup_alpha (float): mixup alpha value, mixup is active if > 0. + cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0. + cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None. + prob (float): probability of applying mixup or cutmix per batch or element + switch_prob (float): probability of switching to cutmix instead of mixup when both are active + mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element) + correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders + label_smoothing (float): apply label smoothing to the mixed target tensor + num_classes (int): number of classes for target + """ + def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5, + mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000): + self.mixup_alpha = mixup_alpha + self.cutmix_alpha = cutmix_alpha + self.cutmix_minmax = cutmix_minmax + if self.cutmix_minmax is not None: + assert len(self.cutmix_minmax) == 2 + # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe + self.cutmix_alpha = 1.0 + self.mix_prob = prob + self.switch_prob = switch_prob + self.label_smoothing = label_smoothing + self.num_classes = num_classes + self.mode = mode + self.correct_lam = correct_lam # correct lambda based on clipped area for cutmix + self.mixup_enabled = True # set to false to disable mixing (intended tp be set by train loop) + + def _params_per_elem(self, batch_size): + lam = np.ones(batch_size, dtype=np.float32) + use_cutmix = np.zeros(batch_size, dtype=np.bool) + if self.mixup_enabled: + if self.mixup_alpha > 0. 
and self.cutmix_alpha > 0.: + use_cutmix = np.random.rand(batch_size) < self.switch_prob + lam_mix = np.where( + use_cutmix, + np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size), + np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)) + elif self.mixup_alpha > 0.: + lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size) + elif self.cutmix_alpha > 0.: + use_cutmix = np.ones(batch_size, dtype=np.bool) + lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size) + else: + assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true." + lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam) + return lam, use_cutmix + + def _params_per_batch(self): + lam = 1. + use_cutmix = False + if self.mixup_enabled and np.random.rand() < self.mix_prob: + if self.mixup_alpha > 0. and self.cutmix_alpha > 0.: + use_cutmix = np.random.rand() < self.switch_prob + lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \ + np.random.beta(self.mixup_alpha, self.mixup_alpha) + elif self.mixup_alpha > 0.: + lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha) + elif self.cutmix_alpha > 0.: + use_cutmix = True + lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) + else: + assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true." + lam = float(lam_mix) + return lam, use_cutmix + + def _mix_elem(self, x): + batch_size = len(x) + lam_batch, use_cutmix = self._params_per_elem(batch_size) + x_orig = x.clone() # need to keep an unmodified original for mixing source + for i in range(batch_size): + j = batch_size - i - 1 + lam = lam_batch[i] + if lam != 1.: + if use_cutmix[i]: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh] + lam_batch[i] = lam + else: + x[i] = x[i] * lam + x_orig[j] * (1 - lam) + return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1) + + def _mix_pair(self, x): + batch_size = len(x) + lam_batch, use_cutmix = self._params_per_elem(batch_size // 2) + x_orig = x.clone() # need to keep an unmodified original for mixing source + for i in range(batch_size // 2): + j = batch_size - i - 1 + lam = lam_batch[i] + if lam != 1.: + if use_cutmix[i]: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh] + x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh] + lam_batch[i] = lam + else: + x[i] = x[i] * lam + x_orig[j] * (1 - lam) + x[j] = x[j] * lam + x_orig[i] * (1 - lam) + lam_batch = np.concatenate((lam_batch, lam_batch[::-1])) + return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1) + + def _mix_batch(self, x): + lam, use_cutmix = self._params_per_batch() + if lam == 1.: + return 1. + if use_cutmix: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh] + else: + x_flipped = x.flip(0).mul_(1. 
- lam) + x.mul_(lam).add_(x_flipped) + return lam + + def __call__(self, x, target): + assert len(x) % 2 == 0, 'Batch size should be even when using this' + if self.mode == 'elem': + lam = self._mix_elem(x) + elif self.mode == 'pair': + lam = self._mix_pair(x) + else: + lam = self._mix_batch(x) + target = mixup_target(target, self.num_classes, lam, self.label_smoothing) + return x, target + + +class FastCollateMixup(Mixup): + """ Fast Collate w/ Mixup/Cutmix that applies different params to each element or whole batch + + A Mixup impl that's performed while collating the batches. + """ + + def _mix_elem_collate(self, output, batch, half=False): + batch_size = len(batch) + num_elem = batch_size // 2 if half else batch_size + assert len(output) == num_elem + lam_batch, use_cutmix = self._params_per_elem(num_elem) + for i in range(num_elem): + j = batch_size - i - 1 + lam = lam_batch[i] + mixed = batch[i][0] + if lam != 1.: + if use_cutmix[i]: + if not half: + mixed = mixed.copy() + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh] + lam_batch[i] = lam + else: + mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam) + np.rint(mixed, out=mixed) + output[i] += torch.from_numpy(mixed.astype(np.uint8)) + if half: + lam_batch = np.concatenate((lam_batch, np.ones(num_elem))) + return torch.tensor(lam_batch).unsqueeze(1) + + def _mix_pair_collate(self, output, batch): + batch_size = len(batch) + lam_batch, use_cutmix = self._params_per_elem(batch_size // 2) + for i in range(batch_size // 2): + j = batch_size - i - 1 + lam = lam_batch[i] + mixed_i = batch[i][0] + mixed_j = batch[j][0] + assert 0 <= lam <= 1.0 + if lam < 1.: + if use_cutmix[i]: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + patch_i = mixed_i[:, yl:yh, xl:xh].copy() + mixed_i[:, yl:yh, xl:xh] = mixed_j[:, yl:yh, xl:xh] + mixed_j[:, yl:yh, xl:xh] = patch_i + lam_batch[i] = lam + else: + mixed_temp = mixed_i.astype(np.float32) * lam + mixed_j.astype(np.float32) * (1 - lam) + mixed_j = mixed_j.astype(np.float32) * lam + mixed_i.astype(np.float32) * (1 - lam) + mixed_i = mixed_temp + np.rint(mixed_j, out=mixed_j) + np.rint(mixed_i, out=mixed_i) + output[i] += torch.from_numpy(mixed_i.astype(np.uint8)) + output[j] += torch.from_numpy(mixed_j.astype(np.uint8)) + lam_batch = np.concatenate((lam_batch, lam_batch[::-1])) + return torch.tensor(lam_batch).unsqueeze(1) + + def _mix_batch_collate(self, output, batch): + batch_size = len(batch) + lam, use_cutmix = self._params_per_batch() + if use_cutmix: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + for i in range(batch_size): + j = batch_size - i - 1 + mixed = batch[i][0] + if lam != 1.: + if use_cutmix: + mixed = mixed.copy() # don't want to modify the original while iterating + mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh] + else: + mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam) + np.rint(mixed, out=mixed) + output[i] += torch.from_numpy(mixed.astype(np.uint8)) + return lam + + def __call__(self, batch, _=None): + batch_size = len(batch) + assert batch_size % 2 == 0, 'Batch size should be even when using this' + half = 'half' in self.mode + if half: + batch_size //= 2 + output = torch.zeros((batch_size, 
*batch[0][0].shape), dtype=torch.uint8) + if self.mode == 'elem' or self.mode == 'half': + lam = self._mix_elem_collate(output, batch, half=half) + elif self.mode == 'pair': + lam = self._mix_pair_collate(output, batch) + else: + lam = self._mix_batch_collate(output, batch) + target = torch.tensor([b[1] for b in batch], dtype=torch.int64) + target = mixup_target(target, self.num_classes, lam, self.label_smoothing, device='cpu') + target = target[:batch_size] + return output, target + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/__init__.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/__init__.py new file mode 100644 index 0000000000..eeb44e3714 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/__init__.py @@ -0,0 +1 @@ +from .parser_factory import create_parser diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/class_map.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/class_map.py new file mode 100644 index 0000000000..9ef4d1fab4 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/class_map.py @@ -0,0 +1,16 @@ +import os + + +def load_class_map(filename, root=''): + class_map_path = filename + if not os.path.exists(class_map_path): + class_map_path = os.path.join(root, filename) + assert os.path.exists(class_map_path), 'Cannot locate specified class map file (%s)' % filename + class_map_ext = os.path.splitext(filename)[-1].lower() + if class_map_ext == '.txt': + with open(class_map_path) as f: + class_to_idx = {v.strip(): k for k, v in enumerate(f)} + else: + assert False, 'Unsupported class map extension' + return class_to_idx + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/constants.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/constants.py new file mode 100644 index 0000000000..e7ba484e72 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/constants.py @@ -0,0 +1 @@ +IMG_EXTENSIONS = ('.png', '.jpg', '.jpeg') diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser.py new file mode 100644 index 0000000000..76ab6d1828 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser.py @@ -0,0 +1,17 @@ +from abc import abstractmethod + + +class Parser: + def __init__(self): + pass + + @abstractmethod + def _filename(self, index, basename=False, absolute=False): + pass + + def filename(self, index, basename=False, absolute=False): + return self._filename(index, basename=basename, absolute=absolute) + + def filenames(self, basename=False, absolute=False): + return [self._filename(index, basename=basename, absolute=absolute) for index in range(len(self))] + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_factory.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_factory.py new file mode 100644 index 0000000000..419ffe899b --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_factory.py @@ -0,0 +1,29 @@ +import os + +from .parser_image_folder import ParserImageFolder +from .parser_image_tar import ParserImageTar +from .parser_image_in_tar import ParserImageInTar + + +def create_parser(name, root, split='train', **kwargs): + name = name.lower() + name = name.split('/', 2) + prefix = '' + if len(name) > 1: + prefix = name[0] + name = name[-1] + + # FIXME 
improve the selection right now just tfds prefix or fallback path, will need options to + # explicitly select other options shortly + if prefix == 'tfds': + from .parser_tfds import ParserTfds # defer tensorflow import + parser = ParserTfds(root, name, split=split, shuffle=kwargs.pop('shuffle', False), **kwargs) + else: + assert os.path.exists(root) + # default fallback path (backwards compat), use image tar if root is a .tar file, otherwise image folder + # FIXME support split here, in parser? + if os.path.isfile(root) and os.path.splitext(root)[1] == '.tar': + parser = ParserImageInTar(root, **kwargs) + else: + parser = ParserImageFolder(root, **kwargs) + return parser diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_image_folder.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_image_folder.py new file mode 100644 index 0000000000..ed349009a4 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_image_folder.py @@ -0,0 +1,69 @@ +""" A dataset parser that reads images from folders + +Folders are scannerd recursively to find image files. Labels are based +on the folder hierarchy, just leaf folders by default. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import os + +from timm.utils.misc import natural_key + +from .parser import Parser +from .class_map import load_class_map +from .constants import IMG_EXTENSIONS + + +def find_images_and_targets(folder, types=IMG_EXTENSIONS, class_to_idx=None, leaf_name_only=True, sort=True): + labels = [] + filenames = [] + for root, subdirs, files in os.walk(folder, topdown=False, followlinks=True): + rel_path = os.path.relpath(root, folder) if (root != folder) else '' + label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_') + for f in files: + base, ext = os.path.splitext(f) + if ext.lower() in types: + filenames.append(os.path.join(root, f)) + labels.append(label) + if class_to_idx is None: + # building class index + unique_labels = set(labels) + sorted_labels = list(sorted(unique_labels, key=natural_key)) + class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} + images_and_targets = [(f, class_to_idx[l]) for f, l in zip(filenames, labels) if l in class_to_idx] + if sort: + images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0])) + return images_and_targets, class_to_idx + + +class ParserImageFolder(Parser): + + def __init__( + self, + root, + class_map=''): + super().__init__() + + self.root = root + class_to_idx = None + if class_map: + class_to_idx = load_class_map(class_map, root) + self.samples, self.class_to_idx = find_images_and_targets(root, class_to_idx=class_to_idx) + if len(self.samples) == 0: + raise RuntimeError( + f'Found 0 images in subfolders of {root}. 
Supported image extensions are {", ".join(IMG_EXTENSIONS)}') + + def __getitem__(self, index): + path, target = self.samples[index] + return open(path, 'rb'), target + + def __len__(self): + return len(self.samples) + + def _filename(self, index, basename=False, absolute=False): + filename = self.samples[index][0] + if basename: + filename = os.path.basename(filename) + elif not absolute: + filename = os.path.relpath(filename, self.root) + return filename diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_image_in_tar.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_image_in_tar.py new file mode 100644 index 0000000000..c6ada962ca --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_image_in_tar.py @@ -0,0 +1,222 @@ +""" A dataset parser that reads tarfile based datasets + +This parser can read and extract image samples from: +* a single tar of image files +* a folder of multiple tarfiles containing imagefiles +* a tar of tars containing image files + +Labels are based on the combined folder and/or tar name structure. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import os +import tarfile +import pickle +import logging +import numpy as np +from glob import glob +from typing import List, Dict + +from timm.utils.misc import natural_key + +from .parser import Parser +from .class_map import load_class_map +from .constants import IMG_EXTENSIONS + + +_logger = logging.getLogger(__name__) +CACHE_FILENAME_SUFFIX = '_tarinfos.pickle' + + +class TarState: + + def __init__(self, tf: tarfile.TarFile = None, ti: tarfile.TarInfo = None): + self.tf: tarfile.TarFile = tf + self.ti: tarfile.TarInfo = ti + self.children: Dict[str, TarState] = {} # child states (tars within tars) + + def reset(self): + self.tf = None + + +def _extract_tarinfo(tf: tarfile.TarFile, parent_info: Dict, extensions=IMG_EXTENSIONS): + sample_count = 0 + for i, ti in enumerate(tf): + if not ti.isfile(): + continue + dirname, basename = os.path.split(ti.path) + name, ext = os.path.splitext(basename) + ext = ext.lower() + if ext == '.tar': + with tarfile.open(fileobj=tf.extractfile(ti), mode='r|') as ctf: + child_info = dict( + name=ti.name, path=os.path.join(parent_info['path'], name), ti=ti, children=[], samples=[]) + sample_count += _extract_tarinfo(ctf, child_info, extensions=extensions) + _logger.debug(f'{i}/?. Extracted child tarinfos from {ti.name}. {len(child_info["samples"])} images.') + parent_info['children'].append(child_info) + elif ext in extensions: + parent_info['samples'].append(ti) + sample_count += 1 + return sample_count + + +def extract_tarinfos(root, class_name_to_idx=None, cache_tarinfo=None, extensions=IMG_EXTENSIONS, sort=True): + root_is_tar = False + if os.path.isfile(root): + assert os.path.splitext(root)[-1].lower() == '.tar' + tar_filenames = [root] + root, root_name = os.path.split(root) + root_name = os.path.splitext(root_name)[0] + root_is_tar = True + else: + root_name = root.strip(os.path.sep).split(os.path.sep)[-1] + tar_filenames = glob(os.path.join(root, '*.tar'), recursive=True) + num_tars = len(tar_filenames) + tar_bytes = sum([os.path.getsize(f) for f in tar_filenames]) + assert num_tars, f'No .tar files found at specified path ({root}).' 
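+ # At this point `root` is a directory and `tar_filenames` holds either the single .tar that
+ # was passed in (root_is_tar=True) or every *.tar shard found directly under that folder.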
+ + _logger.info(f'Scanning {tar_bytes/1024**2:.2f}MB of tar files...') + info = dict(tartrees=[]) + cache_path = '' + if cache_tarinfo is None: + cache_tarinfo = True if tar_bytes > 10*1024**3 else False # FIXME magic number, 10GB + if cache_tarinfo: + cache_filename = '_' + root_name + CACHE_FILENAME_SUFFIX + cache_path = os.path.join(root, cache_filename) + if os.path.exists(cache_path): + _logger.info(f'Reading tar info from cache file {cache_path}.') + with open(cache_path, 'rb') as pf: + info = pickle.load(pf) + assert len(info['tartrees']) == num_tars, "Cached tartree len doesn't match number of tarfiles" + else: + for i, fn in enumerate(tar_filenames): + path = '' if root_is_tar else os.path.splitext(os.path.basename(fn))[0] + with tarfile.open(fn, mode='r|') as tf: # tarinfo scans done in streaming mode + parent_info = dict(name=os.path.relpath(fn, root), path=path, ti=None, children=[], samples=[]) + num_samples = _extract_tarinfo(tf, parent_info, extensions=extensions) + num_children = len(parent_info["children"]) + _logger.debug( + f'{i}/{num_tars}. Extracted tarinfos from {fn}. {num_children} children, {num_samples} samples.') + info['tartrees'].append(parent_info) + if cache_path: + _logger.info(f'Writing tar info to cache file {cache_path}.') + with open(cache_path, 'wb') as pf: + pickle.dump(info, pf) + + samples = [] + labels = [] + build_class_map = False + if class_name_to_idx is None: + build_class_map = True + + # Flatten tartree info into lists of samples and targets w/ targets based on label id via + # class map arg or from unique paths. + # NOTE: currently only flattening up to two-levels, filesystem .tars and then one level of sub-tar children + # this covers my current use cases and keeps things a little easier to test for now. 
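+ # Illustrative (hypothetical) layouts covered by this two-level flattening:
+ #   root/class_a.tar                          -> images inside labelled 'class_a'
+ #   root/shard_0.tar containing class_b.tar   -> images inside labelled 'class_b'
+ # i.e. labels follow the "combined folder and/or tar name structure" noted in the module docstring.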
+ tarfiles = [] + + def _label_from_paths(*path, leaf_only=True): + path = os.path.join(*path).strip(os.path.sep) + return path.split(os.path.sep)[-1] if leaf_only else path.replace(os.path.sep, '_') + + def _add_samples(info, fn): + added = 0 + for s in info['samples']: + label = _label_from_paths(info['path'], os.path.dirname(s.path)) + if not build_class_map and label not in class_name_to_idx: + continue + samples.append((s, fn, info['ti'])) + labels.append(label) + added += 1 + return added + + _logger.info(f'Collecting samples and building tar states.') + for parent_info in info['tartrees']: + # if tartree has children, we assume all samples are at the child level + tar_name = None if root_is_tar else parent_info['name'] + tar_state = TarState() + parent_added = 0 + for child_info in parent_info['children']: + child_added = _add_samples(child_info, fn=tar_name) + if child_added: + tar_state.children[child_info['name']] = TarState(ti=child_info['ti']) + parent_added += child_added + parent_added += _add_samples(parent_info, fn=tar_name) + if parent_added: + tarfiles.append((tar_name, tar_state)) + del info + + if build_class_map: + # build class index + sorted_labels = list(sorted(set(labels), key=natural_key)) + class_name_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} + + _logger.info(f'Mapping targets and sorting samples.') + samples_and_targets = [(s, class_name_to_idx[l]) for s, l in zip(samples, labels) if l in class_name_to_idx] + if sort: + samples_and_targets = sorted(samples_and_targets, key=lambda k: natural_key(k[0][0].path)) + samples, targets = zip(*samples_and_targets) + samples = np.array(samples) + targets = np.array(targets) + _logger.info(f'Finished processing {len(samples)} samples across {len(tarfiles)} tar files.') + return samples, targets, class_name_to_idx, tarfiles + + +class ParserImageInTar(Parser): + """ Multi-tarfile dataset parser where there is one .tar file per class + """ + + def __init__(self, root, class_map='', cache_tarfiles=True, cache_tarinfo=None): + super().__init__() + + class_name_to_idx = None + if class_map: + class_name_to_idx = load_class_map(class_map, root) + self.root = root + self.samples, self.targets, self.class_name_to_idx, tarfiles = extract_tarinfos( + self.root, + class_name_to_idx=class_name_to_idx, + cache_tarinfo=cache_tarinfo, + extensions=IMG_EXTENSIONS) + self.class_idx_to_name = {v: k for k, v in self.class_name_to_idx.items()} + if len(tarfiles) == 1 and tarfiles[0][0] is None: + self.root_is_tar = True + self.tar_state = tarfiles[0][1] + else: + self.root_is_tar = False + self.tar_state = dict(tarfiles) + self.cache_tarfiles = cache_tarfiles + + def __len__(self): + return len(self.samples) + + def __getitem__(self, index): + sample = self.samples[index] + target = self.targets[index] + sample_ti, parent_fn, child_ti = sample + parent_abs = os.path.join(self.root, parent_fn) if parent_fn else self.root + + tf = None + cache_state = None + if self.cache_tarfiles: + cache_state = self.tar_state if self.root_is_tar else self.tar_state[parent_fn] + tf = cache_state.tf + if tf is None: + tf = tarfile.open(parent_abs) + if self.cache_tarfiles: + cache_state.tf = tf + if child_ti is not None: + ctf = cache_state.children[child_ti.name].tf if self.cache_tarfiles else None + if ctf is None: + ctf = tarfile.open(fileobj=tf.extractfile(child_ti)) + if self.cache_tarfiles: + cache_state.children[child_ti.name].tf = ctf + tf = ctf + + return tf.extractfile(sample_ti), target + + def _filename(self, index, basename=False, 
absolute=False): + filename = self.samples[index][0].name + if basename: + filename = os.path.basename(filename) + return filename diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_image_tar.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_image_tar.py new file mode 100644 index 0000000000..467537f479 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_image_tar.py @@ -0,0 +1,72 @@ +""" A dataset parser that reads single tarfile based datasets + +This parser can read datasets consisting of a single tarfile containing images. +I am planning to deprecate it in favour of ParserImageInTar. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import os +import tarfile + +from .parser import Parser +from .class_map import load_class_map +from .constants import IMG_EXTENSIONS +from timm.utils.misc import natural_key + + +def extract_tarinfo(tarfile, class_to_idx=None, sort=True): + files = [] + labels = [] + for ti in tarfile.getmembers(): + if not ti.isfile(): + continue + dirname, basename = os.path.split(ti.path) + label = os.path.basename(dirname) + ext = os.path.splitext(basename)[1] + if ext.lower() in IMG_EXTENSIONS: + files.append(ti) + labels.append(label) + if class_to_idx is None: + unique_labels = set(labels) + sorted_labels = list(sorted(unique_labels, key=natural_key)) + class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} + tarinfo_and_targets = [(f, class_to_idx[l]) for f, l in zip(files, labels) if l in class_to_idx] + if sort: + tarinfo_and_targets = sorted(tarinfo_and_targets, key=lambda k: natural_key(k[0].path)) + return tarinfo_and_targets, class_to_idx + + +class ParserImageTar(Parser): + """ Single tarfile dataset where classes are mapped to folders within tar + NOTE: This class is being deprecated in favour of the more capable ParserImageInTar that can + operate on folders of tars or tars in tars. + """ + def __init__(self, root, class_map=''): + super().__init__() + + class_to_idx = None + if class_map: + class_to_idx = load_class_map(class_map, root) + assert os.path.isfile(root) + self.root = root + + with tarfile.open(root) as tf: # cannot keep this open across processes, reopen later + self.samples, self.class_to_idx = extract_tarinfo(tf, class_to_idx) + self.imgs = self.samples + self.tarfile = None # lazy init in __getitem__ + + def __getitem__(self, index): + if self.tarfile is None: + self.tarfile = tarfile.open(self.root) + tarinfo, target = self.samples[index] + fileobj = self.tarfile.extractfile(tarinfo) + return fileobj, target + + def __len__(self): + return len(self.samples) + + def _filename(self, index, basename=False, absolute=False): + filename = self.samples[index][0].name + if basename: + filename = os.path.basename(filename) + return filename diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_tfds.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_tfds.py new file mode 100644 index 0000000000..2ff90b09f3 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/parsers/parser_tfds.py @@ -0,0 +1,223 @@ +""" Dataset parser interface that wraps TFDS datasets + +Wraps many (most?)
TFDS image-classification datasets +from https://github.com/tensorflow/datasets +https://www.tensorflow.org/datasets/catalog/overview#image_classification + +Hacked together by / Copyright 2020 Ross Wightman +""" +import os +import io +import math +import torch +import torch.distributed as dist +from PIL import Image + +try: + import tensorflow as tf + tf.config.set_visible_devices([], 'GPU') # Hands off my GPU! (or pip install tensorflow-cpu) + import tensorflow_datasets as tfds +except ImportError as e: + print(e) + print("Please install tensorflow_datasets package `pip install tensorflow-datasets`.") + exit(1) +from .parser import Parser + + +MAX_TP_SIZE = 8 # maximum TF threadpool size, only doing jpeg decodes and queuing activities +SHUFFLE_SIZE = 20480 # samples to shuffle in DS queue +PREFETCH_SIZE = 2048 # samples to prefetch + + +def even_split_indices(split, n, num_samples): + partitions = [round(i * num_samples / n) for i in range(n + 1)] + return [f"{split}[{partitions[i]}:{partitions[i+1]}]" for i in range(n)] + + +class ParserTfds(Parser): + """ Wrap Tensorflow Datasets for use in PyTorch + + There are several things to be aware of: + * To prevent excessive samples being dropped per epoch w/ distributed training or multiplicity of + dataloader workers, the train iterator wraps to avoid returning partial batches that trigger drop_last + https://github.com/pytorch/pytorch/issues/33413 + * With PyTorch IterableDatasets, each worker in each replica operates in isolation, the final batch + from each worker could be a different size. For training this is worked around by the option above, for + validation extra samples are inserted iff distributed mode is enabled so that the batches being reduced + across replicas are of the same size. This will slightly alter the results, distributed validation will not be + 100% correct. This is similar to common handling in DistributedSampler for normal Datasets but a bit worse + since there are up to N * J extra samples with IterableDatasets. + * The sharding (splitting of the dataset into TFRecord files) imposes limitations on the number of + replicas and dataloader workers you can use. For really small datasets that only contain a few shards + you may have to train non-distributed w/ 1-2 dataloader workers. This is likely not a huge concern as the + benefit of distributed training or fast dataloading should be much less for small datasets. + * This wrapper is currently configured to return individual, decompressed image samples from the TFDS + dataset. The augmentation (transforms) and batching is still done in PyTorch. It would be possible + to specify TF augmentation fn and return augmented batches w/ some modifications to other downstream + components. + + """ + def __init__(self, root, name, split='train', shuffle=False, is_training=False, batch_size=None, repeats=0): + super().__init__() + self.root = root + self.split = split + self.shuffle = shuffle + self.is_training = is_training + if self.is_training: + assert batch_size is not None,\ + "Must specify batch_size in training mode for reasonable behaviour w/ TFDS wrapper" + self.batch_size = batch_size + self.repeats = repeats + self.subsplit = None + + self.builder = tfds.builder(name, data_dir=root) + # NOTE: please use tfds command line app to download & prepare datasets, I don't want to call + # download_and_prepare() by default here as it's caused issues generating unwanted paths.
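+        # Hedged usage sketch (the dataset name, data_dir and batch size are hypothetical and
+        # assume the dataset was already downloaded/prepared via the tfds CLI, per the NOTE above):
+        #   parser = ParserTfds(root='/data/tfds', name='imagenet2012', split='train',
+        #                       shuffle=True, is_training=True, batch_size=256)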
+ self.num_samples = self.builder.info.splits[split].num_examples + self.ds = None # initialized lazily on each dataloader worker process + + self.worker_info = None + self.dist_rank = 0 + self.dist_num_replicas = 1 + if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1: + self.dist_rank = dist.get_rank() + self.dist_num_replicas = dist.get_world_size() + + def _lazy_init(self): + """ Lazily initialize the dataset. + + This is necessary to init the Tensorflow dataset pipeline in the (dataloader) process that + will be using the dataset instance. The __init__ method is called on the main process, + this will be called in a dataloader worker process. + + NOTE: There will be problems if you try to re-use this dataset across different loader/worker + instances once it has been initialized. Do not call any dataset methods that can call _lazy_init + before it is passed to dataloader. + """ + worker_info = torch.utils.data.get_worker_info() + + # setup input context to split dataset across distributed processes + split = self.split + num_workers = 1 + if worker_info is not None: + self.worker_info = worker_info + num_workers = worker_info.num_workers + global_num_workers = self.dist_num_replicas * num_workers + worker_id = worker_info.id + + # FIXME I need to spend more time figuring out the best way to distribute/split data across + # combo of distributed replicas + dataloader worker processes + """ + InputContext will assign subset of underlying TFRecord files to each 'pipeline' if used. + My understanding is that using split, the underling TFRecord files will shuffle (shuffle_files=True) + between the splits each iteration, but that understanding could be wrong. + Possible split options include: + * InputContext for both distributed & worker processes (current) + * InputContext for distributed and sub-splits for worker processes + * sub-splits for both + """ + # split_size = self.num_samples // num_workers + # start = worker_id * split_size + # if worker_id == num_workers - 1: + # split = split + '[{}:]'.format(start) + # else: + # split = split + '[{}:{}]'.format(start, start + split_size) + if not self.is_training and '[' not in self.split: + # If not training, and split doesn't define a subsplit, manually split the dataset + # for more even samples / worker + self.subsplit = even_split_indices(self.split, global_num_workers, self.num_samples)[ + self.dist_rank * num_workers + worker_id] + + if self.subsplit is None: + input_context = tf.distribute.InputContext( + num_input_pipelines=self.dist_num_replicas * num_workers, + input_pipeline_id=self.dist_rank * num_workers + worker_id, + num_replicas_in_sync=self.dist_num_replicas # FIXME does this arg have any impact? 
+ ) + else: + input_context = None + + read_config = tfds.ReadConfig( + shuffle_seed=42, + shuffle_reshuffle_each_iteration=True, + input_context=input_context) + ds = self.builder.as_dataset( + split=self.subsplit or self.split, shuffle_files=self.shuffle, read_config=read_config) + # avoid overloading threading w/ combo fo TF ds threads + PyTorch workers + options = tf.data.Options() + options.experimental_threading.private_threadpool_size = max(1, MAX_TP_SIZE // num_workers) + options.experimental_threading.max_intra_op_parallelism = 1 + ds = ds.with_options(options) + if self.is_training or self.repeats > 1: + # to prevent excessive drop_last batch behaviour w/ IterableDatasets + # see warnings at https://pytorch.org/docs/stable/data.html#multi-process-data-loading + ds = ds.repeat() # allow wrap around and break iteration manually + if self.shuffle: + ds = ds.shuffle(min(self.num_samples, SHUFFLE_SIZE) // self._num_pipelines, seed=0) + ds = ds.prefetch(min(self.num_samples // self._num_pipelines, PREFETCH_SIZE)) + self.ds = tfds.as_numpy(ds) + + def __iter__(self): + if self.ds is None: + self._lazy_init() + # compute a rounded up sample count that is used to: + # 1. make batches even cross workers & replicas in distributed validation. + # This adds extra samples and will slightly alter validation results. + # 2. determine loop ending condition in training w/ repeat enabled so that only full batch_size + # batches are produced (underlying tfds iter wraps around) + target_sample_count = math.ceil(max(1, self.repeats) * self.num_samples / self._num_pipelines) + if self.is_training: + # round up to nearest batch_size per worker-replica + target_sample_count = math.ceil(target_sample_count / self.batch_size) * self.batch_size + sample_count = 0 + for sample in self.ds: + img = Image.fromarray(sample['image'], mode='RGB') + yield img, sample['label'] + sample_count += 1 + if self.is_training and sample_count >= target_sample_count: + # Need to break out of loop when repeat() is enabled for training w/ oversampling + # this results in extra samples per epoch but seems more desirable than dropping + # up to N*J batches per epoch (where N = num distributed processes, and J = num worker processes) + break + if not self.is_training and self.dist_num_replicas and 0 < sample_count < target_sample_count: + # Validation batch padding only done for distributed training where results are reduced across nodes. + # For single process case, it won't matter if workers return different batch sizes. + # FIXME if using input_context or % based subsplits, sample count can vary by more than +/- 1 and this + # approach is not optimal + yield img, sample['label'] # yield prev sample again + sample_count += 1 + + @property + def _num_workers(self): + return 1 if self.worker_info is None else self.worker_info.num_workers + + @property + def _num_pipelines(self): + return self._num_workers * self.dist_num_replicas + + def __len__(self): + # this is just an estimate and does not factor in extra samples added to pad batches based on + # complete worker & replica info (not available until init in dataloader). 
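+        # e.g. (illustrative numbers) num_samples=50000, repeats=0, dist_num_replicas=2
+        #   -> len() = ceil(max(1, 0) * 50000 / 2) = 25000 reported per replica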
+ return math.ceil(max(1, self.repeats) * self.num_samples / self.dist_num_replicas) + + def _filename(self, index, basename=False, absolute=False): + assert False, "Not supported" # no random access to samples + + def filenames(self, basename=False, absolute=False): + """ Return all filenames in dataset, overrides base""" + if self.ds is None: + self._lazy_init() + names = [] + for sample in self.ds: + if len(names) > self.num_samples: + break # safety for ds.repeat() case + if 'file_name' in sample: + name = sample['file_name'] + elif 'filename' in sample: + name = sample['filename'] + elif 'id' in sample: + name = sample['id'] + else: + assert False, "No supported name field present" + names.append(name) + return names diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/random_erasing.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/random_erasing.py new file mode 100644 index 0000000000..78967d105d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/random_erasing.py @@ -0,0 +1,97 @@ +""" Random Erasing (Cutout) + +Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0 +Copyright Zhun Zhong & Liang Zheng + +Hacked together by / Copyright 2020 Ross Wightman +""" +import random +import math +import torch + + +def _get_pixels(per_pixel, rand_color, patch_size, dtype=torch.float32, device='cuda'): + # NOTE I've seen CUDA illegal memory access errors being caused by the normal_() + # paths, flip the order so normal is run on CPU if this becomes a problem + # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508 + if per_pixel: + return torch.empty(patch_size, dtype=dtype, device=device).normal_() + elif rand_color: + return torch.empty((patch_size[0], 1, 1), dtype=dtype, device=device).normal_() + else: + return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) + + +class RandomErasing: + """ Randomly selects a rectangle region in an image and erases its pixels. + 'Random Erasing Data Augmentation' by Zhong et al. + See https://arxiv.org/pdf/1708.04896.pdf + + This variant of RandomErasing is intended to be applied to either a batch + or single image tensor after it has been normalized by dataset mean and std. + Args: + probability: Probability that the Random Erasing operation will be performed. + min_area: Minimum percentage of erased area wrt input image area. + max_area: Maximum percentage of erased area wrt input image area. + min_aspect: Minimum aspect ratio of erased area. + mode: pixel color mode, one of 'const', 'rand', or 'pixel' + 'const' - erase block is constant color of 0 for all channels + 'rand' - erase block is same per-channel random (normal) color + 'pixel' - erase block is per-pixel random (normal) color + max_count: maximum number of erasing blocks per image, area per box is scaled by count. + per-image count is randomly chosen between 1 and this value. 
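+        Example (illustrative settings, not from the original docstring):
+            RandomErasing(probability=0.25, mode='pixel', device='cpu') appended to a transform
+            pipeline after normalization, operating on CHW (or batched NCHW) tensors.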
+ """ + + def __init__( + self, + probability=0.5, min_area=0.02, max_area=1/3, min_aspect=0.3, max_aspect=None, + mode='const', min_count=1, max_count=None, num_splits=0, device='cuda'): + self.probability = probability + self.min_area = min_area + self.max_area = max_area + max_aspect = max_aspect or 1 / min_aspect + self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) + self.min_count = min_count + self.max_count = max_count or min_count + self.num_splits = num_splits + mode = mode.lower() + self.rand_color = False + self.per_pixel = False + if mode == 'rand': + self.rand_color = True # per block random normal + elif mode == 'pixel': + self.per_pixel = True # per pixel random normal + else: + assert not mode or mode == 'const' + self.device = device + + def _erase(self, img, chan, img_h, img_w, dtype): + if random.random() > self.probability: + return + area = img_h * img_w + count = self.min_count if self.min_count == self.max_count else \ + random.randint(self.min_count, self.max_count) + for _ in range(count): + for attempt in range(10): + target_area = random.uniform(self.min_area, self.max_area) * area / count + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < img_w and h < img_h: + top = random.randint(0, img_h - h) + left = random.randint(0, img_w - w) + img[:, top:top + h, left:left + w] = _get_pixels( + self.per_pixel, self.rand_color, (chan, h, w), + dtype=dtype, device=self.device) + break + + def __call__(self, input): + if len(input.size()) == 3: + self._erase(input, *input.size(), input.dtype) + else: + batch_size, chan, img_h, img_w = input.size() + # skip first slice of batch if num_splits is set (for clean portion of samples) + batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0 + for i in range(batch_start, batch_size): + self._erase(input[i], chan, img_h, img_w, input.dtype) + return input diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/real_labels.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/real_labels.py new file mode 100644 index 0000000000..939c34867e --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/real_labels.py @@ -0,0 +1,42 @@ +""" Real labels evaluator for ImageNet +Paper: `Are we done with ImageNet?` - https://arxiv.org/abs/2006.07159 +Based on Numpy example at https://github.com/google-research/reassessed-imagenet + +Hacked together by / Copyright 2020 Ross Wightman +""" +import os +import json +import numpy as np + + +class RealLabelsImagenet: + + def __init__(self, filenames, real_json='real.json', topk=(1, 5)): + with open(real_json) as real_labels: + real_labels = json.load(real_labels) + real_labels = {f'ILSVRC2012_val_{i + 1:08d}.JPEG': labels for i, labels in enumerate(real_labels)} + self.real_labels = real_labels + self.filenames = filenames + assert len(self.filenames) == len(self.real_labels) + self.topk = topk + self.is_correct = {k: [] for k in topk} + self.sample_idx = 0 + + def add_result(self, output): + maxk = max(self.topk) + _, pred_batch = output.topk(maxk, 1, True, True) + pred_batch = pred_batch.cpu().numpy() + for pred in pred_batch: + filename = self.filenames[self.sample_idx] + filename = os.path.basename(filename) + if self.real_labels[filename]: + for k in self.topk: + self.is_correct[k].append( + any([p in self.real_labels[filename] for p in pred[:k]])) + self.sample_idx += 1 + + def 
get_accuracy(self, k=None): + if k is None: + return {k: float(np.mean(self.is_correct[k])) * 100 for k in self.topk} + else: + return float(np.mean(self.is_correct[k])) * 100 diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/tf_preprocessing.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/tf_preprocessing.py new file mode 100644 index 0000000000..44b4a3af73 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/tf_preprocessing.py @@ -0,0 +1,232 @@ +""" Tensorflow Preprocessing Adapter + +Allows use of Tensorflow preprocessing pipeline in PyTorch Transform + +Copyright of original Tensorflow code below. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""ImageNet preprocessing for MnasNet.""" +import tensorflow as tf +import numpy as np + +IMAGE_SIZE = 224 +CROP_PADDING = 32 + + +def distorted_bounding_box_crop(image_bytes, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.05, 1.0), + max_attempts=100, + scope=None): + """Generates cropped_image using one of the bboxes randomly distorted. + + See `tf.image.sample_distorted_bounding_box` for more documentation. + + Args: + image_bytes: `Tensor` of binary image data. + bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` + where each coordinate is [0, 1) and the coordinates are arranged + as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole + image. + min_object_covered: An optional `float`. Defaults to `0.1`. The cropped + area of the image must contain at least this fraction of any bounding + box supplied. + aspect_ratio_range: An optional list of `float`s. The cropped area of the + image must have an aspect ratio = width / height within this range. + area_range: An optional list of `float`s. The cropped area of the image + must contain a fraction of the supplied image within in this range. + max_attempts: An optional `int`. Number of attempts at generating a cropped + region of the image of the specified constraints. After `max_attempts` + failures, return the entire image. + scope: Optional `str` for name scope. + Returns: + cropped image `Tensor` + """ + with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]): + shape = tf.image.extract_jpeg_shape(image_bytes) + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + shape, + bounding_boxes=bbox, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + max_attempts=max_attempts, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, _ = sample_distorted_bounding_box + + # Crop the image to the specified bounding box. 
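+    # crop_window is [offset_y, offset_x, crop_height, crop_width], the layout expected by
+    # tf.image.decode_and_crop_jpeg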
+ offset_y, offset_x, _ = tf.unstack(bbox_begin) + target_height, target_width, _ = tf.unstack(bbox_size) + crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) + image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) + + return image + + +def _at_least_x_are_equal(a, b, x): + """At least `x` of `a` and `b` `Tensors` are equal.""" + match = tf.equal(a, b) + match = tf.cast(match, tf.int32) + return tf.greater_equal(tf.reduce_sum(match), x) + + +def _decode_and_random_crop(image_bytes, image_size, resize_method): + """Make a random crop of image_size.""" + bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) + image = distorted_bounding_box_crop( + image_bytes, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(3. / 4, 4. / 3.), + area_range=(0.08, 1.0), + max_attempts=10, + scope=None) + original_shape = tf.image.extract_jpeg_shape(image_bytes) + bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3) + + image = tf.cond( + bad, + lambda: _decode_and_center_crop(image_bytes, image_size), + lambda: tf.image.resize([image], [image_size, image_size], resize_method)[0]) + + return image + + +def _decode_and_center_crop(image_bytes, image_size, resize_method): + """Crops to center of image with padding then scales image_size.""" + shape = tf.image.extract_jpeg_shape(image_bytes) + image_height = shape[0] + image_width = shape[1] + + padded_center_crop_size = tf.cast( + ((image_size / (image_size + CROP_PADDING)) * + tf.cast(tf.minimum(image_height, image_width), tf.float32)), + tf.int32) + + offset_height = ((image_height - padded_center_crop_size) + 1) // 2 + offset_width = ((image_width - padded_center_crop_size) + 1) // 2 + crop_window = tf.stack([offset_height, offset_width, + padded_center_crop_size, padded_center_crop_size]) + image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) + image = tf.image.resize([image], [image_size, image_size], resize_method)[0] + + return image + + +def _flip(image): + """Random horizontal image flip.""" + image = tf.image.random_flip_left_right(image) + return image + + +def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'): + """Preprocesses the given image for evaluation. + + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + use_bfloat16: `bool` for whether to use bfloat16. + image_size: image size. + interpolation: image interpolation method + + Returns: + A preprocessed image `Tensor`. + """ + resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR + image = _decode_and_random_crop(image_bytes, image_size, resize_method) + image = _flip(image) + image = tf.reshape(image, [image_size, image_size, 3]) + image = tf.image.convert_image_dtype( + image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) + return image + + +def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'): + """Preprocesses the given image for evaluation. + + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + use_bfloat16: `bool` for whether to use bfloat16. + image_size: image size. + interpolation: image interpolation method + + Returns: + A preprocessed image `Tensor`. 
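+    Note (illustrative arithmetic): with the default image_size=224 and CROP_PADDING=32, the center
+    crop keeps image_size / (image_size + CROP_PADDING) = 224 / 256 = 0.875 of the shorter image
+    side before resizing to image_size x image_size.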
+ """ + resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR + image = _decode_and_center_crop(image_bytes, image_size, resize_method) + image = tf.reshape(image, [image_size, image_size, 3]) + image = tf.image.convert_image_dtype( + image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) + return image + + +def preprocess_image(image_bytes, + is_training=False, + use_bfloat16=False, + image_size=IMAGE_SIZE, + interpolation='bicubic'): + """Preprocesses the given image. + + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + is_training: `bool` for whether the preprocessing is for training. + use_bfloat16: `bool` for whether to use bfloat16. + image_size: image size. + interpolation: image interpolation method + + Returns: + A preprocessed image `Tensor` with value range of [0, 255]. + """ + if is_training: + return preprocess_for_train(image_bytes, use_bfloat16, image_size, interpolation) + else: + return preprocess_for_eval(image_bytes, use_bfloat16, image_size, interpolation) + + +class TfPreprocessTransform: + + def __init__(self, is_training=False, size=224, interpolation='bicubic'): + self.is_training = is_training + self.size = size[0] if isinstance(size, tuple) else size + self.interpolation = interpolation + self._image_bytes = None + self.process_image = self._build_tf_graph() + self.sess = None + + def _build_tf_graph(self): + with tf.device('/cpu:0'): + self._image_bytes = tf.placeholder( + shape=[], + dtype=tf.string, + ) + img = preprocess_image( + self._image_bytes, self.is_training, False, self.size, self.interpolation) + return img + + def __call__(self, image_bytes): + if self.sess is None: + self.sess = tf.Session() + img = self.sess.run(self.process_image, feed_dict={self._image_bytes: image_bytes}) + img = img.round().clip(0, 255).astype(np.uint8) + if img.ndim < 3: + img = np.expand_dims(img, axis=-1) + img = np.rollaxis(img, 2) # HWC to CHW + return img diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/transforms.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/transforms.py new file mode 100644 index 0000000000..4220304f66 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/transforms.py @@ -0,0 +1,158 @@ +import torch +import torchvision.transforms.functional as F +from PIL import Image +import warnings +import math +import random +import numpy as np + + +class ToNumpy: + + def __call__(self, pil_img): + np_img = np.array(pil_img, dtype=np.uint8) + if np_img.ndim < 3: + np_img = np.expand_dims(np_img, axis=-1) + np_img = np.rollaxis(np_img, 2) # HWC to CHW + return np_img + + +class ToTensor: + + def __init__(self, dtype=torch.float32): + self.dtype = dtype + + def __call__(self, pil_img): + np_img = np.array(pil_img, dtype=np.uint8) + if np_img.ndim < 3: + np_img = np.expand_dims(np_img, axis=-1) + np_img = np.rollaxis(np_img, 2) # HWC to CHW + return torch.from_numpy(np_img).to(dtype=self.dtype) + + +_pil_interpolation_to_str = { + Image.NEAREST: 'PIL.Image.NEAREST', + Image.BILINEAR: 'PIL.Image.BILINEAR', + Image.BICUBIC: 'PIL.Image.BICUBIC', + Image.LANCZOS: 'PIL.Image.LANCZOS', + Image.HAMMING: 'PIL.Image.HAMMING', + Image.BOX: 'PIL.Image.BOX', +} + + +def _pil_interp(method): + if method == 'bicubic': + return Image.BICUBIC + elif method == 'lanczos': + return Image.LANCZOS + elif method == 'hamming': + return Image.HAMMING + else: + # default bilinear, do we want to allow nearest? 
+ return Image.BILINEAR + + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +class RandomResizedCropAndInterpolation: + """Crop the given PIL Image to random size and aspect ratio with random interpolation. + + A crop of random size (default: of 0.08 to 1.0) of the original size and a random + aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop + is finally resized to given size. + This is popularly used to train the Inception networks. + + Args: + size: expected output size of each edge + scale: range of size of the origin size cropped + ratio: range of aspect ratio of the origin aspect ratio cropped + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), + interpolation='bilinear'): + if isinstance(size, (list, tuple)): + self.size = tuple(size) + else: + self.size = (size, size) + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + warnings.warn("range should be of kind (min, max)") + + if interpolation == 'random': + self.interpolation = _RANDOM_INTERPOLATION + else: + self.interpolation = _pil_interp(interpolation) + self.scale = scale + self.ratio = ratio + + @staticmethod + def get_params(img, scale, ratio): + """Get parameters for ``crop`` for a random sized crop. + + Args: + img (PIL Image): Image to be cropped. + scale (tuple): range of size of the origin size cropped + ratio (tuple): range of aspect ratio of the origin aspect ratio cropped + + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for a random + sized crop. + """ + area = img.size[0] * img.size[1] + + for attempt in range(10): + target_area = random.uniform(*scale) * area + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if w <= img.size[0] and h <= img.size[1]: + i = random.randint(0, img.size[1] - h) + j = random.randint(0, img.size[0] - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = img.size[0] / img.size[1] + if in_ratio < min(ratio): + w = img.size[0] + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = img.size[1] + w = int(round(h * max(ratio))) + else: # whole image + w = img.size[0] + h = img.size[1] + i = (img.size[1] - h) // 2 + j = (img.size[0] - w) // 2 + return i, j, h, w + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be cropped and resized. + + Returns: + PIL Image: Randomly cropped and resized image. 
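+        Example (illustrative): RandomResizedCropAndInterpolation(224, interpolation='random')
+            picks bilinear or bicubic at random per call before resizing the crop to 224x224.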
+ """ + i, j, h, w = self.get_params(img, self.scale, self.ratio) + if isinstance(self.interpolation, (tuple, list)): + interpolation = random.choice(self.interpolation) + else: + interpolation = self.interpolation + return F.resized_crop(img, i, j, h, w, self.size, interpolation) + + def __repr__(self): + if isinstance(self.interpolation, (tuple, list)): + interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in self.interpolation]) + else: + interpolate_str = _pil_interpolation_to_str[self.interpolation] + format_string = self.__class__.__name__ + '(size={0}'.format(self.size) + format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale)) + format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio)) + format_string += ', interpolation={0})'.format(interpolate_str) + return format_string + + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/data/transforms_factory.py b/PyTorch/contrib/cv/classification/convmixer/timm/data/transforms_factory.py new file mode 100644 index 0000000000..df6e0de033 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/data/transforms_factory.py @@ -0,0 +1,236 @@ +""" Transforms Factory +Factory methods for building image transforms for use with TIMM (PyTorch Image Models) + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math + +import torch +from torchvision import transforms + +from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT +from timm.data.auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform +from timm.data.transforms import _pil_interp, RandomResizedCropAndInterpolation, ToNumpy, ToTensor +from timm.data.random_erasing import RandomErasing + + +def transforms_noaug_train( + img_size=224, + interpolation='bilinear', + use_prefetcher=False, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, +): + if interpolation == 'random': + # random interpolation not supported with no-aug + interpolation = 'bilinear' + tfl = [ + transforms.Resize(img_size, _pil_interp(interpolation)), + transforms.CenterCrop(img_size) + ] + if use_prefetcher: + # prefetcher and collate will handle tensor conversion and norm + tfl += [ToNumpy()] + else: + tfl += [ + transforms.ToTensor(), + transforms.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std)) + ] + return transforms.Compose(tfl) + + +def transforms_imagenet_train( + img_size=224, + scale=None, + ratio=None, + hflip=0.5, + vflip=0., + color_jitter=0.4, + auto_augment=None, + interpolation='random', + use_prefetcher=False, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + re_prob=0., + re_mode='const', + re_count=1, + re_num_splits=0, + separate=False, +): + """ + If separate==True, the transforms are returned as a tuple of 3 separate transforms + for use in a mixing dataset that passes + * all data through the first (primary) transform, called the 'clean' data + * a portion of the data through the secondary transform + * normalizes and converts the branches above with the third, final transform + """ + scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range + ratio = tuple(ratio or (3./4., 4./3.)) # default imagenet ratio range + primary_tfl = [ + RandomResizedCropAndInterpolation(img_size, scale=scale, ratio=ratio, interpolation=interpolation)] + if hflip > 0.: + primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)] + if vflip > 0.: + primary_tfl += [transforms.RandomVerticalFlip(p=vflip)] + + secondary_tfl = [] + if 
auto_augment: + assert isinstance(auto_augment, str) + if isinstance(img_size, (tuple, list)): + img_size_min = min(img_size) + else: + img_size_min = img_size + aa_params = dict( + translate_const=int(img_size_min * 0.45), + img_mean=tuple([min(255, round(255 * x)) for x in mean]), + ) + if interpolation and interpolation != 'random': + aa_params['interpolation'] = _pil_interp(interpolation) + if auto_augment.startswith('rand'): + secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] + elif auto_augment.startswith('augmix'): + aa_params['translate_pct'] = 0.3 + secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)] + else: + secondary_tfl += [auto_augment_transform(auto_augment, aa_params)] + elif color_jitter is not None: + # color jitter is enabled when not using AA + if isinstance(color_jitter, (list, tuple)): + # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation + # or 4 if also augmenting hue + assert len(color_jitter) in (3, 4) + else: + # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue + color_jitter = (float(color_jitter),) * 3 + secondary_tfl += [transforms.ColorJitter(*color_jitter)] + + final_tfl = [] + if use_prefetcher: + # prefetcher and collate will handle tensor conversion and norm + final_tfl += [ToNumpy()] + else: + final_tfl += [ + transforms.ToTensor(), + transforms.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std)) + ] + if re_prob > 0.: + final_tfl.append( + RandomErasing(re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='cpu')) + + if separate: + return transforms.Compose(primary_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl) + else: + return transforms.Compose(primary_tfl + secondary_tfl + final_tfl) + + +def transforms_imagenet_eval( + img_size=224, + crop_pct=None, + interpolation='bilinear', + use_prefetcher=False, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD): + crop_pct = crop_pct or DEFAULT_CROP_PCT + + if isinstance(img_size, (tuple, list)): + assert len(img_size) == 2 + if img_size[-1] == img_size[-2]: + # fall-back to older behaviour so Resize scales to shortest edge if target is square + scale_size = int(math.floor(img_size[0] / crop_pct)) + else: + scale_size = tuple([int(x / crop_pct) for x in img_size]) + else: + scale_size = int(math.floor(img_size / crop_pct)) + + tfl = [ + transforms.Resize(scale_size, _pil_interp(interpolation)), + transforms.CenterCrop(img_size), + ] + if use_prefetcher: + # prefetcher and collate will handle tensor conversion and norm + tfl += [ToNumpy()] + else: + tfl += [ + transforms.ToTensor(), + transforms.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std)) + ] + + return transforms.Compose(tfl) + + +def create_transform( + input_size, + is_training=False, + use_prefetcher=False, + no_aug=False, + scale=None, + ratio=None, + hflip=0.5, + vflip=0., + color_jitter=0.4, + auto_augment=None, + interpolation='bilinear', + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + re_prob=0., + re_mode='const', + re_count=1, + re_num_splits=0, + crop_pct=None, + tf_preprocessing=False, + separate=False): + + if isinstance(input_size, (tuple, list)): + img_size = input_size[-2:] + else: + img_size = input_size + + if tf_preprocessing and use_prefetcher: + assert not separate, "Separate transforms not supported for TF preprocessing" + from timm.data.tf_preprocessing import TfPreprocessTransform + transform = TfPreprocessTransform( + is_training=is_training, 
size=img_size, interpolation=interpolation) + else: + if is_training and no_aug: + assert not separate, "Cannot perform split augmentation with no_aug" + transform = transforms_noaug_train( + img_size, + interpolation=interpolation, + use_prefetcher=use_prefetcher, + mean=mean, + std=std) + elif is_training: + transform = transforms_imagenet_train( + img_size, + scale=scale, + ratio=ratio, + hflip=hflip, + vflip=vflip, + color_jitter=color_jitter, + auto_augment=auto_augment, + interpolation=interpolation, + use_prefetcher=use_prefetcher, + mean=mean, + std=std, + re_prob=re_prob, + re_mode=re_mode, + re_count=re_count, + re_num_splits=re_num_splits, + separate=separate) + else: + assert not separate, "Separate transforms not supported for validation preprocessing" + transform = transforms_imagenet_eval( + img_size, + interpolation=interpolation, + use_prefetcher=use_prefetcher, + mean=mean, + std=std, + crop_pct=crop_pct) + + return transform diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/loss/__init__.py b/PyTorch/contrib/cv/classification/convmixer/timm/loss/__init__.py new file mode 100644 index 0000000000..ea7f15f2f7 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/loss/__init__.py @@ -0,0 +1,4 @@ +from .asymmetric_loss import AsymmetricLossMultiLabel, AsymmetricLossSingleLabel +from .binary_cross_entropy import BinaryCrossEntropy +from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy +from .jsd import JsdCrossEntropy diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/loss/asymmetric_loss.py b/PyTorch/contrib/cv/classification/convmixer/timm/loss/asymmetric_loss.py new file mode 100644 index 0000000000..a8b10f9c79 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/loss/asymmetric_loss.py @@ -0,0 +1,97 @@ +import torch +import torch.nn as nn + + +class AsymmetricLossMultiLabel(nn.Module): + def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False): + super(AsymmetricLossMultiLabel, self).__init__() + + self.gamma_neg = gamma_neg + self.gamma_pos = gamma_pos + self.clip = clip + self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss + self.eps = eps + + def forward(self, x, y): + """" + Parameters + ---------- + x: input logits + y: targets (multi-label binarized vector) + """ + + # Calculating Probabilities + x_sigmoid = torch.sigmoid(x) + xs_pos = x_sigmoid + xs_neg = 1 - x_sigmoid + + # Asymmetric Clipping + if self.clip is not None and self.clip > 0: + xs_neg = (xs_neg + self.clip).clamp(max=1) + + # Basic CE calculation + los_pos = y * torch.log(xs_pos.clamp(min=self.eps)) + los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps)) + loss = los_pos + los_neg + + # Asymmetric Focusing + if self.gamma_neg > 0 or self.gamma_pos > 0: + if self.disable_torch_grad_focal_loss: + torch._C.set_grad_enabled(False) + pt0 = xs_pos * y + pt1 = xs_neg * (1 - y) # pt = p if t > 0 else 1-p + pt = pt0 + pt1 + one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y) + one_sided_w = torch.pow(1 - pt, one_sided_gamma) + if self.disable_torch_grad_focal_loss: + torch._C.set_grad_enabled(True) + loss *= one_sided_w + + return -loss.sum() + + +class AsymmetricLossSingleLabel(nn.Module): + def __init__(self, gamma_pos=1, gamma_neg=4, eps: float = 0.1, reduction='mean'): + super(AsymmetricLossSingleLabel, self).__init__() + + self.eps = eps + self.logsoftmax = nn.LogSoftmax(dim=-1) + self.targets_classes = [] # prevent gpu repeated memory allocation + 
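+        # defaults gamma_neg=4 > gamma_pos=1: easy negatives are down-weighted more aggressively
+        # than the positive class by the asymmetric focusing term applied in forward()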
self.gamma_pos = gamma_pos + self.gamma_neg = gamma_neg + self.reduction = reduction + + def forward(self, inputs, target, reduction=None): + """" + Parameters + ---------- + x: input logits + y: targets (1-hot vector) + """ + + num_classes = inputs.size()[-1] + log_preds = self.logsoftmax(inputs) + self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1) + + # ASL weights + targets = self.targets_classes + anti_targets = 1 - targets + xs_pos = torch.exp(log_preds) + xs_neg = 1 - xs_pos + xs_pos = xs_pos * targets + xs_neg = xs_neg * anti_targets + asymmetric_w = torch.pow(1 - xs_pos - xs_neg, + self.gamma_pos * targets + self.gamma_neg * anti_targets) + log_preds = log_preds * asymmetric_w + + if self.eps > 0: # label smoothing + self.targets_classes.mul_(1 - self.eps).add_(self.eps / num_classes) + + # loss calculation + loss = - self.targets_classes.mul(log_preds) + + loss = loss.sum(dim=-1) + if self.reduction == 'mean': + loss = loss.mean() + + return loss diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/loss/binary_cross_entropy.py b/PyTorch/contrib/cv/classification/convmixer/timm/loss/binary_cross_entropy.py new file mode 100644 index 0000000000..ed76c1e8e0 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/loss/binary_cross_entropy.py @@ -0,0 +1,47 @@ +""" Binary Cross Entropy w/ a few extras + +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class BinaryCrossEntropy(nn.Module): + """ BCE with optional one-hot from dense targets, label smoothing, thresholding + NOTE for experiments comparing CE to BCE /w label smoothing, may remove + """ + def __init__( + self, smoothing=0.1, target_threshold: Optional[float] = None, weight: Optional[torch.Tensor] = None, + reduction: str = 'mean', pos_weight: Optional[torch.Tensor] = None): + super(BinaryCrossEntropy, self).__init__() + assert 0. <= smoothing < 1.0 + self.smoothing = smoothing + self.target_threshold = target_threshold + self.reduction = reduction + self.register_buffer('weight', weight) + self.register_buffer('pos_weight', pos_weight) + + def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + assert x.shape[0] == target.shape[0] + if target.shape != x.shape: + # NOTE currently assume smoothing or other label softening is applied upstream if targets are already sparse + num_classes = x.shape[-1] + # FIXME should off/on be different for smoothing w/ BCE? Other impl out there differ + off_value = self.smoothing / num_classes + on_value = 1. 
- self.smoothing + off_value + target = target.long().view(-1, 1) + target = torch.full( + (target.size()[0], num_classes), + off_value, + device=x.device, dtype=x.dtype).scatter_(1, target, on_value) + if self.target_threshold is not None: + # Make target 0, or 1 if threshold set + target = target.gt(self.target_threshold).to(dtype=target.dtype) + return F.binary_cross_entropy_with_logits( + x, target, + self.weight, + pos_weight=self.pos_weight, + reduction=self.reduction) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/loss/cross_entropy.py b/PyTorch/contrib/cv/classification/convmixer/timm/loss/cross_entropy.py new file mode 100644 index 0000000000..85198107f3 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/loss/cross_entropy.py @@ -0,0 +1,36 @@ +""" Cross Entropy w/ smoothing or soft targets + +Hacked together by / Copyright 2021 Ross Wightman +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class LabelSmoothingCrossEntropy(nn.Module): + """ NLL loss with label smoothing. + """ + def __init__(self, smoothing=0.1): + super(LabelSmoothingCrossEntropy, self).__init__() + assert smoothing < 1.0 + self.smoothing = smoothing + self.confidence = 1. - smoothing + + def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + logprobs = F.log_softmax(x, dim=-1) + nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) + nll_loss = nll_loss.squeeze(1) + smooth_loss = -logprobs.mean(dim=-1) + loss = self.confidence * nll_loss + self.smoothing * smooth_loss + return loss.mean() + + +class SoftTargetCrossEntropy(nn.Module): + + def __init__(self): + super(SoftTargetCrossEntropy, self).__init__() + + def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1) + return loss.mean() diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/loss/jsd.py b/PyTorch/contrib/cv/classification/convmixer/timm/loss/jsd.py new file mode 100644 index 0000000000..dd64e156c2 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/loss/jsd.py @@ -0,0 +1,39 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .cross_entropy import LabelSmoothingCrossEntropy + + +class JsdCrossEntropy(nn.Module): + """ Jensen-Shannon Divergence + Cross-Entropy Loss + + Based on impl here: https://github.com/google-research/augmix/blob/master/imagenet.py + From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - + https://arxiv.org/abs/1912.02781 + + Hacked together by / Copyright 2020 Ross Wightman + """ + def __init__(self, num_splits=3, alpha=12, smoothing=0.1): + super().__init__() + self.num_splits = num_splits + self.alpha = alpha + if smoothing is not None and smoothing > 0: + self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing) + else: + self.cross_entropy_loss = torch.nn.CrossEntropyLoss() + + def __call__(self, output, target): + split_size = output.shape[0] // self.num_splits + assert split_size * self.num_splits == output.shape[0] + logits_split = torch.split(output, split_size) + + # Cross-entropy is only computed on clean images + loss = self.cross_entropy_loss(logits_split[0], target[:split_size]) + probs = [F.softmax(logits, dim=1) for logits in logits_split] + + # Clamp mixture distribution to avoid exploding KL divergence + logp_mixture = torch.clamp(torch.stack(probs).mean(axis=0), 1e-7, 1).log() + loss += self.alpha * sum([F.kl_div( + logp_mixture, p_split, 
reduction='batchmean') for p_split in probs]) / len(probs) + return loss diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/__init__.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/__init__.py new file mode 100644 index 0000000000..0982b6e132 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/__init__.py @@ -0,0 +1,58 @@ +from .beit import * +from .byoanet import * +from .byobnet import * +from .cait import * +from .coat import * +from .convit import * +from .convmixer import * +from .crossvit import * +from .cspnet import * +from .densenet import * +from .dla import * +from .dpn import * +from .efficientnet import * +from .ghostnet import * +from .gluon_resnet import * +from .gluon_xception import * +from .hardcorenas import * +from .hrnet import * +from .inception_resnet_v2 import * +from .inception_v3 import * +from .inception_v4 import * +from .levit import * +from .mlp_mixer import * +from .mobilenetv3 import * +from .nasnet import * +from .nest import * +from .nfnet import * +from .pit import * +from .pnasnet import * +from .regnet import * +from .res2net import * +from .resnest import * +from .resnet import * +from .resnetv2 import * +from .rexnet import * +from .selecsls import * +from .senet import * +from .sknet import * +from .swin_transformer import * +from .tnt import * +from .tresnet import * +from .twins import * +from .vgg import * +from .visformer import * +from .vision_transformer import * +from .vision_transformer_hybrid import * +from .vovnet import * +from .xception import * +from .xception_aligned import * +from .xcit import * + +from .factory import create_model, split_model_name, safe_model_name +from .helpers import load_checkpoint, resume_checkpoint, model_parameters +from .layers import TestTimePoolHead, apply_test_time_pool +from .layers import convert_splitbn_model +from .layers import is_scriptable, is_exportable, set_scriptable, set_exportable, is_no_jit, set_no_jit +from .registry import register_model, model_entrypoint, list_models, is_model, list_modules, is_model_in_modules,\ + has_model_default_key, is_model_default_key, get_model_default_value, is_model_pretrained diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/beit.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/beit.py new file mode 100644 index 0000000000..e8d1dd2c7e --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/beit.py @@ -0,0 +1,420 @@ +""" BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) + +Model from official source: https://github.com/microsoft/unilm/tree/master/beit + +At this point only the 1k fine-tuned classification weights and model configs have been added, +see original source above for pre-training models and procedure. 
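+
+A minimal usage sketch (hypothetical example; it assumes this repo's bundled timm package is
+importable and uses one of the model names registered at the bottom of this file):
+
+    import torch
+    import timm
+
+    model = timm.create_model('beit_base_patch16_224', pretrained=False)
+    model.eval()
+    with torch.no_grad():
+        logits = model(torch.randn(1, 3, 224, 224))  # -> (1, 1000) class logits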
+ +Modifications by / Copyright 2021 Ross Wightman, original copyrights below +""" +# -------------------------------------------------------- +# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) +# Github source: https://github.com/microsoft/unilm/tree/master/beit +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# By Hangbo Bao +# Based on timm and DeiT code bases +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit/ +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import math +from functools import partial +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_ +from .registry import register_model +from .vision_transformer import checkpoint_filter_fn + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'beit_base_patch16_224': _cfg( + url='https://unilm.blob.core.windows.net/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth'), + 'beit_base_patch16_384': _cfg( + url='https://unilm.blob.core.windows.net/beit/beit_base_patch16_384_pt22k_ft22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0, + ), + 'beit_base_patch16_224_in22k': _cfg( + url='https://unilm.blob.core.windows.net/beit/beit_base_patch16_224_pt22k_ft22k.pth', + num_classes=21841, + ), + 'beit_large_patch16_224': _cfg( + url='https://unilm.blob.core.windows.net/beit/beit_large_patch16_224_pt22k_ft22kto1k.pth'), + 'beit_large_patch16_384': _cfg( + url='https://unilm.blob.core.windows.net/beit/beit_large_patch16_384_pt22k_ft22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0, + ), + 'beit_large_patch16_512': _cfg( + url='https://unilm.blob.core.windows.net/beit/beit_large_patch16_512_pt22k_ft22kto1k.pth', + input_size=(3, 512, 512), crop_pct=1.0, + ), + 'beit_large_patch16_224_in22k': _cfg( + url='https://unilm.blob.core.windows.net/beit/beit_large_patch16_224_pt22k_ft22k.pth', + num_classes=21841, + ), +} + + +class Attention(nn.Module): + def __init__( + self, dim, num_heads=8, qkv_bias=False, attn_drop=0., + proj_drop=0., window_size=None, attn_head_dim=None): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.v_bias = None + + if window_size: + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to cls + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = 
torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", relative_position_index) + else: + self.window_size = None + self.relative_position_bias_table = None + self.relative_position_index = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, rel_pos_bias: Optional[torch.Tensor] = None): + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + if torch.jit.is_scripting(): + # FIXME requires_grad breaks w/ torchscript + qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias), self.v_bias)) + else: + qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if self.relative_position_bias_table is not None: + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if rel_pos_bias is not None: + attn = attn + rel_pos_bias + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, + window_size=None, attn_head_dim=None): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + window_size=window_size, attn_head_dim=attn_head_dim) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if init_values: + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x, rel_pos_bias: Optional[torch.Tensor] = None): + if self.gamma_1 is None: + x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class RelativePositionBias(nn.Module): + + def __init__(self, window_size, num_heads): + super().__init__() + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to cls + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", relative_position_index) + + # trunc_normal_(self.relative_position_bias_table, std=.02) + + def forward(self): + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + + +class Beit(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=None, + use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, + use_mean_pooling=True, init_scale=0.001): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = 
self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + if use_abs_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + else: + self.pos_embed = None + self.pos_drop = nn.Dropout(p=drop_rate) + + if use_shared_rel_pos_bias: + self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.grid_size, num_heads=num_heads) + else: + self.rel_pos_bias = None + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.use_rel_pos_bias = use_rel_pos_bias + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + init_values=init_values, window_size=self.patch_embed.grid_size if use_rel_pos_bias else None) + for i in range(depth)]) + self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) + self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + if self.pos_embed is not None: + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + # trunc_normal_(self.mask_token, std=.02) + self.fix_init_weight() + if isinstance(self.head, nn.Linear): + trunc_normal_(self.head.weight, std=.02) + self.head.weight.data.mul_(init_scale) + self.head.bias.data.mul_(init_scale) + + def fix_init_weight(self): + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + batch_size, seq_len, _ = x.size() + + cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed + x = self.pos_drop(x) + + rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None + for blk in self.blocks: + x = blk(x, rel_pos_bias=rel_pos_bias) + + x = self.norm(x) + if self.fc_norm is not None: + t = x[:, 1:, :] + return self.fc_norm(t.mean(1)) + else: + return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_beit(variant, pretrained=False, default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Beit models.') + + model = build_model_with_cfg( + Beit, variant, pretrained, + default_cfg=default_cfg, + # FIXME an updated filter fn 
needed to interpolate rel pos emb if fine tuning to diff model sizes + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def beit_base_patch16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs) + model = _create_beit('beit_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_base_patch16_384(pretrained=False, **kwargs): + model_kwargs = dict( + img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs) + model = _create_beit('beit_base_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_base_patch16_224_in22k(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs) + model = _create_beit('beit_base_patch16_224_in22k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_384(pretrained=False, **kwargs): + model_kwargs = dict( + img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_512(pretrained=False, **kwargs): + model_kwargs = dict( + img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_512', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_224_in22k(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_224_in22k', pretrained=pretrained, **model_kwargs) + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/byoanet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/byoanet.py new file mode 100644 index 0000000000..61f94490be --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/byoanet.py @@ -0,0 +1,329 @@ +""" Bring-Your-Own-Attention Network + +A flexible network w/ dataclass based config for stacking NN blocks including +self-attention (or similar) layers. + +Currently used to implement experimental variants of: + * Bottleneck Transformers + * Lambda ResNets + * HaloNets + +Consider all of the models definitions here as experimental WIP and likely to change. + +Hacked together by / copyright Ross Wightman, 2021. 
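+
+A minimal creation sketch (hypothetical example; 'halonet26t' is one of the models registered
+below, and its default_cfgs entry uses a 256x256 input size):
+
+    import torch
+    import timm
+
+    model = timm.create_model('halonet26t', pretrained=False)
+    out = model(torch.randn(1, 3, 256, 256))  # -> (1, 1000) logits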
+""" +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .byobnet import ByoBlockCfg, ByoModelCfg, ByobNet, interleave_blocks +from .helpers import build_model_with_cfg +from .registry import register_model + +__all__ = [] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.95, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + 'fixed_input_size': False, 'min_input_size': (3, 224, 224), + **kwargs + } + + +default_cfgs = { + # GPU-Efficient (ResNet) weights + 'botnet26t_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/botnet26t_c1_256-167a0e9f.pth', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + 'botnet50ts_256': _cfg( + url='', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + 'eca_botnext26ts_256': _cfg( + url='', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + + 'halonet_h1': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), + 'halonet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet26t_256-9b4bf0b3.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), + 'sehalonet33ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sehalonet33ts_256-87e053f9.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), + 'halonet50ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet50ts_256_ra3-f07eab9f.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), + 'eca_halonext26ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_halonext26ts_256-1e55880b.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), + + 'lambda_resnet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26t_a2h_256-25ded63d.pth', + min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8)), + 'lambda_resnet50ts': _cfg( + url='', + min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8)), + 'lambda_resnet26rpt_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26rpt_a2h_256-482adad8.pth', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), +} + + +model_cfgs = dict( + + botnet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + fixed_input_size=True, + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + botnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, 
br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + fixed_input_size=True, + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + eca_botnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + fixed_input_size=True, + act_layer='silu', + attn_layer='eca', + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + + halonet_h1=ByoModelCfg( + blocks=( + ByoBlockCfg(type='self_attn', d=3, c=64, s=1, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=3, c=128, s=2, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=10, c=256, s=2, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=3, c=512, s=2, gs=0, br=1.0), + ), + stem_chs=64, + stem_type='7x7', + stem_pool='maxpool', + + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3), + ), + halonet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=2, dim_head=16) + ), + sehalonet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + act_layer='silu', + num_features=1280, + attn_layer='se', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3) + ), + halonet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks( + types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25, + self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3, num_heads=4)), + interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3) + ), + eca_halonext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='eca', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=2, dim_head=16) + ), + + lambda_resnet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, 
br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + self_attn_layer='lambda', + self_attn_kwargs=dict(r=9) + ), + lambda_resnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + self_attn_layer='lambda', + self_attn_kwargs=dict(r=9) + ), + lambda_resnet26rpt_256=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + self_attn_layer='lambda', + self_attn_kwargs=dict(r=None) + ), +) + + +def _create_byoanet(variant, cfg_variant=None, pretrained=False, **kwargs): + return build_model_with_cfg( + ByobNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +@register_model +def botnet26t_256(pretrained=False, **kwargs): + """ Bottleneck Transformer w/ ResNet26-T backbone. + NOTE: this isn't performing well, may remove + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('botnet26t_256', 'botnet26t', pretrained=pretrained, **kwargs) + + +@register_model +def botnet50ts_256(pretrained=False, **kwargs): + """ Bottleneck Transformer w/ ResNet50-T backbone, silu act. + NOTE: this isn't performing well, may remove + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('botnet50ts_256', 'botnet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_botnext26ts_256(pretrained=False, **kwargs): + """ Bottleneck Transformer w/ ResNet26-T backbone, silu act. + NOTE: this isn't performing well, may remove + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('eca_botnext26ts_256', 'eca_botnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def halonet_h1(pretrained=False, **kwargs): + """ HaloNet-H1. Halo attention in all stages as per the paper. + NOTE: This runs very slowly! + """ + return _create_byoanet('halonet_h1', pretrained=pretrained, **kwargs) + + +@register_model +def halonet26t(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet26-t backbone. Halo attention in final two stages + """ + return _create_byoanet('halonet26t', pretrained=pretrained, **kwargs) + + +@register_model +def sehalonet33ts(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet33-t backbone, SE attn for non Halo blocks, SiLU, 1-2 Halo in stage 2,3,4. + """ + return _create_byoanet('sehalonet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def halonet50ts(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet50-t backbone, silu act. 
Halo attention in final two stages + """ + return _create_byoanet('halonet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_halonext26ts(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet26-t backbone, silu act. Halo attention in final two stages + """ + return _create_byoanet('eca_halonext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def lambda_resnet26t(pretrained=False, **kwargs): + """ Lambda-ResNet-26-T. Lambda layers w/ conv pos in last two stages. + """ + return _create_byoanet('lambda_resnet26t', pretrained=pretrained, **kwargs) + + +@register_model +def lambda_resnet50ts(pretrained=False, **kwargs): + """ Lambda-ResNet-50-TS. SiLU act. Lambda layers w/ conv pos in last two stages. + """ + return _create_byoanet('lambda_resnet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def lambda_resnet26rpt_256(pretrained=False, **kwargs): + """ Lambda-ResNet-26-R-T. Lambda layers w/ rel pos embed in last two stages. + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('lambda_resnet26rpt_256', pretrained=pretrained, **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/byobnet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/byobnet.py new file mode 100644 index 0000000000..515f2073e6 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/byobnet.py @@ -0,0 +1,1422 @@ +""" Bring-Your-Own-Blocks Network + +A flexible network w/ dataclass based config for stacking those NN blocks. + +This model is currently used to implement the following networks: + +GPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)). +Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 +Code and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0 + +RepVGG - repvgg_* +Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 +Code and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT + +In all cases the models have been modified to fit within the design of ByobNet. I've remapped +the original weights and verified accuracies. + +For GPU Efficient nets, I used the original names for the blocks since they were for the most part +the same as original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note also some +changes introduced in RegNet were also present in the stem and bottleneck blocks for this model. + +A significant number of different network archs can be implemented here, including variants of the +above nets that include attention. + +Hacked together by / copyright Ross Wightman, 2021. 
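+
+A minimal configuration sketch (hypothetical, not one of the registered models below; the block
+types 'basic' and 'bottle' and the ByoBlockCfg / ByoModelCfg fields are the ones defined in this file):
+
+    cfg = ByoModelCfg(
+        blocks=(
+            ByoBlockCfg(type='basic', d=2, c=64, s=1),
+            ByoBlockCfg(type='bottle', d=2, c=128, s=2, br=0.25),
+        ),
+        stem_chs=32,
+    )
+    model = ByobNet(cfg, num_classes=1000)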
+""" +import math +from dataclasses import dataclass, field, replace +from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, named_apply +from .layers import ClassifierHead, ConvBnAct, BatchNormAct2d, DropPath, AvgPool2dSame, \ + create_conv2d, get_act_layer, convert_norm_act, get_attn, make_divisible, to_2tuple +from .registry import register_model + +__all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +def _cfgr(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + # GPU-Efficient (ResNet) weights + 'gernet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_s-756b4751.pth'), + 'gernet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_m-0873c53a.pth'), + 'gernet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_l-f31e2e8d.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + + # RepVGG weights + 'repvgg_a2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_a2-c1ee6d2b.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b0-80ac3f1b.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1-77ca2989.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b1g4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1g4-abde5d92.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2-25b7494e.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b2g4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2g4-165a85f2.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3-199bc50d.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b3g4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3g4-73c370bf.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + + # experimental configs + 'resnet51q': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth', + first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8), + test_input_size=(3, 288, 288), crop_pct=1.0), + 'resnet61q': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth', + test_input_size=(3, 288, 288), crop_pct=1.0), + + 'resnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth'), + 'gcresnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth'), + 'seresnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth'), + 'eca_resnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth'), + 'bat_resnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth', + min_input_size=(3, 256, 256)), + + 'resnet32ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth'), + 'resnet33ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth'), + 'gcresnet33ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth'), + 'seresnet33ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth'), + 'eca_resnet33ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth'), + + 'gcresnet50t': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth'), + + 'gcresnext50ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth'), + + # experimental models, likely to change ot be removed + 'regnetz_b': _cfgr( + url='', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 224, 224), pool_size=(7, 7), first_conv='stem.conv'), + 'regnetz_c': _cfgr( + url='', + imean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), first_conv='stem.conv'), + 'regnetz_d': _cfgr( + url='', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)), +} + + +@dataclass +class ByoBlockCfg: + type: Union[str, nn.Module] + d: int # block depth (number of block repeats in stage) + c: int # number of output channels for each block in stage + s: int = 2 # stride of stage (first block) + gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1 + br: float = 1. # bottleneck-ratio of blocks in stage + + # NOTE: these config items override the model cfgs that are applied to all blocks by default + attn_layer: Optional[str] = None + attn_kwargs: Optional[Dict[str, Any]] = None + self_attn_layer: Optional[str] = None + self_attn_kwargs: Optional[Dict[str, Any]] = None + block_kwargs: Optional[Dict[str, Any]] = None + + +@dataclass +class ByoModelCfg: + blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...] 
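+    # NOTE: each entry in `blocks` defines one stage; an entry may itself be a tuple of ByoBlockCfg
+    # to mix block types within a stage (see interleave_blocks / expand_blocks_cfg below)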
+ downsample: str = 'conv1x1' + stem_type: str = '3x3' + stem_pool: Optional[str] = 'maxpool' + stem_chs: int = 32 + width_factor: float = 1.0 + num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0 + zero_init_last: bool = True # zero init last weight (usually bn) in residual path + fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation + + act_layer: str = 'relu' + norm_layer: str = 'batchnorm' + + # NOTE: these config items will be overridden by the block cfg (per-block) if they are set there + attn_layer: Optional[str] = None + attn_kwargs: dict = field(default_factory=lambda: dict()) + self_attn_layer: Optional[str] = None + self_attn_kwargs: dict = field(default_factory=lambda: dict()) + block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict()) + + +def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0): + c = (64, 128, 256, 512) + group_size = 0 + if groups > 0: + group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0 + bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)]) + return bcfg + + +def interleave_blocks( + types: Tuple[str, str], d, every: Union[int, List[int]] = 1, first: bool = False, **kwargs +) -> Tuple[ByoBlockCfg]: + """ interleave 2 block types in stack + """ + assert len(types) == 2 + if isinstance(every, int): + every = list(range(0 if first else every, d, every + 1)) + if not every: + every = [d - 1] + set(every) + blocks = [] + for i in range(d): + block_type = types[1] if i in every else types[0] + blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)] + return tuple(blocks) + + +model_cfgs = dict( + gernet_l=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.), + ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.), + ), + stem_chs=32, + stem_pool=None, + num_features=2560, + ), + gernet_m=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.), + ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.), + ), + stem_chs=32, + stem_pool=None, + num_features=2560, + ), + gernet_s=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.), + ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.), + ), + stem_chs=13, + stem_pool=None, + num_features=1920, + ), + + repvgg_a2=ByoModelCfg( + blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b0=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b1=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b1g4=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4), + stem_type='rep', + stem_chs=64, + ), + repvgg_b2=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b2g4=ByoModelCfg( + 
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4), + stem_type='rep', + stem_chs=64, + ), + repvgg_b3=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b3g4=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4), + stem_type='rep', + stem_chs=64, + ), + + # 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks + # DW convs in last block, 2048 pre-FC, silu act + resnet51q=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), + ), + stem_chs=128, + stem_type='quad2', + stem_pool=None, + num_features=2048, + act_layer='silu', + ), + + # 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks + # DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act + resnet61q=ByoModelCfg( + blocks=( + ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), + ), + stem_chs=128, + stem_type='quad', + stem_pool=None, + num_features=2048, + act_layer='silu', + block_kwargs=dict(extra_conv=True), + ), + + # A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act, + # and a tiered stem w/ maxpool + resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + ), + gcresnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='gca', + ), + seresnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='se', + ), + eca_resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='eca', + ), + bat_resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + 
stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='bat', + attn_kwargs=dict(block_size=8) + ), + + # ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool + resnet32ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=0, + act_layer='silu', + ), + + # ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool + resnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + ), + + # A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat + # and a tiered stem w/ no maxpool + gcresnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='gca', + ), + seresnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='se', + ), + eca_resnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='eca', + ), + + gcresnet50t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + attn_layer='gca', + ), + + gcresnext50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + # stem_pool=None, + act_layer='silu', + attn_layer='gca', + ), + + # experimental models, closer to a RegNetZ than a ResNet. 
Similar to EfficientNets but w/ groups instead of DW + regnetz_b=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_c=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_d=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=32, br=4), + ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=32, br=4), + ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=32, br=4), + ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=32, br=4), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + downsample='', + num_features=1792, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), +) + + +@register_model +def gernet_l(pretrained=False, **kwargs): + """ GEResNet-Large (GENet-Large from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs) + + +@register_model +def gernet_m(pretrained=False, **kwargs): + """ GEResNet-Medium (GENet-Normal from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs) + + +@register_model +def gernet_s(pretrained=False, **kwargs): + """ EResNet-Small (GENet-Small from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_a2(pretrained=False, **kwargs): + """ RepVGG-A2 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b0(pretrained=False, **kwargs): + """ RepVGG-B0 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b1(pretrained=False, **kwargs): + """ RepVGG-B1 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b1g4(pretrained=False, **kwargs): + """ RepVGG-B1g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b2(pretrained=False, **kwargs): + """ RepVGG-B2 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b2', 
pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b2g4(pretrained=False, **kwargs): + """ RepVGG-B2g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b3(pretrained=False, **kwargs): + """ RepVGG-B3 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b3g4(pretrained=False, **kwargs): + """ RepVGG-B3g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs) + + +@register_model +def resnet51q(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs) + + +@register_model +def resnet61q(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs) + + +@register_model +def resnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def seresnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_resnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def bat_resnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def resnet32ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs) + + +@register_model +def resnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def seresnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_resnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnet50t(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnext50ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_b(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_b', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_c(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_c', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_d(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_d', pretrained=pretrained, **kwargs) + + +def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]: + if not isinstance(stage_blocks_cfg, Sequence): + stage_blocks_cfg = (stage_blocks_cfg,) + 
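    # flatten each per-stage cfg: an entry with depth d expands into d single-block (d=1) copies +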
block_cfgs = [] + for i, cfg in enumerate(stage_blocks_cfg): + block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)] + return block_cfgs + + +def num_groups(group_size, channels): + if not group_size: # 0 or None + return 1 # normal conv with 1 group + else: + # NOTE group_size == 1 -> depthwise conv + assert channels % group_size == 0 + return channels // group_size + + +@dataclass +class LayerFn: + conv_norm_act: Callable = ConvBnAct + norm_act: Callable = BatchNormAct2d + act: Callable = nn.ReLU + attn: Optional[Callable] = None + self_attn: Optional[Callable] = None + + +class DownsampleAvg(nn.Module): + def __init__(self, in_chs, out_chs, stride=1, dilation=1, apply_act=False, layers: LayerFn = None): + """ AvgPool Downsampling as in 'D' ResNet variants.""" + super(DownsampleAvg, self).__init__() + layers = layers or LayerFn() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act) + + def forward(self, x): + return self.conv(self.pool(x)) + + +def create_shortcut(downsample_type, layers: LayerFn, in_chs, out_chs, stride, dilation, **kwargs): + assert downsample_type in ('avg', 'conv1x1', '') + if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: + if not downsample_type: + return None # no shortcut + elif downsample_type == 'avg': + return DownsampleAvg(in_chs, out_chs, stride=stride, dilation=dilation[0], **kwargs) + else: + return layers.conv_norm_act(in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation[0], **kwargs) + else: + return nn.Identity() # identity shortcut + + +class BasicBlock(nn.Module): + """ ResNet Basic Block - kxk + kxk + """ + + def __init__( + self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), group_size=None, bottle_ratio=1.0, + downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None, + drop_path_rate=0.): + super(BasicBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation, + apply_act=False, layers=layers) + + self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0]) + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv2_kxk = layers.conv_norm_act( + mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None: + nn.init.zeros_(self.conv2_kxk.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_kxk(x) + x = self.conv2_kxk(x) + x = self.attn(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + + +class BottleneckBlock(nn.Module): + """ ResNet-like Bottleneck Block - 1x1 - kxk - 1x1 + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None, + downsample='avg', attn_last=False, linear_out=False, extra_conv=False, bottle_in=False, + layers: LayerFn = None, drop_block=None, drop_path_rate=0.): + super(BottleneckBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation, + apply_act=False, layers=layers) + + self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) + self.conv2_kxk = layers.conv_norm_act( + mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block) + if extra_conv: + self.conv2b_kxk = layers.conv_norm_act( + mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block) + else: + self.conv2b_kxk = nn.Identity() + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None: + nn.init.zeros_(self.conv3_1x1.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_1x1(x) + x = self.conv2_kxk(x) + x = self.conv2b_kxk(x) + x = self.attn(x) + x = self.conv3_1x1(x) + x = self.attn_last(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + + +class DarkBlock(nn.Module): + """ DarkNet-like (1x1 + 3x3 w/ stride) block + + The GE-Net impl included a 1x1 + 3x3 block in their search space. It was not used in the feature models. + This block is pretty much a DarkNet block (also DenseNet) hence the name. Neither DarkNet or DenseNet + uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats). + + If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 /w stride + 1x1) + for more optimal compute. 
+ """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None, + downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None, + drop_path_rate=0.): + super(DarkBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation, + apply_act=False, layers=layers) + + self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv2_kxk = layers.conv_norm_act( + mid_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None: + nn.init.zeros_(self.conv2_kxk.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_1x1(x) + x = self.attn(x) + x = self.conv2_kxk(x) + x = self.attn_last(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + + +class EdgeBlock(nn.Module): + """ EdgeResidual-like (3x3 + 1x1) block + + A two layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed. + Very similar to the EfficientNet Edge-Residual block but this block it ends with activations, is + intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs. + + FIXME is there a more common 3x3 + 1x1 conv block to name this after? + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None, + downsample='avg', attn_last=False, linear_out=False, layers: LayerFn = None, + drop_block=None, drop_path_rate=0.): + super(EdgeBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation, + apply_act=False, layers=layers) + + self.conv1_kxk = layers.conv_norm_act( + in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block) + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None: + nn.init.zeros_(self.conv2_1x1.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_kxk(x) + x = self.attn(x) + x = self.conv2_1x1(x) + x = self.attn_last(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + + +class RepVggBlock(nn.Module): + """ RepVGG Block. + + Adapted from impl at https://github.com/DingXiaoH/RepVGG + + This version does not currently support the deploy optimization. It is currently fixed in 'train' mode. + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None, + downsample='', layers: LayerFn = None, drop_block=None, drop_path_rate=0.): + super(RepVggBlock, self).__init__() + layers = layers or LayerFn() + groups = num_groups(group_size, in_chs) + + use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1] + self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None + self.conv_kxk = layers.conv_norm_act( + in_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block, apply_act=False) + self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False) + self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity() + self.act = layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + # NOTE this init overrides that base model init with specific changes for the block type + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + nn.init.normal_(m.weight, .1, .1) + nn.init.normal_(m.bias, 0, .1) + if hasattr(self.attn, 'reset_parameters'): + self.attn.reset_parameters() + + def forward(self, x): + if self.identity is None: + x = self.conv_1x1(x) + self.conv_kxk(x) + else: + identity = self.identity(x) + x = self.conv_1x1(x) + self.conv_kxk(x) + x = self.drop_path(x) # not in the paper / official impl, experimental + x = x + identity + x = self.attn(x) # no attn in the paper / official impl, experimental + return self.act(x) + + +class SelfAttnBlock(nn.Module): + """ ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1 + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None, + downsample='avg', extra_conv=False, linear_out=False, bottle_in=False, post_attn_na=True, + feat_size=None, layers: LayerFn = None, drop_block=None, drop_path_rate=0.): + super(SelfAttnBlock, self).__init__() + assert layers is not None + mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation, + apply_act=False, layers=layers) + + self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) + if extra_conv: + self.conv2_kxk = layers.conv_norm_act( + mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block) + stride = 1 # striding done via conv if enabled + else: + self.conv2_kxk = 
nn.Identity() + opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size) + # FIXME need to dilate self attn to have dilated network support, moop moop + self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs) + self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity() + self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None: + nn.init.zeros_(self.conv3_1x1.bn.weight) + if hasattr(self.self_attn, 'reset_parameters'): + self.self_attn.reset_parameters() + + def forward(self, x): + shortcut = self.shortcut(x) + + x = self.conv1_1x1(x) + x = self.conv2_kxk(x) + x = self.self_attn(x) + x = self.post_attn(x) + x = self.conv3_1x1(x) + x = self.drop_path(x) + + x = self.act(x + shortcut) + return x + + +_block_registry = dict( + basic=BasicBlock, + bottle=BottleneckBlock, + dark=DarkBlock, + edge=EdgeBlock, + rep=RepVggBlock, + self_attn=SelfAttnBlock, +) + + +def register_block(block_type:str, block_fn: nn.Module): + _block_registry[block_type] = block_fn + + +def create_block(block: Union[str, nn.Module], **kwargs): + if isinstance(block, (nn.Module, partial)): + return block(**kwargs) + assert block in _block_registry, f'Unknown block type ({block}' + return _block_registry[block](**kwargs) + + +class Stem(nn.Sequential): + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=4, pool='maxpool', + num_rep=3, num_act=None, chs_decay=0.5, layers: LayerFn = None): + super().__init__() + assert stride in (2, 4) + layers = layers or LayerFn() + + if isinstance(out_chs, (list, tuple)): + num_rep = len(out_chs) + stem_chs = out_chs + else: + stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1] + + self.stride = stride + self.feature_info = [] # track intermediate features + prev_feat = '' + stem_strides = [2] + [1] * (num_rep - 1) + if stride == 4 and not pool: + # set last conv in stack to be strided if stride == 4 and no pooling layer + stem_strides[-1] = 2 + + num_act = num_rep if num_act is None else num_act + # if num_act < num_rep, first convs in stack won't have bn + act + stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act + prev_chs = in_chs + curr_stride = 1 + for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)): + layer_fn = layers.conv_norm_act if na else create_conv2d + conv_name = f'conv{i + 1}' + if i > 0 and s > 1: + self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat)) + self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s)) + prev_chs = ch + curr_stride *= s + prev_feat = conv_name + + if pool and 'max' in pool.lower(): + self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat)) + self.add_module('pool', nn.MaxPool2d(3, 2, 1)) + curr_stride *= 2 + prev_feat = 'pool' + + self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat)) + assert curr_stride == stride + + +def create_byob_stem(in_chs, out_chs, stem_type='', pool_type='', feat_prefix='stem', layers: LayerFn = None): + layers = layers or LayerFn() + assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', '7x7', '3x3') + if 'quad' in stem_type: + # based on NFNet stem, stack of 4 3x3 convs + num_act = 
2 if 'quad2' in stem_type else None + stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers) + elif 'tiered' in stem_type: + # 3x3 stack of 3 convs as in my ResNet-T + stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers) + elif 'deep' in stem_type: + # 3x3 stack of 3 convs as in ResNet-D + stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers) + elif 'rep' in stem_type: + stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers) + elif '7x7' in stem_type: + # 7x7 stem conv as in ResNet + if pool_type: + stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers) + else: + stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2) + else: + # 3x3 stem conv as in RegNet is the default + if pool_type: + stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers) + else: + stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2) + + if isinstance(stem, Stem): + feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info] + else: + feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix)] + return stem, feature_info + + +def reduce_feat_size(feat_size, stride=2): + return None if feat_size is None else tuple([s // stride for s in feat_size]) + + +def override_kwargs(block_kwargs, model_kwargs): + """ Override model level attn/self-attn/block kwargs w/ block level + + NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs + for the block if set to anything that isn't None. + + i.e. an empty block_kwargs dict will remove kwargs set at model level for that block + """ + out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs + return out_kwargs or {} # make sure None isn't returned + + +def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg, ): + layer_fns = block_kwargs['layers'] + + # override attn layer / args with block local config + attn_set = block_cfg.attn_layer is not None + if attn_set or block_cfg.attn_kwargs is not None: + # override attn layer config + if attn_set and not block_cfg.attn_layer: + # empty string for attn_layer type will disable attn for this block + attn_layer = None + else: + attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs) + attn_layer = block_cfg.attn_layer or model_cfg.attn_layer + attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None + layer_fns = replace(layer_fns, attn=attn_layer) + + # override self-attn layer / args with block local cfg + self_attn_set = block_cfg.self_attn_layer is not None + if self_attn_set or block_cfg.self_attn_kwargs is not None: + # override attn layer config + if self_attn_set and not block_cfg.self_attn_layer: # attn_layer == '' + # empty string for self_attn_layer type will disable attn for this block + self_attn_layer = None + else: + self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs) + self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer + self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \ + if self_attn_layer is not None else None + layer_fns = replace(layer_fns, self_attn=self_attn_layer) + + block_kwargs['layers'] = layer_fns + + # add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set + block_kwargs.update(override_kwargs(block_cfg.block_kwargs, 
model_cfg.block_kwargs)) + + +def create_byob_stages( + cfg: ByoModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, Any], + feat_size: Optional[int] = None, + layers: Optional[LayerFn] = None, + block_kwargs_fn: Optional[Callable] = update_block_kwargs): + + layers = layers or LayerFn() + feature_info = [] + block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks] + depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs] + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + dilation = 1 + net_stride = stem_feat['reduction'] + prev_chs = stem_feat['num_chs'] + prev_feat = stem_feat + stages = [] + for stage_idx, stage_block_cfgs in enumerate(block_cfgs): + stride = stage_block_cfgs[0].s + if stride != 1 and prev_feat: + feature_info.append(prev_feat) + if net_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + net_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + + blocks = [] + for block_idx, block_cfg in enumerate(stage_block_cfgs): + out_chs = make_divisible(block_cfg.c * cfg.width_factor) + group_size = block_cfg.gs + if isinstance(group_size, Callable): + group_size = group_size(out_chs, block_idx) + block_kwargs = dict( # Blocks used in this model must accept these arguments + in_chs=prev_chs, + out_chs=out_chs, + stride=stride if block_idx == 0 else 1, + dilation=(first_dilation, dilation), + group_size=group_size, + bottle_ratio=block_cfg.br, + downsample=cfg.downsample, + drop_path_rate=dpr[stage_idx][block_idx], + layers=layers, + ) + if block_cfg.type in ('self_attn',): + # add feat_size arg for blocks that support/need it + block_kwargs['feat_size'] = feat_size + block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg) + blocks += [create_block(block_cfg.type, **block_kwargs)] + first_dilation = dilation + prev_chs = out_chs + if stride > 1 and block_idx == 0: + feat_size = reduce_feat_size(feat_size, stride) + + stages += [nn.Sequential(*blocks)] + prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}') + + feature_info.append(prev_feat) + return nn.Sequential(*stages), feature_info + + +def get_layer_fns(cfg: ByoModelCfg): + act = get_act_layer(cfg.act_layer) + norm_act = convert_norm_act(norm_layer=cfg.norm_layer, act_layer=act) + conv_norm_act = partial(ConvBnAct, norm_layer=cfg.norm_layer, act_layer=act) + attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None + self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None + layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn) + return layer_fn + + +class ByobNet(nn.Module): + """ 'Bring-your-own-blocks' Net + + A flexible network backbone that allows building model stem + blocks via + dataclass cfg definition w/ factory functions for module instantiation. + + Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act). 
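+
+    Usage sketch (illustrative only, not a prescribed API; assumes the default
+    1000-class head and a 224x224 input, neither of which is required):
+
+        model = gernet_m(pretrained=False)
+        logits = model(torch.randn(1, 3, 224, 224))  # expected shape: (1, 1000)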
+ """ + def __init__(self, cfg: ByoModelCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, + zero_init_last=True, img_size=None, drop_rate=0., drop_path_rate=0.): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + layers = get_layer_fns(cfg) + if cfg.fixed_input_size: + assert img_size is not None, 'img_size argument is required for fixed input size model' + feat_size = to_2tuple(img_size) if img_size is not None else None + + self.feature_info = [] + stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor)) + self.stem, stem_feat = create_byob_stem(in_chans, stem_chs, cfg.stem_type, cfg.stem_pool, layers=layers) + self.feature_info.extend(stem_feat[:-1]) + feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction']) + + self.stages, stage_feat = create_byob_stages( + cfg, drop_path_rate, output_stride, stem_feat[-1], layers=layers, feat_size=feat_size) + self.feature_info.extend(stage_feat[:-1]) + + prev_chs = stage_feat[-1]['num_chs'] + if cfg.num_features: + self.num_features = int(round(cfg.width_factor * cfg.num_features)) + self.final_conv = layers.conv_norm_act(prev_chs, self.num_features, 1) + else: + self.num_features = prev_chs + self.final_conv = nn.Identity() + self.feature_info += [ + dict(num_chs=self.num_features, reduction=stage_feat[-1]['reduction'], module='final_conv')] + + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + # init weights + named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.final_conv(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _init_weights(module, name='', zero_init_last=False): + if isinstance(module, nn.Conv2d): + fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels + fan_out //= module.groups + module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Linear): + nn.init.normal_(module.weight, mean=0.0, std=0.01) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights(zero_init_last=zero_init_last) + + +def _create_byobnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ByobNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/cait.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/cait.py new file mode 100644 index 0000000000..69b4ba06c8 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/cait.py @@ -0,0 +1,394 @@ +""" Class-Attention in Image Transformers (CaiT) + +Paper: 'Going deeper with Image Transformers' - https://arxiv.org/abs/2103.17239 + +Original code and weights from https://github.com/facebookresearch/deit, copyright below + +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. 
+from copy import deepcopy + +import torch +import torch.nn as nn +from functools import partial + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_ +from .registry import register_model + + +__all__ = ['Cait', 'ClassAttn', 'LayerScaleBlockClassAttn', 'LayerScaleBlock', 'TalkingHeadAttn'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 384, 384), 'pool_size': None, + 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = dict( + cait_xxs24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS24_224.pth', + input_size=(3, 224, 224), + ), + cait_xxs24_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS24_384.pth', + ), + cait_xxs36_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS36_224.pth', + input_size=(3, 224, 224), + ), + cait_xxs36_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS36_384.pth', + ), + cait_xs24_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XS24_384.pth', + ), + cait_s24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/S24_224.pth', + input_size=(3, 224, 224), + ), + cait_s24_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/S24_384.pth', + ), + cait_s36_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/S36_384.pth', + ), + cait_m36_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/M36_384.pth', + ), + cait_m48_448=_cfg( + url='https://dl.fbaipublicfiles.com/deit/M48_448.pth', + input_size=(3, 448, 448), + ), +) + + +class ClassAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to do CA + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.q = nn.Linear(dim, dim, bias=qkv_bias) + self.k = nn.Linear(dim, dim, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + q = self.q(x[:, 0]).unsqueeze(1).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + q = q * self.scale + v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x_cls = (attn @ v).transpose(1, 2).reshape(B, 1, C) + x_cls = self.proj(x_cls) + x_cls = self.proj_drop(x_cls) + + return x_cls + + +class LayerScaleBlockClassAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add CA and LayerScale + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_block=ClassAttn, + mlp_block=Mlp, init_values=1e-4): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = attn_block( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, 
proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = mlp_block(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + + def forward(self, x, x_cls): + u = torch.cat((x_cls, x), dim=1) + x_cls = x_cls + self.drop_path(self.gamma_1 * self.attn(self.norm1(u))) + x_cls = x_cls + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x_cls))) + return x_cls + + +class TalkingHeadAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add Talking Heads Attention (https://arxiv.org/pdf/2003.02436v1.pdf) + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + + self.num_heads = num_heads + + head_dim = dim // num_heads + + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + + self.proj = nn.Linear(dim, dim) + + self.proj_l = nn.Linear(num_heads, num_heads) + self.proj_w = nn.Linear(num_heads, num_heads) + + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] + + attn = (q @ k.transpose(-2, -1)) + + attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + + attn = attn.softmax(dim=-1) + + attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LayerScaleBlock(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add layerScale + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_block=TalkingHeadAttn, + mlp_block=Mlp, init_values=1e-4): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = attn_block( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = mlp_block(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + + def forward(self, x): + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class Cait(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to adapt to our cait models + def __init__( + self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), + global_pool=None, + block_layers=LayerScaleBlock, + block_layers_token=LayerScaleBlockClassAttn, + patch_layer=PatchEmbed, + act_layer=nn.GELU, + attn_block=TalkingHeadAttn, + mlp_block=Mlp, + init_scale=1e-4, + attn_block_token_only=ClassAttn, + mlp_block_token_only=Mlp, + depth_token_only=2, + mlp_ratio_clstk=4.0 + ): + super().__init__() + + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim + + self.patch_embed = patch_layer( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [drop_path_rate for i in range(depth)] + self.blocks = nn.ModuleList([ + block_layers( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + act_layer=act_layer, attn_block=attn_block, mlp_block=mlp_block, init_values=init_scale) + for i in range(depth)]) + + self.blocks_token_only = nn.ModuleList([ + block_layers_token( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio_clstk, qkv_bias=qkv_bias, + drop=0.0, attn_drop=0.0, drop_path=0.0, norm_layer=norm_layer, + act_layer=act_layer, attn_block=attn_block_token_only, + mlp_block=mlp_block_token_only, init_values=init_scale) + for i in range(depth_token_only)]) + + self.norm = norm_layer(embed_dim) + + self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')] + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand(B, -1, -1) + 
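+        # Unlike a standard ViT, the CLS token is not concatenated to the patch tokens here;
+        # it is updated separately by the class-attention blocks (blocks_token_only) and
+        # concatenated back only just before the final norm.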
+ x = x + self.pos_embed + x = self.pos_drop(x) + + for i, blk in enumerate(self.blocks): + x = blk(x) + + for i, blk in enumerate(self.blocks_token_only): + cls_tokens = blk(x, cls_tokens) + + x = torch.cat((cls_tokens, x), dim=1) + + x = self.norm(x) + return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model=None): + if 'model' in state_dict: + state_dict = state_dict['model'] + checkpoint_no_module = {} + for k, v in state_dict.items(): + checkpoint_no_module[k.replace('module.', '')] = v + return checkpoint_no_module + + +def _create_cait(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + Cait, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def cait_xxs24_224(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xxs24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xxs24_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xxs24_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xxs36_224(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xxs36_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xxs36_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xxs36_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xs24_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=288, depth=24, num_heads=6, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xs24_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_s24_224(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_scale=1e-5, **kwargs) + model = _create_cait('cait_s24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_s24_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_scale=1e-5, **kwargs) + model = _create_cait('cait_s24_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_s36_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=384, depth=36, num_heads=8, init_scale=1e-6, **kwargs) + model = _create_cait('cait_s36_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_m36_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=768, depth=36, num_heads=16, init_scale=1e-6, **kwargs) + model = _create_cait('cait_m36_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_m48_448(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=768, depth=48, num_heads=16, init_scale=1e-6, **kwargs) + model = _create_cait('cait_m48_448', 
pretrained=pretrained, **model_args) + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/coat.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/coat.py new file mode 100644 index 0000000000..f071715a34 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/coat.py @@ -0,0 +1,660 @@ +""" +CoaT architecture. + +Paper: Co-Scale Conv-Attentional Image Transformers - https://arxiv.org/abs/2104.06399 + +Official CoaT code at: https://github.com/mlpc-ucsd/CoaT + +Modified from timm/models/vision_transformer.py +""" +from copy import deepcopy +from functools import partial +from typing import Tuple, List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_ +from .registry import register_model + + +__all__ = [ + "coat_tiny", + "coat_mini", + "coat_lite_tiny", + "coat_lite_mini", + "coat_lite_small" +] + + +def _cfg_coat(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed1.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'coat_tiny': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_tiny-473c2a20.pth' + ), + 'coat_mini': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_mini-2c6baf49.pth' + ), + 'coat_lite_tiny': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_tiny-461b07a7.pth' + ), + 'coat_lite_mini': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_mini-d7842000.pth' + ), + 'coat_lite_small': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_small-fea1d5a1.pth' + ), +} + + +class ConvRelPosEnc(nn.Module): + """ Convolutional relative position encoding. """ + def __init__(self, Ch, h, window): + """ + Initialization. + Ch: Channels per head. + h: Number of heads. + window: Window size(s) in convolutional relative positional encoding. It can have two forms: + 1. An integer of window size, which assigns all attention heads with the same window s + size in ConvRelPosEnc. + 2. A dict mapping window size to #attention head splits ( + e.g. {window size 1: #attention head split 1, window size 2: #attention head split 2}) + It will apply different window size to the attention head splits. + """ + super().__init__() + + if isinstance(window, int): + # Set the same window size for all attention heads. + window = {window: h} + self.window = window + elif isinstance(window, dict): + self.window = window + else: + raise ValueError() + + self.conv_list = nn.ModuleList() + self.head_splits = [] + for cur_window, cur_head_split in window.items(): + dilation = 1 + # Determine padding size. 
+ # Ref: https://discuss.pytorch.org/t/how-to-keep-the-shape-of-input-and-output-same-when-dilation-conv/14338 + padding_size = (cur_window + (cur_window - 1) * (dilation - 1)) // 2 + cur_conv = nn.Conv2d(cur_head_split*Ch, cur_head_split*Ch, + kernel_size=(cur_window, cur_window), + padding=(padding_size, padding_size), + dilation=(dilation, dilation), + groups=cur_head_split*Ch, + ) + self.conv_list.append(cur_conv) + self.head_splits.append(cur_head_split) + self.channel_splits = [x*Ch for x in self.head_splits] + + def forward(self, q, v, size: Tuple[int, int]): + B, h, N, Ch = q.shape + H, W = size + assert N == 1 + H * W + + # Convolutional relative position encoding. + q_img = q[:, :, 1:, :] # [B, h, H*W, Ch] + v_img = v[:, :, 1:, :] # [B, h, H*W, Ch] + + v_img = v_img.transpose(-1, -2).reshape(B, h * Ch, H, W) + v_img_list = torch.split(v_img, self.channel_splits, dim=1) # Split according to channels + conv_v_img_list = [] + for i, conv in enumerate(self.conv_list): + conv_v_img_list.append(conv(v_img_list[i])) + conv_v_img = torch.cat(conv_v_img_list, dim=1) + conv_v_img = conv_v_img.reshape(B, h, Ch, H * W).transpose(-1, -2) + + EV_hat = q_img * conv_v_img + EV_hat = F.pad(EV_hat, (0, 0, 1, 0, 0, 0)) # [B, h, N, Ch]. + return EV_hat + + +class FactorAtt_ConvRelPosEnc(nn.Module): + """ Factorized attention with convolutional relative position encoding class. """ + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., shared_crpe=None): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) # Note: attn_drop is actually not used. + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + # Shared convolutional relative position encoding. + self.crpe = shared_crpe + + def forward(self, x, size: Tuple[int, int]): + B, N, C = x.shape + + # Generate Q, K, V. + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # [B, h, N, Ch] + + # Factorized attention. + k_softmax = k.softmax(dim=2) + factor_att = k_softmax.transpose(-1, -2) @ v + factor_att = q @ factor_att + + # Convolutional relative position encoding. + crpe = self.crpe(q, v, size=size) # [B, h, N, Ch] + + # Merge and reshape. + x = self.scale * factor_att + crpe + x = x.transpose(1, 2).reshape(B, N, C) # [B, h, N, Ch] -> [B, N, h, Ch] -> [B, N, C] + + # Output projection. + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class ConvPosEnc(nn.Module): + """ Convolutional Position Encoding. + Note: This module is similar to the conditional position encoding in CPVT. + """ + def __init__(self, dim, k=3): + super(ConvPosEnc, self).__init__() + self.proj = nn.Conv2d(dim, dim, k, 1, k//2, groups=dim) + + def forward(self, x, size: Tuple[int, int]): + B, N, C = x.shape + H, W = size + assert N == 1 + H * W + + # Extract CLS token and image tokens. + cls_token, img_tokens = x[:, :1], x[:, 1:] # [B, 1, C], [B, H*W, C] + + # Depthwise convolution. + feat = img_tokens.transpose(1, 2).view(B, C, H, W) + x = self.proj(feat) + feat + x = x.flatten(2).transpose(1, 2) + + # Combine with CLS token. + x = torch.cat((cls_token, x), dim=1) + + return x + + +class SerialBlock(nn.Module): + """ Serial block class. + Note: In this implementation, each serial block only contains a conv-attention and a FFN (MLP) module. 
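+    Here the conv-attention is FactorAtt_ConvRelPosEnc (factorized attention with a shared
+    convolutional relative position encoding); the shared ConvPosEnc is applied first in forward().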
""" + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, shared_cpe=None, shared_crpe=None): + super().__init__() + + # Conv-Attention. + self.cpe = shared_cpe + + self.norm1 = norm_layer(dim) + self.factoratt_crpe = FactorAtt_ConvRelPosEnc( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, shared_crpe=shared_crpe) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + # MLP. + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x, size: Tuple[int, int]): + # Conv-Attention. + x = self.cpe(x, size) + cur = self.norm1(x) + cur = self.factoratt_crpe(cur, size) + x = x + self.drop_path(cur) + + # MLP. + cur = self.norm2(x) + cur = self.mlp(cur) + x = x + self.drop_path(cur) + + return x + + +class ParallelBlock(nn.Module): + """ Parallel block class. """ + def __init__(self, dims, num_heads, mlp_ratios=[], qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, shared_crpes=None): + super().__init__() + + # Conv-Attention. + self.norm12 = norm_layer(dims[1]) + self.norm13 = norm_layer(dims[2]) + self.norm14 = norm_layer(dims[3]) + self.factoratt_crpe2 = FactorAtt_ConvRelPosEnc( + dims[1], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + shared_crpe=shared_crpes[1] + ) + self.factoratt_crpe3 = FactorAtt_ConvRelPosEnc( + dims[2], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + shared_crpe=shared_crpes[2] + ) + self.factoratt_crpe4 = FactorAtt_ConvRelPosEnc( + dims[3], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + shared_crpe=shared_crpes[3] + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + # MLP. + self.norm22 = norm_layer(dims[1]) + self.norm23 = norm_layer(dims[2]) + self.norm24 = norm_layer(dims[3]) + # In parallel block, we assume dimensions are the same and share the linear transformation. + assert dims[1] == dims[2] == dims[3] + assert mlp_ratios[1] == mlp_ratios[2] == mlp_ratios[3] + mlp_hidden_dim = int(dims[1] * mlp_ratios[1]) + self.mlp2 = self.mlp3 = self.mlp4 = Mlp( + in_features=dims[1], hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def upsample(self, x, factor: float, size: Tuple[int, int]): + """ Feature map up-sampling. """ + return self.interpolate(x, scale_factor=factor, size=size) + + def downsample(self, x, factor: float, size: Tuple[int, int]): + """ Feature map down-sampling. """ + return self.interpolate(x, scale_factor=1.0/factor, size=size) + + def interpolate(self, x, scale_factor: float, size: Tuple[int, int]): + """ Feature map interpolation. 
""" + B, N, C = x.shape + H, W = size + assert N == 1 + H * W + + cls_token = x[:, :1, :] + img_tokens = x[:, 1:, :] + + img_tokens = img_tokens.transpose(1, 2).reshape(B, C, H, W) + img_tokens = F.interpolate( + img_tokens, scale_factor=scale_factor, recompute_scale_factor=False, mode='bilinear', align_corners=False) + img_tokens = img_tokens.reshape(B, C, -1).transpose(1, 2) + + out = torch.cat((cls_token, img_tokens), dim=1) + + return out + + def forward(self, x1, x2, x3, x4, sizes: List[Tuple[int, int]]): + _, S2, S3, S4 = sizes + cur2 = self.norm12(x2) + cur3 = self.norm13(x3) + cur4 = self.norm14(x4) + cur2 = self.factoratt_crpe2(cur2, size=S2) + cur3 = self.factoratt_crpe3(cur3, size=S3) + cur4 = self.factoratt_crpe4(cur4, size=S4) + upsample3_2 = self.upsample(cur3, factor=2., size=S3) + upsample4_3 = self.upsample(cur4, factor=2., size=S4) + upsample4_2 = self.upsample(cur4, factor=4., size=S4) + downsample2_3 = self.downsample(cur2, factor=2., size=S2) + downsample3_4 = self.downsample(cur3, factor=2., size=S3) + downsample2_4 = self.downsample(cur2, factor=4., size=S2) + cur2 = cur2 + upsample3_2 + upsample4_2 + cur3 = cur3 + upsample4_3 + downsample2_3 + cur4 = cur4 + downsample3_4 + downsample2_4 + x2 = x2 + self.drop_path(cur2) + x3 = x3 + self.drop_path(cur3) + x4 = x4 + self.drop_path(cur4) + + # MLP. + cur2 = self.norm22(x2) + cur3 = self.norm23(x3) + cur4 = self.norm24(x4) + cur2 = self.mlp2(cur2) + cur3 = self.mlp3(cur3) + cur4 = self.mlp4(cur4) + x2 = x2 + self.drop_path(cur2) + x3 = x3 + self.drop_path(cur3) + x4 = x4 + self.drop_path(cur4) + + return x1, x2, x3, x4 + + +class CoaT(nn.Module): + """ CoaT class. """ + def __init__( + self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=(0, 0, 0, 0), + serial_depths=(0, 0, 0, 0), parallel_depth=0, num_heads=0, mlp_ratios=(0, 0, 0, 0), qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), + return_interm_layers=False, out_features=None, crpe_window=None, **kwargs): + super().__init__() + crpe_window = crpe_window or {3: 2, 5: 3, 7: 3} + self.return_interm_layers = return_interm_layers + self.out_features = out_features + self.embed_dims = embed_dims + self.num_features = embed_dims[-1] + self.num_classes = num_classes + + # Patch embeddings. + img_size = to_2tuple(img_size) + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dims[0], norm_layer=nn.LayerNorm) + self.patch_embed2 = PatchEmbed( + img_size=[x // 4 for x in img_size], patch_size=2, in_chans=embed_dims[0], + embed_dim=embed_dims[1], norm_layer=nn.LayerNorm) + self.patch_embed3 = PatchEmbed( + img_size=[x // 8 for x in img_size], patch_size=2, in_chans=embed_dims[1], + embed_dim=embed_dims[2], norm_layer=nn.LayerNorm) + self.patch_embed4 = PatchEmbed( + img_size=[x // 16 for x in img_size], patch_size=2, in_chans=embed_dims[2], + embed_dim=embed_dims[3], norm_layer=nn.LayerNorm) + + # Class tokens. + self.cls_token1 = nn.Parameter(torch.zeros(1, 1, embed_dims[0])) + self.cls_token2 = nn.Parameter(torch.zeros(1, 1, embed_dims[1])) + self.cls_token3 = nn.Parameter(torch.zeros(1, 1, embed_dims[2])) + self.cls_token4 = nn.Parameter(torch.zeros(1, 1, embed_dims[3])) + + # Convolutional position encodings. 
+ self.cpe1 = ConvPosEnc(dim=embed_dims[0], k=3) + self.cpe2 = ConvPosEnc(dim=embed_dims[1], k=3) + self.cpe3 = ConvPosEnc(dim=embed_dims[2], k=3) + self.cpe4 = ConvPosEnc(dim=embed_dims[3], k=3) + + # Convolutional relative position encodings. + self.crpe1 = ConvRelPosEnc(Ch=embed_dims[0] // num_heads, h=num_heads, window=crpe_window) + self.crpe2 = ConvRelPosEnc(Ch=embed_dims[1] // num_heads, h=num_heads, window=crpe_window) + self.crpe3 = ConvRelPosEnc(Ch=embed_dims[2] // num_heads, h=num_heads, window=crpe_window) + self.crpe4 = ConvRelPosEnc(Ch=embed_dims[3] // num_heads, h=num_heads, window=crpe_window) + + # Disable stochastic depth. + dpr = drop_path_rate + assert dpr == 0.0 + + # Serial blocks 1. + self.serial_blocks1 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[0], num_heads=num_heads, mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe1, shared_crpe=self.crpe1 + ) + for _ in range(serial_depths[0])] + ) + + # Serial blocks 2. + self.serial_blocks2 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[1], num_heads=num_heads, mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe2, shared_crpe=self.crpe2 + ) + for _ in range(serial_depths[1])] + ) + + # Serial blocks 3. + self.serial_blocks3 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[2], num_heads=num_heads, mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe3, shared_crpe=self.crpe3 + ) + for _ in range(serial_depths[2])] + ) + + # Serial blocks 4. + self.serial_blocks4 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[3], num_heads=num_heads, mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe4, shared_crpe=self.crpe4 + ) + for _ in range(serial_depths[3])] + ) + + # Parallel blocks. + self.parallel_depth = parallel_depth + if self.parallel_depth > 0: + self.parallel_blocks = nn.ModuleList([ + ParallelBlock( + dims=embed_dims, num_heads=num_heads, mlp_ratios=mlp_ratios, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_crpes=(self.crpe1, self.crpe2, self.crpe3, self.crpe4) + ) + for _ in range(parallel_depth)] + ) + else: + self.parallel_blocks = None + + # Classification head(s). + if not self.return_interm_layers: + if self.parallel_blocks is not None: + self.norm2 = norm_layer(embed_dims[1]) + self.norm3 = norm_layer(embed_dims[2]) + else: + self.norm2 = self.norm3 = None + self.norm4 = norm_layer(embed_dims[3]) + + if self.parallel_depth > 0: + # CoaT series: Aggregate features of last three scales for classification. + assert embed_dims[1] == embed_dims[2] == embed_dims[3] + self.aggregate = torch.nn.Conv1d(in_channels=3, out_channels=1, kernel_size=1) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + else: + # CoaT-Lite series: Use feature of last scale for classification. + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + # Initialize weights. 
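+        # Class tokens get truncated-normal init (std=0.02); Linear/LayerNorm layers are initialized in _init_weights via self.apply().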
+ trunc_normal_(self.cls_token1, std=.02) + trunc_normal_(self.cls_token2, std=.02) + trunc_normal_(self.cls_token3, std=.02) + trunc_normal_(self.cls_token4, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'cls_token1', 'cls_token2', 'cls_token3', 'cls_token4'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def insert_cls(self, x, cls_token): + """ Insert CLS token. """ + cls_tokens = cls_token.expand(x.shape[0], -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + return x + + def remove_cls(self, x): + """ Remove CLS token. """ + return x[:, 1:, :] + + def forward_features(self, x0): + B = x0.shape[0] + + # Serial blocks 1. + x1 = self.patch_embed1(x0) + H1, W1 = self.patch_embed1.grid_size + x1 = self.insert_cls(x1, self.cls_token1) + for blk in self.serial_blocks1: + x1 = blk(x1, size=(H1, W1)) + x1_nocls = self.remove_cls(x1) + x1_nocls = x1_nocls.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 2. + x2 = self.patch_embed2(x1_nocls) + H2, W2 = self.patch_embed2.grid_size + x2 = self.insert_cls(x2, self.cls_token2) + for blk in self.serial_blocks2: + x2 = blk(x2, size=(H2, W2)) + x2_nocls = self.remove_cls(x2) + x2_nocls = x2_nocls.reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 3. + x3 = self.patch_embed3(x2_nocls) + H3, W3 = self.patch_embed3.grid_size + x3 = self.insert_cls(x3, self.cls_token3) + for blk in self.serial_blocks3: + x3 = blk(x3, size=(H3, W3)) + x3_nocls = self.remove_cls(x3) + x3_nocls = x3_nocls.reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 4. + x4 = self.patch_embed4(x3_nocls) + H4, W4 = self.patch_embed4.grid_size + x4 = self.insert_cls(x4, self.cls_token4) + for blk in self.serial_blocks4: + x4 = blk(x4, size=(H4, W4)) + x4_nocls = self.remove_cls(x4) + x4_nocls = x4_nocls.reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous() + + # Only serial blocks: Early return. + if self.parallel_blocks is None: + if not torch.jit.is_scripting() and self.return_interm_layers: + # Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2). + feat_out = {} + if 'x1_nocls' in self.out_features: + feat_out['x1_nocls'] = x1_nocls + if 'x2_nocls' in self.out_features: + feat_out['x2_nocls'] = x2_nocls + if 'x3_nocls' in self.out_features: + feat_out['x3_nocls'] = x3_nocls + if 'x4_nocls' in self.out_features: + feat_out['x4_nocls'] = x4_nocls + return feat_out + else: + # Return features for classification. + x4 = self.norm4(x4) + x4_cls = x4[:, 0] + return x4_cls + + # Parallel blocks. + for blk in self.parallel_blocks: + x2, x3, x4 = self.cpe2(x2, (H2, W2)), self.cpe3(x3, (H3, W3)), self.cpe4(x4, (H4, W4)) + x1, x2, x3, x4 = blk(x1, x2, x3, x4, sizes=[(H1, W1), (H2, W2), (H3, W3), (H4, W4)]) + + if not torch.jit.is_scripting() and self.return_interm_layers: + # Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2). 
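+            # Each requested scale is returned as an NCHW feature map with its CLS token stripped.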
+ feat_out = {} + if 'x1_nocls' in self.out_features: + x1_nocls = self.remove_cls(x1) + x1_nocls = x1_nocls.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x1_nocls'] = x1_nocls + if 'x2_nocls' in self.out_features: + x2_nocls = self.remove_cls(x2) + x2_nocls = x2_nocls.reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x2_nocls'] = x2_nocls + if 'x3_nocls' in self.out_features: + x3_nocls = self.remove_cls(x3) + x3_nocls = x3_nocls.reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x3_nocls'] = x3_nocls + if 'x4_nocls' in self.out_features: + x4_nocls = self.remove_cls(x4) + x4_nocls = x4_nocls.reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x4_nocls'] = x4_nocls + return feat_out + else: + x2 = self.norm2(x2) + x3 = self.norm3(x3) + x4 = self.norm4(x4) + x2_cls = x2[:, :1] # [B, 1, C] + x3_cls = x3[:, :1] + x4_cls = x4[:, :1] + merged_cls = torch.cat((x2_cls, x3_cls, x4_cls), dim=1) # [B, 3, C] + merged_cls = self.aggregate(merged_cls).squeeze(dim=1) # Shape: [B, C] + return merged_cls + + def forward(self, x): + if self.return_interm_layers: + # Return intermediate features (for down-stream tasks). + return self.forward_features(x) + else: + # Return features for classification. + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + out_dict = {} + for k, v in state_dict.items(): + # original model had unused norm layers, removing them requires filtering pretrained checkpoints + if k.startswith('norm1') or \ + (model.norm2 is None and k.startswith('norm2')) or \ + (model.norm3 is None and k.startswith('norm3')): + continue + out_dict[k] = v + return out_dict + + +def _create_coat(variant, pretrained=False, default_cfg=None, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + CoaT, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def coat_tiny(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[152, 152, 152, 152], serial_depths=[2, 2, 2, 2], parallel_depth=6, + num_heads=8, mlp_ratios=[4, 4, 4, 4], **kwargs) + model = _create_coat('coat_tiny', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_mini(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[152, 216, 216, 216], serial_depths=[2, 2, 2, 2], parallel_depth=6, + num_heads=8, mlp_ratios=[4, 4, 4, 4], **kwargs) + model = _create_coat('coat_mini', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_lite_tiny(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[64, 128, 256, 320], serial_depths=[2, 2, 2, 2], parallel_depth=0, + num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs) + model = _create_coat('coat_lite_tiny', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_lite_mini(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[2, 2, 2, 2], parallel_depth=0, + num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs) + model = _create_coat('coat_lite_mini', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_lite_small(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[3, 4, 6, 3], 
parallel_depth=0, + num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs) + model = _create_coat('coat_lite_small', pretrained=pretrained, **model_cfg) + return model \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/convit.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/convit.py new file mode 100644 index 0000000000..f58249ec97 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/convit.py @@ -0,0 +1,349 @@ +""" ConViT Model + +@article{d2021convit, + title={ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases}, + author={d'Ascoli, St{\'e}phane and Touvron, Hugo and Leavitt, Matthew and Morcos, Ari and Biroli, Giulio and Sagun, Levent}, + journal={arXiv preprint arXiv:2103.10697}, + year={2021} +} + +Paper link: https://arxiv.org/abs/2103.10697 +Original code: https://github.com/facebookresearch/convit, original copyright below +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. +# +# This source code is licensed under the CC-by-NC license found in the +# LICENSE file in the root directory of this source tree. +# +'''These modules are adapted from those of timm, see +https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +''' + +import torch +import torch.nn as nn +from functools import partial +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import DropPath, to_2tuple, trunc_normal_, PatchEmbed, Mlp +from .registry import register_model +from .vision_transformer_hybrid import HybridEmbed + +import torch +import torch.nn as nn + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # ConViT + 'convit_tiny': _cfg( + url="https://dl.fbaipublicfiles.com/convit/convit_tiny.pth"), + 'convit_small': _cfg( + url="https://dl.fbaipublicfiles.com/convit/convit_small.pth"), + 'convit_base': _cfg( + url="https://dl.fbaipublicfiles.com/convit/convit_base.pth") +} + + +class GPSA(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., + locality_strength=1.): + super().__init__() + self.num_heads = num_heads + self.dim = dim + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + self.locality_strength = locality_strength + + self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.pos_proj = nn.Linear(3, num_heads) + self.proj_drop = nn.Dropout(proj_drop) + self.gating_param = nn.Parameter(torch.ones(self.num_heads)) + self.rel_indices: torch.Tensor = torch.zeros(1, 1, 1, 3) # silly torchscript hack, won't work with None + + def forward(self, x): + B, N, C = x.shape + if self.rel_indices is None or self.rel_indices.shape[1] != N: + self.rel_indices = self.get_rel_indices(N) + attn = self.get_attention(x) + v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def get_attention(self, x): + B, N, C = x.shape + qk = self.qk(x).reshape(B, N, 2, self.num_heads, C // 
self.num_heads).permute(2, 0, 3, 1, 4) + q, k = qk[0], qk[1] + pos_score = self.rel_indices.expand(B, -1, -1, -1) + pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2) + patch_score = (q @ k.transpose(-2, -1)) * self.scale + patch_score = patch_score.softmax(dim=-1) + pos_score = pos_score.softmax(dim=-1) + + gating = self.gating_param.view(1, -1, 1, 1) + attn = (1. - torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score + attn /= attn.sum(dim=-1).unsqueeze(-1) + attn = self.attn_drop(attn) + return attn + + def get_attention_map(self, x, return_map=False): + attn_map = self.get_attention(x).mean(0) # average over batch + distances = self.rel_indices.squeeze()[:, :, -1] ** .5 + dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / distances.size(0) + if return_map: + return dist, attn_map + else: + return dist + + def local_init(self): + self.v.weight.data.copy_(torch.eye(self.dim)) + locality_distance = 1 # max(1,1/locality_strength**.5) + + kernel_size = int(self.num_heads ** .5) + center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2 + for h1 in range(kernel_size): + for h2 in range(kernel_size): + position = h1 + kernel_size * h2 + self.pos_proj.weight.data[position, 2] = -1 + self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance + self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance + self.pos_proj.weight.data *= self.locality_strength + + def get_rel_indices(self, num_patches: int) -> torch.Tensor: + img_size = int(num_patches ** .5) + rel_indices = torch.zeros(1, num_patches, num_patches, 3) + ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) + indx = ind.repeat(img_size, img_size) + indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) + indd = indx ** 2 + indy ** 2 + rel_indices[:, :, :, 2] = indd.unsqueeze(0) + rel_indices[:, :, :, 1] = indy.unsqueeze(0) + rel_indices[:, :, :, 0] = indx.unsqueeze(0) + device = self.qk.weight.device + return rel_indices.to(device) + + +class MHSA(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def get_attention_map(self, x, return_map=False): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + attn_map = (q @ k.transpose(-2, -1)) * self.scale + attn_map = attn_map.softmax(dim=-1).mean(0) + + img_size = int(N ** .5) + ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1) + indx = ind.repeat(img_size, img_size) + indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1) + indd = indx ** 2 + indy ** 2 + distances = indd ** .5 + distances = distances.to('cuda') + + dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / N + if return_map: + return dist, attn_map + else: + return dist + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x 
= self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs): + super().__init__() + self.norm1 = norm_layer(dim) + self.use_gpsa = use_gpsa + if self.use_gpsa: + self.attn = GPSA( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, **kwargs) + else: + self.attn = MHSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ConViT(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm, global_pool=None, + local_up_to_layer=3, locality_strength=1., use_pos_embed=True): + super().__init__() + embed_dim *= num_heads + self.num_classes = num_classes + self.local_up_to_layer = local_up_to_layer + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.locality_strength = locality_strength + self.use_pos_embed = use_pos_embed + + if hybrid_backbone is not None: + self.patch_embed = HybridEmbed( + hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) + else: + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + self.num_patches = num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + if self.use_pos_embed: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.pos_embed, std=.02) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + use_gpsa=True, + locality_strength=locality_strength) + if i < local_up_to_layer else + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + use_gpsa=False) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + # Classifier head + self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')] + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + for n, m in self.named_modules(): + if hasattr(m, 'local_init'): + m.local_init() + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + 
nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand(B, -1, -1) + + if self.use_pos_embed: + x = x + self.pos_embed + x = self.pos_drop(x) + + for u, blk in enumerate(self.blocks): + if u == self.local_up_to_layer: + x = torch.cat((cls_tokens, x), dim=1) + x = blk(x) + + x = self.norm(x) + return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_convit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + return build_model_with_cfg( + ConViT, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def convit_tiny(pretrained=False, **kwargs): + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, + num_heads=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model = _create_convit(variant='convit_tiny', pretrained=pretrained, **model_args) + return model + + +@register_model +def convit_small(pretrained=False, **kwargs): + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, + num_heads=9, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model = _create_convit(variant='convit_small', pretrained=pretrained, **model_args) + return model + + +@register_model +def convit_base(pretrained=False, **kwargs): + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, + num_heads=16, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model = _create_convit(variant='convit_base', pretrained=pretrained, **model_args) + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/convmixer.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/convmixer.py new file mode 100644 index 0000000000..45c347e77f --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/convmixer.py @@ -0,0 +1,27 @@ +import sys +sys.path.append('../../../') +from convmixer import ConvMixer +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.models.registry import register_model + + +_cfg = { + 'url': '', + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .96, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head' +} + + +@register_model +def convmixer_1536_20(pretrained=False, **kwargs): + model = ConvMixer(1536, 20, kernel_size=9, patch_size=7, n_classes=1000) + model.default_cfg = _cfg + return model + + +@register_model +def convmixer_768_32(pretrained=False, **kwargs): + model = ConvMixer(768, 32, kernel_size=7, patch_size=7, n_classes=1000) + model.default_cfg = _cfg + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/crossvit.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/crossvit.py new file mode 100644 index 0000000000..6e0160f9e6 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/crossvit.py @@ -0,0 +1,497 @@ +""" CrossViT Model + +@inproceedings{ + chen2021crossvit, + 
title={{CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification}}, + author={Chun-Fu (Richard) Chen and Quanfu Fan and Rameswar Panda}, + booktitle={International Conference on Computer Vision (ICCV)}, + year={2021} +} + +Paper link: https://arxiv.org/abs/2103.14899 +Original code: https://github.com/IBM/CrossViT/blob/main/models/crossvit.py + +NOTE: model names have been renamed from originals to represent actual input res all *_224 -> *_240 and *_384 -> *_408 +""" + +# Copyright IBM All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + + +""" +Modifed from Timm. https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.hub +from functools import partial +from typing import List + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import DropPath, to_2tuple, trunc_normal_ +from .registry import register_model +from .vision_transformer import Mlp, Block + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 240, 240), 'pool_size': None, 'crop_pct': 0.875, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, + 'first_conv': ('patch_embed.0.proj', 'patch_embed.1.proj'), + 'classifier': ('head.0', 'head.1'), + **kwargs + } + + +default_cfgs = { + 'crossvit_15_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_224.pth'), + 'crossvit_15_dagger_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_dagger_224.pth', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_15_dagger_408': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_dagger_384.pth', + input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0, + ), + 'crossvit_18_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_224.pth'), + 'crossvit_18_dagger_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_dagger_224.pth', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_18_dagger_408': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_dagger_384.pth', + input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0, + ), + 'crossvit_9_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_9_224.pth'), + 'crossvit_9_dagger_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_9_dagger_224.pth', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_base_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_base_224.pth'), + 'crossvit_small_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_small_224.pth'), + 'crossvit_tiny_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_tiny_224.pth'), +} + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, multi_conv=False): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = 
(img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + if multi_conv: + if patch_size[0] == 12: + self.proj = nn.Sequential( + nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=3, padding=0), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1), + ) + elif patch_size[0] == 16: + self.proj = nn.Sequential( + nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=2, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1), + ) + else: + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class CrossAttention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + + self.wq = nn.Linear(dim, dim, bias=qkv_bias) + self.wk = nn.Linear(dim, dim, bias=qkv_bias) + self.wv = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + # B1C -> B1H(C/H) -> BH1(C/H) + q = self.wq(x[:, 0:1, ...]).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + # BNC -> BNH(C/H) -> BHN(C/H) + k = self.wk(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + # BNC -> BNH(C/H) -> BHN(C/H) + v = self.wv(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale # BH1(C/H) @ BH(C/H)N -> BH1N + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, 1, C) # (BH1N @ BHN(C/H)) -> BH1(C/H) -> B1H(C/H) -> B1C + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class CrossAttentionBlock(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = CrossAttention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + x = x[:, 0:1, ...] 
+ self.drop_path(self.attn(self.norm1(x))) + + return x + + +class MultiScaleBlock(nn.Module): + + def __init__(self, dim, patches, depth, num_heads, mlp_ratio, qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + + num_branches = len(dim) + self.num_branches = num_branches + # different branch could have different embedding size, the first one is the base + self.blocks = nn.ModuleList() + for d in range(num_branches): + tmp = [] + for i in range(depth[d]): + tmp.append(Block( + dim=dim[d], num_heads=num_heads[d], mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, + drop=drop, attn_drop=attn_drop, drop_path=drop_path[i], norm_layer=norm_layer)) + if len(tmp) != 0: + self.blocks.append(nn.Sequential(*tmp)) + + if len(self.blocks) == 0: + self.blocks = None + + self.projs = nn.ModuleList() + for d in range(num_branches): + if dim[d] == dim[(d + 1) % num_branches] and False: + tmp = [nn.Identity()] + else: + tmp = [norm_layer(dim[d]), act_layer(), nn.Linear(dim[d], dim[(d + 1) % num_branches])] + self.projs.append(nn.Sequential(*tmp)) + + self.fusion = nn.ModuleList() + for d in range(num_branches): + d_ = (d + 1) % num_branches + nh = num_heads[d_] + if depth[-1] == 0: # backward capability: + self.fusion.append( + CrossAttentionBlock( + dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, + drop=drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer)) + else: + tmp = [] + for _ in range(depth[-1]): + tmp.append(CrossAttentionBlock( + dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, + drop=drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer)) + self.fusion.append(nn.Sequential(*tmp)) + + self.revert_projs = nn.ModuleList() + for d in range(num_branches): + if dim[(d + 1) % num_branches] == dim[d] and False: + tmp = [nn.Identity()] + else: + tmp = [norm_layer(dim[(d + 1) % num_branches]), act_layer(), + nn.Linear(dim[(d + 1) % num_branches], dim[d])] + self.revert_projs.append(nn.Sequential(*tmp)) + + def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: + + outs_b = [] + for i, block in enumerate(self.blocks): + outs_b.append(block(x[i])) + + # only take the cls token out + proj_cls_token = torch.jit.annotate(List[torch.Tensor], []) + for i, proj in enumerate(self.projs): + proj_cls_token.append(proj(outs_b[i][:, 0:1, ...])) + + # cross attention + outs = [] + for i, (fusion, revert_proj) in enumerate(zip(self.fusion, self.revert_projs)): + tmp = torch.cat((proj_cls_token[i], outs_b[(i + 1) % self.num_branches][:, 1:, ...]), dim=1) + tmp = fusion(tmp) + reverted_proj_cls_token = revert_proj(tmp[:, 0:1, ...]) + tmp = torch.cat((reverted_proj_cls_token, outs_b[i][:, 1:, ...]), dim=1) + outs.append(tmp) + return outs + + +def _compute_num_patches(img_size, patches): + return [i[0] // p * i[1] // p for i, p in zip(img_size, patches)] + + +class CrossViT(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__( + self, img_size=224, img_scale=(1.0, 1.0), patch_size=(8, 16), in_chans=3, num_classes=1000, + embed_dim=(192, 384), depth=((1, 3, 1), (1, 3, 1), (1, 3, 1)), num_heads=(6, 12), mlp_ratio=(2., 2., 4.), + qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), multi_conv=False, crop_scale=False, + ): + super().__init__() + + self.num_classes = num_classes + self.img_size = to_2tuple(img_size) + img_scale = to_2tuple(img_scale) + 
self.img_size_scaled = [tuple([int(sj * si) for sj in self.img_size]) for si in img_scale] + self.crop_scale = crop_scale # crop instead of interpolate for scale + num_patches = _compute_num_patches(self.img_size_scaled, patch_size) + self.num_branches = len(patch_size) + self.embed_dim = embed_dim + self.num_features = embed_dim[0] # to pass the tests + self.patch_embed = nn.ModuleList() + + # hard-coded for torch jit script + for i in range(self.num_branches): + setattr(self, f'pos_embed_{i}', nn.Parameter(torch.zeros(1, 1 + num_patches[i], embed_dim[i]))) + setattr(self, f'cls_token_{i}', nn.Parameter(torch.zeros(1, 1, embed_dim[i]))) + + for im_s, p, d in zip(self.img_size_scaled, patch_size, embed_dim): + self.patch_embed.append( + PatchEmbed(img_size=im_s, patch_size=p, in_chans=in_chans, embed_dim=d, multi_conv=multi_conv)) + + self.pos_drop = nn.Dropout(p=drop_rate) + + total_depth = sum([sum(x[-2:]) for x in depth]) + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)] # stochastic depth decay rule + dpr_ptr = 0 + self.blocks = nn.ModuleList() + for idx, block_cfg in enumerate(depth): + curr_depth = max(block_cfg[:-1]) + block_cfg[-1] + dpr_ = dpr[dpr_ptr:dpr_ptr + curr_depth] + blk = MultiScaleBlock( + embed_dim, num_patches, block_cfg, num_heads=num_heads, mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr_, norm_layer=norm_layer) + dpr_ptr += curr_depth + self.blocks.append(blk) + + self.norm = nn.ModuleList([norm_layer(embed_dim[i]) for i in range(self.num_branches)]) + self.head = nn.ModuleList([ + nn.Linear(embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() + for i in range(self.num_branches)]) + + for i in range(self.num_branches): + trunc_normal_(getattr(self, f'pos_embed_{i}'), std=.02) + trunc_normal_(getattr(self, f'cls_token_{i}'), std=.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + out = set() + for i in range(self.num_branches): + out.add(f'cls_token_{i}') + pe = getattr(self, f'pos_embed_{i}', None) + if pe is not None and pe.requires_grad: + out.add(f'pos_embed_{i}') + return out + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.ModuleList( + [nn.Linear(self.embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() for i in + range(self.num_branches)]) + + def forward_features(self, x): + B, C, H, W = x.shape + xs = [] + for i, patch_embed in enumerate(self.patch_embed): + x_ = x + ss = self.img_size_scaled[i] + if H != ss[0] or W != ss[1]: + if self.crop_scale and ss[0] <= H and ss[1] <= W: + cu, cl = int(round((H - ss[0]) / 2.)), int(round((W - ss[1]) / 2.)) + x_ = x_[:, :, cu:cu + ss[0], cl:cl + ss[1]] + else: + x_ = torch.nn.functional.interpolate(x_, size=ss, mode='bicubic', align_corners=False) + x_ = patch_embed(x_) + cls_tokens = self.cls_token_0 if i == 0 else self.cls_token_1 # hard-coded for torch jit script + cls_tokens = cls_tokens.expand(B, -1, -1) + x_ = torch.cat((cls_tokens, x_), dim=1) + pos_embed = self.pos_embed_0 if i == 0 else self.pos_embed_1 # hard-coded for torch jit script + x_ = x_ + pos_embed + x_ = self.pos_drop(x_) + 
xs.append(x_) + + for i, blk in enumerate(self.blocks): + xs = blk(xs) + + # NOTE: was before branch token section, move to here to assure all branch token are before layer norm + xs = [norm(xs[i]) for i, norm in enumerate(self.norm)] + return [xo[:, 0] for xo in xs] + + def forward(self, x): + xs = self.forward_features(x) + ce_logits = [head(xs[i]) for i, head in enumerate(self.head)] + if not isinstance(self.head[0], nn.Identity): + ce_logits = torch.mean(torch.stack(ce_logits, dim=0), dim=0) + return ce_logits + + +def _create_crossvit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + def pretrained_filter_fn(state_dict): + new_state_dict = {} + for key in state_dict.keys(): + if 'pos_embed' in key or 'cls_token' in key: + new_key = key.replace(".", "_") + else: + new_key = key + new_state_dict[new_key] = state_dict[key] + return new_state_dict + + return build_model_with_cfg( + CrossViT, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=pretrained_filter_fn, + **kwargs) + + +@register_model +def crossvit_tiny_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[96, 192], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[3, 3], mlp_ratio=[4, 4, 1], **kwargs) + model = _create_crossvit(variant='crossvit_tiny_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_small_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[6, 6], mlp_ratio=[4, 4, 1], **kwargs) + model = _create_crossvit(variant='crossvit_small_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_base_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[384, 768], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[12, 12], mlp_ratio=[4, 4, 1], **kwargs) + model = _create_crossvit(variant='crossvit_base_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_9_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], + num_heads=[4, 4], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_9_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_15_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_18_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_9_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], + num_heads=[4, 4], 
mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_9_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_15_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_dagger_408(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_15_dagger_408', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_18_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_dagger_408(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_18_dagger_408', pretrained=pretrained, **model_args) + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/cspnet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/cspnet.py new file mode 100644 index 0000000000..39d16200f8 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/cspnet.py @@ -0,0 +1,457 @@ +"""PyTorch CspNet + +A PyTorch implementation of Cross Stage Partial Networks including: +* CSPResNet50 +* CSPResNeXt50 +* CSPDarkNet53 +* and DarkNet53 for good measure + +Based on paper `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 + +Reference impl via darknet cfg files at https://github.com/WongKinYiu/CrossStagePartialNetworks + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, ConvBnAct, DropPath, create_attn, get_norm_act_layer +from .registry import register_model + + +__all__ = ['CspNet'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': 0.887, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + 'cspresnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth'), + 'cspresnet50d': _cfg(url=''), + 'cspresnet50w': _cfg(url=''), + 'cspresnext50': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth', + input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.875 # FIXME I trained this at 224x224, not 256 like ref impl + ), + 'cspresnext50_iabn': _cfg(url=''), + 'cspdarknet53': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth'), + 'cspdarknet53_iabn': _cfg(url=''), + 'darknet53': _cfg(url=''), +} + + +model_cfgs = dict( + cspresnet50=dict( + stem=dict(out_chs=64, kernel_size=7, stride=2, pool='max'), + stage=dict( + out_chs=(128, 256, 512, 1024), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + exp_ratio=(2.,) * 4, + bottle_ratio=(0.5,) * 4, + block_ratio=(1.,) * 4, + cross_linear=True, + ) + ), + cspresnet50d=dict( + stem=dict(out_chs=[32, 32, 64], kernel_size=3, stride=2, pool='max'), + stage=dict( + out_chs=(128, 256, 512, 1024), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + exp_ratio=(2.,) * 4, + bottle_ratio=(0.5,) * 4, + block_ratio=(1.,) * 4, + cross_linear=True, + ) + ), + cspresnet50w=dict( + stem=dict(out_chs=[32, 32, 64], kernel_size=3, stride=2, pool='max'), + stage=dict( + out_chs=(256, 512, 1024, 2048), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + exp_ratio=(1.,) * 4, + bottle_ratio=(0.25,) * 4, + block_ratio=(0.5,) * 4, + cross_linear=True, + ) + ), + cspresnext50=dict( + stem=dict(out_chs=64, kernel_size=7, stride=2, pool='max'), + stage=dict( + out_chs=(256, 512, 1024, 2048), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + groups=(32,) * 4, + exp_ratio=(1.,) * 4, + bottle_ratio=(1.,) * 4, + block_ratio=(0.5,) * 4, + cross_linear=True, + ) + ), + cspdarknet53=dict( + stem=dict(out_chs=32, kernel_size=3, stride=1, pool=''), + stage=dict( + out_chs=(64, 128, 256, 512, 1024), + depth=(1, 2, 8, 8, 4), + stride=(2,) * 5, + exp_ratio=(2.,) + (1.,) * 4, + bottle_ratio=(0.5,) + (1.0,) * 4, + block_ratio=(1.,) + (0.5,) * 4, + down_growth=True, + ) + ), + darknet53=dict( + stem=dict(out_chs=32, kernel_size=3, stride=1, pool=''), + stage=dict( + out_chs=(64, 128, 256, 512, 1024), + depth=(1, 2, 8, 8, 4), + stride=(2,) * 5, + bottle_ratio=(0.5,) * 5, + block_ratio=(1.,) * 5, + ) + ) +) + + +def create_stem( + in_chans=3, out_chs=32, kernel_size=3, stride=2, pool='', + act_layer=None, norm_layer=None, aa_layer=None): + stem = nn.Sequential() + if not isinstance(out_chs, (tuple, list)): + out_chs = [out_chs] + assert len(out_chs) + in_c = in_chans + for i, out_c in enumerate(out_chs): + conv_name = f'conv{i + 1}' + stem.add_module(conv_name, ConvBnAct( + in_c, out_c, kernel_size, stride=stride if i == 0 else 1, + act_layer=act_layer, norm_layer=norm_layer)) + in_c = out_c + last_conv = conv_name + if pool: + if aa_layer is not None: + stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) + stem.add_module('aa', aa_layer(channels=in_c, stride=2)) + else: + stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + return stem, dict(num_chs=in_c, reduction=stride, module='.'.join(['stem', last_conv])) + + +class ResBottleneck(nn.Module): + """ ResNe(X)t Bottleneck Block + """ + + def __init__(self, in_chs, out_chs, dilation=1, bottle_ratio=0.25, groups=1, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_last=False, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(ResBottleneck, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer, 
aa_layer=aa_layer, drop_block=drop_block) + + self.conv1 = ConvBnAct(in_chs, mid_chs, kernel_size=1, **ckwargs) + self.conv2 = ConvBnAct(mid_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups, **ckwargs) + self.attn2 = create_attn(attn_layer, channels=mid_chs) if not attn_last else None + self.conv3 = ConvBnAct(mid_chs, out_chs, kernel_size=1, apply_act=False, **ckwargs) + self.attn3 = create_attn(attn_layer, channels=out_chs) if attn_last else None + self.drop_path = drop_path + self.act3 = act_layer(inplace=True) + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.attn2 is not None: + x = self.attn2(x) + x = self.conv3(x) + if self.attn3 is not None: + x = self.attn3(x) + if self.drop_path is not None: + x = self.drop_path(x) + x = x + shortcut + # FIXME partial shortcut needed if first block handled as per original, not used for my current impl + #x[:, :shortcut.size(1)] += shortcut + x = self.act3(x) + return x + + +class DarkBlock(nn.Module): + """ DarkNet Block + """ + + def __init__(self, in_chs, out_chs, dilation=1, bottle_ratio=0.5, groups=1, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, + drop_block=None, drop_path=None): + super(DarkBlock, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block=drop_block) + self.conv1 = ConvBnAct(in_chs, mid_chs, kernel_size=1, **ckwargs) + self.conv2 = ConvBnAct(mid_chs, out_chs, kernel_size=3, dilation=dilation, groups=groups, **ckwargs) + self.attn = create_attn(attn_layer, channels=out_chs) + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv2.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.attn is not None: + x = self.attn(x) + if self.drop_path is not None: + x = self.drop_path(x) + x = x + shortcut + return x + + +class CrossStage(nn.Module): + """Cross Stage.""" + def __init__(self, in_chs, out_chs, stride, dilation, depth, block_ratio=1., bottle_ratio=1., exp_ratio=1., + groups=1, first_dilation=None, down_growth=False, cross_linear=False, block_dpr=None, + block_fn=ResBottleneck, **block_kwargs): + super(CrossStage, self).__init__() + first_dilation = first_dilation or dilation + down_chs = out_chs if down_growth else in_chs # grow downsample channels to output channels + exp_chs = int(round(out_chs * exp_ratio)) + block_out_chs = int(round(out_chs * block_ratio)) + conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) + + if stride != 1 or first_dilation != dilation: + self.conv_down = ConvBnAct( + in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, + aa_layer=block_kwargs.get('aa_layer', None), **conv_kwargs) + prev_chs = down_chs + else: + self.conv_down = None + prev_chs = in_chs + + # FIXME this 1x1 expansion is pushed down into the cross and block paths in the darknet cfgs. Also, + # there is also special case for the first stage for some of the model that results in uneven split + # across the two paths. I did it this way for simplicity for now. 
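+ # The 1x1 conv below widens the input to exp_chs; forward() then splits that tensor
+ # channel-wise into two equal halves, routes only the second half through self.blocks
+ # (followed by conv_transition_b), and fuses both halves with conv_transition, i.e. the
+ # "partial" routing that gives CSP its name. Worked example (first cspresnet50 stage):
+ # out_chs=128 with exp_ratio=2.0 gives exp_chs=256, so each of the two paths carries 128 channels.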
+ self.conv_exp = ConvBnAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs) + prev_chs = exp_chs // 2 # output of conv_exp is always split in two + + self.blocks = nn.Sequential() + for i in range(depth): + drop_path = DropPath(block_dpr[i]) if block_dpr and block_dpr[i] else None + self.blocks.add_module(str(i), block_fn( + prev_chs, block_out_chs, dilation, bottle_ratio, groups, drop_path=drop_path, **block_kwargs)) + prev_chs = block_out_chs + + # transition convs + self.conv_transition_b = ConvBnAct(prev_chs, exp_chs // 2, kernel_size=1, **conv_kwargs) + self.conv_transition = ConvBnAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs) + + def forward(self, x): + if self.conv_down is not None: + x = self.conv_down(x) + x = self.conv_exp(x) + split = x.shape[1] // 2 + xs, xb = x[:, :split], x[:, split:] + xb = self.blocks(xb) + xb = self.conv_transition_b(xb).contiguous() + out = self.conv_transition(torch.cat([xs, xb], dim=1)) + return out + + +class DarkStage(nn.Module): + """DarkNet stage.""" + + def __init__(self, in_chs, out_chs, stride, dilation, depth, block_ratio=1., bottle_ratio=1., groups=1, + first_dilation=None, block_fn=ResBottleneck, block_dpr=None, **block_kwargs): + super(DarkStage, self).__init__() + first_dilation = first_dilation or dilation + + self.conv_down = ConvBnAct( + in_chs, out_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, + act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer'), + aa_layer=block_kwargs.get('aa_layer', None)) + + prev_chs = out_chs + block_out_chs = int(round(out_chs * block_ratio)) + self.blocks = nn.Sequential() + for i in range(depth): + drop_path = DropPath(block_dpr[i]) if block_dpr and block_dpr[i] else None + self.blocks.add_module(str(i), block_fn( + prev_chs, block_out_chs, dilation, bottle_ratio, groups, drop_path=drop_path, **block_kwargs)) + prev_chs = block_out_chs + + def forward(self, x): + x = self.conv_down(x) + x = self.blocks(x) + return x + + +def _cfg_to_stage_args(cfg, curr_stride=2, output_stride=32, drop_path_rate=0.): + # get per stage args for stage and containing blocks, calculate strides to meet target output_stride + num_stages = len(cfg['depth']) + if 'groups' not in cfg: + cfg['groups'] = (1,) * num_stages + if 'down_growth' in cfg and not isinstance(cfg['down_growth'], (list, tuple)): + cfg['down_growth'] = (cfg['down_growth'],) * num_stages + if 'cross_linear' in cfg and not isinstance(cfg['cross_linear'], (list, tuple)): + cfg['cross_linear'] = (cfg['cross_linear'],) * num_stages + cfg['block_dpr'] = [None] * num_stages if not drop_path_rate else \ + [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg['depth'])).split(cfg['depth'])] + stage_strides = [] + stage_dilations = [] + stage_first_dilations = [] + dilation = 1 + for cfg_stride in cfg['stride']: + stage_first_dilations.append(dilation) + if curr_stride >= output_stride: + dilation *= cfg_stride + stride = 1 + else: + stride = cfg_stride + curr_stride *= stride + stage_strides.append(stride) + stage_dilations.append(dilation) + cfg['stride'] = stage_strides + cfg['dilation'] = stage_dilations + cfg['first_dilation'] = stage_first_dilations + stage_args = [dict(zip(cfg.keys(), values)) for values in zip(*cfg.values())] + return stage_args + + +class CspNet(nn.Module): + """Cross Stage Partial base model. 
+ + Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 + Ref Impl: https://github.com/WongKinYiu/CrossStagePartialNetworks + + NOTE: There are differences in the way I handle the 1x1 'expansion' conv in this impl vs the + darknet impl. I did it this way for simplicity and less special cases. + """ + + def __init__(self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0., + act_layer=nn.LeakyReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_path_rate=0., + zero_init_last_bn=True, stage_fn=CrossStage, block_fn=ResBottleneck): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + layer_args = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer) + + # Construct the stem + self.stem, stem_feat_info = create_stem(in_chans, **cfg['stem'], **layer_args) + self.feature_info = [stem_feat_info] + prev_chs = stem_feat_info['num_chs'] + curr_stride = stem_feat_info['reduction'] # reduction does not include pool + if cfg['stem']['pool']: + curr_stride *= 2 + + # Construct the stages + per_stage_args = _cfg_to_stage_args( + cfg['stage'], curr_stride=curr_stride, output_stride=output_stride, drop_path_rate=drop_path_rate) + self.stages = nn.Sequential() + for i, sa in enumerate(per_stage_args): + self.stages.add_module( + str(i), stage_fn(prev_chs, **sa, **layer_args, block_fn=block_fn)) + prev_chs = sa['out_chs'] + curr_stride *= sa['stride'] + self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')] + + # Construct the head + self.num_features = prev_chs + self.head = ClassifierHead( + in_chs=prev_chs, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, mean=0.0, std=0.01) + nn.init.zeros_(m.bias) + if zero_init_last_bn: + for m in self.modules(): + if hasattr(m, 'zero_init_last_bn'): + m.zero_init_last_bn() + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_cspnet(variant, pretrained=False, **kwargs): + cfg_variant = variant.split('_')[0] + return build_model_with_cfg( + CspNet, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), model_cfg=model_cfgs[cfg_variant], + **kwargs) + + +@register_model +def cspresnet50(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnet50d(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50d', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnet50w(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50w', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnext50(pretrained=False, **kwargs): + return _create_cspnet('cspresnext50', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnext50_iabn(pretrained=False, 
**kwargs): + norm_layer = get_norm_act_layer('iabn') + return _create_cspnet('cspresnext50_iabn', pretrained=pretrained, norm_layer=norm_layer, **kwargs) + + +@register_model +def cspdarknet53(pretrained=False, **kwargs): + return _create_cspnet('cspdarknet53', pretrained=pretrained, block_fn=DarkBlock, **kwargs) + + +@register_model +def cspdarknet53_iabn(pretrained=False, **kwargs): + norm_layer = get_norm_act_layer('iabn') + return _create_cspnet('cspdarknet53_iabn', pretrained=pretrained, block_fn=DarkBlock, norm_layer=norm_layer, **kwargs) + + +@register_model +def darknet53(pretrained=False, **kwargs): + return _create_cspnet('darknet53', pretrained=pretrained, block_fn=DarkBlock, stage_fn=DarkStage, **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/densenet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/densenet.py new file mode 100644 index 0000000000..38a1972787 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/densenet.py @@ -0,0 +1,387 @@ +"""Pytorch Densenet implementation w/ tweaks +This file is a copy of https://github.com/pytorch/vision 'densenet.py' (BSD-3-Clause) with +fixed kwargs passthrough and addition of dynamic global avg/max pool. +""" +import re +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from torch.jit.annotations import List + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import BatchNormAct2d, create_norm_act, BlurPool2d, create_classifier +from .registry import register_model + +__all__ = ['DenseNet'] + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'features.conv0', 'classifier': 'classifier', + } + + +default_cfgs = { + 'densenet121': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenet121_ra-50efcf5c.pth'), + 'densenet121d': _cfg(url=''), + 'densenetblur121d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenetblur121d_ra-100dcfbc.pth'), + 'densenet169': _cfg(url='https://download.pytorch.org/models/densenet169-b2777c0a.pth'), + 'densenet201': _cfg(url='https://download.pytorch.org/models/densenet201-c1103571.pth'), + 'densenet161': _cfg(url='https://download.pytorch.org/models/densenet161-8d451a50.pth'), + 'densenet264': _cfg(url=''), + 'densenet264d_iabn': _cfg(url=''), + 'tv_densenet121': _cfg(url='https://download.pytorch.org/models/densenet121-a639ec97.pth'), +} + + +class DenseLayer(nn.Module): + def __init__(self, num_input_features, growth_rate, bn_size, norm_layer=BatchNormAct2d, + drop_rate=0., memory_efficient=False): + super(DenseLayer, self).__init__() + self.add_module('norm1', norm_layer(num_input_features)), + self.add_module('conv1', nn.Conv2d( + num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)), + self.add_module('norm2', norm_layer(bn_size * growth_rate)), + self.add_module('conv2', nn.Conv2d( + bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)), + self.drop_rate = float(drop_rate) + self.memory_efficient = memory_efficient + + def bottleneck_fn(self, xs): + # type: (List[torch.Tensor]) -> torch.Tensor + 
concated_features = torch.cat(xs, 1) + bottleneck_output = self.conv1(self.norm1(concated_features)) # noqa: T484 + return bottleneck_output + + # todo: rewrite when torchscript supports any + def any_requires_grad(self, x): + # type: (List[torch.Tensor]) -> bool + for tensor in x: + if tensor.requires_grad: + return True + return False + + @torch.jit.unused # noqa: T484 + def call_checkpoint_bottleneck(self, x): + # type: (List[torch.Tensor]) -> torch.Tensor + def closure(*xs): + return self.bottleneck_fn(xs) + + return cp.checkpoint(closure, *x) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (torch.Tensor) + pass + + # torchscript does not yet support *args, so we overload method + # allowing it to take either a List[Tensor] or single Tensor + def forward(self, x): # noqa: F811 + if isinstance(x, torch.Tensor): + prev_features = [x] + else: + prev_features = x + + if self.memory_efficient and self.any_requires_grad(prev_features): + if torch.jit.is_scripting(): + raise Exception("Memory Efficient not supported in JIT") + bottleneck_output = self.call_checkpoint_bottleneck(prev_features) + else: + bottleneck_output = self.bottleneck_fn(prev_features) + + new_features = self.conv2(self.norm2(bottleneck_output)) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) + return new_features + + +class DenseBlock(nn.ModuleDict): + _version = 2 + + def __init__(self, num_layers, num_input_features, bn_size, growth_rate, norm_layer=nn.ReLU, + drop_rate=0., memory_efficient=False): + super(DenseBlock, self).__init__() + for i in range(num_layers): + layer = DenseLayer( + num_input_features + i * growth_rate, + growth_rate=growth_rate, + bn_size=bn_size, + norm_layer=norm_layer, + drop_rate=drop_rate, + memory_efficient=memory_efficient, + ) + self.add_module('denselayer%d' % (i + 1), layer) + + def forward(self, init_features): + features = [init_features] + for name, layer in self.items(): + new_features = layer(features) + features.append(new_features) + return torch.cat(features, 1) + + +class DenseTransition(nn.Sequential): + def __init__(self, num_input_features, num_output_features, norm_layer=nn.BatchNorm2d, aa_layer=None): + super(DenseTransition, self).__init__() + self.add_module('norm', norm_layer(num_input_features)) + self.add_module('conv', nn.Conv2d( + num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) + if aa_layer is not None: + self.add_module('pool', aa_layer(num_output_features, stride=2)) + else: + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + + +class DenseNet(nn.Module): + r"""Densenet-BC model class, based on + `"Densely Connected Convolutional Networks" `_ + + Args: + growth_rate (int) - how many filters to add each layer (`k` in paper) + block_config (list of 4 ints) - how many layers in each pooling block + bn_size (int) - multiplicative factor for number of bottle neck layers + (i.e. bn_size * k features in the bottleneck layer) + drop_rate (float) - dropout rate after each dense layer + num_classes (int) - number of classification classes + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. 
See `"paper" `_ + """ + + def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), bn_size=4, stem_type='', + num_classes=1000, in_chans=3, global_pool='avg', + norm_layer=BatchNormAct2d, aa_layer=None, drop_rate=0, memory_efficient=False, + aa_stem_only=True): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(DenseNet, self).__init__() + + # Stem + deep_stem = 'deep' in stem_type # 3x3 deep stem + num_init_features = growth_rate * 2 + if aa_layer is None: + stem_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + else: + stem_pool = nn.Sequential(*[ + nn.MaxPool2d(kernel_size=3, stride=1, padding=1), + aa_layer(channels=num_init_features, stride=2)]) + if deep_stem: + stem_chs_1 = stem_chs_2 = growth_rate + if 'tiered' in stem_type: + stem_chs_1 = 3 * (growth_rate // 4) + stem_chs_2 = num_init_features if 'narrow' in stem_type else 6 * (growth_rate // 4) + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(in_chans, stem_chs_1, 3, stride=2, padding=1, bias=False)), + ('norm0', norm_layer(stem_chs_1)), + ('conv1', nn.Conv2d(stem_chs_1, stem_chs_2, 3, stride=1, padding=1, bias=False)), + ('norm1', norm_layer(stem_chs_2)), + ('conv2', nn.Conv2d(stem_chs_2, num_init_features, 3, stride=1, padding=1, bias=False)), + ('norm2', norm_layer(num_init_features)), + ('pool0', stem_pool), + ])) + else: + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(in_chans, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), + ('norm0', norm_layer(num_init_features)), + ('pool0', stem_pool), + ])) + self.feature_info = [ + dict(num_chs=num_init_features, reduction=2, module=f'features.norm{2 if deep_stem else 0}')] + current_stride = 4 + + # DenseBlocks + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = DenseBlock( + num_layers=num_layers, + num_input_features=num_features, + bn_size=bn_size, + growth_rate=growth_rate, + norm_layer=norm_layer, + drop_rate=drop_rate, + memory_efficient=memory_efficient + ) + module_name = f'denseblock{(i + 1)}' + self.features.add_module(module_name, block) + num_features = num_features + num_layers * growth_rate + transition_aa_layer = None if aa_stem_only else aa_layer + if i != len(block_config) - 1: + self.feature_info += [ + dict(num_chs=num_features, reduction=current_stride, module='features.' + module_name)] + current_stride *= 2 + trans = DenseTransition( + num_input_features=num_features, num_output_features=num_features // 2, + norm_layer=norm_layer, aa_layer=transition_aa_layer) + self.features.add_module(f'transition{i + 1}', trans) + num_features = num_features // 2 + + # Final batch norm + self.features.add_module('norm5', norm_layer(num_features)) + + self.feature_info += [dict(num_chs=num_features, reduction=current_stride, module='features.norm5')] + self.num_features = num_features + + # Linear layer + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + # Official init from torch repo. 
+ for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(m.bias, 0) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + return self.features(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + # both classifier and block drop? + # if self.drop_rate > 0.: + # x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + return x + + +def _filter_torchvision_pretrained(state_dict): + pattern = re.compile( + r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') + + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + state_dict[new_key] = state_dict[key] + del state_dict[key] + return state_dict + + +def _create_densenet(variant, growth_rate, block_config, pretrained, **kwargs): + kwargs['growth_rate'] = growth_rate + kwargs['block_config'] = block_config + return build_model_with_cfg( + DenseNet, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), pretrained_filter_fn=_filter_torchvision_pretrained, + **kwargs) + + +@register_model +def densenet121(pretrained=False, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet121', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenetblur121d(pretrained=False, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenetblur121d', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, stem_type='deep', + aa_layer=BlurPool2d, **kwargs) + return model + + +@register_model +def densenet121d(pretrained=False, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet121d', growth_rate=32, block_config=(6, 12, 24, 16), stem_type='deep', + pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet169(pretrained=False, **kwargs): + r"""Densenet-169 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet169', growth_rate=32, block_config=(6, 12, 32, 32), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet201(pretrained=False, **kwargs): + r"""Densenet-201 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet201', growth_rate=32, block_config=(6, 12, 48, 32), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet161(pretrained=False, **kwargs): + r"""Densenet-161 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet161', growth_rate=48, block_config=(6, 12, 36, 24), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet264(pretrained=False, **kwargs): + r"""Densenet-264 model from + `"Densely Connected Convolutional Networks" ` + """ + 
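`_filter_torchvision_pretrained` above remaps very old torchvision checkpoint keys (`norm.1`, `conv.2`, ...) onto the module names used in this file. A quick sketch of what the regex does to one such key:

```python
# The 'norm.1' / 'conv.2' style suffixes in old torchvision DenseNet weights
# are collapsed to 'norm1' / 'conv2' so they match this file's module names.
import re

pattern = re.compile(
    r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')

old_key = 'features.denseblock1.denselayer1.norm.1.running_mean'
m = pattern.match(old_key)
print(m.group(1) + m.group(2))
# features.denseblock1.denselayer1.norm1.running_mean
```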
model = _create_densenet( + 'densenet264', growth_rate=48, block_config=(6, 12, 64, 48), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet264d_iabn(pretrained=False, **kwargs): + r"""Densenet-264 model with deep stem and Inplace-ABN + """ + def norm_act_fn(num_features, **kwargs): + return create_norm_act('iabn', num_features, **kwargs) + model = _create_densenet( + 'densenet264d_iabn', growth_rate=48, block_config=(6, 12, 64, 48), stem_type='deep', + norm_layer=norm_act_fn, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tv_densenet121(pretrained=False, **kwargs): + r"""Densenet-121 model with original Torchvision weights, from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'tv_densenet121', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, **kwargs) + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/dla.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/dla.py new file mode 100644 index 0000000000..f6e4dd285d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/dla.py @@ -0,0 +1,443 @@ +""" Deep Layer Aggregation and DLA w/ Res2Net +DLA original adapted from Official Pytorch impl at: +DLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484 + +Res2Net additions from: https://github.com/gasvn/Res2Net/ +Res2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 +""" +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['DLA'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'base_layer.0', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'dla34': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth'), + 'dla46_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth'), + 'dla46x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth'), + 'dla60x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth'), + 'dla60': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth'), + 'dla60x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth'), + 'dla102': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth'), + 'dla102x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth'), + 'dla102x2': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth'), + 'dla169': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth'), + 'dla60_res2net': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth'), + 'dla60_res2next': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth'), +} + + +class DlaBasic(nn.Module): + """DLA Basic""" + + def __init__(self, inplanes, planes, stride=1, dilation=1, **_): + super(DlaBasic, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) + 
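Each `@register_model` constructor above registers its name with timm's model registry, so the variants can be built through the factory. A usage sketch, assuming a timm installation (this patched copy or upstream) providing `create_model`:

```python
import timm
import torch

model = timm.create_model('densenet121', pretrained=False, num_classes=10)
model.eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))
print(out.shape)               # torch.Size([1, 10])
print(model.get_classifier())  # Linear(in_features=1024, out_features=10, ...)
```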
self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, stride=1, padding=dilation, bias=False, dilation=dilation) + self.bn2 = nn.BatchNorm2d(planes) + self.stride = stride + + def forward(self, x, shortcut=None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaBottleneck(nn.Module): + """DLA/DLA-X Bottleneck""" + expansion = 2 + + def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64): + super(DlaBottleneck, self).__init__() + self.stride = stride + mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) + mid_planes = mid_planes // self.expansion + + self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(mid_planes) + self.conv2 = nn.Conv2d( + mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation, + bias=False, dilation=dilation, groups=cardinality) + self.bn2 = nn.BatchNorm2d(mid_planes) + self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(outplanes) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, shortcut=None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaBottle2neck(nn.Module): + """ Res2Net/Res2NeXT DLA Bottleneck + Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py + """ + expansion = 2 + + def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4): + super(DlaBottle2neck, self).__init__() + self.is_first = stride > 1 + self.scale = scale + mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) + mid_planes = mid_planes // self.expansion + self.width = mid_planes + + self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(mid_planes * scale) + + num_scale_convs = max(1, scale - 1) + convs = [] + bns = [] + for _ in range(num_scale_convs): + convs.append(nn.Conv2d( + mid_planes, mid_planes, kernel_size=3, stride=stride, + padding=dilation, dilation=dilation, groups=cardinality, bias=False)) + bns.append(nn.BatchNorm2d(mid_planes)) + self.convs = nn.ModuleList(convs) + self.bns = nn.ModuleList(bns) + if self.is_first: + self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) + + self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(outplanes) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, shortcut=None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + spo = [] + for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): + sp = spx[i] if i == 0 or self.is_first else sp + spx[i] + sp = conv(sp) + sp = bn(sp) + sp = self.relu(sp) + spo.append(sp) + if self.scale > 1: + spo.append(self.pool(spx[-1]) if self.is_first else spx[-1]) + out = torch.cat(spo, 1) + + out = self.conv3(out) + out = self.bn3(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaRoot(nn.Module): + def __init__(self, 
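`DlaBottle2neck.forward` above implements the Res2Net-style split: the bottleneck output is chunked into `scale` groups, and each group after the first also receives the previous group's output before its grouped 3x3 conv. A minimal stride-1 sketch (in the strided `is_first` case the last chunk is average-pooled instead of passed through):

```python
# Res2Net multi-scale mixing: split -> hierarchical 3x3 convs -> concat.
import torch
import torch.nn as nn

width, scale = 8, 4
x = torch.randn(2, width * scale, 16, 16)
convs = nn.ModuleList(nn.Conv2d(width, width, 3, padding=1, bias=False)
                      for _ in range(scale - 1))

spx = torch.split(x, width, 1)          # `scale` groups of `width` channels
spo, sp = [], None
for i, conv in enumerate(convs):
    sp = spx[i] if i == 0 else sp + spx[i]   # reuse previous group's output
    sp = conv(sp)
    spo.append(sp)
spo.append(spx[-1])                     # last group untouched (stride-1 case)
out = torch.cat(spo, 1)
print(out.shape)                        # torch.Size([2, 32, 16, 16])
```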
in_channels, out_channels, kernel_size, shortcut): + super(DlaRoot, self).__init__() + self.conv = nn.Conv2d( + in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU(inplace=True) + self.shortcut = shortcut + + def forward(self, *x): + children = x + x = self.conv(torch.cat(x, 1)) + x = self.bn(x) + if self.shortcut: + x += children[0] + x = self.relu(x) + + return x + + +class DlaTree(nn.Module): + def __init__(self, levels, block, in_channels, out_channels, stride=1, + dilation=1, cardinality=1, base_width=64, + level_root=False, root_dim=0, root_kernel_size=1, root_shortcut=False): + super(DlaTree, self).__init__() + if root_dim == 0: + root_dim = 2 * out_channels + if level_root: + root_dim += in_channels + self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else nn.Identity() + self.project = nn.Identity() + cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width) + if levels == 1: + self.tree1 = block(in_channels, out_channels, stride, **cargs) + self.tree2 = block(out_channels, out_channels, 1, **cargs) + if in_channels != out_channels: + # NOTE the official impl/weights have project layers in levels > 1 case that are never + # used, I've moved the project layer here to avoid wasted params but old checkpoints will + # need strict=False while loading. + self.project = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), + nn.BatchNorm2d(out_channels)) + else: + cargs.update(dict(root_kernel_size=root_kernel_size, root_shortcut=root_shortcut)) + self.tree1 = DlaTree( + levels - 1, block, in_channels, out_channels, stride, root_dim=0, **cargs) + self.tree2 = DlaTree( + levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, **cargs) + if levels == 1: + self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_shortcut) + self.level_root = level_root + self.root_dim = root_dim + self.levels = levels + + def forward(self, x, shortcut=None, children=None): + children = [] if children is None else children + bottom = self.downsample(x) + shortcut = self.project(bottom) + if self.level_root: + children.append(bottom) + x1 = self.tree1(x, shortcut) + if self.levels == 1: + x2 = self.tree2(x1) + x = self.root(x2, x1, *children) + else: + children.append(x1) + x = self.tree2(x1, children=children) + return x + + +class DLA(nn.Module): + def __init__(self, levels, channels, output_stride=32, num_classes=1000, in_chans=3, + cardinality=1, base_width=64, block=DlaBottle2neck, shortcut_root=False, + drop_rate=0.0, global_pool='avg'): + super(DLA, self).__init__() + self.channels = channels + self.num_classes = num_classes + self.cardinality = cardinality + self.base_width = base_width + self.drop_rate = drop_rate + assert output_stride == 32 # FIXME support dilation + + self.base_layer = nn.Sequential( + nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False), + nn.BatchNorm2d(channels[0]), + nn.ReLU(inplace=True)) + self.level0 = self._make_conv_level(channels[0], channels[0], levels[0]) + self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2) + cargs = dict(cardinality=cardinality, base_width=base_width, root_shortcut=shortcut_root) + self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs) + self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs) + self.level4 = 
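`DlaTree` above sets `root_dim = 2 * out_channels`, plus `in_channels` when `level_root` is true; that is simply the width of the concatenation `DlaRoot` performs before its 1x1 fuse conv. A small sketch with made-up sizes:

```python
# Why root_dim = 2 * out_channels (+ in_channels for level_root nodes):
# DlaRoot concatenates tree2's output, tree1's output, and any extra children.
import torch

out_channels, in_channels = 128, 64
x2 = torch.randn(1, out_channels, 28, 28)      # tree2 output
x1 = torch.randn(1, out_channels, 28, 28)      # tree1 output
bottom = torch.randn(1, in_channels, 28, 28)   # level_root child (downsampled input)

root_in = torch.cat([x2, x1, bottom], 1)
print(root_in.shape[1], 2 * out_channels + in_channels)   # 320 320
```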
DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs) + self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs) + self.feature_info = [ + dict(num_chs=channels[0], reduction=1, module='level0'), # rare to have a meaningful stride 1 level + dict(num_chs=channels[1], reduction=2, module='level1'), + dict(num_chs=channels[2], reduction=4, module='level2'), + dict(num_chs=channels[3], reduction=8, module='level3'), + dict(num_chs=channels[4], reduction=16, module='level4'), + dict(num_chs=channels[5], reduction=32, module='level5'), + ] + + self.num_features = channels[-1] + self.global_pool, self.fc = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): + modules = [] + for i in range(convs): + modules.extend([ + nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1, + padding=dilation, bias=False, dilation=dilation), + nn.BatchNorm2d(planes), + nn.ReLU(inplace=True)]) + inplanes = planes + return nn.Sequential(*modules) + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def forward_features(self, x): + x = self.base_layer(x) + x = self.level0(x) + x = self.level1(x) + x = self.level2(x) + x = self.level3(x) + x = self.level4(x) + x = self.level5(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + x = self.flatten(x) + return x + + +def _create_dla(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + DLA, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=False, + feature_cfg=dict(out_indices=(1, 2, 3, 4, 5)), + **kwargs) + + +@register_model +def dla60_res2net(pretrained=False, **kwargs): + model_kwargs = dict( + levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottle2neck, cardinality=1, base_width=28, **kwargs) + return _create_dla('dla60_res2net', pretrained, **model_kwargs) + + +@register_model +def dla60_res2next(pretrained=False,**kwargs): + model_kwargs = dict( + levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottle2neck, cardinality=8, base_width=4, **kwargs) + return _create_dla('dla60_res2next', pretrained, **model_kwargs) + + +@register_model +def dla34(pretrained=False, **kwargs): # DLA-34 + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512], + block=DlaBasic, **kwargs) + return _create_dla('dla34', pretrained, **model_kwargs) + + +@register_model +def dla46_c(pretrained=False, **kwargs): # DLA-46-C + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, **kwargs) + return _create_dla('dla46_c', pretrained, **model_kwargs) + + +@register_model 
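The `feature_cfg=dict(out_indices=(1, 2, 3, 4, 5))` passed to `build_model_with_cfg` above is what lets these DLA nets act as detection/segmentation backbones. A usage sketch relying on timm's `features_only` interface (part of timm itself, not of this file):

```python
import timm
import torch

backbone = timm.create_model('dla34', pretrained=False, features_only=True)
print(backbone.feature_info.channels())    # [32, 64, 128, 256, 512]
print(backbone.feature_info.reduction())   # [2, 4, 8, 16, 32]

feats = backbone(torch.randn(1, 3, 224, 224))
print([f.shape[-1] for f in feats])        # [112, 56, 28, 14, 7]
```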
+def dla46x_c(pretrained=False, **kwargs): # DLA-X-46-C + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla46x_c', pretrained, **model_kwargs) + + +@register_model +def dla60x_c(pretrained=False, **kwargs): # DLA-X-60-C + model_kwargs = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla60x_c', pretrained, **model_kwargs) + + +@register_model +def dla60(pretrained=False, **kwargs): # DLA-60 + model_kwargs = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, **kwargs) + return _create_dla('dla60', pretrained, **model_kwargs) + + +@register_model +def dla60x(pretrained=False, **kwargs): # DLA-X-60 + model_kwargs = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla60x', pretrained, **model_kwargs) + + +@register_model +def dla102(pretrained=False, **kwargs): # DLA-102 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, shortcut_root=True, **kwargs) + return _create_dla('dla102', pretrained, **model_kwargs) + + +@register_model +def dla102x(pretrained=False, **kwargs): # DLA-X-102 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=32, base_width=4, shortcut_root=True, **kwargs) + return _create_dla('dla102x', pretrained, **model_kwargs) + + +@register_model +def dla102x2(pretrained=False, **kwargs): # DLA-X-102 64 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=64, base_width=4, shortcut_root=True, **kwargs) + return _create_dla('dla102x2', pretrained, **model_kwargs) + + +@register_model +def dla169(pretrained=False, **kwargs): # DLA-169 + model_kwargs = dict( + levels=[1, 1, 2, 3, 5, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, shortcut_root=True, **kwargs) + return _create_dla('dla169', pretrained, **model_kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/dpn.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/dpn.py new file mode 100644 index 0000000000..c4e380b1e3 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/dpn.py @@ -0,0 +1,317 @@ +""" PyTorch implementation of DualPathNetworks +Based on original MXNet implementation https://github.com/cypw/DPNs with +many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs. + +This implementation is compatible with the pretrained weights from cypw's MXNet implementation. 
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict +from functools import partial +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DPN_MEAN, IMAGENET_DPN_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import BatchNormAct2d, ConvBnAct, create_conv2d, create_classifier +from .registry import register_model + +__all__ = ['DPN'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DPN_MEAN, 'std': IMAGENET_DPN_STD, + 'first_conv': 'features.conv1_1.conv', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'dpn68': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn68-66bebafa7.pth'), + 'dpn68b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dpn68b_ra-a31ca160.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'dpn92': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn92_extra-b040e4a9b.pth'), + 'dpn98': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn98-5b90dec4d.pth'), + 'dpn131': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn131-71dfe43e0.pth'), + 'dpn107': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn107_extra-1ac7121e2.pth') +} + + +class CatBnAct(nn.Module): + def __init__(self, in_chs, norm_layer=BatchNormAct2d): + super(CatBnAct, self).__init__() + self.bn = norm_layer(in_chs, eps=0.001) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor, torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (torch.Tensor) + pass + + def forward(self, x): + if isinstance(x, tuple): + x = torch.cat(x, dim=1) + return self.bn(x) + + +class BnActConv2d(nn.Module): + def __init__(self, in_chs, out_chs, kernel_size, stride, groups=1, norm_layer=BatchNormAct2d): + super(BnActConv2d, self).__init__() + self.bn = norm_layer(in_chs, eps=0.001) + self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, groups=groups) + + def forward(self, x): + return self.conv(self.bn(x)) + + +class DualPathBlock(nn.Module): + def __init__( + self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, groups, block_type='normal', b=False): + super(DualPathBlock, self).__init__() + self.num_1x1_c = num_1x1_c + self.inc = inc + self.b = b + if block_type == 'proj': + self.key_stride = 1 + self.has_proj = True + elif block_type == 'down': + self.key_stride = 2 + self.has_proj = True + else: + assert block_type == 'normal' + self.key_stride = 1 + self.has_proj = False + + self.c1x1_w_s1 = None + self.c1x1_w_s2 = None + if self.has_proj: + # Using different member names here to allow easier parameter key matching for conversion + if self.key_stride == 2: + self.c1x1_w_s2 = BnActConv2d( + in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2) + else: + self.c1x1_w_s1 = BnActConv2d( + in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1) + + self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1) + self.c3x3_b = BnActConv2d( + 
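`CatBnAct` above accepts either a single tensor or the `(residual, dense)` tuple carried between DualPathBlocks; the `@torch.jit._overload_method` stubs exist only to expose both signatures to torchscript. A small eager-mode sketch of the same behaviour with plain modules:

```python
# Tuple inputs are concatenated on the channel dim before the norm/act.
import torch
import torch.nn as nn

bn = nn.BatchNorm2d(96, eps=0.001)

def cat_bn(x):
    if isinstance(x, tuple):
        x = torch.cat(x, dim=1)
    return bn(x)

resid = torch.randn(2, 64, 14, 14)
dense = torch.randn(2, 32, 14, 14)
print(cat_bn((resid, dense)).shape)              # torch.Size([2, 96, 14, 14])
print(cat_bn(torch.randn(2, 96, 14, 14)).shape)  # torch.Size([2, 96, 14, 14])
```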
in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3, stride=self.key_stride, groups=groups) + if b: + self.c1x1_c = CatBnAct(in_chs=num_3x3_b) + self.c1x1_c1 = create_conv2d(num_3x3_b, num_1x1_c, kernel_size=1) + self.c1x1_c2 = create_conv2d(num_3x3_b, inc, kernel_size=1) + else: + self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1) + self.c1x1_c1 = None + self.c1x1_c2 = None + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor] + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] + pass + + def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]: + if isinstance(x, tuple): + x_in = torch.cat(x, dim=1) + else: + x_in = x + if self.c1x1_w_s1 is None and self.c1x1_w_s2 is None: + # self.has_proj == False, torchscript requires condition on module == None + x_s1 = x[0] + x_s2 = x[1] + else: + # self.has_proj == True + if self.c1x1_w_s1 is not None: + # self.key_stride = 1 + x_s = self.c1x1_w_s1(x_in) + else: + # self.key_stride = 2 + x_s = self.c1x1_w_s2(x_in) + x_s1 = x_s[:, :self.num_1x1_c, :, :] + x_s2 = x_s[:, self.num_1x1_c:, :, :] + x_in = self.c1x1_a(x_in) + x_in = self.c3x3_b(x_in) + x_in = self.c1x1_c(x_in) + if self.c1x1_c1 is not None: + # self.b == True, using None check for torchscript compat + out1 = self.c1x1_c1(x_in) + out2 = self.c1x1_c2(x_in) + else: + out1 = x_in[:, :self.num_1x1_c, :, :] + out2 = x_in[:, self.num_1x1_c:, :, :] + resid = x_s1 + out1 + dense = torch.cat([x_s2, out2], dim=1) + return resid, dense + + +class DPN(nn.Module): + def __init__(self, small=False, num_init_features=64, k_r=96, groups=32, + b=False, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128), output_stride=32, + num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', fc_act=nn.ELU): + super(DPN, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.b = b + assert output_stride == 32 # FIXME look into dilation support + norm_layer = partial(BatchNormAct2d, eps=.001) + fc_norm_layer = partial(BatchNormAct2d, eps=.001, act_layer=fc_act, inplace=False) + bw_factor = 1 if small else 4 + blocks = OrderedDict() + + # conv1 + blocks['conv1_1'] = ConvBnAct( + in_chans, num_init_features, kernel_size=3 if small else 7, stride=2, norm_layer=norm_layer) + blocks['conv1_pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.feature_info = [dict(num_chs=num_init_features, reduction=2, module='features.conv1_1')] + + # conv2 + bw = 64 * bw_factor + inc = inc_sec[0] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[0] + 1): + blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=4, module=f'features.conv2_{k_sec[0]}')] + + # conv3 + bw = 128 * bw_factor + inc = inc_sec[1] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[1] + 1): + blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=8, module=f'features.conv3_{k_sec[1]}')] + + # conv4 + bw = 256 * bw_factor + inc = inc_sec[2] + r = (k_r * bw) // (64 * 
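The `forward` above maintains two states per block: the first `num_1x1_c` channels form an additive (ResNet-style) path, and the remaining `inc` channels extend a concatenative (DenseNet-style) path, so the total width grows by `inc` per block. A minimal tensor sketch with made-up sizes:

```python
# Dual-path bookkeeping: channel-split the block output, add one part,
# concatenate the other.
import torch

num_1x1_c, inc = 64, 16
x_s1 = torch.randn(2, num_1x1_c, 28, 28)         # residual state
x_s2 = torch.randn(2, 3 * inc, 28, 28)           # dense (concat) state
block_out = torch.randn(2, num_1x1_c + inc, 28, 28)

resid = x_s1 + block_out[:, :num_1x1_c]
dense = torch.cat([x_s2, block_out[:, num_1x1_c:]], dim=1)
print(resid.shape, dense.shape)
# torch.Size([2, 64, 28, 28]) torch.Size([2, 64, 28, 28])
```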
bw_factor) + blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[2] + 1): + blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=16, module=f'features.conv4_{k_sec[2]}')] + + # conv5 + bw = 512 * bw_factor + inc = inc_sec[3] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[3] + 1): + blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=32, module=f'features.conv5_{k_sec[3]}')] + + blocks['conv5_bn_ac'] = CatBnAct(in_chs, norm_layer=fc_norm_layer) + + self.num_features = in_chs + self.features = nn.Sequential(blocks) + + # Using 1x1 conv for the FC layer to allow the extra pooling scheme + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def forward_features(self, x): + return self.features(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + x = self.flatten(x) + return x + + +def _create_dpn(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + DPN, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_concat=True, flatten_sequential=True), + **kwargs) + + +@register_model +def dpn68(pretrained=False, **kwargs): + model_kwargs = dict( + small=True, num_init_features=10, k_r=128, groups=32, + k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64), **kwargs) + return _create_dpn('dpn68', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn68b(pretrained=False, **kwargs): + model_kwargs = dict( + small=True, num_init_features=10, k_r=128, groups=32, + b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64), **kwargs) + return _create_dpn('dpn68b', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn92(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=64, k_r=96, groups=32, + k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128), **kwargs) + return _create_dpn('dpn92', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn98(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=96, k_r=160, groups=40, + k_sec=(3, 6, 20, 3), inc_sec=(16, 32, 32, 128), **kwargs) + return _create_dpn('dpn98', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn131(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=128, k_r=160, groups=40, + k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128), **kwargs) + return _create_dpn('dpn131', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn107(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=128, k_r=200, groups=50, + k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 
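`create_classifier(..., use_conv=True)` above means the head is a 1x1 conv applied to the still-2D pooled features, with an explicit flatten afterwards. A shape sketch using dpn68's final width of 832 channels, with plain nn modules standing in for timm's pooling helper:

```python
# Conv-based classifier head: global pool -> 1x1 conv -> flatten.
import torch
import torch.nn as nn

pool = nn.AdaptiveAvgPool2d(1)            # stand-in for the timm global pool
fc = nn.Conv2d(832, 1000, kernel_size=1)  # 832 = dpn68 num_features
flatten = nn.Flatten(1)

feat = torch.randn(2, 832, 7, 7)
out = flatten(fc(pool(feat)))
print(out.shape)                          # torch.Size([2, 1000])
```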
64, 128), **kwargs) + return _create_dpn('dpn107', pretrained=pretrained, **model_kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/efficientnet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/efficientnet.py new file mode 100644 index 0000000000..3d50b704cd --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/efficientnet.py @@ -0,0 +1,2211 @@ +""" The EfficientNet Family in PyTorch + +An implementation of EfficienNet that covers variety of related models with efficient architectures: + +* EfficientNet-V2 + - `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + +* EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent weight ports) + - EfficientNet: Rethinking Model Scaling for CNNs - https://arxiv.org/abs/1905.11946 + - CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971 + - Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665 + - Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252 + +* MixNet (Small, Medium, and Large) + - MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595 + +* MNasNet B1, A1 (SE), Small + - MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626 + +* FBNet-C + - FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443 + +* Single-Path NAS Pixel1 + - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877 + +* And likely more... + +The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available +by Mingxing Tan, Quoc Le, and other members of their Google Brain team. Thanks for consistently releasing +the models and weights open source! 
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +from functools import partial +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .efficientnet_blocks import SqueezeExcite +from .efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights,\ + round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT +from .features import FeatureInfo, FeatureHooks +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import create_conv2d, create_classifier +from .registry import register_model + +__all__ = ['EfficientNet', 'EfficientNetFeatures'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'mnasnet_050': _cfg(url=''), + 'mnasnet_075': _cfg(url=''), + 'mnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth'), + 'mnasnet_140': _cfg(url=''), + + 'semnasnet_050': _cfg(url=''), + 'semnasnet_075': _cfg(url=''), + 'semnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth'), + 'semnasnet_140': _cfg(url=''), + 'mnasnet_small': _cfg(url=''), + + 'mobilenetv2_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth'), + 'mobilenetv2_110d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth'), + 'mobilenetv2_120d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth'), + 'mobilenetv2_140': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth'), + + 'fbnetc_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth', + interpolation='bilinear'), + 'spnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth', + interpolation='bilinear'), + + # NOTE experimenting with alternate attention + 'efficientnet_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth'), + 'efficientnet_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth', + test_input_size=(3, 256, 256), crop_pct=1.0), + 'efficientnet_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth', + input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), crop_pct=1.0), + 'efficientnet_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth', + input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), + 'efficientnet_b4': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b4_ra2_320-7eb33cd5.pth', + input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), crop_pct=1.0), + 'efficientnet_b5': _cfg( + url='', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'efficientnet_b6': _cfg( + url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'efficientnet_b7': _cfg( + url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'efficientnet_b8': _cfg( + url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + 'efficientnet_l2': _cfg( + url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961), + + 'efficientnet_es': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth'), + 'efficientnet_em': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'efficientnet_el': _cfg( + url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_el.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'efficientnet_es_pruned': _cfg( + url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_es_pruned75.pth'), + 'efficientnet_el_pruned': _cfg( + url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_el_pruned70.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'efficientnet_cc_b0_4e': _cfg(url=''), + 'efficientnet_cc_b0_8e': _cfg(url=''), + 'efficientnet_cc_b1_8e': _cfg(url='', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + + 'efficientnet_lite0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth'), + 'efficientnet_lite1': _cfg( + url='', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'efficientnet_lite2': _cfg( + url='', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'efficientnet_lite3': _cfg( + url='', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'efficientnet_lite4': _cfg( + url='', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + + 'efficientnet_b1_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb1_pruned_9ebb3fe6.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'efficientnet_b2_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb2_pruned_203f55bc.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'efficientnet_b3_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb3_pruned_5abcc29f.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + + 'efficientnetv2_rw_t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_t_agc-3620981a.pth', + input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), + 'gc_efficientnetv2_rw_t': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gc_efficientnetv2_rw_t_agc-927a0bde.pth', + input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), + 'efficientnetv2_rw_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_v2s_ra2_288-a6477665.pth', + input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), + 'efficientnetv2_rw_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_rw_m_agc-3d90cb1e.pth', + input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), + + 'efficientnetv2_s': _cfg( + url='', + input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), + 'efficientnetv2_m': _cfg( + url='', + input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), + 'efficientnetv2_l': _cfg( + url='', + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'efficientnetv2_xl': _cfg( + url='', + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnet_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth', + input_size=(3, 224, 224)), + 'tf_efficientnet_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth', + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_b8': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth', + input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + + 'tf_efficientnet_b0_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 224, 224)), + 'tf_efficientnet_b1_ap': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_b8_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + + 'tf_efficientnet_b0_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth', + input_size=(3, 224, 224)), + 'tf_efficientnet_b1_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6_ns': 
_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth', + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_l2_ns_475': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth', + input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936), + 'tf_efficientnet_l2_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth', + input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96), + + 'tf_efficientnet_es': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 224, 224), ), + 'tf_efficientnet_em': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_el': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'tf_efficientnet_cc_b0_4e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_efficientnet_cc_b0_8e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_efficientnet_cc_b1_8e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + + 'tf_efficientnet_lite0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite3': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, interpolation='bilinear'), + 'tf_efficientnet_lite4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.920, interpolation='bilinear'), + + 'tf_efficientnetv2_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s-eb54923e.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m-cc09e0cd.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l-d664b728.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnetv2_s_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_xl_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21ft1k-06c35c48.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnetv2_s_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth', + 
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_xl_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21k-fd7e8abf.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnetv2_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b0-c7cc451f.pth', + input_size=(3, 192, 192), test_input_size=(3, 224, 224), pool_size=(6, 6)), + 'tf_efficientnetv2_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b1-be6e41b0.pth', + input_size=(3, 192, 192), test_input_size=(3, 240, 240), pool_size=(6, 6), crop_pct=0.882), + 'tf_efficientnetv2_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b2-847de54e.pth', + input_size=(3, 208, 208), test_input_size=(3, 260, 260), pool_size=(7, 7), crop_pct=0.890), + 'tf_efficientnetv2_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b3-57773f13.pth', + input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), + + 'mixnet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth'), + 'mixnet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth'), + 'mixnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth'), + 'mixnet_xl': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth'), + 'mixnet_xxl': _cfg(), + + 'tf_mixnet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth'), + 'tf_mixnet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth'), + 'tf_mixnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth'), +} + + +class EfficientNet(nn.Module): + """ (Generic) EfficientNet + + A flexible and performant PyTorch implementation of efficient network architectures, including: + * EfficientNet-V2 Small, Medium, Large, XL & B0-B3 + * EfficientNet B0-B8, L2 + * EfficientNet-EdgeTPU + * EfficientNet-CondConv + * MixNet S, M, L, XL + * MnasNet A1, B1, and small + * FBNet C + * Single-Path NAS Pixel1 + + """ + + def __init__(self, block_args, num_classes=1000, num_features=1280, in_chans=3, stem_size=32, fix_stem=False, + output_stride=32, pad_type='', round_chs_fn=round_channels, act_layer=None, norm_layer=None, + se_layer=None, drop_rate=0., drop_path_rate=0., global_pool='avg'): + super(EfficientNet, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, 
padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = builder.features + head_chs = builder.in_chs + + # Head + Pooling + self.conv_head = create_conv2d(head_chs, self.num_features, 1, padding=pad_type) + self.bn2 = norm_layer(self.num_features) + self.act2 = act_layer(inplace=True) + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + efficientnet_init_weights(self) + + def as_sequential(self): + layers = [self.conv_stem, self.bn1, self.act1] + layers.extend(self.blocks) + layers.extend([self.conv_head, self.bn2, self.act2, self.global_pool]) + layers.extend([nn.Dropout(self.drop_rate), self.classifier]) + return nn.Sequential(*layers) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.conv_head(x) + x = self.bn2(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + + +class EfficientNetFeatures(nn.Module): + """ EfficientNet Feature Extractor + + A work-in-progress feature extraction module for EfficientNet, to use as a backbone for segmentation + and object detection models. 
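+
+    Usage sketch (assumes the timm-style `create_model` factory that accompanies this code, as
+    described in docs/feature_extraction.md; the exact import path is not defined in this file):
+
+        >>> import torch, timm
+        >>> backbone = timm.create_model('efficientnet_b0', features_only=True, out_indices=(1, 2, 3, 4))
+        >>> feats = backbone(torch.randn(1, 3, 224, 224))
+        >>> [f.shape[-1] for f in feats]  # feature maps at strides 4, 8, 16, 32
+        [56, 28, 14, 7]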
+ """ + + def __init__(self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', in_chans=3, + stem_size=32, fix_stem=False, output_stride=32, pad_type='', round_chs_fn=round_channels, + act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0.): + super(EfficientNetFeatures, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.drop_rate = drop_rate + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, + feature_location=feature_location) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = FeatureInfo(builder.features, out_indices) + self._stage_out_idx = {v['stage']: i for i, v in enumerate(self.feature_info) if i in out_indices} + + efficientnet_init_weights(self) + + # Register feature extraction hooks with FeatureHooks helper + self.feature_hooks = None + if feature_location != 'bottleneck': + hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) + self.feature_hooks = FeatureHooks(hooks, self.named_modules()) + + def forward(self, x) -> List[torch.Tensor]: + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if self.feature_hooks is None: + features = [] + if 0 in self._stage_out_idx: + features.append(x) # add stem out + for i, b in enumerate(self.blocks): + x = b(x) + if i + 1 in self._stage_out_idx: + features.append(x) + return features + else: + self.blocks(x) + out = self.feature_hooks.get_output(x.device) + return list(out.values()) + + +def _create_effnet(variant, pretrained=False, **kwargs): + features_only = False + model_cls = EfficientNet + kwargs_filter = None + if kwargs.pop('features_only', False): + features_only = True + kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'global_pool') + model_cls = EfficientNetFeatures + model = build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-a1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r2_k3_s2_e6_c24'], + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r4_k3_s2_e6_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-b1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40'], + # stage 3, 28x28 in + ['ir_r3_k5_s2_e6_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-b1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + ['ds_r1_k3_s1_c8'], + ['ir_r1_k3_s2_e3_c16'], + ['ir_r2_k3_s2_e6_c16'], + ['ir_r4_k5_s2_e6_c32_se0.25'], + ['ir_r3_k3_s1_e6_c32_se0.25'], + ['ir_r3_k5_s2_e6_c88_se0.25'], + ['ir_r1_k3_s1_e6_c144'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=8, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_v2( + variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs): + """ Generate MobileNet-V2 network + Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py + Paper: https://arxiv.org/abs/1801.04381 + """ + arch_def = [ + ['ds_r1_k3_s1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r3_k3_s2_e6_c32'], + ['ir_r4_k3_s2_e6_c64'], + ['ir_r3_k3_s1_e6_c96'], + ['ir_r3_k3_s2_e6_c160'], + ['ir_r1_k3_s1_e6_c320'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head), + num_features=1280 if fix_stem_head else round_chs_fn(1280), + stem_size=32, + fix_stem=fix_stem_head, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'relu6'), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """ FBNet-C + + Paper: https://arxiv.org/abs/1812.03443 + Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py + + NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper, + it was used to confirm some building block details + """ + arch_def = [ + ['ir_r1_k3_s1_e1_c16'], + ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'], + ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'], + ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'], + ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'], + ['ir_r4_k5_s2_e6_c184'], + ['ir_r1_k3_s1_e6_c352'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=16, + num_features=1984, # paper suggests this, but is not 100% clear + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates the Single-Path NAS model from search targeted for Pixel1 phone. + + Paper: https://arxiv.org/abs/1904.02877 + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'], + # stage 4, 14x14in + ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates an EfficientNet model. + + Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-b0': (1.0, 1.0, 224, 0.2), + 'efficientnet-b1': (1.0, 1.1, 240, 0.2), + 'efficientnet-b2': (1.1, 1.2, 260, 0.3), + 'efficientnet-b3': (1.2, 1.4, 300, 0.3), + 'efficientnet-b4': (1.4, 1.8, 380, 0.4), + 'efficientnet-b5': (1.6, 2.2, 456, 0.4), + 'efficientnet-b6': (1.8, 2.6, 528, 0.5), + 'efficientnet-b7': (2.0, 3.1, 600, 0.5), + 'efficientnet-b8': (2.2, 3.6, 672, 0.5), + 'efficientnet-l2': (4.3, 5.3, 800, 0.5), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25'], + ['ir_r4_k5_s2_e6_c192_se0.25'], + ['ir_r1_k3_s1_e6_c320_se0.25'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + act_layer=resolve_act_layer(kwargs, 'swish'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_edge(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-EdgeTPU model + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu + """ + + arch_def = [ + # NOTE `fc` is present to override a mismatch between stem channels and in chs not + # present in other models + ['er_r1_k3_s1_e4_c24_fc24_noskip'], + ['er_r2_k3_s2_e8_c32'], + ['er_r4_k3_s2_e8_c48'], + ['ir_r5_k5_s2_e8_c96'], + ['ir_r4_k5_s1_e8_c144'], + ['ir_r2_k5_s2_e8_c192'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'relu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_condconv( 
+ variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs): + """Creates an EfficientNet-CondConv model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], + ['ir_r4_k5_s2_e6_c192_se0.25_cc4'], + ['ir_r1_k3_s1_e6_c320_se0.25_cc4'], + ] + # NOTE unlike official impl, this one uses `cc` option where x is the base number of experts for each stage and + # the expert_multiplier increases that on a per-model basis as with depth/channel multipliers + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'swish'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates an EfficientNet-Lite model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-lite0': (1.0, 1.0, 224, 0.2), + 'efficientnet-lite1': (1.0, 1.1, 240, 0.2), + 'efficientnet-lite2': (1.1, 1.2, 260, 0.3), + 'efficientnet-lite3': (1.2, 1.4, 280, 0.3), + 'efficientnet-lite4': (1.4, 1.8, 300, 0.3), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r2_k5_s2_e6_c40'], + ['ir_r3_k3_s2_e6_c80'], + ['ir_r3_k5_s1_e6_c112'], + ['ir_r4_k5_s2_e6_c192'], + ['ir_r1_k3_s1_e6_c320'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True), + num_features=1280, + stem_size=32, + fix_stem=True, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + act_layer=resolve_act_layer(kwargs, 'relu6'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_base( + variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 base model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + arch_def = [ + ['cn_r1_k3_s1_e1_c16_skip'], + ['er_r2_k3_s2_e4_c32'], + ['er_r2_k3_s2_e4_c48'], + ['ir_r3_k3_s2_e4_c96_se0.25'], + ['ir_r5_k3_s1_e6_c112_se0.25'], + ['ir_r8_k3_s2_e6_c192_se0.25'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.) 
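+    # NOTE round_limit=0. disables round_channels' usual "don't round down by more than ~10%" bump,
+    # so V2 base channel widths are simply channel_multiplier * c snapped to the nearest multiple of 8.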
+ model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_s( + variant, channel_multiplier=1.0, depth_multiplier=1.0, rw=False, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Small model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + + NOTE: `rw` flag sets up 'small' variant to behave like my initial v2 small model, + before ref the impl was released. + """ + arch_def = [ + ['cn_r2_k3_s1_e1_c24_skip'], + ['er_r4_k3_s2_e4_c48'], + ['er_r4_k3_s2_e4_c64'], + ['ir_r6_k3_s2_e4_c128_se0.25'], + ['ir_r9_k3_s1_e6_c160_se0.25'], + ['ir_r15_k3_s2_e6_c256_se0.25'], + ] + num_features = 1280 + if rw: + # my original variant, based on paper figure differs from the official release + arch_def[0] = ['er_r2_k3_s1_e1_c24'] + arch_def[-1] = ['ir_r15_k3_s2_e6_c272_se0.25'] + num_features = 1792 + + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(num_features), + stem_size=24, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Medium model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r3_k3_s1_e1_c24_skip'], + ['er_r5_k3_s2_e4_c48'], + ['er_r5_k3_s2_e4_c80'], + ['ir_r7_k3_s2_e4_c160_se0.25'], + ['ir_r14_k3_s1_e6_c176_se0.25'], + ['ir_r18_k3_s2_e6_c304_se0.25'], + ['ir_r5_k3_s1_e6_c512_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=24, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_l(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Large model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r4_k3_s1_e1_c32_skip'], + ['er_r7_k3_s2_e4_c64'], + ['er_r7_k3_s2_e4_c96'], + ['ir_r10_k3_s2_e4_c192_se0.25'], + ['ir_r19_k3_s1_e6_c224_se0.25'], + ['ir_r25_k3_s2_e6_c384_se0.25'], + ['ir_r7_k3_s1_e6_c640_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=32, + round_chs_fn=partial(round_channels, 
multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_xl(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Xtra-Large model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r4_k3_s1_e1_c32_skip'], + ['er_r8_k3_s2_e4_c64'], + ['er_r8_k3_s2_e4_c96'], + ['ir_r16_k3_s2_e4_c192_se0.25'], + ['ir_r24_k3_s1_e6_c256_se0.25'], + ['ir_r32_k3_s2_e6_c512_se0.25'], + ['ir_r8_k3_s1_e6_c640_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Small model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=1536, + stem_size=16, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Medium-Large model. 
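+
+    Kernel notation in the MixNet `arch_def` strings (per the timm block decoder): 'k3.5.7' splits the
+    depthwise conv into mixed 3x3/5x5/7x7 kernel groups; 'a1.1' / 'p1.1' similarly request two-group
+    1x1 kernels for the expansion / projection convs; a trailing '_nsw' switches that block's
+    activation to swish, while blocks without it keep the model default (ReLU), matching the inline
+    '# relu' / '# swish' comments.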
+ + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c24'], # relu + # stage 1, 112x112 in + ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), + num_features=1536, + stem_size=24, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +@register_model +def mnasnet_050(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 0.5. """ + model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_075(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 0.75. """ + model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_100(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.0. """ + model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_b1(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.0. """ + return mnasnet_100(pretrained, **kwargs) + + +@register_model +def mnasnet_140(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.4 """ + model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_050(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 0.5 """ + model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_075(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 0.75. """ + model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_100(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_a1(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + return semnasnet_100(pretrained, **kwargs) + + +@register_model +def semnasnet_140(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.4. """ + model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_small(pretrained=False, **kwargs): + """ MNASNet Small, depth multiplier of 1.0. 
""" + model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_100(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.0 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_140(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.4 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_110d(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers""" + model = _gen_mobilenet_v2( + 'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_120d(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """ + model = _gen_mobilenet_v2( + 'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetc_100(pretrained=False, **kwargs): + """ FBNet-C """ + if pretrained: + # pretrained model trained with non-default BN epsilon + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def spnasnet_100(pretrained=False, **kwargs): + """ Single-Path NAS Pixel1""" + model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b1(pretrained=False, **kwargs): + """ EfficientNet-B1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2(pretrained=False, **kwargs): + """ EfficientNet-B2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2a(pretrained=False, **kwargs): + """ EfficientNet-B2 @ 288x288 w/ 1.0 test crop""" + # WARN this model def is deprecated, different train/test res + test crop handled by default_cfg now + return efficientnet_b2(pretrained=pretrained, **kwargs) + + +@register_model +def efficientnet_b3(pretrained=False, **kwargs): + """ EfficientNet-B3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b3a(pretrained=False, **kwargs): + """ EfficientNet-B3 @ 320x320 w/ 1.0 test crop-pct """ + # WARN this model def is deprecated, different train/test res + test crop handled by default_cfg now + return efficientnet_b3(pretrained=pretrained, **kwargs) + + +@register_model +def efficientnet_b4(pretrained=False, **kwargs): + """ EfficientNet-B4 """ + # NOTE for train, drop_rate should be 
0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b5(pretrained=False, **kwargs): + """ EfficientNet-B5 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b6(pretrained=False, **kwargs): + """ EfficientNet-B6 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b7(pretrained=False, **kwargs): + """ EfficientNet-B7 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b8(pretrained=False, **kwargs): + """ EfficientNet-B8 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_l2(pretrained=False, **kwargs): + """ EfficientNet-L2.""" + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_es(pretrained=False, **kwargs): + """ EfficientNet-Edge Small. """ + model = _gen_efficientnet_edge( + 'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_es_pruned(pretrained=False, **kwargs): + """ EfficientNet-Edge Small Pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" + model = _gen_efficientnet_edge( + 'efficientnet_es_pruned', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_em(pretrained=False, **kwargs): + """ EfficientNet-Edge-Medium. """ + model = _gen_efficientnet_edge( + 'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_el(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large. """ + model = _gen_efficientnet_edge( + 'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_el_pruned(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large pruned. 
+    For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0"""
+    model = _gen_efficientnet_edge(
+        'efficientnet_el_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
+    return model
+
+@register_model
+def efficientnet_cc_b0_4e(pretrained=False, **kwargs):
+    """ EfficientNet-CondConv-B0 w/ 4 Experts """
+    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
+    model = _gen_efficientnet_condconv(
+        'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_cc_b0_8e(pretrained=False, **kwargs):
+    """ EfficientNet-CondConv-B0 w/ 8 Experts """
+    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
+    model = _gen_efficientnet_condconv(
+        'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2,
+        pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_cc_b1_8e(pretrained=False, **kwargs):
+    """ EfficientNet-CondConv-B1 w/ 8 Experts """
+    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
+    model = _gen_efficientnet_condconv(
+        'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2,
+        pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_lite0(pretrained=False, **kwargs):
+    """ EfficientNet-Lite0 """
+    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
+    model = _gen_efficientnet_lite(
+        'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_lite1(pretrained=False, **kwargs):
+    """ EfficientNet-Lite1 """
+    # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2
+    model = _gen_efficientnet_lite(
+        'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_lite2(pretrained=False, **kwargs):
+    """ EfficientNet-Lite2 """
+    # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
+    model = _gen_efficientnet_lite(
+        'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_lite3(pretrained=False, **kwargs):
+    """ EfficientNet-Lite3 """
+    # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2
+    model = _gen_efficientnet_lite(
+        'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_lite4(pretrained=False, **kwargs):
+    """ EfficientNet-Lite4 """
+    # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2
+    model = _gen_efficientnet_lite(
+        'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def efficientnet_b1_pruned(pretrained=False, **kwargs):
+    """ EfficientNet-B1 Pruned.
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + variant = 'efficientnet_b1_pruned' + model = _gen_efficientnet( + variant, channel_multiplier=1.0, depth_multiplier=1.1, pruned=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2_pruned(pretrained=False, **kwargs): + """ EfficientNet-B2 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pruned=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b3_pruned(pretrained=False, **kwargs): + """ EfficientNet-B3 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_t(pretrained=False, **kwargs): + """ EfficientNet-V2 Tiny (Custom variant, tiny not in paper). """ + model = _gen_efficientnetv2_s( + 'efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, pretrained=pretrained, **kwargs) + return model + + +@register_model +def gc_efficientnetv2_rw_t(pretrained=False, **kwargs): + """ EfficientNet-V2 Tiny w/ Global Context Attn (Custom variant, tiny not in paper). """ + model = _gen_efficientnetv2_s( + 'gc_efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, + rw=False, se_layer='gc', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small (RW variant). + NOTE: This is my initial (pre official code release) w/ some differences. + See efficientnetv2_s and tf_efficientnetv2_s for versions that match the official w/ PyTorch vs TF padding + """ + model = _gen_efficientnetv2_s('efficientnetv2_rw_s', rw=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium (RW variant). + """ + model = _gen_efficientnetv2_s( + 'efficientnetv2_rw_m', channel_multiplier=1.2, depth_multiplier=(1.2,) * 4 + (1.6,) * 2, rw=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. """ + model = _gen_efficientnetv2_s('efficientnetv2_s', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. """ + model = _gen_efficientnetv2_m('efficientnetv2_m', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_l(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. """ + model = _gen_efficientnetv2_l('efficientnetv2_l', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_xl(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large. """ + model = _gen_efficientnetv2_xl('efficientnetv2_xl', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0. 
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1(pretrained=False, **kwargs): + """ EfficientNet-B1. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2(pretrained=False, **kwargs): + """ EfficientNet-B2. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3(pretrained=False, **kwargs): + """ EfficientNet-B3. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4(pretrained=False, **kwargs): + """ EfficientNet-B4. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5(pretrained=False, **kwargs): + """ EfficientNet-B5. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6(pretrained=False, **kwargs): + """ EfficientNet-B6. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7(pretrained=False, **kwargs): + """ EfficientNet-B7. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b8(pretrained=False, **kwargs): + """ EfficientNet-B8. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0_ap(pretrained=False, **kwargs): + """ EfficientNet-B0 AdvProp. 
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1_ap(pretrained=False, **kwargs): + """ EfficientNet-B1 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1_ap', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2_ap(pretrained=False, **kwargs): + """ EfficientNet-B2 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2_ap', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3_ap(pretrained=False, **kwargs): + """ EfficientNet-B3 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3_ap', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4_ap(pretrained=False, **kwargs): + """ EfficientNet-B4 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4_ap', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5_ap(pretrained=False, **kwargs): + """ EfficientNet-B5 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5_ap', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6_ap(pretrained=False, **kwargs): + """ EfficientNet-B6 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6_ap', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7_ap(pretrained=False, **kwargs): + """ EfficientNet-B7 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7_ap', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b8_ap(pretrained=False, **kwargs): + """ EfficientNet-B8 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b8_ap', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0_ns(pretrained=False, **kwargs): + """ EfficientNet-B0 NoisyStudent. 
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0_ns', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1_ns(pretrained=False, **kwargs): + """ EfficientNet-B1 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1_ns', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2_ns(pretrained=False, **kwargs): + """ EfficientNet-B2 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2_ns', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3_ns(pretrained=False, **kwargs): + """ EfficientNet-B3 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3_ns', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4_ns(pretrained=False, **kwargs): + """ EfficientNet-B4 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4_ns', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5_ns(pretrained=False, **kwargs): + """ EfficientNet-B5 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5_ns', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6_ns(pretrained=False, **kwargs): + """ EfficientNet-B6 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6_ns', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7_ns(pretrained=False, **kwargs): + """ EfficientNet-B7 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7_ns', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_l2_ns_475(pretrained=False, **kwargs): + """ EfficientNet-L2 NoisyStudent @ 475x475. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_l2_ns_475', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_l2_ns(pretrained=False, **kwargs): + """ EfficientNet-L2 NoisyStudent. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_l2_ns', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_es(pretrained=False, **kwargs): + """ EfficientNet-Edge Small. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_em(pretrained=False, **kwargs): + """ EfficientNet-Edge-Medium. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_el(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 4 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B1 w/ 8 Experts. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite0(pretrained=False, **kwargs): + """ EfficientNet-Lite0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite1(pretrained=False, **kwargs): + """ EfficientNet-Lite1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite2(pretrained=False, **kwargs): + """ EfficientNet-Lite2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite3(pretrained=False, **kwargs): + """ EfficientNet-Lite3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite4(pretrained=False, **kwargs): + """ EfficientNet-Lite4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + + +@register_model +def tf_efficientnetv2_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_s_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. Pretrained on ImageNet-21k, fine-tuned on 1k. 
Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_xl_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_s_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Small w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Large w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_xl_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b0(pretrained=False, **kwargs): + """ EfficientNet-V2-B0. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base('tf_efficientnetv2_b0', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b1(pretrained=False, **kwargs): + """ EfficientNet-V2-B1. 
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b2(pretrained=False, **kwargs): + """ EfficientNet-V2-B2. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b3(pretrained=False, **kwargs): + """ EfficientNet-V2-B3. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_s(pretrained=False, **kwargs): + """Creates a MixNet Small model. + """ + model = _gen_mixnet_s( + 'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_m(pretrained=False, **kwargs): + """Creates a MixNet Medium model. + """ + model = _gen_mixnet_m( + 'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_l(pretrained=False, **kwargs): + """Creates a MixNet Large model. + """ + model = _gen_mixnet_m( + 'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_xl(pretrained=False, **kwargs): + """Creates a MixNet Extra-Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + model = _gen_mixnet_m( + 'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_xxl(pretrained=False, **kwargs): + """Creates a MixNet Double Extra Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + model = _gen_mixnet_m( + 'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_s(pretrained=False, **kwargs): + """Creates a MixNet Small model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_s( + 'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_m(pretrained=False, **kwargs): + """Creates a MixNet Medium model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_m( + 'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_l(pretrained=False, **kwargs): + """Creates a MixNet Large model. 
Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_m( + 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/efficientnet_blocks.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/efficientnet_blocks.py new file mode 100644 index 0000000000..b1fec449c4 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/efficientnet_blocks.py @@ -0,0 +1,323 @@ +""" EfficientNet, MobileNetV3, etc Blocks + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from .layers import create_conv2d, drop_path, make_divisible, create_act_layer +from .layers.activations import sigmoid + +__all__ = [ + 'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', 'InvertedResidual', 'CondConvResidual', 'EdgeResidual'] + + +class SqueezeExcite(nn.Module): + """ Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family + + Args: + in_chs (int): input channels to layer + rd_ratio (float): ratio of squeeze reduction + act_layer (nn.Module): activation layer of containing block + gate_layer (Callable): attention gate function + force_act_layer (nn.Module): override block's activation fn if this is set/bound + rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs + """ + + def __init__( + self, in_chs, rd_ratio=0.25, rd_channels=None, act_layer=nn.ReLU, + gate_layer=nn.Sigmoid, force_act_layer=None, rd_round_fn=None): + super(SqueezeExcite, self).__init__() + if rd_channels is None: + rd_round_fn = rd_round_fn or round + rd_channels = rd_round_fn(in_chs * rd_ratio) + act_layer = force_act_layer or act_layer + self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True) + self.act1 = create_act_layer(act_layer, inplace=True) + self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + x_se = self.conv_reduce(x_se) + x_se = self.act1(x_se) + x_se = self.conv_expand(x_se) + return x * self.gate(x_se) + + +class ConvBnAct(nn.Module): + """ Conv + Norm Layer + Activation w/ optional skip connection + """ + def __init__( + self, in_chs, out_chs, kernel_size, stride=1, dilation=1, pad_type='', + skip=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_path_rate=0.): + super(ConvBnAct, self).__init__() + self.has_residual = skip and stride == 1 and in_chs == out_chs + self.drop_path_rate = drop_path_rate + self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type) + self.bn1 = norm_layer(out_chs) + self.act1 = act_layer(inplace=True) + + def feature_info(self, location): + if location == 'expansion': # output of conv after act, same as block coutput + info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels) + else: # location == 'bottleneck', block output + info = dict(module='', hook_type='', num_chs=self.conv.out_channels) + return info + + def forward(self, x): + shortcut = x + x = self.conv(x) + x = self.bn1(x) + x = self.act1(x) + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + return x + + +class DepthwiseSeparableConv(nn.Module): + """ DepthwiseSeparable block + Used for DS convs in MobileNet-V1 and in the place of IR blocks that 
have no expansion + (factor of 1.0). This is an alternative to having a IR with an optional first pw conv. + """ + def __init__( + self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='', + noskip=False, pw_kernel_size=1, pw_act=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + se_layer=None, drop_path_rate=0.): + super(DepthwiseSeparableConv, self).__init__() + self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip + self.has_pw_act = pw_act # activation after point-wise conv + self.drop_path_rate = drop_path_rate + + self.conv_dw = create_conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True) + self.bn1 = norm_layer(in_chs) + self.act1 = act_layer(inplace=True) + + # Squeeze-and-excitation + self.se = se_layer(in_chs, act_layer=act_layer) if se_layer else nn.Identity() + + self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn2 = norm_layer(out_chs) + self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity() + + def feature_info(self, location): + if location == 'expansion': # after SE, input to PW + info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels) + else: # location == 'bottleneck', block output + info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels) + return info + + def forward(self, x): + shortcut = x + + x = self.conv_dw(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.se(x) + + x = self.conv_pw(x) + x = self.bn2(x) + x = self.act2(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + return x + + +class InvertedResidual(nn.Module): + """ Inverted residual block w/ optional SE + + Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often + referred to as 'MBConv' for (Mobile inverted bottleneck conv) and is also used in + * MNasNet - https://arxiv.org/abs/1807.11626 + * EfficientNet - https://arxiv.org/abs/1905.11946 + * MobileNet-V3 - https://arxiv.org/abs/1905.02244 + """ + + def __init__( + self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='', + noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, se_layer=None, conv_kwargs=None, drop_path_rate=0.): + super(InvertedResidual, self).__init__() + conv_kwargs = conv_kwargs or {} + mid_chs = make_divisible(in_chs * exp_ratio) + self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.drop_path_rate = drop_path_rate + + # Point-wise expansion + self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs) + self.bn1 = norm_layer(mid_chs) + self.act1 = act_layer(inplace=True) + + # Depth-wise convolution + self.conv_dw = create_conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation, + padding=pad_type, depthwise=True, **conv_kwargs) + self.bn2 = norm_layer(mid_chs) + self.act2 = act_layer(inplace=True) + + # Squeeze-and-excitation + self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity() + + # Point-wise linear projection + self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs) + self.bn3 = norm_layer(out_chs) + + def feature_info(self, location): + if location == 'expansion': # after SE, input to PWL + info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) + else: # 
location == 'bottleneck', block output + info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels) + return info + + def forward(self, x): + shortcut = x + + # Point-wise expansion + x = self.conv_pw(x) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn3(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + + return x + + +class CondConvResidual(InvertedResidual): + """ Inverted residual block w/ CondConv routing""" + + def __init__( + self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='', + noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, se_layer=None, num_experts=0, drop_path_rate=0.): + + self.num_experts = num_experts + conv_kwargs = dict(num_experts=self.num_experts) + + super(CondConvResidual, self).__init__( + in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, pad_type=pad_type, + act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, se_layer=se_layer, norm_layer=norm_layer, conv_kwargs=conv_kwargs, + drop_path_rate=drop_path_rate) + + self.routing_fn = nn.Linear(in_chs, self.num_experts) + + def forward(self, x): + shortcut = x + + # CondConv routing + pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1) + routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs)) + + # Point-wise expansion + x = self.conv_pw(x, routing_weights) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x, routing_weights) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x, routing_weights) + x = self.bn3(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + return x + + +class EdgeResidual(nn.Module): + """ Residual block with expansion convolution followed by pointwise-linear w/ stride + + Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML` + - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html + + This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers + * MobileDet - https://arxiv.org/abs/2004.14525 + * EfficientNet-X - https://arxiv.org/abs/2102.05610 + * EfficientNet-V2 - https://arxiv.org/abs/2104.00298 + """ + + def __init__( + self, in_chs, out_chs, exp_kernel_size=3, stride=1, dilation=1, pad_type='', + force_in_chs=0, noskip=False, exp_ratio=1.0, pw_kernel_size=1, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, se_layer=None, drop_path_rate=0.): + super(EdgeResidual, self).__init__() + if force_in_chs > 0: + mid_chs = make_divisible(force_in_chs * exp_ratio) + else: + mid_chs = make_divisible(in_chs * exp_ratio) + self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.drop_path_rate = drop_path_rate + + # Expansion convolution + self.conv_exp = create_conv2d( + in_chs, mid_chs, exp_kernel_size, stride=stride, dilation=dilation, padding=pad_type) + self.bn1 = norm_layer(mid_chs) + self.act1 = act_layer(inplace=True) + + # Squeeze-and-excitation + self.se = se_layer(mid_chs, 
act_layer=act_layer) if se_layer else nn.Identity() + + # Point-wise linear projection + self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn2 = norm_layer(out_chs) + + def feature_info(self, location): + if location == 'expansion': # after SE, before PWL + info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck', block output + info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels) + return info + + def forward(self, x): + shortcut = x + + # Expansion convolution + x = self.conv_exp(x) + x = self.bn1(x) + x = self.act1(x) + + # Squeeze-and-excitation + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn2(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + + return x diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/efficientnet_builder.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/efficientnet_builder.py new file mode 100644 index 0000000000..a23e8273d9 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/efficientnet_builder.py @@ -0,0 +1,463 @@ +""" EfficientNet, MobileNetV3, etc Builder + +Assembles EfficieNet and related network feature blocks from string definitions. +Handles stride, dilation calculations, and selects feature extraction points. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import logging +import math +import re +from copy import deepcopy +from functools import partial + +import torch.nn as nn + +from .efficientnet_blocks import * +from .layers import CondConv2d, get_condconv_initializer, get_act_layer, get_attn, make_divisible + +__all__ = ["EfficientNetBuilder", "decode_arch_def", "efficientnet_init_weights", + 'resolve_bn_args', 'resolve_act_layer', 'round_channels', 'BN_MOMENTUM_TF_DEFAULT', 'BN_EPS_TF_DEFAULT'] + +_logger = logging.getLogger(__name__) + + +_DEBUG_BUILDER = False + +# Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per +# papers and TF reference implementations. 
PT momentum equiv for TF decay is (1 - TF decay) +# NOTE: momentum varies btw .99 and .9997 depending on source +# .99 in official TF TPU impl +# .9997 (/w .999 in search space) for paper +BN_MOMENTUM_TF_DEFAULT = 1 - 0.99 +BN_EPS_TF_DEFAULT = 1e-3 +_BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT) + + +def get_bn_args_tf(): + return _BN_ARGS_TF.copy() + + +def resolve_bn_args(kwargs): + bn_args = get_bn_args_tf() if kwargs.pop('bn_tf', False) else {} + bn_momentum = kwargs.pop('bn_momentum', None) + if bn_momentum is not None: + bn_args['momentum'] = bn_momentum + bn_eps = kwargs.pop('bn_eps', None) + if bn_eps is not None: + bn_args['eps'] = bn_eps + return bn_args + + +def resolve_act_layer(kwargs, default='relu'): + return get_act_layer(kwargs.pop('act_layer', default)) + + +def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None, round_limit=0.9): + """Round number of filters based on depth multiplier.""" + if not multiplier: + return channels + return make_divisible(channels * multiplier, divisor, channel_min, round_limit=round_limit) + + +def _log_info_if(msg, condition): + if condition: + _logger.info(msg) + + +def _parse_ksize(ss): + if ss.isdigit(): + return int(ss) + else: + return [int(k) for k in ss.split('.')] + + +def _decode_block_str(block_str): + """ Decode block definition string + + Gets a list of block arg (dicts) through a string notation of arguments. + E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip + + All args can exist in any order with the exception of the leading string which + is assumed to indicate the block type. + + leading string - block type ( + ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct) + r - number of repeat blocks, + k - kernel size, + s - strides (1-9), + e - expansion ratio, + c - output channels, + se - squeeze/excitation ratio + n - activation fn ('re', 'r6', 'hs', or 'sw') + Args: + block_str: a string representation of block arguments. 
+ Returns: + A list of block args (dicts) + Raises: + ValueError: if the string def not properly specified (TODO) + """ + assert isinstance(block_str, str) + ops = block_str.split('_') + block_type = ops[0] # take the block type off the front + ops = ops[1:] + options = {} + skip = None + for op in ops: + # string options being checked on individual basis, combine if they grow + if op == 'noskip': + skip = False # force no skip connection + elif op == 'skip': + skip = True # force a skip connection + elif op.startswith('n'): + # activation fn + key = op[0] + v = op[1:] + if v == 're': + value = get_act_layer('relu') + elif v == 'r6': + value = get_act_layer('relu6') + elif v == 'hs': + value = get_act_layer('hard_swish') + elif v == 'sw': + value = get_act_layer('swish') # aka SiLU + elif v == 'mi': + value = get_act_layer('mish') + else: + continue + options[key] = value + else: + # all numeric options + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # if act_layer is None, the model default (passed to model init) will be used + act_layer = options['n'] if 'n' in options else None + exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 + pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 + force_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def + + num_repeat = int(options['r']) + # each type of block has different valid arguments, fill accordingly + if block_type == 'ir': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + se_ratio=float(options['se']) if 'se' in options else 0., + stride=int(options['s']), + act_layer=act_layer, + noskip=skip is False, + ) + if 'cc' in options: + block_args['num_experts'] = int(options['cc']) + elif block_type == 'ds' or block_type == 'dsa': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + se_ratio=float(options['se']) if 'se' in options else 0., + stride=int(options['s']), + act_layer=act_layer, + pw_act=block_type == 'dsa', + noskip=block_type == 'dsa' or skip is False, + ) + elif block_type == 'er': + block_args = dict( + block_type=block_type, + exp_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + force_in_chs=force_in_chs, + se_ratio=float(options['se']) if 'se' in options else 0., + stride=int(options['s']), + act_layer=act_layer, + noskip=skip is False, + ) + elif block_type == 'cn': + block_args = dict( + block_type=block_type, + kernel_size=int(options['k']), + out_chs=int(options['c']), + stride=int(options['s']), + act_layer=act_layer, + skip=skip is True, + ) + else: + assert False, 'Unknown block type (%s)' % block_type + + return block_args, num_repeat + + +def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): + """ Per-stage depth scaling + Scales the block repeats in each stage. This depth scaling impl maintains + compatibility with the EfficientNet scaling method, while allowing sensible + scaling for other models that may have multiple block arg definitions in each stage. 
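To make the block-string notation and the per-stage depth scaling concrete, here is a small illustrative sketch. It assumes the vendored timm package (under the convmixer directory) is importable; the values are worked out from the parsing and scaling logic in this file rather than quoted from upstream documentation.

    from timm.models.efficientnet_builder import _decode_block_str, decode_arch_def

    # decode a single inverted-residual block definition
    ba, num_repeat = _decode_block_str('ir_r2_k3_s2_e6_c40_se0.25')
    # ba -> {'block_type': 'ir', 'dw_kernel_size': 3, 'exp_ratio': 6.0, 'out_chs': 40,
    #        'se_ratio': 0.25, 'stride': 2, ...};  num_repeat -> 2

    # expand an arch definition; repeats are re-scaled per stage with ceil() by default
    arch = decode_arch_def([['ds_r1_k3_s1_c16'], ['ir_r2_k3_s2_e6_c24']], depth_multiplier=1.2)
    # the r2 stage grows to ceil(2 * 1.2) = 3 block-arg dicts, and even the r1 stage grows to
    # ceil(1 * 1.2) = 2 unless fix_first_last=True is passed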
+ """ + + # We scale the total repeat count for each stage, there may be multiple + # block arg defs per stage so we need to sum. + num_repeat = sum(repeats) + if depth_trunc == 'round': + # Truncating to int by rounding allows stages with few repeats to remain + # proportionally smaller for longer. This is a good choice when stage definitions + # include single repeat stages that we'd prefer to keep that way as long as possible + num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) + else: + # The default for EfficientNet truncates repeats to int via 'ceil'. + # Any multiplier > 1.0 will result in an increased depth for every stage. + num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) + + # Proportionally distribute repeat count scaling to each block definition in the stage. + # Allocation is done in reverse as it results in the first block being less likely to be scaled. + # The first block makes less sense to repeat in most of the arch definitions. + repeats_scaled = [] + for r in repeats[::-1]: + rs = max(1, round((r / num_repeat * num_repeat_scaled))) + repeats_scaled.append(rs) + num_repeat -= r + num_repeat_scaled -= rs + repeats_scaled = repeats_scaled[::-1] + + # Apply the calculated scaling to each block arg in the stage + sa_scaled = [] + for ba, rep in zip(stack_args, repeats_scaled): + sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) + return sa_scaled + + +def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1, fix_first_last=False): + arch_args = [] + if isinstance(depth_multiplier, tuple): + assert len(depth_multiplier) == len(arch_def) + else: + depth_multiplier = (depth_multiplier,) * len(arch_def) + for stack_idx, (block_strings, multiplier) in enumerate(zip(arch_def, depth_multiplier)): + assert isinstance(block_strings, list) + stack_args = [] + repeats = [] + for block_str in block_strings: + assert isinstance(block_str, str) + ba, rep = _decode_block_str(block_str) + if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: + ba['num_experts'] *= experts_multiplier + stack_args.append(ba) + repeats.append(rep) + if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1): + arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc)) + else: + arch_args.append(_scale_stage_depth(stack_args, repeats, multiplier, depth_trunc)) + return arch_args + + +class EfficientNetBuilder: + """ Build Trunk Blocks + + This ended up being somewhat of a cross between + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py + and + https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py + + """ + def __init__(self, output_stride=32, pad_type='', round_chs_fn=round_channels, se_from_exp=False, + act_layer=None, norm_layer=None, se_layer=None, drop_path_rate=0., feature_location=''): + self.output_stride = output_stride + self.pad_type = pad_type + self.round_chs_fn = round_chs_fn + self.se_from_exp = se_from_exp # calculate se channel reduction from expanded (mid) chs + self.act_layer = act_layer + self.norm_layer = norm_layer + self.se_layer = get_attn(se_layer) + try: + self.se_layer(8, rd_ratio=1.0) # test if attn layer accepts rd_ratio arg + self.se_has_ratio = True + except TypeError: + self.se_has_ratio = False + self.drop_path_rate = drop_path_rate + if feature_location == 'depthwise': + # old 'depthwise' mode renamed 'expansion' to match TF impl, old expansion mode didn't make sense + 
_logger.warning("feature_location=='depthwise' is deprecated, using 'expansion'") + feature_location = 'expansion' + self.feature_location = feature_location + assert feature_location in ('bottleneck', 'expansion', '') + self.verbose = _DEBUG_BUILDER + + # state updated during build, consumed by model + self.in_chs = None + self.features = [] + + def _make_block(self, ba, block_idx, block_count): + drop_path_rate = self.drop_path_rate * block_idx / block_count + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self.round_chs_fn(ba['out_chs']) + if 'force_in_chs' in ba and ba['force_in_chs']: + # NOTE this is a hack to work around mismatch in TF EdgeEffNet impl + ba['force_in_chs'] = self.round_chs_fn(ba['force_in_chs']) + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer + assert ba['act_layer'] is not None + ba['norm_layer'] = self.norm_layer + ba['drop_path_rate'] = drop_path_rate + if bt != 'cn': + se_ratio = ba.pop('se_ratio') + if se_ratio and self.se_layer is not None: + if not self.se_from_exp: + # adjust se_ratio by expansion ratio if calculating se channels from block input + se_ratio /= ba.get('exp_ratio', 1.0) + if self.se_has_ratio: + ba['se_layer'] = partial(self.se_layer, rd_ratio=se_ratio) + else: + ba['se_layer'] = self.se_layer + + if bt == 'ir': + _log_info_if(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = CondConvResidual(**ba) if ba.get('num_experts', 0) else InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + _log_info_if(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = DepthwiseSeparableConv(**ba) + elif bt == 'er': + _log_info_if(' EdgeResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = EdgeResidual(**ba) + elif bt == 'cn': + _log_info_if(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = ConvBnAct(**ba) + else: + assert False, 'Uknkown block type (%s) while building model.' % bt + + self.in_chs = ba['out_chs'] # update in_chs for arg of next block + return block + + def __call__(self, in_chs, model_block_args): + """ Build the blocks + Args: + in_chs: Number of input-channels passed to first block + model_block_args: A list of lists, outer list defines stages, inner + list contains strings defining block configuration(s) + Return: + List of block stacks (each stack wrapped in nn.Sequential) + """ + _log_info_if('Building model trunk with %d stages...' 
% len(model_block_args), self.verbose) + self.in_chs = in_chs + total_block_count = sum([len(x) for x in model_block_args]) + total_block_idx = 0 + current_stride = 2 + current_dilation = 1 + stages = [] + if model_block_args[0][0]['stride'] > 1: + # if the first block starts with a stride, we need to extract first level feat from stem + feature_info = dict( + module='act1', num_chs=in_chs, stage=0, reduction=current_stride, + hook_type='forward' if self.feature_location != 'bottleneck' else '') + self.features.append(feature_info) + + # outer list of block_args defines the stacks + for stack_idx, stack_args in enumerate(model_block_args): + last_stack = stack_idx + 1 == len(model_block_args) + _log_info_if('Stack: {}'.format(stack_idx), self.verbose) + assert isinstance(stack_args, list) + + blocks = [] + # each stack (stage of blocks) contains a list of block arguments + for block_idx, block_args in enumerate(stack_args): + last_block = block_idx + 1 == len(stack_args) + _log_info_if(' Block: {}'.format(block_idx), self.verbose) + + assert block_args['stride'] in (1, 2) + if block_idx >= 1: # only the first block in any stack can have a stride > 1 + block_args['stride'] = 1 + + extract_features = False + if last_block: + next_stack_idx = stack_idx + 1 + extract_features = next_stack_idx >= len(model_block_args) or \ + model_block_args[next_stack_idx][0]['stride'] > 1 + + next_dilation = current_dilation + if block_args['stride'] > 1: + next_output_stride = current_stride * block_args['stride'] + if next_output_stride > self.output_stride: + next_dilation = current_dilation * block_args['stride'] + block_args['stride'] = 1 + _log_info_if(' Converting stride to dilation to maintain output_stride=={}'.format( + self.output_stride), self.verbose) + else: + current_stride = next_output_stride + block_args['dilation'] = current_dilation + if next_dilation != current_dilation: + current_dilation = next_dilation + + # create the block + block = self._make_block(block_args, total_block_idx, total_block_count) + blocks.append(block) + + # stash feature module name and channel info for model feature extraction + if extract_features: + feature_info = dict( + stage=stack_idx + 1, reduction=current_stride, **block.feature_info(self.feature_location)) + module_name = f'blocks.{stack_idx}.{block_idx}' + leaf_name = feature_info.get('module', '') + feature_info['module'] = '.'.join([module_name, leaf_name]) if leaf_name else module_name + self.features.append(feature_info) + + total_block_idx += 1 # incr global block idx (across all stacks) + stages.append(nn.Sequential(*blocks)) + return stages + + +def _init_weight_goog(m, n='', fix_group_fanout=True): + """ Weight initialization as per Tensorflow official implementations. 
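As a quick numeric illustration of the fan-out rule implemented below (a sketch worked out from this function's arithmetic, not an upstream reference value):

    import math
    import torch.nn as nn

    conv = nn.Conv2d(32, 64, kernel_size=3, padding=1)            # regular conv, groups=1
    fan_out = conv.kernel_size[0] * conv.kernel_size[1] * conv.out_channels   # 3*3*64 = 576
    std = math.sqrt(2.0 / fan_out)                                # ~0.059

    dw = nn.Conv2d(64, 64, kernel_size=3, padding=1, groups=64)   # depthwise conv
    fan_out_dw = (dw.kernel_size[0] * dw.kernel_size[1] * dw.out_channels) // dw.groups   # 576 // 64 = 9
    std_dw = math.sqrt(2.0 / fan_out_dw)                          # ~0.471 with fix_group_fanout=True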
+ + Args: + m (nn.Module): module to init + n (str): module name + fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs + + Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc: + * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py + * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + """ + if isinstance(m, CondConv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + init_weight_fn = get_condconv_initializer( + lambda w: nn.init.normal_(w, 0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) + init_weight_fn(m.weight) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + nn.init.normal_(m.weight, 0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + fan_out = m.weight.size(0) # fan-out + fan_in = 0 + if 'routing_fn' in n: + fan_in = m.weight.size(1) + init_range = 1.0 / math.sqrt(fan_in + fan_out) + nn.init.uniform_(m.weight, -init_range, init_range) + nn.init.zeros_(m.bias) + + +def efficientnet_init_weights(model: nn.Module, init_fn=None): + init_fn = init_fn or _init_weight_goog + for n, m in model.named_modules(): + init_fn(m, n) + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/factory.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/factory.py new file mode 100644 index 0000000000..d040a9ff62 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/factory.py @@ -0,0 +1,86 @@ +from .registry import is_model, is_model_in_modules, model_entrypoint +from .helpers import load_checkpoint +from .layers import set_layer_config +from .hub import load_model_config_from_hf + + +def split_model_name(model_name): + model_split = model_name.split(':', 1) + if len(model_split) == 1: + return '', model_split[0] + else: + source_name, model_name = model_split + assert source_name in ('timm', 'hf_hub') + return source_name, model_name + + +def safe_model_name(model_name, remove_source=True): + def make_safe(name): + return ''.join(c if c.isalnum() else '_' for c in name).rstrip('_') + if remove_source: + model_name = split_model_name(model_name)[-1] + return make_safe(model_name) + + +def create_model( + model_name, + pretrained=False, + checkpoint_path='', + scriptable=None, + exportable=None, + no_jit=None, + **kwargs): + """Create a model + + Args: + model_name (str): name of model to instantiate + pretrained (bool): load pretrained ImageNet-1k weights if true + checkpoint_path (str): path of checkpoint to load after model is initialized + scriptable (bool): set layer config so that model is jit scriptable (not working for all models yet) + exportable (bool): set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet) + no_jit (bool): set layer config so that model doesn't utilize jit scripted layers (so far activations only) + + Keyword Args: + drop_rate (float): dropout rate for training (default: 0.0) + global_pool (str): global pool type (default: 'avg') + **: other kwargs are model specific + """ + source_name, model_name = split_model_name(model_name) + + # Only EfficientNet and 
MobileNetV3 models have support for batchnorm params or drop_connect_rate passed as args + is_efficientnet = is_model_in_modules(model_name, ['efficientnet', 'mobilenetv3']) + if not is_efficientnet: + kwargs.pop('bn_tf', None) + kwargs.pop('bn_momentum', None) + kwargs.pop('bn_eps', None) + + # handle backwards compat with drop_connect -> drop_path change + drop_connect_rate = kwargs.pop('drop_connect_rate', None) + if drop_connect_rate is not None and kwargs.get('drop_path_rate', None) is None: + print("WARNING: 'drop_connect' as an argument is deprecated, please use 'drop_path'." + " Setting drop_path to %f." % drop_connect_rate) + kwargs['drop_path_rate'] = drop_connect_rate + + # Parameters that aren't supported by all models or are intended to only override model defaults if set + # should default to None in command line args/cfg. Remove them if they are present and not set so that + # non-supporting models don't break and default args remain in effect. + kwargs = {k: v for k, v in kwargs.items() if v is not None} + + if source_name == 'hf_hub': + # For model names specified in the form `hf_hub:path/architecture_name#revision`, + # load model weights + default_cfg from Hugging Face hub. + hf_default_cfg, model_name = load_model_config_from_hf(model_name) + kwargs['external_default_cfg'] = hf_default_cfg # FIXME revamp default_cfg interface someday + + if is_model(model_name): + create_fn = model_entrypoint(model_name) + else: + raise RuntimeError('Unknown model (%s)' % model_name) + + with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit): + model = create_fn(pretrained=pretrained, **kwargs) + + if checkpoint_path: + load_checkpoint(model, checkpoint_path) + + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/features.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/features.py new file mode 100644 index 0000000000..b1d6890f3e --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/features.py @@ -0,0 +1,284 @@ +""" PyTorch Feature Extraction Helpers + +A collection of classes, functions, modules to help extract features from models +and provide a common interface for describing them. 
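As a usage sketch for the helpers in this file (the backbone below is a made-up toy, and the import path assumes the vendored timm copy is importable):

    import torch
    import torch.nn as nn
    from timm.models.features import FeatureListNet   # defined later in this file

    class TinyBackbone(nn.Module):
        """Toy two-stage backbone used only for this sketch."""
        def __init__(self):
            super().__init__()
            self.stem = nn.Conv2d(3, 16, 3, stride=2, padding=1)
            self.stage1 = nn.Conv2d(16, 32, 3, stride=2, padding=1)
            # feature_info is the contract the wrappers below rely on
            self.feature_info = [
                dict(num_chs=16, reduction=2, module='stem'),
                dict(num_chs=32, reduction=4, module='stage1'),
            ]

        def forward(self, x):
            return self.stage1(self.stem(x))

    features = FeatureListNet(TinyBackbone(), out_indices=(0, 1))
    outs = features(torch.randn(1, 3, 64, 64))
    # outs[0]: (1, 16, 32, 32) from 'stem';  outs[1]: (1, 32, 16, 16) from 'stage1'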
+ +The return_layers, module re-writing idea inspired by torchvision IntermediateLayerGetter +https://github.com/pytorch/vision/blob/d88d8961ae51507d0cb680329d985b1488b1b76b/torchvision/models/_utils.py + +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict, defaultdict +from copy import deepcopy +from functools import partial +from typing import Dict, List, Tuple + +import torch +import torch.nn as nn + + +class FeatureInfo: + + def __init__(self, feature_info: List[Dict], out_indices: Tuple[int]): + prev_reduction = 1 + for fi in feature_info: + # sanity check the mandatory fields, there may be additional fields depending on the model + assert 'num_chs' in fi and fi['num_chs'] > 0 + assert 'reduction' in fi and fi['reduction'] >= prev_reduction + prev_reduction = fi['reduction'] + assert 'module' in fi + self.out_indices = out_indices + self.info = feature_info + + def from_other(self, out_indices: Tuple[int]): + return FeatureInfo(deepcopy(self.info), out_indices) + + def get(self, key, idx=None): + """ Get value by key at specified index (indices) + if idx == None, returns value for key at each output index + if idx is an integer, return value for that feature module index (ignoring output indices) + if idx is a list/tupple, return value for each module index (ignoring output indices) + """ + if idx is None: + return [self.info[i][key] for i in self.out_indices] + if isinstance(idx, (tuple, list)): + return [self.info[i][key] for i in idx] + else: + return self.info[idx][key] + + def get_dicts(self, keys=None, idx=None): + """ return info dicts for specified keys (or all if None) at specified indices (or out_indices if None) + """ + if idx is None: + if keys is None: + return [self.info[i] for i in self.out_indices] + else: + return [{k: self.info[i][k] for k in keys} for i in self.out_indices] + if isinstance(idx, (tuple, list)): + return [self.info[i] if keys is None else {k: self.info[i][k] for k in keys} for i in idx] + else: + return self.info[idx] if keys is None else {k: self.info[idx][k] for k in keys} + + def channels(self, idx=None): + """ feature channels accessor + """ + return self.get('num_chs', idx) + + def reduction(self, idx=None): + """ feature reduction (output stride) accessor + """ + return self.get('reduction', idx) + + def module_name(self, idx=None): + """ feature module name accessor + """ + return self.get('module', idx) + + def __getitem__(self, item): + return self.info[item] + + def __len__(self): + return len(self.info) + + +class FeatureHooks: + """ Feature Hook Helper + + This module helps with the setup and extraction of hooks for extracting features from + internal nodes in a model by node name. This works quite well in eager Python but needs + redesign for torcscript. 
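A minimal sketch of the expected calling pattern (hooks are described by the same dicts a model's feature_info carries; the toy module and import path below are assumptions for illustration only):

    from collections import OrderedDict
    import torch
    import torch.nn as nn
    from timm.models.features import FeatureHooks

    net = nn.Sequential(OrderedDict([('conv', nn.Conv2d(3, 8, 3)), ('act', nn.ReLU())]))
    hooks = FeatureHooks([dict(module='act', hook_type='forward')], net.named_modules())

    x = torch.randn(1, 3, 32, 32)
    _ = net(x)                           # hook fires during the forward pass
    feats = hooks.get_output(x.device)   # {'act': tensor of shape (1, 8, 30, 30)}, cleared after reading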
+ """ + + def __init__(self, hooks, named_modules, out_map=None, default_hook_type='forward'): + # setup feature hooks + modules = {k: v for k, v in named_modules} + for i, h in enumerate(hooks): + hook_name = h['module'] + m = modules[hook_name] + hook_id = out_map[i] if out_map else hook_name + hook_fn = partial(self._collect_output_hook, hook_id) + hook_type = h['hook_type'] if 'hook_type' in h else default_hook_type + if hook_type == 'forward_pre': + m.register_forward_pre_hook(hook_fn) + elif hook_type == 'forward': + m.register_forward_hook(hook_fn) + else: + assert False, "Unsupported hook type" + self._feature_outputs = defaultdict(OrderedDict) + + def _collect_output_hook(self, hook_id, *args): + x = args[-1] # tensor we want is last argument, output for fwd, input for fwd_pre + if isinstance(x, tuple): + x = x[0] # unwrap input tuple + self._feature_outputs[x.device][hook_id] = x + + def get_output(self, device) -> Dict[str, torch.tensor]: + output = self._feature_outputs[device] + self._feature_outputs[device] = OrderedDict() # clear after reading + return output + + +def _module_list(module, flatten_sequential=False): + # a yield/iter would be better for this but wouldn't be compatible with torchscript + ml = [] + for name, module in module.named_children(): + if flatten_sequential and isinstance(module, nn.Sequential): + # first level of Sequential containers is flattened into containing model + for child_name, child_module in module.named_children(): + combined = [name, child_name] + ml.append(('_'.join(combined), '.'.join(combined), child_module)) + else: + ml.append((name, name, module)) + return ml + + +def _get_feature_info(net, out_indices): + feature_info = getattr(net, 'feature_info') + if isinstance(feature_info, FeatureInfo): + return feature_info.from_other(out_indices) + elif isinstance(feature_info, (list, tuple)): + return FeatureInfo(net.feature_info, out_indices) + else: + assert False, "Provided feature_info is not valid" + + +def _get_return_layers(feature_info, out_map): + module_names = feature_info.module_name() + return_layers = {} + for i, name in enumerate(module_names): + return_layers[name] = out_map[i] if out_map is not None else feature_info.out_indices[i] + return return_layers + + +class FeatureDictNet(nn.ModuleDict): + """ Feature extractor with OrderedDict return + + Wrap a model and extract features as specified by the out indices, the network is + partially re-built from contained modules. + + There is a strong assumption that the modules have been registered into the model in the same + order as they are used. There should be no reuse of the same nn.Module more than once, including + trivial modules like `self.relu = nn.ReLU`. + + Only submodules that are directly assigned to the model class (`model.feature1`) or at most + one Sequential container deep (`model.features.1`, with flatten_sequent=True) can be captured. 
+ All Sequential containers that are directly assigned to the original model will have their + modules assigned to this module with the name `model.features.1` being changed to `model.features_1` + + Arguments: + model (nn.Module): model from which we will extract the features + out_indices (tuple[int]): model output indices to extract features for + out_map (sequence): list or tuple specifying desired return id for each out index, + otherwise str(index) is used + feature_concat (bool): whether to concatenate intermediate features that are lists or tuples + vs select element [0] + flatten_sequential (bool): whether to flatten sequential modules assigned to model + """ + def __init__( + self, model, + out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False): + super(FeatureDictNet, self).__init__() + self.feature_info = _get_feature_info(model, out_indices) + self.concat = feature_concat + self.return_layers = {} + return_layers = _get_return_layers(self.feature_info, out_map) + modules = _module_list(model, flatten_sequential=flatten_sequential) + remaining = set(return_layers.keys()) + layers = OrderedDict() + for new_name, old_name, module in modules: + layers[new_name] = module + if old_name in remaining: + # return id has to be consistently str type for torchscript + self.return_layers[new_name] = str(return_layers[old_name]) + remaining.remove(old_name) + if not remaining: + break + assert not remaining and len(self.return_layers) == len(return_layers), \ + f'Return layers ({remaining}) are not present in model' + self.update(layers) + + def _collect(self, x) -> (Dict[str, torch.Tensor]): + out = OrderedDict() + for name, module in self.items(): + x = module(x) + if name in self.return_layers: + out_id = self.return_layers[name] + if isinstance(x, (tuple, list)): + # If model tap is a tuple or list, concat or select first element + # FIXME this may need to be more generic / flexible for some nets + out[out_id] = torch.cat(x, 1) if self.concat else x[0] + else: + out[out_id] = x + return out + + def forward(self, x) -> Dict[str, torch.Tensor]: + return self._collect(x) + + +class FeatureListNet(FeatureDictNet): + """ Feature extractor with list return + + See docstring for FeatureDictNet above, this class exists only to appease Torchscript typing constraints. + In eager Python we could have returned List[Tensor] vs Dict[id, Tensor] based on a member bool. + """ + def __init__( + self, model, + out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False): + super(FeatureListNet, self).__init__( + model, out_indices=out_indices, out_map=out_map, feature_concat=feature_concat, + flatten_sequential=flatten_sequential) + + def forward(self, x) -> (List[torch.Tensor]): + return list(self._collect(x).values()) + + +class FeatureHookNet(nn.ModuleDict): + """ FeatureHookNet + + Wrap a model and extract features specified by the out indices using forward/forward-pre hooks. + + If `no_rewrite` is True, features are extracted via hooks without modifying the underlying + network in any way. + + If `no_rewrite` is False, the model will be re-written as in the + FeatureList/FeatureDict case by folding first to second (Sequential only) level modules into this one. 
+ + FIXME this does not currently work with Torchscript, see FeatureHooks class + """ + def __init__( + self, model, + out_indices=(0, 1, 2, 3, 4), out_map=None, out_as_dict=False, no_rewrite=False, + feature_concat=False, flatten_sequential=False, default_hook_type='forward'): + super(FeatureHookNet, self).__init__() + assert not torch.jit.is_scripting() + self.feature_info = _get_feature_info(model, out_indices) + self.out_as_dict = out_as_dict + layers = OrderedDict() + hooks = [] + if no_rewrite: + assert not flatten_sequential + if hasattr(model, 'reset_classifier'): # make sure classifier is removed? + model.reset_classifier(0) + layers['body'] = model + hooks.extend(self.feature_info.get_dicts()) + else: + modules = _module_list(model, flatten_sequential=flatten_sequential) + remaining = {f['module']: f['hook_type'] if 'hook_type' in f else default_hook_type + for f in self.feature_info.get_dicts()} + for new_name, old_name, module in modules: + layers[new_name] = module + for fn, fm in module.named_modules(prefix=old_name): + if fn in remaining: + hooks.append(dict(module=fn, hook_type=remaining[fn])) + del remaining[fn] + if not remaining: + break + assert not remaining, f'Return layers ({remaining}) are not present in model' + self.update(layers) + self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map) + + def forward(self, x): + for name, module in self.items(): + x = module(x) + out = self.hooks.get_output(x.device) + return out if self.out_as_dict else list(out.values()) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/ghostnet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/ghostnet.py new file mode 100644 index 0000000000..3b6f90a42f --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/ghostnet.py @@ -0,0 +1,276 @@ +""" +An implementation of GhostNet Model as defined in: +GhostNet: More Features from Cheap Operations. 
https://arxiv.org/abs/1911.11907 +The train script of the model is similar to that of MobileNetV3 +Original model: https://github.com/huawei-noah/CV-backbones/tree/master/ghostnet_pytorch +""" +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .layers import SelectAdaptivePool2d, Linear, make_divisible +from .efficientnet_blocks import SqueezeExcite, ConvBnAct +from .helpers import build_model_with_cfg +from .registry import register_model + + +__all__ = ['GhostNet'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'ghostnet_050': _cfg(url=''), + 'ghostnet_100': _cfg( + url='https://github.com/huawei-noah/CV-backbones/releases/download/ghostnet_pth/ghostnet_1x.pth'), + 'ghostnet_130': _cfg(url=''), +} + + +_SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4)) + + +class GhostModule(nn.Module): + def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True): + super(GhostModule, self).__init__() + self.oup = oup + init_channels = math.ceil(oup / ratio) + new_channels = init_channels * (ratio - 1) + + self.primary_conv = nn.Sequential( + nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size//2, bias=False), + nn.BatchNorm2d(init_channels), + nn.ReLU(inplace=True) if relu else nn.Sequential(), + ) + + self.cheap_operation = nn.Sequential( + nn.Conv2d(init_channels, new_channels, dw_size, 1, dw_size//2, groups=init_channels, bias=False), + nn.BatchNorm2d(new_channels), + nn.ReLU(inplace=True) if relu else nn.Sequential(), + ) + + def forward(self, x): + x1 = self.primary_conv(x) + x2 = self.cheap_operation(x1) + out = torch.cat([x1, x2], dim=1) + return out[:, :self.oup, :, :] + + +class GhostBottleneck(nn.Module): + """ Ghost bottleneck w/ optional SE""" + + def __init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3, + stride=1, act_layer=nn.ReLU, se_ratio=0.): + super(GhostBottleneck, self).__init__() + has_se = se_ratio is not None and se_ratio > 0. 
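+ # SE is enabled per block by the cfg table (se_ratio > 0); the width multiplier only scales channel counts.
+ # _SE_LAYER above uses a hard_sigmoid gate and rounds the reduced channels to a multiple of 4.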
+ self.stride = stride + + # Point-wise expansion + self.ghost1 = GhostModule(in_chs, mid_chs, relu=True) + + # Depth-wise convolution + if self.stride > 1: + self.conv_dw = nn.Conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, + padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False) + self.bn_dw = nn.BatchNorm2d(mid_chs) + else: + self.conv_dw = None + self.bn_dw = None + + # Squeeze-and-excitation + self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None + + # Point-wise linear projection + self.ghost2 = GhostModule(mid_chs, out_chs, relu=False) + + # shortcut + if in_chs == out_chs and self.stride == 1: + self.shortcut = nn.Sequential() + else: + self.shortcut = nn.Sequential( + nn.Conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, + padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False), + nn.BatchNorm2d(in_chs), + nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(out_chs), + ) + + def forward(self, x): + shortcut = x + + # 1st ghost bottleneck + x = self.ghost1(x) + + # Depth-wise convolution + if self.conv_dw is not None: + x = self.conv_dw(x) + x = self.bn_dw(x) + + # Squeeze-and-excitation + if self.se is not None: + x = self.se(x) + + # 2nd ghost bottleneck + x = self.ghost2(x) + + x += self.shortcut(shortcut) + return x + + +class GhostNet(nn.Module): + def __init__(self, cfgs, num_classes=1000, width=1.0, dropout=0.2, in_chans=3, output_stride=32, global_pool='avg'): + super(GhostNet, self).__init__() + # setting of inverted residual blocks + assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported' + self.cfgs = cfgs + self.num_classes = num_classes + self.dropout = dropout + self.feature_info = [] + + # building first layer + stem_chs = make_divisible(16 * width, 4) + self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False) + self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=f'conv_stem')) + self.bn1 = nn.BatchNorm2d(stem_chs) + self.act1 = nn.ReLU(inplace=True) + prev_chs = stem_chs + + # building inverted residual blocks + stages = nn.ModuleList([]) + block = GhostBottleneck + stage_idx = 0 + net_stride = 2 + for cfg in self.cfgs: + layers = [] + s = 1 + for k, exp_size, c, se_ratio, s in cfg: + out_chs = make_divisible(c * width, 4) + mid_chs = make_divisible(exp_size * width, 4) + layers.append(block(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio)) + prev_chs = out_chs + if s > 1: + net_stride *= 2 + self.feature_info.append(dict( + num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}')) + stages.append(nn.Sequential(*layers)) + stage_idx += 1 + + out_chs = make_divisible(exp_size * width, 4) + stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1))) + self.pool_dim = prev_chs = out_chs + + self.blocks = nn.Sequential(*stages) + + # building last several layers + self.num_features = out_chs = 1280 + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True) + self.act2 = nn.ReLU(inplace=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + # cannot meaningfully change pooling of efficient head after creation + self.global_pool = 
SelectAdaptivePool2d(pool_type=global_pool) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.pool_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.flatten(x) + if self.dropout > 0.: + x = F.dropout(x, p=self.dropout, training=self.training) + x = self.classifier(x) + return x + + +def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs): + """ + Constructs a GhostNet model + """ + cfgs = [ + # k, t, c, SE, s + # stage1 + [[3, 16, 16, 0, 1]], + # stage2 + [[3, 48, 24, 0, 2]], + [[3, 72, 24, 0, 1]], + # stage3 + [[5, 72, 40, 0.25, 2]], + [[5, 120, 40, 0.25, 1]], + # stage4 + [[3, 240, 80, 0, 2]], + [[3, 200, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 480, 112, 0.25, 1], + [3, 672, 112, 0.25, 1] + ], + # stage5 + [[5, 672, 160, 0.25, 2]], + [[5, 960, 160, 0, 1], + [5, 960, 160, 0.25, 1], + [5, 960, 160, 0, 1], + [5, 960, 160, 0.25, 1] + ] + ] + model_kwargs = dict( + cfgs=cfgs, + width=width, + **kwargs, + ) + return build_model_with_cfg( + GhostNet, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **model_kwargs) + + +@register_model +def ghostnet_050(pretrained=False, **kwargs): + """ GhostNet-0.5x """ + model = _create_ghostnet('ghostnet_050', width=0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def ghostnet_100(pretrained=False, **kwargs): + """ GhostNet-1.0x """ + model = _create_ghostnet('ghostnet_100', width=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def ghostnet_130(pretrained=False, **kwargs): + """ GhostNet-1.3x """ + model = _create_ghostnet('ghostnet_130', width=1.3, pretrained=pretrained, **kwargs) + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/gluon_resnet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/gluon_resnet.py new file mode 100644 index 0000000000..027a10b534 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/gluon_resnet.py @@ -0,0 +1,248 @@ +"""Pytorch impl of MxNet Gluon ResNet/(SE)ResNeXt variants +This file evolved from https://github.com/pytorch/vision 'resnet.py' with (SE)-ResNeXt additions +and ports of Gluon variations (https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnet.py) +by Ross Wightman +""" + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import SEModule +from .registry import register_model +from .resnet import ResNet, Bottleneck, BasicBlock + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'gluon_resnet18_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet18_v1b-0757602b.pth'), + 'gluon_resnet34_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet34_v1b-c6d82d59.pth'), + 'gluon_resnet50_v1b': 
_cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1b-0ebe02e2.pth'), + 'gluon_resnet101_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1b-3b017079.pth'), + 'gluon_resnet152_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1b-c1edb0dd.pth'), + 'gluon_resnet50_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1c-48092f55.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1c-1f26822a.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1c-a3bb0b98.pth', + first_conv='conv1.0'), + 'gluon_resnet50_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1d-818a1b1b.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1d-0f9c8644.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1d-bd354e12.pth', + first_conv='conv1.0'), + 'gluon_resnet50_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1s-1762acc0.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1s-60fe0cc1.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1s-dcc41b81.pth', + first_conv='conv1.0'), + 'gluon_resnext50_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth'), + 'gluon_resnext101_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth'), + 'gluon_resnext101_64x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth'), + 'gluon_seresnext50_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth'), + 'gluon_seresnext101_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth'), + 'gluon_seresnext101_64x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_64x4d-f9926f93.pth'), + 'gluon_senet154': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_senet154-70a1a3c0.pth', + first_conv='conv1.0'), +} + + +def _create_resnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def gluon_resnet18_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. 
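+    Note: like the other variants in this file it is registered with timm's model registry,
+    so it is normally instantiated via create_model, e.g. timm.create_model('gluon_resnet18_v1b', pretrained=True).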
+ """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('gluon_resnet18_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet34_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('gluon_resnet34_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('gluon_resnet50_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('gluon_resnet101_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('gluon_resnet152_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet50_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet101_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet152_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet50_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet101_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet152_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet50_v1s', pretrained, **model_args) + + + +@register_model +def gluon_resnet101_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. 
+ """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet101_v1s', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet152_v1s', pretrained, **model_args) + + + +@register_model +def gluon_resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('gluon_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def gluon_resnext101_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('gluon_resnext101_32x4d', pretrained, **model_args) + + +@register_model +def gluon_resnext101_64x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, **kwargs) + return _create_resnet('gluon_resnext101_64x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext50_32x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt50-32x4d model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext101_32x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt-101-32x4d model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext101_32x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext101_64x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt-101-64x4d model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext101_64x4d', pretrained, **model_args) + + +@register_model +def gluon_senet154(pretrained=False, **kwargs): + """Constructs an SENet-154 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep', + down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_senet154', pretrained, **model_args) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/gluon_xception.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/gluon_xception.py new file mode 100644 index 0000000000..fbd668a585 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/gluon_xception.py @@ -0,0 +1,246 @@ +"""Pytorch impl of Gluon Xception +This is a port of the Gluon Xception code and weights, itself ported from a PyTorch DeepLab impl. 
+ +Gluon model: (https://gluon-cv.mxnet.io/_modules/gluoncv/model_zoo/xception.html) +Original PyTorch DeepLab impl: https://github.com/jfzhang95/pytorch-deeplab-xception + +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict + +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier, get_padding +from .registry import register_model + +__all__ = ['Xception65'] + +default_cfgs = { + 'gluon_xception65': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_xception-7015a15c.pth', + 'input_size': (3, 299, 299), + 'crop_pct': 0.903, + 'pool_size': (10, 10), + 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, + 'std': IMAGENET_DEFAULT_STD, + 'num_classes': 1000, + 'first_conv': 'conv1', + 'classifier': 'fc' + # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 + }, +} + +""" PADDING NOTES +The original PyTorch and Gluon impl of these models dutifully reproduced the +aligned padding added to Tensorflow models for Deeplab. This padding was compensating +for Tensorflow 'SAME' padding. PyTorch symmetric padding behaves the way we'd want it to. +""" + + +class SeparableConv2d(nn.Module): + def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, norm_layer=None): + super(SeparableConv2d, self).__init__() + self.kernel_size = kernel_size + self.dilation = dilation + + # depthwise convolution + padding = get_padding(kernel_size, stride, dilation) + self.conv_dw = nn.Conv2d( + inplanes, inplanes, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=inplanes, bias=bias) + self.bn = norm_layer(num_features=inplanes) + # pointwise convolution + self.conv_pw = nn.Conv2d(inplanes, planes, kernel_size=1, bias=bias) + + def forward(self, x): + x = self.conv_dw(x) + x = self.bn(x) + x = self.conv_pw(x) + return x + + +class Block(nn.Module): + def __init__(self, inplanes, planes, stride=1, dilation=1, start_with_relu=True, norm_layer=None): + super(Block, self).__init__() + if isinstance(planes, (list, tuple)): + assert len(planes) == 3 + else: + planes = (planes,) * 3 + outplanes = planes[-1] + + if outplanes != inplanes or stride != 1: + self.skip = nn.Sequential() + self.skip.add_module('conv1', nn.Conv2d( + inplanes, outplanes, 1, stride=stride, bias=False)), + self.skip.add_module('bn1', norm_layer(num_features=outplanes)) + else: + self.skip = None + + rep = OrderedDict() + for i in range(3): + rep['act%d' % (i + 1)] = nn.ReLU(inplace=True) + rep['conv%d' % (i + 1)] = SeparableConv2d( + inplanes, planes[i], 3, stride=stride if i == 2 else 1, dilation=dilation, norm_layer=norm_layer) + rep['bn%d' % (i + 1)] = norm_layer(planes[i]) + inplanes = planes[i] + + if not start_with_relu: + del rep['act1'] + else: + rep['act1'] = nn.ReLU(inplace=False) + self.rep = nn.Sequential(rep) + + def forward(self, x): + skip = x + if self.skip is not None: + skip = self.skip(skip) + x = self.rep(x) + skip + return x + + +class Xception65(nn.Module): + """Modified Aligned Xception. 
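+    The output_stride argument (32, 16 or 8) controls how much of the downsampling is done by
+    striding vs. dilation; the corresponding stride/dilation settings are chosen in __init__ below.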
+ + NOTE: only the 65 layer version is included here, the 71 layer variant + was not correct and had no pretrained weights + """ + + def __init__(self, num_classes=1000, in_chans=3, output_stride=32, norm_layer=nn.BatchNorm2d, + drop_rate=0., global_pool='avg'): + super(Xception65, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + if output_stride == 32: + entry_block3_stride = 2 + exit_block20_stride = 2 + middle_dilation = 1 + exit_dilation = (1, 1) + elif output_stride == 16: + entry_block3_stride = 2 + exit_block20_stride = 1 + middle_dilation = 1 + exit_dilation = (1, 2) + elif output_stride == 8: + entry_block3_stride = 1 + exit_block20_stride = 1 + middle_dilation = 2 + exit_dilation = (2, 4) + else: + raise NotImplementedError + + # Entry flow + self.conv1 = nn.Conv2d(in_chans, 32, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = norm_layer(num_features=32) + self.act1 = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False) + self.bn2 = norm_layer(num_features=64) + self.act2 = nn.ReLU(inplace=True) + + self.block1 = Block(64, 128, stride=2, start_with_relu=False, norm_layer=norm_layer) + self.block1_act = nn.ReLU(inplace=True) + self.block2 = Block(128, 256, stride=2, start_with_relu=False, norm_layer=norm_layer) + self.block3 = Block(256, 728, stride=entry_block3_stride, norm_layer=norm_layer) + + # Middle flow + self.mid = nn.Sequential(OrderedDict([('block%d' % i, Block( + 728, 728, stride=1, dilation=middle_dilation, norm_layer=norm_layer)) for i in range(4, 20)])) + + # Exit flow + self.block20 = Block( + 728, (728, 1024, 1024), stride=exit_block20_stride, dilation=exit_dilation[0], norm_layer=norm_layer) + self.block20_act = nn.ReLU(inplace=True) + + self.conv3 = SeparableConv2d(1024, 1536, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn3 = norm_layer(num_features=1536) + self.act3 = nn.ReLU(inplace=True) + + self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn4 = norm_layer(num_features=1536) + self.act4 = nn.ReLU(inplace=True) + + self.num_features = 2048 + self.conv5 = SeparableConv2d( + 1536, self.num_features, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn5 = norm_layer(num_features=self.num_features) + self.act5 = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='act2'), + dict(num_chs=128, reduction=4, module='block1_act'), + dict(num_chs=256, reduction=8, module='block3.rep.act1'), + dict(num_chs=728, reduction=16, module='block20.rep.act1'), + dict(num_chs=2048, reduction=32, module='act5'), + ] + + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + # Entry flow + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + + x = self.block1(x) + x = self.block1_act(x) + # c1 = x + x = self.block2(x) + # c2 = x + x = self.block3(x) + + # Middle flow + x = self.mid(x) + # c3 = x + + # Exit flow + x = self.block20(x) + x = self.block20_act(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.act3(x) + + x = self.conv4(x) + x = self.bn4(x) + 
x = self.act4(x) + + x = self.conv5(x) + x = self.bn5(x) + x = self.act5(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate: + F.dropout(x, self.drop_rate, training=self.training) + x = self.fc(x) + return x + + +def _create_gluon_xception(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + Xception65, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_cls='hook'), + **kwargs) + + +@register_model +def gluon_xception65(pretrained=False, **kwargs): + """ Modified Aligned Xception-65 + """ + return _create_gluon_xception('gluon_xception65', pretrained, **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/hardcorenas.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/hardcorenas.py new file mode 100644 index 0000000000..9988a04445 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/hardcorenas.py @@ -0,0 +1,152 @@ +from functools import partial + +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .efficientnet_blocks import SqueezeExcite +from .efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import get_act_fn +from .mobilenetv3 import MobileNetV3, MobileNetV3Features +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'hardcorenas_a': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_A_Green_38ms_75.9_23474aeb.pth'), + 'hardcorenas_b': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_B_Green_40ms_76.5_1f882d1e.pth'), + 'hardcorenas_c': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_C_Green_44ms_77.1_d4148c9e.pth'), + 'hardcorenas_d': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_D_Green_50ms_77.4_23e3cdde.pth'), + 'hardcorenas_e': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_E_Green_55ms_77.9_90f20e8a.pth'), + 'hardcorenas_f': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_F_Green_60ms_78.1_2855edf1.pth'), +} + + +def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs): + """Creates a hardcorenas model + + Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS + Paper: https://arxiv.org/abs/2102.11646 + + """ + num_features = 1280 + se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=num_features, + stem_size=32, + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=se_layer, + **kwargs, + ) + + features_only = False + model_cls = MobileNetV3 + kwargs_filter = None + if model_kwargs.pop('features_only', False): + features_only = True + kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias', 
'global_pool') + model_cls = MobileNetV3Features + model = build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **model_kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +@register_model +def hardcorenas_a(pretrained=False, **kwargs): + """ hardcorenas_A """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], + ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_b(pretrained=False, **kwargs): + """ hardcorenas_B """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], + ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'], + ['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], + ['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], + ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_b', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_c(pretrained=False, **kwargs): + """ hardcorenas_C """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', + 'ir_r1_k5_s1_e3_c40_nre'], + ['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], + ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_d(pretrained=False, **kwargs): + """ hardcorenas_D """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'], + ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', + 'ir_r1_k3_s1_e3_c80_se0.25'], + ['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25', + 'ir_r1_k5_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_e(pretrained=False, **kwargs): + """ hardcorenas_E """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', + 
'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', + 'ir_r1_k5_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_f(pretrained=False, **kwargs): + """ hardcorenas_F """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], + ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', + 'ir_r1_k3_s1_e3_c80_se0.25'], + ['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', + 'ir_r1_k3_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs) + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/helpers.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/helpers.py new file mode 100644 index 0000000000..bd97cf2039 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/helpers.py @@ -0,0 +1,515 @@ +""" Model creation / weight loading / state_dict helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +import os +import math +from collections import OrderedDict +from copy import deepcopy +from typing import Any, Callable, Optional, Tuple + +import torch +import torch.nn as nn + + +from .features import FeatureListNet, FeatureDictNet, FeatureHookNet +from .hub import has_hf_hub, download_cached_file, load_state_dict_from_hf, load_state_dict_from_url +from .layers import Conv2dSame, Linear + + +_logger = logging.getLogger(__name__) + + +def load_state_dict(checkpoint_path, use_ema=False): + if checkpoint_path and os.path.isfile(checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + state_dict_key = '' + if isinstance(checkpoint, dict): + if use_ema and checkpoint.get('state_dict_ema', None) is not None: + state_dict_key = 'state_dict_ema' + elif use_ema and checkpoint.get('model_ema', None) is not None: + state_dict_key = 'model_ema' + elif 'state_dict' in checkpoint: + state_dict_key = 'state_dict' + elif 'model' in checkpoint: + state_dict_key = 'model' + if state_dict_key: + state_dict = checkpoint[state_dict_key] + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + # strip `module.` prefix + name = k[7:] if k.startswith('module') else k + new_state_dict[name] = v + state_dict = new_state_dict + else: + state_dict = checkpoint + _logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path)) + return state_dict + else: + _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) + raise FileNotFoundError() + + +def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True): + if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'): + # numpy checkpoint, try to load via model specific load_pretrained fn + if hasattr(model, 'load_pretrained'): + model.load_pretrained(checkpoint_path) + else: + raise 
NotImplementedError('Model cannot load numpy checkpoint') + return + state_dict = load_state_dict(checkpoint_path, use_ema) + model.load_state_dict(state_dict, strict=strict) + + +def resume_checkpoint(model, checkpoint_path, optimizer=None, loss_scaler=None, log_info=True): + resume_epoch = None + if os.path.isfile(checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: + if log_info: + _logger.info('Restoring model state from checkpoint...') + new_state_dict = OrderedDict() + for k, v in checkpoint['state_dict'].items(): + name = k[7:] if k.startswith('module') else k + new_state_dict[name] = v + model.load_state_dict(new_state_dict) + + if optimizer is not None and 'optimizer' in checkpoint: + if log_info: + _logger.info('Restoring optimizer state from checkpoint...') + optimizer.load_state_dict(checkpoint['optimizer']) + + if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint: + if log_info: + _logger.info('Restoring AMP loss scaler state from checkpoint...') + loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key]) + + if 'epoch' in checkpoint: + resume_epoch = checkpoint['epoch'] + if 'version' in checkpoint and checkpoint['version'] > 1: + resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save + + if log_info: + _logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch'])) + else: + model.load_state_dict(checkpoint) + if log_info: + _logger.info("Loaded checkpoint '{}'".format(checkpoint_path)) + return resume_epoch + else: + _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) + raise FileNotFoundError() + + +def load_custom_pretrained(model, default_cfg=None, load_fn=None, progress=False, check_hash=False): + r"""Loads a custom (read non .pth) weight file + + Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls + a passed in custom load fun, or the `load_pretrained` model member fn. + + If the object is already present in `model_dir`, it's deserialized and returned. + The default value of `model_dir` is ``/checkpoints`` where + `hub_dir` is the directory returned by :func:`~torch.hub.get_dir`. + + Args: + model: The instantiated model to load weights into + default_cfg (dict): Default pretrained model cfg + load_fn: An external stand alone fn that loads weights into provided model, otherwise a fn named + 'laod_pretrained' on the model will be called if it exists + progress (bool, optional): whether or not to display a progress bar to stderr. Default: False + check_hash(bool, optional): If True, the filename part of the URL should follow the naming convention + ``filename-.ext`` where ```` is the first eight or more + digits of the SHA256 hash of the contents of the file. The hash is used to + ensure unique names and to verify the contents of the file. Default: False + """ + default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {} + pretrained_url = default_cfg.get('url', None) + if not pretrained_url: + _logger.warning("No pretrained weights exist for this model. 
Using random initialization.") + return + cached_file = download_cached_file(default_cfg['url'], check_hash=check_hash, progress=progress) + + if load_fn is not None: + load_fn(model, cached_file) + elif hasattr(model, 'load_pretrained'): + model.load_pretrained(cached_file) + else: + _logger.warning("Valid function to load pretrained weights is not available, using random initialization.") + + +def adapt_input_conv(in_chans, conv_weight): + conv_type = conv_weight.dtype + conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU + O, I, J, K = conv_weight.shape + if in_chans == 1: + if I > 3: + assert conv_weight.shape[1] % 3 == 0 + # For models with space2depth stems + conv_weight = conv_weight.reshape(O, I // 3, 3, J, K) + conv_weight = conv_weight.sum(dim=2, keepdim=False) + else: + conv_weight = conv_weight.sum(dim=1, keepdim=True) + elif in_chans != 3: + if I != 3: + raise NotImplementedError('Weight format not supported by conversion.') + else: + # NOTE this strategy should be better than random init, but there could be other combinations of + # the original RGB input layer weights that'd work better for specific cases. + repeat = int(math.ceil(in_chans / 3)) + conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :] + conv_weight *= (3 / float(in_chans)) + conv_weight = conv_weight.to(conv_type) + return conv_weight + + +def load_pretrained(model, default_cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True, progress=False): + """ Load pretrained checkpoint + + Args: + model (nn.Module) : PyTorch model module + default_cfg (Optional[Dict]): default configuration for pretrained weights / target dataset + num_classes (int): num_classes for model + in_chans (int): in_chans for model + filter_fn (Optional[Callable]): state_dict filter fn for load (takes state_dict, model as args) + strict (bool): strict load of checkpoint + progress (bool): enable progress bar for weight download + + """ + default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {} + pretrained_url = default_cfg.get('url', None) + hf_hub_id = default_cfg.get('hf_hub', None) + if not pretrained_url and not hf_hub_id: + _logger.warning("No pretrained weights exist for this model. 
Using random initialization.") + return + if hf_hub_id and has_hf_hub(necessary=not pretrained_url): + _logger.info(f'Loading pretrained weights from Hugging Face hub ({hf_hub_id})') + state_dict = load_state_dict_from_hf(hf_hub_id) + else: + _logger.info(f'Loading pretrained weights from url ({pretrained_url})') + state_dict = load_state_dict_from_url(pretrained_url, progress=progress, map_location='cpu') + if filter_fn is not None: + # for backwards compat with filter fn that take one arg, try one first, the two + try: + state_dict = filter_fn(state_dict) + except TypeError: + state_dict = filter_fn(state_dict, model) + + input_convs = default_cfg.get('first_conv', None) + if input_convs is not None and in_chans != 3: + if isinstance(input_convs, str): + input_convs = (input_convs,) + for input_conv_name in input_convs: + weight_name = input_conv_name + '.weight' + try: + state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name]) + _logger.info( + f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)') + except NotImplementedError as e: + del state_dict[weight_name] + strict = False + _logger.warning( + f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.') + + classifiers = default_cfg.get('classifier', None) + label_offset = default_cfg.get('label_offset', 0) + if classifiers is not None: + if isinstance(classifiers, str): + classifiers = (classifiers,) + if num_classes != default_cfg['num_classes']: + for classifier_name in classifiers: + # completely discard fully connected if model num_classes doesn't match pretrained weights + del state_dict[classifier_name + '.weight'] + del state_dict[classifier_name + '.bias'] + strict = False + elif label_offset > 0: + for classifier_name in classifiers: + # special case for pretrained weights with an extra background class in pretrained weights + classifier_weight = state_dict[classifier_name + '.weight'] + state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:] + classifier_bias = state_dict[classifier_name + '.bias'] + state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:] + + model.load_state_dict(state_dict, strict=strict) + + +def extract_layer(model, layer): + layer = layer.split('.') + module = model + if hasattr(model, 'module') and layer[0] != 'module': + module = model.module + if not hasattr(model, 'module') and layer[0] == 'module': + layer = layer[1:] + for l in layer: + if hasattr(module, l): + if not l.isdigit(): + module = getattr(module, l) + else: + module = module[int(l)] + else: + return module + return module + + +def set_layer(model, layer, val): + layer = layer.split('.') + module = model + if hasattr(model, 'module') and layer[0] != 'module': + module = model.module + lst_index = 0 + module2 = module + for l in layer: + if hasattr(module2, l): + if not l.isdigit(): + module2 = getattr(module2, l) + else: + module2 = module2[int(l)] + lst_index += 1 + lst_index -= 1 + for l in layer[:lst_index]: + if not l.isdigit(): + module = getattr(module, l) + else: + module = module[int(l)] + l = layer[lst_index] + setattr(module, l, val) + + +def adapt_model_from_string(parent_module, model_string): + separator = '***' + state_dict = {} + lst_shape = model_string.split(separator) + for k in lst_shape: + k = k.split(':') + key = k[0] + shape = k[1][1:-1].split(',') + if shape[0] != '': + state_dict[key] = [int(i) for i in shape] + + new_module = deepcopy(parent_module) + for n, m in 
parent_module.named_modules(): + old_module = extract_layer(parent_module, n) + if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame): + if isinstance(old_module, Conv2dSame): + conv = Conv2dSame + else: + conv = nn.Conv2d + s = state_dict[n + '.weight'] + in_channels = s[1] + out_channels = s[0] + g = 1 + if old_module.groups > 1: + in_channels = out_channels + g = in_channels + new_conv = conv( + in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size, + bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation, + groups=g, stride=old_module.stride) + set_layer(new_module, n, new_conv) + if isinstance(old_module, nn.BatchNorm2d): + new_bn = nn.BatchNorm2d( + num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum, + affine=old_module.affine, track_running_stats=True) + set_layer(new_module, n, new_bn) + if isinstance(old_module, nn.Linear): + # FIXME extra checks to ensure this is actually the FC classifier layer and not a diff Linear layer? + num_features = state_dict[n + '.weight'][1] + new_fc = Linear( + in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None) + set_layer(new_module, n, new_fc) + if hasattr(new_module, 'num_features'): + new_module.num_features = num_features + new_module.eval() + parent_module.eval() + + return new_module + + +def adapt_model_from_file(parent_module, model_variant): + adapt_file = os.path.join(os.path.dirname(__file__), 'pruned', model_variant + '.txt') + with open(adapt_file, 'r') as f: + return adapt_model_from_string(parent_module, f.read().strip()) + + +def default_cfg_for_features(default_cfg): + default_cfg = deepcopy(default_cfg) + # remove default pretrained cfg fields that don't have much relevance for feature backbone + to_remove = ('num_classes', 'crop_pct', 'classifier', 'global_pool') # add default final pool size? + for tr in to_remove: + default_cfg.pop(tr, None) + return default_cfg + + +def overlay_external_default_cfg(default_cfg, kwargs): + """ Overlay 'external_default_cfg' in kwargs on top of default_cfg arg. 
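+    The external cfg (e.g. one attached to hub-hosted weights) takes precedence: any 'url' or
+    'hf_hub' entry baked into the default_cfg is dropped first, so those fields can only come
+    from the external source.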
+ """ + external_default_cfg = kwargs.pop('external_default_cfg', None) + if external_default_cfg: + default_cfg.pop('url', None) # url should come from external cfg + default_cfg.pop('hf_hub', None) # hf hub id should come from external cfg + default_cfg.update(external_default_cfg) + + +def set_default_kwargs(kwargs, names, default_cfg): + for n in names: + # for legacy reasons, model __init__args uses img_size + in_chans as separate args while + # default_cfg has one input_size=(C, H ,W) entry + if n == 'img_size': + input_size = default_cfg.get('input_size', None) + if input_size is not None: + assert len(input_size) == 3 + kwargs.setdefault(n, input_size[-2:]) + elif n == 'in_chans': + input_size = default_cfg.get('input_size', None) + if input_size is not None: + assert len(input_size) == 3 + kwargs.setdefault(n, input_size[0]) + else: + default_val = default_cfg.get(n, None) + if default_val is not None: + kwargs.setdefault(n, default_cfg[n]) + + +def filter_kwargs(kwargs, names): + if not kwargs or not names: + return + for n in names: + kwargs.pop(n, None) + + +def update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter): + """ Update the default_cfg and kwargs before passing to model + + FIXME this sequence of overlay default_cfg, set default kwargs, filter kwargs + could/should be replaced by an improved configuration mechanism + + Args: + default_cfg: input default_cfg (updated in-place) + kwargs: keyword args passed to model build fn (updated in-place) + kwargs_filter: keyword arg keys that must be removed before model __init__ + """ + # Overlay default cfg values from `external_default_cfg` if it exists in kwargs + overlay_external_default_cfg(default_cfg, kwargs) + # Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs) + default_kwarg_names = ('num_classes', 'global_pool', 'in_chans') + if default_cfg.get('fixed_input_size', False): + # if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size + default_kwarg_names += ('img_size',) + set_default_kwargs(kwargs, names=default_kwarg_names, default_cfg=default_cfg) + # Filter keyword args for task specific model variants (some 'features only' models, etc.) 
+ filter_kwargs(kwargs, names=kwargs_filter) + + +def build_model_with_cfg( + model_cls: Callable, + variant: str, + pretrained: bool, + default_cfg: dict, + model_cfg: Optional[Any] = None, + feature_cfg: Optional[dict] = None, + pretrained_strict: bool = True, + pretrained_filter_fn: Optional[Callable] = None, + pretrained_custom_load: bool = False, + kwargs_filter: Optional[Tuple[str]] = None, + **kwargs): + """ Build model with specified default_cfg and optional model_cfg + + This helper fn aids in the construction of a model including: + * handling default_cfg and associated pretained weight loading + * passing through optional model_cfg for models with config based arch spec + * features_only model adaptation + * pruning config / model adaptation + + Args: + model_cls (nn.Module): model class + variant (str): model variant name + pretrained (bool): load pretrained weights + default_cfg (dict): model's default pretrained/task config + model_cfg (Optional[Dict]): model's architecture config + feature_cfg (Optional[Dict]: feature extraction adapter config + pretrained_strict (bool): load pretrained weights strictly + pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights + pretrained_custom_load (bool): use custom load fn, to load numpy or other non PyTorch weights + kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model + **kwargs: model args passed through to model __init__ + """ + pruned = kwargs.pop('pruned', False) + features = False + feature_cfg = feature_cfg or {} + default_cfg = deepcopy(default_cfg) if default_cfg else {} + update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter) + default_cfg.setdefault('architecture', variant) + + # Setup for feature extraction wrapper done at end of this fn + if kwargs.pop('features_only', False): + features = True + feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4)) + if 'out_indices' in kwargs: + feature_cfg['out_indices'] = kwargs.pop('out_indices') + + # Build the model + model = model_cls(**kwargs) if model_cfg is None else model_cls(cfg=model_cfg, **kwargs) + model.default_cfg = default_cfg + + if pruned: + model = adapt_model_from_file(model, variant) + + # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats + num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000)) + if pretrained: + if pretrained_custom_load: + load_custom_pretrained(model) + else: + load_pretrained( + model, + num_classes=num_classes_pretrained, + in_chans=kwargs.get('in_chans', 3), + filter_fn=pretrained_filter_fn, + strict=pretrained_strict) + + # Wrap the model in a feature extraction module if enabled + if features: + feature_cls = FeatureListNet + if 'feature_cls' in feature_cfg: + feature_cls = feature_cfg.pop('feature_cls') + if isinstance(feature_cls, str): + feature_cls = feature_cls.lower() + if 'hook' in feature_cls: + feature_cls = FeatureHookNet + else: + assert False, f'Unknown feature class {feature_cls}' + model = feature_cls(model, **feature_cfg) + model.default_cfg = default_cfg_for_features(default_cfg) # add back default_cfg + + return model + + +def model_parameters(model, exclude_head=False): + if exclude_head: + # FIXME this a bit of a quick and dirty hack to skip classifier head params based on ordering + return [p for p in model.parameters()][:-2] + else: + return model.parameters() + + +def named_apply(fn: Callable, module: nn.Module, name='', depth_first=True, include_root=False) -> 
nn.Module: + if not depth_first and include_root: + fn(module=module, name=name) + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + fn(module=module, name=name) + return module + + +def named_modules(module: nn.Module, name='', depth_first=True, include_root=False): + if not depth_first and include_root: + yield name, module + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + yield from named_modules( + module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + yield name, module diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/hrnet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/hrnet.py new file mode 100644 index 0000000000..c56964f64f --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/hrnet.py @@ -0,0 +1,836 @@ +""" HRNet + +Copied from https://github.com/HRNet/HRNet-Image-Classification + +Original header: + Copyright (c) Microsoft + Licensed under the MIT License. + Written by Bin Xiao (Bin.Xiao@microsoft.com) + Modified by Ke Sun (sunk@mail.ustc.edu.cn) +""" +import logging +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .features import FeatureInfo +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import create_classifier +from .registry import register_model +from .resnet import BasicBlock, Bottleneck # leveraging ResNet blocks w/ additional features like SE + +_BN_MOMENTUM = 0.1 +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'hrnet_w18_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v1-f460c6bc.pth'), + 'hrnet_w18_small_v2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v2-4c50a8cb.pth'), + 'hrnet_w18': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w18-8cb57bb9.pth'), + 'hrnet_w30': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w30-8d7f8dab.pth'), + 'hrnet_w32': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w32-90d8c5fb.pth'), + 'hrnet_w40': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w40-7cd397a4.pth'), + 'hrnet_w44': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w44-c9ac8c18.pth'), + 'hrnet_w48': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w48-abd2e6ab.pth'), + 'hrnet_w64': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w64-b47cc881.pth'), +} + +cfg_cls = dict( + hrnet_w18_small=dict( + 
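+        # Every entry below follows the same schema: STEM_WIDTH plus STAGE1..STAGE4 dicts, where
+        # BLOCK keys into blocks_dict ('BASIC' or 'BOTTLENECK'), NUM_BLOCKS / NUM_CHANNELS are
+        # per-branch tuples, and NUM_BRANCHES grows by one each stage
+        # (e.g. hrnet_w18 STAGE4 uses NUM_CHANNELS=(18, 36, 72, 144) across its four branches).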
STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(1,), + NUM_CHANNELS=(32,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2), + NUM_CHANNELS=(16, 32), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=1, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2), + NUM_CHANNELS=(16, 32, 64), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=1, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2, 2), + NUM_CHANNELS=(16, 32, 64, 128), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w18_small_v2=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(2,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2), + NUM_CHANNELS=(18, 36), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=3, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2), + NUM_CHANNELS=(18, 36, 72), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=2, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2, 2), + NUM_CHANNELS=(18, 36, 72, 144), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w18=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(18, 36), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(18, 36, 72), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(18, 36, 72, 144), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w30=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(30, 60), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(30, 60, 120), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(30, 60, 120, 240), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w32=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(32, 64), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(32, 64, 128), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(32, 64, 128, 256), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w40=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(40, 80), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(40, 80, 160), + FUSE_METHOD='SUM' + ), + 
STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(40, 80, 160, 320), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w44=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(44, 88), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(44, 88, 176), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(44, 88, 176, 352), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w48=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(48, 96), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(48, 96, 192), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(48, 96, 192, 384), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w64=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(64, 128), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(64, 128, 256), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(64, 128, 256, 512), + FUSE_METHOD='SUM', + ), + ) +) + + +class HighResolutionModule(nn.Module): + def __init__(self, num_branches, blocks, num_blocks, num_inchannels, + num_channels, fuse_method, multi_scale_output=True): + super(HighResolutionModule, self).__init__() + self._check_branches( + num_branches, blocks, num_blocks, num_inchannels, num_channels) + + self.num_inchannels = num_inchannels + self.fuse_method = fuse_method + self.num_branches = num_branches + + self.multi_scale_output = multi_scale_output + + self.branches = self._make_branches( + num_branches, blocks, num_blocks, num_channels) + self.fuse_layers = self._make_fuse_layers() + self.fuse_act = nn.ReLU(False) + + def _check_branches(self, num_branches, blocks, num_blocks, num_inchannels, num_channels): + error_msg = '' + if num_branches != len(num_blocks): + error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(num_branches, len(num_blocks)) + elif num_branches != len(num_channels): + error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(num_branches, len(num_channels)) + elif num_branches != len(num_inchannels): + error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(num_branches, len(num_inchannels)) + if error_msg: + _logger.error(error_msg) + raise ValueError(error_msg) + + def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1): + downsample = None + if stride != 1 or self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.num_inchannels[branch_index], 
num_channels[branch_index] * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(num_channels[branch_index] * block.expansion, momentum=_BN_MOMENTUM), + ) + + layers = [block(self.num_inchannels[branch_index], num_channels[branch_index], stride, downsample)] + self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index])) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + branches = [] + for i in range(num_branches): + branches.append(self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return nn.Identity() + + num_branches = self.num_branches + num_inchannels = self.num_inchannels + fuse_layers = [] + for i in range(num_branches if self.multi_scale_output else 1): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, 1, 0, bias=False), + nn.BatchNorm2d(num_inchannels[i], momentum=_BN_MOMENTUM), + nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'))) + elif j == i: + fuse_layer.append(nn.Identity()) + else: + conv3x3s = [] + for k in range(i - j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_inchannels[i] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + nn.BatchNorm2d(num_outchannels_conv3x3, momentum=_BN_MOMENTUM))) + else: + num_outchannels_conv3x3 = num_inchannels[j] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + nn.BatchNorm2d(num_outchannels_conv3x3, momentum=_BN_MOMENTUM), + nn.ReLU(False))) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_inchannels(self): + return self.num_inchannels + + def forward(self, x: List[torch.Tensor]): + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i, branch in enumerate(self.branches): + x[i] = branch(x[i]) + + x_fuse = [] + for i, fuse_outer in enumerate(self.fuse_layers): + y = x[0] if i == 0 else fuse_outer[0](x[0]) + for j in range(1, self.num_branches): + if i == j: + y = y + x[j] + else: + y = y + fuse_outer[j](x[j]) + x_fuse.append(self.fuse_act(y)) + + return x_fuse + + +blocks_dict = { + 'BASIC': BasicBlock, + 'BOTTLENECK': Bottleneck +} + + +class HighResolutionNet(nn.Module): + + def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0.0, head='classification'): + super(HighResolutionNet, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + + stem_width = cfg['STEM_WIDTH'] + self.conv1 = nn.Conv2d(in_chans, stem_width, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM) + self.act1 = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM) + self.act2 = nn.ReLU(inplace=True) + + self.stage1_cfg = cfg['STAGE1'] + num_channels = self.stage1_cfg['NUM_CHANNELS'][0] + block = blocks_dict[self.stage1_cfg['BLOCK']] + num_blocks = self.stage1_cfg['NUM_BLOCKS'][0] + self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) + 
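+        # stage1 is single-branch; its output width feeds transition1 below and includes the
+        # block expansion factor (4x here, since every config above uses BLOCK='BOTTLENECK' for STAGE1)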
stage1_out_channel = block.expansion * num_channels + + self.stage2_cfg = cfg['STAGE2'] + num_channels = self.stage2_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage2_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels) + self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels) + + self.stage3_cfg = cfg['STAGE3'] + num_channels = self.stage3_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage3_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels) + + self.stage4_cfg = cfg['STAGE4'] + num_channels = self.stage4_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage4_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True) + + self.head = head + self.head_channels = None # set if _make_head called + if head == 'classification': + # Classification Head + self.num_features = 2048 + self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head(pre_stage_channels) + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + elif head == 'incre': + self.num_features = 2048 + self.incre_modules, _, _ = self._make_head(pre_stage_channels, True) + else: + self.incre_modules = None + self.num_features = 256 + + curr_stride = 2 + # module names aren't actually valid here, hook or FeatureNet based extraction would not work + self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')] + for i, c in enumerate(self.head_channels if self.head_channels else num_channels): + curr_stride *= 2 + c = c * 4 if self.head_channels else c # head block expansion factor of 4 + self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{i + 1}')] + + self.init_weights() + + def _make_head(self, pre_stage_channels, incre_only=False): + head_block = Bottleneck + self.head_channels = [32, 64, 128, 256] + + # Increasing the #channels on each resolution + # from C, 2C, 4C, 8C to 128, 256, 512, 1024 + incre_modules = [] + for i, channels in enumerate(pre_stage_channels): + incre_modules.append(self._make_layer(head_block, channels, self.head_channels[i], 1, stride=1)) + incre_modules = nn.ModuleList(incre_modules) + if incre_only: + return incre_modules, None, None + + # downsampling modules + downsamp_modules = [] + for i in range(len(pre_stage_channels) - 1): + in_channels = self.head_channels[i] * head_block.expansion + out_channels = self.head_channels[i + 1] * head_block.expansion + downsamp_module = nn.Sequential( + nn.Conv2d( + in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1), + nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True) + ) + downsamp_modules.append(downsamp_module) + downsamp_modules = nn.ModuleList(downsamp_modules) + + final_layer = nn.Sequential( + nn.Conv2d( + in_channels=self.head_channels[3] * head_block.expansion, + out_channels=self.num_features, kernel_size=1, stride=1, padding=0 + ), + nn.BatchNorm2d(self.num_features, 
momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True) + ) + + return incre_modules, downsamp_modules, final_layer + + def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append(nn.Sequential( + nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), + nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True))) + else: + transition_layers.append(nn.Identity()) + else: + conv3x3s = [] + for j in range(i + 1 - num_branches_pre): + inchannels = num_channels_pre_layer[-1] + outchannels = num_channels_cur_layer[i] if j == i - num_branches_pre else inchannels + conv3x3s.append(nn.Sequential( + nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), + nn.BatchNorm2d(outchannels, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv3x3s)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, inplanes, planes, blocks, stride=1): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes * block.expansion, momentum=_BN_MOMENTUM), + ) + + layers = [block(inplanes, planes, stride, downsample)] + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(inplanes, planes)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True): + num_modules = layer_config['NUM_MODULES'] + num_branches = layer_config['NUM_BRANCHES'] + num_blocks = layer_config['NUM_BLOCKS'] + num_channels = layer_config['NUM_CHANNELS'] + block = blocks_dict[layer_config['BLOCK']] + fuse_method = layer_config['FUSE_METHOD'] + + modules = [] + for i in range(num_modules): + # multi_scale_output is only used last module + reset_multi_scale_output = multi_scale_output or i < num_modules - 1 + modules.append(HighResolutionModule( + num_branches, block, num_blocks, num_inchannels, num_channels, fuse_method, reset_multi_scale_output) + ) + num_inchannels = modules[-1].get_num_inchannels() + + return nn.Sequential(*modules), num_inchannels + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def stages(self, x) -> List[torch.Tensor]: + x = self.layer1(x) + + xl = [t(x) for i, t in enumerate(self.transition1)] + yl = self.stage2(xl) + + xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition2)] + yl = self.stage3(xl) + + xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition3)] + yl = self.stage4(xl) + return yl + + def forward_features(self, x): + # Stem + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + x = 
self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + + # Stages + yl = self.stages(x) + + # Classification Head + y = self.incre_modules[0](yl[0]) + for i, down in enumerate(self.downsamp_modules): + y = self.incre_modules[i + 1](yl[i + 1]) + down(y) + y = self.final_layer(y) + return y + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + return x + + +class HighResolutionNetFeatures(HighResolutionNet): + """HighResolutionNet feature extraction + + The design of HRNet makes it easy to grab feature maps, this class provides a simple wrapper to do so. + It would be more complicated to use the FeatureNet helpers. + + The `feature_location=incre` allows grabbing increased channel count features using part of the + classification head. If `feature_location=''` the default HRNet features are returned. First stem + conv is used for stride 2 features. + """ + + def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0.0, + feature_location='incre', out_indices=(0, 1, 2, 3, 4)): + assert feature_location in ('incre', '') + super(HighResolutionNetFeatures, self).__init__( + cfg, in_chans=in_chans, num_classes=num_classes, global_pool=global_pool, + drop_rate=drop_rate, head=feature_location) + self.feature_info = FeatureInfo(self.feature_info, out_indices) + self._out_idx = {i for i in out_indices} + + def forward_features(self, x): + assert False, 'Not supported' + + def forward(self, x) -> List[torch.tensor]: + out = [] + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + if 0 in self._out_idx: + out.append(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + x = self.stages(x) + if self.incre_modules is not None: + x = [incre(f) for f, incre in zip(x, self.incre_modules)] + for i, f in enumerate(x): + if i + 1 in self._out_idx: + out.append(f) + return out + + +def _create_hrnet(variant, pretrained, **model_kwargs): + model_cls = HighResolutionNet + features_only = False + kwargs_filter = None + if model_kwargs.pop('features_only', False): + model_cls = HighResolutionNetFeatures + kwargs_filter = ('num_classes', 'global_pool') + features_only = True + model = build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=cfg_cls[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **model_kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +@register_model +def hrnet_w18_small(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w18_small', pretrained, **kwargs) + + +@register_model +def hrnet_w18_small_v2(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w18_small_v2', pretrained, **kwargs) + + +@register_model +def hrnet_w18(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w18', pretrained, **kwargs) + + +@register_model +def hrnet_w30(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w30', pretrained, **kwargs) + + +@register_model +def hrnet_w32(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w32', pretrained, **kwargs) + + +@register_model +def hrnet_w40(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w40', pretrained, **kwargs) + + +@register_model +def hrnet_w44(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w44', pretrained, **kwargs) + + +@register_model +def hrnet_w48(pretrained=True, **kwargs): + return 
_create_hrnet('hrnet_w48', pretrained, **kwargs) + + +@register_model +def hrnet_w64(pretrained=True, **kwargs): + return _create_hrnet('hrnet_w64', pretrained, **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/hub.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/hub.py new file mode 100644 index 0000000000..9a9b553031 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/hub.py @@ -0,0 +1,96 @@ +import json +import logging +import os +from functools import partial +from typing import Union, Optional + +import torch +from torch.hub import load_state_dict_from_url, download_url_to_file, urlparse, HASH_REGEX +try: + from torch.hub import get_dir +except ImportError: + from torch.hub import _get_torch_home as get_dir + +from timm import __version__ +try: + from huggingface_hub import hf_hub_url + from huggingface_hub import cached_download + cached_download = partial(cached_download, library_name="timm", library_version=__version__) +except ImportError: + hf_hub_url = None + cached_download = None + +_logger = logging.getLogger(__name__) + + +def get_cache_dir(child_dir=''): + """ + Returns the location of the directory where models are cached (and creates it if necessary). + """ + # Issue warning to move data if old env is set + if os.getenv('TORCH_MODEL_ZOO'): + _logger.warning('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead') + + hub_dir = get_dir() + child_dir = () if not child_dir else (child_dir,) + model_dir = os.path.join(hub_dir, 'checkpoints', *child_dir) + os.makedirs(model_dir, exist_ok=True) + return model_dir + + +def download_cached_file(url, check_hash=True, progress=False): + parts = urlparse(url) + filename = os.path.basename(parts.path) + cached_file = os.path.join(get_cache_dir(), filename) + if not os.path.exists(cached_file): + _logger.info('Downloading: "{}" to {}\n'.format(url, cached_file)) + hash_prefix = None + if check_hash: + r = HASH_REGEX.search(filename) # r is Optional[Match[str]] + hash_prefix = r.group(1) if r else None + download_url_to_file(url, cached_file, hash_prefix, progress=progress) + return cached_file + + +def has_hf_hub(necessary=False): + if hf_hub_url is None and necessary: + # if no HF Hub module installed and it is necessary to continue, raise error + raise RuntimeError( + 'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.') + return hf_hub_url is not None + + +def hf_split(hf_id): + rev_split = hf_id.split('@') + assert 0 < len(rev_split) <= 2, 'hf_hub id should only contain one @ character to identify revision.' 
+ hf_model_id = rev_split[0] + hf_revision = rev_split[-1] if len(rev_split) > 1 else None + return hf_model_id, hf_revision + + +def load_cfg_from_json(json_file: Union[str, os.PathLike]): + with open(json_file, "r", encoding="utf-8") as reader: + text = reader.read() + return json.loads(text) + + +def _download_from_hf(model_id: str, filename: str): + hf_model_id, hf_revision = hf_split(model_id) + url = hf_hub_url(hf_model_id, filename, revision=hf_revision) + return cached_download(url, cache_dir=get_cache_dir('hf')) + + +def load_model_config_from_hf(model_id: str): + assert has_hf_hub(True) + cached_file = _download_from_hf(model_id, 'config.json') + default_cfg = load_cfg_from_json(cached_file) + default_cfg['hf_hub'] = model_id # insert hf_hub id for pretrained weight load during model creation + model_name = default_cfg.get('architecture') + return default_cfg, model_name + + +def load_state_dict_from_hf(model_id: str): + assert has_hf_hub(True) + cached_file = _download_from_hf(model_id, 'pytorch_model.bin') + state_dict = torch.load(cached_file, map_location='cpu') + return state_dict diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/inception_resnet_v2.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/inception_resnet_v2.py new file mode 100644 index 0000000000..716728495a --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/inception_resnet_v2.py @@ -0,0 +1,358 @@ +""" Pytorch Inception-Resnet-V2 implementation +Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is +based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['InceptionResnetV2'] + +default_cfgs = { + # ported from http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz + 'inception_resnet_v2': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/inception_resnet_v2-940b1cd6.pth', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.8975, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', + 'label_offset': 1, # 1001 classes in pretrained weights + }, + # ported from http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz + 'ens_adv_inception_resnet_v2': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ens_adv_inception_resnet_v2-2592a550.pth', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.8975, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', + 'label_offset': 1, # 1001 classes in pretrained weights + } +} + + +class BasicConv2d(nn.Module): + def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d( + in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) + self.bn = nn.BatchNorm2d(out_planes, eps=.001) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x = self.conv(x) + x = 
self.bn(x) + x = self.relu(x) + return x + + +class Mixed_5b(nn.Module): + def __init__(self): + super(Mixed_5b, self).__init__() + + self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(192, 48, kernel_size=1, stride=1), + BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(192, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), + BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(192, 64, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Block35(nn.Module): + def __init__(self, scale=1.0): + super(Block35, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(320, 32, kernel_size=1, stride=1), + BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(320, 32, kernel_size=1, stride=1), + BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1), + BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1) + ) + + self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + out = self.conv2d(out) + out = out * self.scale + x + out = self.relu(out) + return out + + +class Mixed_6a(nn.Module): + def __init__(self): + super(Mixed_6a, self).__init__() + + self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2) + + self.branch1 = nn.Sequential( + BasicConv2d(320, 256, kernel_size=1, stride=1), + BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1), + BasicConv2d(256, 384, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class Block17(nn.Module): + def __init__(self, scale=1.0): + super(Block17, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(1088, 128, kernel_size=1, stride=1), + BasicConv2d(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)) + ) + + self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + out = self.conv2d(out) + out = out * self.scale + x + out = self.relu(out) + return out + + +class Mixed_7a(nn.Module): + def __init__(self): + super(Mixed_7a, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 384, kernel_size=3, stride=2) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 288, kernel_size=3, stride=2) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1), + BasicConv2d(288, 320, kernel_size=3, stride=2) + ) + + self.branch3 
= nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Block8(nn.Module): + + def __init__(self, scale=1.0, no_relu=False): + super(Block8, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(2080, 192, kernel_size=1, stride=1), + BasicConv2d(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)), + BasicConv2d(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + ) + + self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1) + self.relu = None if no_relu else nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + out = self.conv2d(out) + out = out * self.scale + x + if self.relu is not None: + out = self.relu(out) + return out + + +class InceptionResnetV2(nn.Module): + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., output_stride=32, global_pool='avg'): + super(InceptionResnetV2, self).__init__() + self.drop_rate = drop_rate + self.num_classes = num_classes + self.num_features = 1536 + assert output_stride == 32 + + self.conv2d_1a = BasicConv2d(in_chans, 32, kernel_size=3, stride=2) + self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1) + self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1) + self.feature_info = [dict(num_chs=64, reduction=2, module='conv2d_2b')] + + self.maxpool_3a = nn.MaxPool2d(3, stride=2) + self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1) + self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1) + self.feature_info += [dict(num_chs=192, reduction=4, module='conv2d_4a')] + + self.maxpool_5a = nn.MaxPool2d(3, stride=2) + self.mixed_5b = Mixed_5b() + self.repeat = nn.Sequential( + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17) + ) + self.feature_info += [dict(num_chs=320, reduction=8, module='repeat')] + + self.mixed_6a = Mixed_6a() + self.repeat_1 = nn.Sequential( + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10) + ) + self.feature_info += [dict(num_chs=1088, reduction=16, module='repeat_1')] + + self.mixed_7a = Mixed_7a() + self.repeat_2 = nn.Sequential( + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20) + ) + self.block8 = Block8(no_relu=True) + self.conv2d_7b = BasicConv2d(2080, self.num_features, kernel_size=1, stride=1) + self.feature_info += [dict(num_chs=self.num_features, reduction=32, module='conv2d_7b')] + + self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.classif + + def reset_classifier(self, num_classes, global_pool='avg'): + 
self.num_classes = num_classes + self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv2d_1a(x) + x = self.conv2d_2a(x) + x = self.conv2d_2b(x) + x = self.maxpool_3a(x) + x = self.conv2d_3b(x) + x = self.conv2d_4a(x) + x = self.maxpool_5a(x) + x = self.mixed_5b(x) + x = self.repeat(x) + x = self.mixed_6a(x) + x = self.repeat_1(x) + x = self.mixed_7a(x) + x = self.repeat_2(x) + x = self.block8(x) + x = self.conv2d_7b(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classif(x) + return x + + +def _create_inception_resnet_v2(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + InceptionResnetV2, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def inception_resnet_v2(pretrained=False, **kwargs): + r"""InceptionResnetV2 model architecture from the + `"InceptionV4, Inception-ResNet..." ` paper. + """ + return _create_inception_resnet_v2('inception_resnet_v2', pretrained=pretrained, **kwargs) + + +@register_model +def ens_adv_inception_resnet_v2(pretrained=False, **kwargs): + r""" Ensemble Adversarially trained InceptionResnetV2 model architecture + As per https://arxiv.org/abs/1705.07204 and + https://github.com/tensorflow/models/tree/master/research/adv_imagenet_models. + """ + return _create_inception_resnet_v2('ens_adv_inception_resnet_v2', pretrained=pretrained, **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/inception_v3.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/inception_v3.py new file mode 100644 index 0000000000..cbb1107b39 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/inception_v3.py @@ -0,0 +1,470 @@ +""" Inception-V3 + +Originally from torchvision Inception3 model +Licensed BSD-Clause 3 https://github.com/pytorch/vision/blob/master/LICENSE +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .registry import register_model +from .layers import trunc_normal_, create_classifier, Linear + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'Conv2d_1a_3x3.conv', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + # original PyTorch weights, ported from Tensorflow but modified + 'inception_v3': _cfg( + url='https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth', + has_aux=True), # checkpoint has aux logit layer weights + # my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz) + 'tf_inception_v3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_inception_v3-e0069de4.pth', + num_classes=1000, has_aux=False, label_offset=1), + # my port of Tensorflow adversarially trained Inception V3 from + # http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz + 'adv_inception_v3': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/adv_inception_v3-9e27bd63.pth', + num_classes=1000, has_aux=False, label_offset=1), + # from gluon pretrained models, best performing in terms of accuracy/loss metrics + # https://gluon-cv.mxnet.io/model_zoo/classification.html + 'gluon_inception_v3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_inception_v3-9f746940.pth', + mean=IMAGENET_DEFAULT_MEAN, # also works well with inception defaults + std=IMAGENET_DEFAULT_STD, # also works well with inception defaults + has_aux=False, + ) +} + + +class InceptionA(nn.Module): + + def __init__(self, in_channels, pool_features, conv_block=None): + super(InceptionA, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) + + self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1) + self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1) + + self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionB(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionB, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2) + + def _forward(self, x): + branch3x3 = self.branch3x3(x) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + + outputs = [branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionC(nn.Module): + + def __init__(self, in_channels, channels_7x7, conv_block=None): + super(InceptionC, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 192, kernel_size=1) + + c7 = channels_7x7 + self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0)) + + self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + 
self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionD(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionD, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2) + + self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2) + + def _forward(self, x): + branch3x3 = self.branch3x3_1(x) + branch3x3 = self.branch3x3_2(branch3x3) + + branch7x7x3 = self.branch7x7x3_1(x) + branch7x7x3 = self.branch7x7x3_2(branch7x7x3) + branch7x7x3 = self.branch7x7x3_3(branch7x7x3) + branch7x7x3 = self.branch7x7x3_4(branch7x7x3) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + outputs = [branch3x3, branch7x7x3, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionE(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionE, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 320, kernel_size=1) + + self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1) + self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1) + self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1) + self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = torch.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = torch.cat(branch3x3dbl, 1) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class 
InceptionAux(nn.Module): + + def __init__(self, in_channels, num_classes, conv_block=None): + super(InceptionAux, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.conv0 = conv_block(in_channels, 128, kernel_size=1) + self.conv1 = conv_block(128, 768, kernel_size=5) + self.conv1.stddev = 0.01 + self.fc = Linear(768, num_classes) + self.fc.stddev = 0.001 + + def forward(self, x): + # N x 768 x 17 x 17 + x = F.avg_pool2d(x, kernel_size=5, stride=3) + # N x 768 x 5 x 5 + x = self.conv0(x) + # N x 128 x 5 x 5 + x = self.conv1(x) + # N x 768 x 1 x 1 + # Adaptive average pooling + x = F.adaptive_avg_pool2d(x, (1, 1)) + # N x 768 x 1 x 1 + x = torch.flatten(x, 1) + # N x 768 + x = self.fc(x) + # N x 1000 + return x + + +class BasicConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, **kwargs): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return F.relu(x, inplace=True) + + +class InceptionV3(nn.Module): + """Inception-V3 with no AuxLogits + FIXME two class defs are redundant, but less screwing around with torchsript fussyness and inconsistent returns + """ + + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', aux_logits=False): + super(InceptionV3, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.aux_logits = aux_logits + + self.Conv2d_1a_3x3 = BasicConv2d(in_chans, 32, kernel_size=3, stride=2) + self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3) + self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1) + self.Pool1 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1) + self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3) + self.Pool2 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Mixed_5b = InceptionA(192, pool_features=32) + self.Mixed_5c = InceptionA(256, pool_features=64) + self.Mixed_5d = InceptionA(288, pool_features=64) + self.Mixed_6a = InceptionB(288) + self.Mixed_6b = InceptionC(768, channels_7x7=128) + self.Mixed_6c = InceptionC(768, channels_7x7=160) + self.Mixed_6d = InceptionC(768, channels_7x7=160) + self.Mixed_6e = InceptionC(768, channels_7x7=192) + if aux_logits: + self.AuxLogits = InceptionAux(768, num_classes) + else: + self.AuxLogits = None + self.Mixed_7a = InceptionD(768) + self.Mixed_7b = InceptionE(1280) + self.Mixed_7c = InceptionE(2048) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='Conv2d_2b_3x3'), + dict(num_chs=192, reduction=4, module='Conv2d_4a_3x3'), + dict(num_chs=288, reduction=8, module='Mixed_5d'), + dict(num_chs=768, reduction=16, module='Mixed_6e'), + dict(num_chs=2048, reduction=32, module='Mixed_7c'), + ] + + self.num_features = 2048 + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + stddev = m.stddev if hasattr(m, 'stddev') else 0.1 + trunc_normal_(m.weight, std=stddev) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def forward_preaux(self, x): + # N x 3 x 299 x 299 + x = self.Conv2d_1a_3x3(x) + # N x 32 x 149 x 149 + x = self.Conv2d_2a_3x3(x) + # N x 32 x 147 x 147 + x = self.Conv2d_2b_3x3(x) + # N x 64 x 147 x 147 + x = self.Pool1(x) + # N x 64 x 73 x 73 + x = self.Conv2d_3b_1x1(x) + 
# N x 80 x 73 x 73 + x = self.Conv2d_4a_3x3(x) + # N x 192 x 71 x 71 + x = self.Pool2(x) + # N x 192 x 35 x 35 + x = self.Mixed_5b(x) + # N x 256 x 35 x 35 + x = self.Mixed_5c(x) + # N x 288 x 35 x 35 + x = self.Mixed_5d(x) + # N x 288 x 35 x 35 + x = self.Mixed_6a(x) + # N x 768 x 17 x 17 + x = self.Mixed_6b(x) + # N x 768 x 17 x 17 + x = self.Mixed_6c(x) + # N x 768 x 17 x 17 + x = self.Mixed_6d(x) + # N x 768 x 17 x 17 + x = self.Mixed_6e(x) + # N x 768 x 17 x 17 + return x + + def forward_postaux(self, x): + x = self.Mixed_7a(x) + # N x 1280 x 8 x 8 + x = self.Mixed_7b(x) + # N x 2048 x 8 x 8 + x = self.Mixed_7c(x) + # N x 2048 x 8 x 8 + return x + + def forward_features(self, x): + x = self.forward_preaux(x) + x = self.forward_postaux(x) + return x + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + return x + + +class InceptionV3Aux(InceptionV3): + """InceptionV3 with AuxLogits + """ + + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', aux_logits=True): + super(InceptionV3Aux, self).__init__( + num_classes, in_chans, drop_rate, global_pool, aux_logits) + + def forward_features(self, x): + x = self.forward_preaux(x) + aux = self.AuxLogits(x) if self.training else None + x = self.forward_postaux(x) + return x, aux + + def forward(self, x): + x, aux = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + return x, aux + + +def _create_inception_v3(variant, pretrained=False, **kwargs): + default_cfg = default_cfgs[variant] + aux_logits = kwargs.pop('aux_logits', False) + if aux_logits: + assert not kwargs.pop('features_only', False) + model_cls = InceptionV3Aux + load_strict = default_cfg['has_aux'] + else: + model_cls = InceptionV3 + load_strict = not default_cfg['has_aux'] + return build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfg, + pretrained_strict=load_strict, + **kwargs) + + +@register_model +def inception_v3(pretrained=False, **kwargs): + # original PyTorch weights, ported from Tensorflow but modified + model = _create_inception_v3('inception_v3', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_inception_v3(pretrained=False, **kwargs): + # my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz) + model = _create_inception_v3('tf_inception_v3', pretrained=pretrained, **kwargs) + return model + + +@register_model +def adv_inception_v3(pretrained=False, **kwargs): + # my port of Tensorflow adversarially trained Inception V3 from + # http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz + model = _create_inception_v3('adv_inception_v3', pretrained=pretrained, **kwargs) + return model + + +@register_model +def gluon_inception_v3(pretrained=False, **kwargs): + # from gluon pretrained models, best performing in terms of accuracy/loss metrics + # https://gluon-cv.mxnet.io/model_zoo/classification.html + model = _create_inception_v3('gluon_inception_v3', pretrained=pretrained, **kwargs) + return model diff --git 
a/PyTorch/contrib/cv/classification/convmixer/timm/models/inception_v4.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/inception_v4.py new file mode 100644 index 0000000000..cc899e15da --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/inception_v4.py @@ -0,0 +1,316 @@ +""" Pytorch Inception-V4 implementation +Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is +based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['InceptionV4'] + +default_cfgs = { + 'inception_v4': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/inceptionv4-8e4777a0.pth', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'features.0.conv', 'classifier': 'last_linear', + 'label_offset': 1, # 1001 classes in pretrained weights + } +} + + +class BasicConv2d(nn.Module): + def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d( + in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) + self.bn = nn.BatchNorm2d(out_planes, eps=0.001) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class Mixed3a(nn.Module): + def __init__(self): + super(Mixed3a, self).__init__() + self.maxpool = nn.MaxPool2d(3, stride=2) + self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2) + + def forward(self, x): + x0 = self.maxpool(x) + x1 = self.conv(x) + out = torch.cat((x0, x1), 1) + return out + + +class Mixed4a(nn.Module): + def __init__(self): + super(Mixed4a, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(160, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(160, 64, kernel_size=1, stride=1), + BasicConv2d(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(64, 96, kernel_size=(3, 3), stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + return out + + +class Mixed5a(nn.Module): + def __init__(self): + super(Mixed5a, self).__init__() + self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2) + self.maxpool = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.conv(x) + x1 = self.maxpool(x) + out = torch.cat((x0, x1), 1) + return out + + +class InceptionA(nn.Module): + def __init__(self): + super(InceptionA, self).__init__() + self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(384, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(384, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), + BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, 
count_include_pad=False), + BasicConv2d(384, 96, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class ReductionA(nn.Module): + def __init__(self): + super(ReductionA, self).__init__() + self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2) + + self.branch1 = nn.Sequential( + BasicConv2d(384, 192, kernel_size=1, stride=1), + BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1), + BasicConv2d(224, 256, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class InceptionB(nn.Module): + def __init__(self): + super(InceptionB, self).__init__() + self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0)) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(1024, 128, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class ReductionB(nn.Module): + def __init__(self): + super(ReductionB, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(1024, 192, kernel_size=1, stride=1), + BasicConv2d(192, 192, kernel_size=3, stride=2) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(1024, 256, kernel_size=1, stride=1), + BasicConv2d(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)), + BasicConv2d(320, 320, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class InceptionC(nn.Module): + def __init__(self): + super(InceptionC, self).__init__() + + self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1) + + self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) + self.branch1_1a = BasicConv2d(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch1_1b = BasicConv2d(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + + self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1) + self.branch2_1 = BasicConv2d(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0)) + self.branch2_2 = BasicConv2d(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch2_3a = BasicConv2d(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)) + self.branch2_3b = BasicConv2d(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(1536, 256, kernel_size=1, stride=1) + ) + + 
def forward(self, x): + x0 = self.branch0(x) + + x1_0 = self.branch1_0(x) + x1_1a = self.branch1_1a(x1_0) + x1_1b = self.branch1_1b(x1_0) + x1 = torch.cat((x1_1a, x1_1b), 1) + + x2_0 = self.branch2_0(x) + x2_1 = self.branch2_1(x2_0) + x2_2 = self.branch2_2(x2_1) + x2_3a = self.branch2_3a(x2_2) + x2_3b = self.branch2_3b(x2_2) + x2 = torch.cat((x2_3a, x2_3b), 1) + + x3 = self.branch3(x) + + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class InceptionV4(nn.Module): + def __init__(self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0., global_pool='avg'): + super(InceptionV4, self).__init__() + assert output_stride == 32 + self.drop_rate = drop_rate + self.num_classes = num_classes + self.num_features = 1536 + + self.features = nn.Sequential( + BasicConv2d(in_chans, 32, kernel_size=3, stride=2), + BasicConv2d(32, 32, kernel_size=3, stride=1), + BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1), + Mixed3a(), + Mixed4a(), + Mixed5a(), + InceptionA(), + InceptionA(), + InceptionA(), + InceptionA(), + ReductionA(), # Mixed6a + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + InceptionB(), + ReductionB(), # Mixed7a + InceptionC(), + InceptionC(), + InceptionC(), + ) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='features.2'), + dict(num_chs=160, reduction=4, module='features.3'), + dict(num_chs=384, reduction=8, module='features.9'), + dict(num_chs=1024, reduction=16, module='features.17'), + dict(num_chs=1536, reduction=32, module='features.21'), + ] + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + return self.features(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + +def _create_inception_v4(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + InceptionV4, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +@register_model +def inception_v4(pretrained=False, **kwargs): + return _create_inception_v4('inception_v4', pretrained, **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/__init__.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/__init__.py new file mode 100644 index 0000000000..e9a5f18fdd --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/__init__.py @@ -0,0 +1,39 @@ +from .activations import * +from .adaptive_avgmax_pool import \ + adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d +from .blur_pool import BlurPool2d +from .classifier import ClassifierHead, create_classifier +from .cond_conv2d import CondConv2d, get_condconv_initializer +from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\ + set_layer_config +from .conv2d_same import Conv2dSame, conv2d_same +from .conv_bn_act import ConvBnAct +from .create_act import create_act_layer, get_act_layer, get_act_fn +from .create_attn import get_attn, create_attn 
+from .create_conv2d import create_conv2d +from .create_norm_act import get_norm_act_layer, create_norm_act, convert_norm_act +from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path +from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn +from .evo_norm import EvoNormBatch2d, EvoNormSample2d +from .gather_excite import GatherExcite +from .global_context import GlobalContext +from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible +from .inplace_abn import InplaceAbn +from .linear import Linear +from .mixed_conv2d import MixedConv2d +from .mlp import Mlp, GluMlp, GatedMlp +from .non_local_attn import NonLocalAttn, BatNonLocalAttn +from .norm import GroupNorm, LayerNorm2d +from .norm_act import BatchNormAct2d, GroupNormAct +from .padding import get_padding, get_same_padding, pad_same +from .patch_embed import PatchEmbed +from .pool2d_same import AvgPool2dSame, create_pool2d +from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite +from .selective_kernel import SelectiveKernel +from .separable_conv import SeparableConv2d, SeparableConvBnAct +from .space_to_depth import SpaceToDepthModule +from .split_attn import SplitAttn +from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model +from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame +from .test_time_pool import TestTimePoolHead, apply_test_time_pool +from .weight_init import trunc_normal_, variance_scaling_, lecun_normal_ diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/activations.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/activations.py new file mode 100644 index 0000000000..e16b3bd3a1 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/activations.py @@ -0,0 +1,145 @@ +""" Activations + +A collection of activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. 
+ +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +def swish(x, inplace: bool = False): + """Swish - Described in: https://arxiv.org/abs/1710.05941 + """ + return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid()) + + +class Swish(nn.Module): + def __init__(self, inplace: bool = False): + super(Swish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return swish(x, self.inplace) + + +def mish(x, inplace: bool = False): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + NOTE: I don't have a working inplace variant + """ + return x.mul(F.softplus(x).tanh()) + + +class Mish(nn.Module): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + """ + def __init__(self, inplace: bool = False): + super(Mish, self).__init__() + + def forward(self, x): + return mish(x) + + +def sigmoid(x, inplace: bool = False): + return x.sigmoid_() if inplace else x.sigmoid() + + +# PyTorch has this, but not with a consistent inplace argmument interface +class Sigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(Sigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.sigmoid_() if self.inplace else x.sigmoid() + + +def tanh(x, inplace: bool = False): + return x.tanh_() if inplace else x.tanh() + + +# PyTorch has this, but not with a consistent inplace argmument interface +class Tanh(nn.Module): + def __init__(self, inplace: bool = False): + super(Tanh, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.tanh_() if self.inplace else x.tanh() + + +def hard_swish(x, inplace: bool = False): + inner = F.relu6(x + 3.).div_(6.) + return x.mul_(inner) if inplace else x.mul(inner) + + +class HardSwish(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_swish(x, self.inplace) + + +def hard_sigmoid(x, inplace: bool = False): + if inplace: + return x.add_(3.).clamp_(0., 6.).div_(6.) + else: + return F.relu6(x + 3.) / 6. 
+ + +class HardSigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_sigmoid(x, self.inplace) + + +def hard_mish(x, inplace: bool = False): + """ Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + if inplace: + return x.mul_(0.5 * (x + 2).clamp(min=0, max=2)) + else: + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +class HardMish(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMish, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_mish(x, self.inplace) + + +class PReLU(nn.PReLU): + """Applies PReLU (w/ dummy inplace arg) + """ + def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None: + super(PReLU, self).__init__(num_parameters=num_parameters, init=init) + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.prelu(input, self.weight) + + +def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor: + return F.gelu(x) + + +class GELU(nn.Module): + """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg) + """ + def __init__(self, inplace: bool = False): + super(GELU, self).__init__() + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.gelu(input) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/activations_jit.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/activations_jit.py new file mode 100644 index 0000000000..b4a516530a --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/activations_jit.py @@ -0,0 +1,90 @@ +""" Activations + +A collection of jit-scripted activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not +currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted +versions if they contain in-place ops. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +@torch.jit.script +def swish_jit(x, inplace: bool = False): + """Swish - Described in: https://arxiv.org/abs/1710.05941 + """ + return x.mul(x.sigmoid()) + + +@torch.jit.script +def mish_jit(x, _inplace: bool = False): + """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + """ + return x.mul(F.softplus(x).tanh()) + + +class SwishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(SwishJit, self).__init__() + + def forward(self, x): + return swish_jit(x) + + +class MishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(MishJit, self).__init__() + + def forward(self, x): + return mish_jit(x) + + +@torch.jit.script +def hard_sigmoid_jit(x, inplace: bool = False): + # return F.relu6(x + 3.) / 6. + return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? + + +class HardSigmoidJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoidJit, self).__init__() + + def forward(self, x): + return hard_sigmoid_jit(x) + + +@torch.jit.script +def hard_swish_jit(x, inplace: bool = False): + # return x * (F.relu6(x + 3.) 
/ 6) + return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? + + +class HardSwishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwishJit, self).__init__() + + def forward(self, x): + return hard_swish_jit(x) + + +@torch.jit.script +def hard_mish_jit(x, inplace: bool = False): + """ Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +class HardMishJit(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMishJit, self).__init__() + + def forward(self, x): + return hard_mish_jit(x) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/activations_me.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/activations_me.py new file mode 100644 index 0000000000..9a12bb7ebb --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/activations_me.py @@ -0,0 +1,218 @@ +""" Activations (memory-efficient w/ custom autograd) + +A collection of activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +These activations are not compatible with jit scripting or ONNX export of the model, please use either +the JIT or basic versions of the activations. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn +from torch.nn import functional as F + + +@torch.jit.script +def swish_jit_fwd(x): + return x.mul(torch.sigmoid(x)) + + +@torch.jit.script +def swish_jit_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid))) + + +class SwishJitAutoFn(torch.autograd.Function): + """ torch.jit.script optimised Swish w/ memory-efficient checkpoint + Inspired by conversation btw Jeremy Howard & Adam Pazske + https://twitter.com/jeremyphoward/status/1188251041835315200 + """ + @staticmethod + def symbolic(g, x): + return g.op("Mul", x, g.op("Sigmoid", x)) + + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return swish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return swish_jit_bwd(x, grad_output) + + +def swish_me(x, inplace=False): + return SwishJitAutoFn.apply(x) + + +class SwishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(SwishMe, self).__init__() + + def forward(self, x): + return SwishJitAutoFn.apply(x) + + +@torch.jit.script +def mish_jit_fwd(x): + return x.mul(torch.tanh(F.softplus(x))) + + +@torch.jit.script +def mish_jit_bwd(x, grad_output): + x_sigmoid = torch.sigmoid(x) + x_tanh_sp = F.softplus(x).tanh() + return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp)) + + +class MishJitAutoFn(torch.autograd.Function): + """ Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 + A memory efficient, jit scripted variant of Mish + """ + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return mish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return mish_jit_bwd(x, grad_output) + + +def mish_me(x, inplace=False): + return MishJitAutoFn.apply(x) + + +class MishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(MishMe, self).__init__() + + def forward(self, x): + return MishJitAutoFn.apply(x) + + 
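+# Hedged verification sketch (added for clarity, not part of the upstream timm
+# file): the custom autograd functions above save only the raw input and
+# recompute the cheap pointwise terms in backward, which is what makes them
+# memory efficient. `torch.autograd.gradcheck` in float64 confirms that the
+# hand-written backward formulas match numeric gradients; the helper name is
+# hypothetical.
+def _example_check_me_gradients() -> bool:
+    x = torch.randn(4, 8, dtype=torch.double, requires_grad=True)
+    return bool(
+        torch.autograd.gradcheck(SwishJitAutoFn.apply, (x,))
+        and torch.autograd.gradcheck(MishJitAutoFn.apply, (x,)))
+
+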
+@torch.jit.script +def hard_sigmoid_jit_fwd(x, inplace: bool = False): + return (x + 3).clamp(min=0, max=6).div(6.) + + +@torch.jit.script +def hard_sigmoid_jit_bwd(x, grad_output): + m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6. + return grad_output * m + + +class HardSigmoidJitAutoFn(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_sigmoid_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_sigmoid_jit_bwd(x, grad_output) + + +def hard_sigmoid_me(x, inplace: bool = False): + return HardSigmoidJitAutoFn.apply(x) + + +class HardSigmoidMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoidMe, self).__init__() + + def forward(self, x): + return HardSigmoidJitAutoFn.apply(x) + + +@torch.jit.script +def hard_swish_jit_fwd(x): + return x * (x + 3).clamp(min=0, max=6).div(6.) + + +@torch.jit.script +def hard_swish_jit_bwd(x, grad_output): + m = torch.ones_like(x) * (x >= 3.) + m = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, m) + return grad_output * m + + +class HardSwishJitAutoFn(torch.autograd.Function): + """A memory efficient, jit-scripted HardSwish activation""" + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_swish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_swish_jit_bwd(x, grad_output) + + @staticmethod + def symbolic(g, self): + input = g.op("Add", self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float))) + hardtanh_ = g.op("Clip", input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) + hardtanh_ = g.op("Div", hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) + return g.op("Mul", self, hardtanh_) + + +def hard_swish_me(x, inplace=False): + return HardSwishJitAutoFn.apply(x) + + +class HardSwishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSwishMe, self).__init__() + + def forward(self, x): + return HardSwishJitAutoFn.apply(x) + + +@torch.jit.script +def hard_mish_jit_fwd(x): + return 0.5 * x * (x + 2).clamp(min=0, max=2) + + +@torch.jit.script +def hard_mish_jit_bwd(x, grad_output): + m = torch.ones_like(x) * (x >= -2.) + m = torch.where((x >= -2.) 
& (x <= 0.), x + 1., m) + return grad_output * m + + +class HardMishJitAutoFn(torch.autograd.Function): + """ A memory efficient, jit scripted variant of Hard Mish + Experimental, based on notes by Mish author Diganta Misra at + https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md + """ + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return hard_mish_jit_fwd(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + return hard_mish_jit_bwd(x, grad_output) + + +def hard_mish_me(x, inplace: bool = False): + return HardMishJitAutoFn.apply(x) + + +class HardMishMe(nn.Module): + def __init__(self, inplace: bool = False): + super(HardMishMe, self).__init__() + + def forward(self, x): + return HardMishJitAutoFn.apply(x) + + + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/adaptive_avgmax_pool.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/adaptive_avgmax_pool.py new file mode 100644 index 0000000000..ebc6ada8c5 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/adaptive_avgmax_pool.py @@ -0,0 +1,118 @@ +""" PyTorch selectable adaptive pooling +Adaptive pooling with the ability to select the type of pooling from: + * 'avg' - Average pooling + * 'max' - Max pooling + * 'avgmax' - Sum of average and max pooling re-scaled by 0.5 + * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim + +Both a functional and a nn.Module version of the pooling is provided. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def adaptive_pool_feat_mult(pool_type='avg'): + if pool_type == 'catavgmax': + return 2 + else: + return 1 + + +def adaptive_avgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return 0.5 * (x_avg + x_max) + + +def adaptive_catavgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return torch.cat((x_avg, x_max), 1) + + +def select_adaptive_pool2d(x, pool_type='avg', output_size=1): + """Selectable global pooling function with dynamic input kernel size + """ + if pool_type == 'avg': + x = F.adaptive_avg_pool2d(x, output_size) + elif pool_type == 'avgmax': + x = adaptive_avgmax_pool2d(x, output_size) + elif pool_type == 'catavgmax': + x = adaptive_catavgmax_pool2d(x, output_size) + elif pool_type == 'max': + x = F.adaptive_max_pool2d(x, output_size) + else: + assert False, 'Invalid pool type: %s' % pool_type + return x + + +class FastAdaptiveAvgPool2d(nn.Module): + def __init__(self, flatten=False): + super(FastAdaptiveAvgPool2d, self).__init__() + self.flatten = flatten + + def forward(self, x): + return x.mean((2, 3), keepdim=not self.flatten) + + +class AdaptiveAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_avgmax_pool2d(x, self.output_size) + + +class AdaptiveCatAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveCatAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_catavgmax_pool2d(x, self.output_size) + + +class SelectAdaptivePool2d(nn.Module): + """Selectable global pooling layer with dynamic input kernel size + """ + def 
__init__(self, output_size=1, pool_type='fast', flatten=False): + super(SelectAdaptivePool2d, self).__init__() + self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing + self.flatten = nn.Flatten(1) if flatten else nn.Identity() + if pool_type == '': + self.pool = nn.Identity() # pass through + elif pool_type == 'fast': + assert output_size == 1 + self.pool = FastAdaptiveAvgPool2d(flatten) + self.flatten = nn.Identity() + elif pool_type == 'avg': + self.pool = nn.AdaptiveAvgPool2d(output_size) + elif pool_type == 'avgmax': + self.pool = AdaptiveAvgMaxPool2d(output_size) + elif pool_type == 'catavgmax': + self.pool = AdaptiveCatAvgMaxPool2d(output_size) + elif pool_type == 'max': + self.pool = nn.AdaptiveMaxPool2d(output_size) + else: + assert False, 'Invalid pool type: %s' % pool_type + + def is_identity(self): + return not self.pool_type + + def forward(self, x): + x = self.pool(x) + x = self.flatten(x) + return x + + def feat_mult(self): + return adaptive_pool_feat_mult(self.pool_type) + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + 'pool_type=' + self.pool_type \ + + ', flatten=' + str(self.flatten) + ')' + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/attention_pool2d.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/attention_pool2d.py new file mode 100644 index 0000000000..66e49b8a93 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/attention_pool2d.py @@ -0,0 +1,182 @@ +""" Attention Pool 2D + +Implementations of 2D spatial feature pooling using multi-head attention instead of average pool. + +Based on idea in CLIP by OpenAI, licensed Apache 2.0 +https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +from typing import List, Union, Tuple + +import torch +import torch.nn as nn + +from .helpers import to_2tuple +from .weight_init import trunc_normal_ + + +def rot(x): + return torch.stack([-x[..., 1::2], x[..., ::2]], -1).reshape(x.shape) + + +def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb): + return x * cos_emb + rot(x) * sin_emb + + +def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb): + if isinstance(x, torch.Tensor): + x = [x] + return [t * cos_emb + rot(t) * sin_emb for t in x] + + +class RotaryEmbedding(nn.Module): + """ Rotary position embedding + + NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not + been well tested, and will likely change. It will be moved to its own file. 
+ + The following impl/resources were referenced for this impl: + * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py + * https://blog.eleuther.ai/rotary-embeddings/ + """ + def __init__(self, dim, max_freq=4): + super().__init__() + self.dim = dim + self.register_buffer('bands', 2 ** torch.linspace(0., max_freq - 1, self.dim // 4), persistent=False) + + def get_embed(self, shape: torch.Size, device: torch.device = None, dtype: torch.dtype = None): + """ + NOTE: shape arg should include spatial dim only + """ + device = device or self.bands.device + dtype = dtype or self.bands.dtype + if not isinstance(shape, torch.Size): + shape = torch.Size(shape) + N = shape.numel() + grid = torch.stack(torch.meshgrid( + [torch.linspace(-1., 1., steps=s, device=device, dtype=dtype) for s in shape]), dim=-1).unsqueeze(-1) + emb = grid * math.pi * self.bands + sin = emb.sin().reshape(N, -1).repeat_interleave(2, -1) + cos = emb.cos().reshape(N, -1).repeat_interleave(2, -1) + return sin, cos + + def forward(self, x): + # assuming channel-first tensor where spatial dim are >= 2 + sin_emb, cos_emb = self.get_embed(x.shape[2:]) + return apply_rot_embed(x, sin_emb, cos_emb) + + +class RotAttentionPool2d(nn.Module): + """ Attention based 2D feature pooling w/ rotary (relative) pos embedding. + This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. + + Adapted from the AttentionPool2d in CLIP w/ rotary embedding instead of learned embed. + https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + + NOTE: While this impl does not require a fixed feature size, performance at differeing resolutions from + train varies widely and falls off dramatically. I'm not sure if there is a way around this... -RW + """ + def __init__( + self, + in_features: int, + out_features: int = None, + embed_dim: int = None, + num_heads: int = 4, + qkv_bias: bool = True, + ): + super().__init__() + embed_dim = embed_dim or in_features + out_features = out_features or in_features + self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) + self.proj = nn.Linear(embed_dim, out_features) + self.num_heads = num_heads + assert embed_dim % num_heads == 0 + self.head_dim = embed_dim // num_heads + self.scale = self.head_dim ** -0.5 + self.pos_embed = RotaryEmbedding(self.head_dim) + + trunc_normal_(self.qkv.weight, std=in_features ** -0.5) + nn.init.zeros_(self.qkv.bias) + + def forward(self, x): + B, _, H, W = x.shape + N = H * W + sin_emb, cos_emb = self.pos_embed.get_embed(x.shape[2:]) + x = x.reshape(B, -1, N).permute(0, 2, 1) + + x = torch.cat([x.mean(1, keepdim=True), x], dim=1) + + x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = x[0], x[1], x[2] + + qc, q = q[:, :, :1], q[:, :, 1:] + q = apply_rot_embed(q, sin_emb, cos_emb) + q = torch.cat([qc, q], dim=2) + + kc, k = k[:, :, :1], k[:, :, 1:] + k = apply_rot_embed(k, sin_emb, cos_emb) + k = torch.cat([kc, k], dim=2) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) + x = self.proj(x) + return x[:, 0] + + +class AttentionPool2d(nn.Module): + """ Attention based 2D feature pooling w/ learned (absolute) pos embedding. + This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. 
+ + It was based on impl in CLIP by OpenAI + https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py + + NOTE: This requires feature size upon construction and well prevent adaptive sizing of the network. + """ + def __init__( + self, + in_features: int, + feat_size: Union[int, Tuple[int, int]], + out_features: int = None, + embed_dim: int = None, + num_heads: int = 4, + qkv_bias: bool = True, + ): + super().__init__() + + embed_dim = embed_dim or in_features + out_features = out_features or in_features + assert embed_dim % num_heads == 0 + self.feat_size = to_2tuple(feat_size) + self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) + self.proj = nn.Linear(embed_dim, out_features) + self.num_heads = num_heads + self.head_dim = embed_dim // num_heads + self.scale = self.head_dim ** -0.5 + + spatial_dim = self.feat_size[0] * self.feat_size[1] + self.pos_embed = nn.Parameter(torch.zeros(spatial_dim + 1, in_features)) + trunc_normal_(self.pos_embed, std=in_features ** -0.5) + trunc_normal_(self.qkv.weight, std=in_features ** -0.5) + nn.init.zeros_(self.qkv.bias) + + def forward(self, x): + B, _, H, W = x.shape + N = H * W + assert self.feat_size[0] == H + assert self.feat_size[1] == W + x = x.reshape(B, -1, N).permute(0, 2, 1) + x = torch.cat([x.mean(1, keepdim=True), x], dim=1) + x = x + self.pos_embed.unsqueeze(0).to(x.dtype) + + x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = x[0], x[1], x[2] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) + x = self.proj(x) + return x[:, 0] diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/blur_pool.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/blur_pool.py new file mode 100644 index 0000000000..ca4ce756e4 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/blur_pool.py @@ -0,0 +1,42 @@ +""" +BlurPool layer inspired by + - Kornia's Max_BlurPool2d + - Making Convolutional Networks Shift-Invariant Again :cite:`zhang2019shiftinvar` + +Hacked together by Chris Ha and Ross Wightman +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from .padding import get_padding + + +class BlurPool2d(nn.Module): + r"""Creates a module that computes blurs and downsample a given feature map. + See :cite:`zhang2019shiftinvar` for more details. + Corresponds to the Downsample class, which does blurring and subsampling + + Args: + channels = Number of input channels + filt_size (int): binomial filter size for blurring. currently supports 3 (default) and 5. + stride (int): downsampling filter stride + + Returns: + torch.Tensor: the transformed tensor. 
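+
+    Example (illustrative addition, not from the original docstring)::
+
+        >>> pool = BlurPool2d(channels=64, filt_size=3, stride=2)
+        >>> pool(torch.randn(1, 64, 56, 56)).shape
+        torch.Size([1, 64, 28, 28])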
+ """ + def __init__(self, channels, filt_size=3, stride=2) -> None: + super(BlurPool2d, self).__init__() + assert filt_size > 1 + self.channels = channels + self.filt_size = filt_size + self.stride = stride + self.padding = [get_padding(filt_size, stride, dilation=1)] * 4 + coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.filt_size - 1)).coeffs.astype(np.float32)) + blur_filter = (coeffs[:, None] * coeffs[None, :])[None, None, :, :].repeat(self.channels, 1, 1, 1) + self.register_buffer('filt', blur_filter, persistent=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = F.pad(x, self.padding, 'reflect') + return F.conv2d(x, self.filt, stride=self.stride, groups=x.shape[1]) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/bottleneck_attn.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/bottleneck_attn.py new file mode 100644 index 0000000000..61859f9c36 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/bottleneck_attn.py @@ -0,0 +1,129 @@ +""" Bottleneck Self Attention (Bottleneck Transformers) + +Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 + +@misc{2101.11605, +Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani}, +Title = {Bottleneck Transformers for Visual Recognition}, +Year = {2021}, +} + +Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + +This impl is a WIP but given that it is based on the ref gist likely not too far off. + +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import to_2tuple +from .weight_init import trunc_normal_ + + +def rel_logits_1d(q, rel_k, permute_mask: List[int]): + """ Compute relative logits along one dimension + + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + Args: + q: (batch, heads, height, width, dim) + rel_k: (2 * width - 1, dim) + permute_mask: permute output dim according to this + """ + B, H, W, dim = q.shape + x = (q @ rel_k.transpose(-1, -2)) + x = x.reshape(-1, W, 2 * W -1) + + # pad to shift from relative to absolute indexing + x_pad = F.pad(x, [0, 1]).flatten(1) + x_pad = F.pad(x_pad, [0, W - 1]) + + # reshape and slice out the padded elements + x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1) + x = x_pad[:, :W, W - 1:] + + # reshape and tile + x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1) + return x.permute(permute_mask) + + +class PosEmbedRel(nn.Module): + """ Relative Position Embedding + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + """ + def __init__(self, feat_size, dim_head, scale): + super().__init__() + self.height, self.width = to_2tuple(feat_size) + self.dim_head = dim_head + self.scale = scale + self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * self.scale) + self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * self.scale) + + def forward(self, q): + B, num_heads, HW, _ = q.shape + + # relative logits in width dimension. 
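+        # rel_logits_1d applies the pad-reshape-slice "skewing" trick from
+        # Attention Augmented Convolutional Networks: q @ rel_k yields logits
+        # indexed by relative offset (2W-1 per position), which are realigned
+        # into a dense (W, W) absolute-position grid and then tiled across the
+        # other spatial axis.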
+ q = q.reshape(B * num_heads, self.height, self.width, -1) + rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) + + # relative logits in height dimension. + q = q.transpose(1, 2) + rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) + + rel_logits = rel_logits_h + rel_logits_w + rel_logits = rel_logits.reshape(B, num_heads, HW, HW) + return rel_logits + + +class BottleneckAttn(nn.Module): + """ Bottleneck Attention + Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 + """ + def __init__(self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, qkv_bias=False): + super().__init__() + assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required' + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + self.num_heads = num_heads + self.dim_out = dim_out + self.dim_head = dim_out // num_heads + self.scale = self.dim_head ** -0.5 + + self.qkv = nn.Conv2d(dim, self.dim_out * 3, 1, bias=qkv_bias) + + # NOTE I'm only supporting relative pos embedding for now + self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head, scale=self.scale) + + self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.pos_embed.height + assert W == self.pos_embed.width + + x = self.qkv(x) # B, 3 * num_heads * dim_head, H, W + x = x.reshape(B, -1, self.dim_head, H * W).transpose(-1, -2) + q, k, v = torch.split(x, self.num_heads, dim=1) + + attn = (q @ k.transpose(-1, -2)) * self.scale + attn = attn + self.pos_embed(q) # B, num_heads, H * W, H * W + attn = attn.softmax(dim=-1) + + out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out, H, W) # B, dim_out, H, W + out = self.pool(out) + return out + + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/cbam.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/cbam.py new file mode 100644 index 0000000000..bacf5cf07b --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/cbam.py @@ -0,0 +1,112 @@ +""" CBAM (sort-of) Attention + +Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521 + +WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on +some tasks, especially fine-grained it seems. I may end up removing this impl. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import nn as nn +import torch.nn.functional as F + +from .conv_bn_act import ConvBnAct +from .create_act import create_act_layer, get_act_layer +from .helpers import make_divisible + + +class ChannelAttn(nn.Module): + """ Original CBAM channel attention module, currently avg + max pool variant only. + """ + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(ChannelAttn, self).__init__() + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
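+        # Shared bottleneck MLP (two 1x1 convs): forward() pushes both the
+        # avg-pooled and the max-pooled channel descriptors through the same
+        # fc1/fc2 pair, following the CBAM paper.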
+ self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias) + self.act = act_layer(inplace=True) + self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True)))) + x_max = self.fc2(self.act(self.fc1(x.amax((2, 3), keepdim=True)))) + return x * self.gate(x_avg + x_max) + + +class LightChannelAttn(ChannelAttn): + """An experimental 'lightweight' that sums avg + max pool first + """ + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(LightChannelAttn, self).__init__( + channels, rd_ratio, rd_channels, rd_divisor, act_layer, gate_layer, mlp_bias) + + def forward(self, x): + x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * x.amax((2, 3), keepdim=True) + x_attn = self.fc2(self.act(self.fc1(x_pool))) + return x * F.sigmoid(x_attn) + + +class SpatialAttn(nn.Module): + """ Original CBAM spatial attention module + """ + def __init__(self, kernel_size=7, gate_layer='sigmoid'): + super(SpatialAttn, self).__init__() + self.conv = ConvBnAct(2, 1, kernel_size, act_layer=None) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_attn = torch.cat([x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1) + x_attn = self.conv(x_attn) + return x * self.gate(x_attn) + + +class LightSpatialAttn(nn.Module): + """An experimental 'lightweight' variant that sums avg_pool and max_pool results. + """ + def __init__(self, kernel_size=7, gate_layer='sigmoid'): + super(LightSpatialAttn, self).__init__() + self.conv = ConvBnAct(1, 1, kernel_size, act_layer=None) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_attn = 0.5 * x.mean(dim=1, keepdim=True) + 0.5 * x.amax(dim=1, keepdim=True) + x_attn = self.conv(x_attn) + return x * self.gate(x_attn) + + +class CbamModule(nn.Module): + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(CbamModule, self).__init__() + self.channel = ChannelAttn( + channels, rd_ratio=rd_ratio, rd_channels=rd_channels, + rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) + self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer) + + def forward(self, x): + x = self.channel(x) + x = self.spatial(x) + return x + + +class LightCbamModule(nn.Module): + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(LightCbamModule, self).__init__() + self.channel = LightChannelAttn( + channels, rd_ratio=rd_ratio, rd_channels=rd_channels, + rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) + self.spatial = LightSpatialAttn(spatial_kernel_size) + + def forward(self, x): + x = self.channel(x) + x = self.spatial(x) + return x + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/classifier.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/classifier.py new file mode 100644 index 0000000000..2b74541341 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/classifier.py @@ -0,0 +1,56 @@ +""" Classifier head and layer factory + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn +from torch.nn import functional 
as F + +from .adaptive_avgmax_pool import SelectAdaptivePool2d +from .linear import Linear + + +def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False): + flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling + if not pool_type: + assert num_classes == 0 or use_conv,\ + 'Pooling can only be disabled if classifier is also removed or conv classifier is used' + flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling) + global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=flatten_in_pool) + num_pooled_features = num_features * global_pool.feat_mult() + return global_pool, num_pooled_features + + +def _create_fc(num_features, num_classes, use_conv=False): + if num_classes <= 0: + fc = nn.Identity() # pass-through (no classifier) + elif use_conv: + fc = nn.Conv2d(num_features, num_classes, 1, bias=True) + else: + # NOTE: using my Linear wrapper that fixes AMP + torchscript casting issue + fc = Linear(num_features, num_classes, bias=True) + return fc + + +def create_classifier(num_features, num_classes, pool_type='avg', use_conv=False): + global_pool, num_pooled_features = _create_pool(num_features, num_classes, pool_type, use_conv=use_conv) + fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv) + return global_pool, fc + + +class ClassifierHead(nn.Module): + """Classifier head w/ configurable global pooling and dropout.""" + + def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0., use_conv=False): + super(ClassifierHead, self).__init__() + self.drop_rate = drop_rate + self.global_pool, num_pooled_features = _create_pool(in_chs, num_classes, pool_type, use_conv=use_conv) + self.fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv) + self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity() + + def forward(self, x): + x = self.global_pool(x) + if self.drop_rate: + x = F.dropout(x, p=float(self.drop_rate), training=self.training) + x = self.fc(x) + x = self.flatten(x) + return x diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/cond_conv2d.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/cond_conv2d.py new file mode 100644 index 0000000000..8b4bbca84d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/cond_conv2d.py @@ -0,0 +1,122 @@ +""" PyTorch Conditionally Parameterized Convolution (CondConv) + +Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference +(https://arxiv.org/abs/1904.04971) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import math +from functools import partial +import numpy as np +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .helpers import to_2tuple +from .conv2d_same import conv2d_same +from .padding import get_padding_value + + +def get_condconv_initializer(initializer, num_experts, expert_shape): + def condconv_initializer(weight): + """CondConv initializer function.""" + num_params = np.prod(expert_shape) + if (len(weight.shape) != 2 or weight.shape[0] != num_experts or + weight.shape[1] != num_params): + raise (ValueError( + 'CondConv variables must have shape [num_experts, num_params]')) + for i in range(num_experts): + initializer(weight[i].view(expert_shape)) + return condconv_initializer + + +class CondConv2d(nn.Module): + """ Conditionally Parameterized Convolution + Inspired by: 
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py + + Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: + https://github.com/pytorch/pytorch/issues/17983 + """ + __constants__ = ['in_channels', 'out_channels', 'dynamic_padding'] + + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): + super(CondConv2d, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = to_2tuple(kernel_size) + self.stride = to_2tuple(stride) + padding_val, is_padding_dynamic = get_padding_value( + padding, kernel_size, stride=stride, dilation=dilation) + self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript + self.padding = to_2tuple(padding_val) + self.dilation = to_2tuple(dilation) + self.groups = groups + self.num_experts = num_experts + + self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight_num_param = 1 + for wd in self.weight_shape: + weight_num_param *= wd + self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) + + if bias: + self.bias_shape = (self.out_channels,) + self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + + def reset_parameters(self): + init_weight = get_condconv_initializer( + partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) + init_weight(self.weight) + if self.bias is not None: + fan_in = np.prod(self.weight_shape[1:]) + bound = 1 / math.sqrt(fan_in) + init_bias = get_condconv_initializer( + partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) + init_bias(self.bias) + + def forward(self, x, routing_weights): + B, C, H, W = x.shape + weight = torch.matmul(routing_weights, self.weight) + new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight = weight.view(new_weight_shape) + bias = None + if self.bias is not None: + bias = torch.matmul(routing_weights, self.bias) + bias = bias.view(B * self.out_channels) + # move batch elements with channels so each batch element can be efficiently convolved with separate kernel + x = x.view(1, B * C, H, W) + if self.dynamic_padding: + out = conv2d_same( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + else: + out = F.conv2d( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) + + # Literal port (from TF definition) + # x = torch.split(x, 1, 0) + # weight = torch.split(weight, 1, 0) + # if self.bias is not None: + # bias = torch.matmul(routing_weights, self.bias) + # bias = torch.split(bias, 1, 0) + # else: + # bias = [None] * B + # out = [] + # for xi, wi, bi in zip(x, weight, bias): + # wi = wi.view(*self.weight_shape) + # if bi is not None: + # bi = bi.view(*self.bias_shape) + # out.append(self.conv_fn( + # xi, wi, bi, stride=self.stride, padding=self.padding, + # dilation=self.dilation, groups=self.groups)) + # out = torch.cat(out, 0) + return out diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/config.py 
b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/config.py new file mode 100644 index 0000000000..f07b9d782b --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/config.py @@ -0,0 +1,115 @@ +""" Model / Layer Config singleton state +""" +from typing import Any, Optional + +__all__ = [ + 'is_exportable', 'is_scriptable', 'is_no_jit', + 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config' +] + +# Set to True if prefer to have layers with no jit optimization (includes activations) +_NO_JIT = False + +# Set to True if prefer to have activation layers with no jit optimization +# NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying +# the jit flags so far are activations. This will change as more layers are updated and/or added. +_NO_ACTIVATION_JIT = False + +# Set to True if exporting a model with Same padding via ONNX +_EXPORTABLE = False + +# Set to True if wanting to use torch.jit.script on a model +_SCRIPTABLE = False + + +def is_no_jit(): + return _NO_JIT + + +class set_no_jit: + def __init__(self, mode: bool) -> None: + global _NO_JIT + self.prev = _NO_JIT + _NO_JIT = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _NO_JIT + _NO_JIT = self.prev + return False + + +def is_exportable(): + return _EXPORTABLE + + +class set_exportable: + def __init__(self, mode: bool) -> None: + global _EXPORTABLE + self.prev = _EXPORTABLE + _EXPORTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _EXPORTABLE + _EXPORTABLE = self.prev + return False + + +def is_scriptable(): + return _SCRIPTABLE + + +class set_scriptable: + def __init__(self, mode: bool) -> None: + global _SCRIPTABLE + self.prev = _SCRIPTABLE + _SCRIPTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + _SCRIPTABLE = self.prev + return False + + +class set_layer_config: + """ Layer config context manager that allows setting all layer config flags at once. + If a flag arg is None, it will not change the current value. 
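+
+    Example (illustrative; assumes ``import timm`` and uses its public
+    ``create_model`` factory)::
+
+        with set_layer_config(scriptable=True, no_jit=True):
+            model = timm.create_model('resnet50')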
+ """ + def __init__( + self, + scriptable: Optional[bool] = None, + exportable: Optional[bool] = None, + no_jit: Optional[bool] = None, + no_activation_jit: Optional[bool] = None): + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT + if scriptable is not None: + _SCRIPTABLE = scriptable + if exportable is not None: + _EXPORTABLE = exportable + if no_jit is not None: + _NO_JIT = no_jit + if no_activation_jit is not None: + _NO_ACTIVATION_JIT = no_activation_jit + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev + return False diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/conv2d_same.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/conv2d_same.py new file mode 100644 index 0000000000..75f0f98d4e --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/conv2d_same.py @@ -0,0 +1,42 @@ +""" Conv2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Tuple, Optional + +from .padding import pad_same, get_padding_value + + +def conv2d_same( + x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1): + x = pad_same(x, weight.shape[-2:], stride, dilation) + return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) + + +class Conv2dSame(nn.Conv2d): + """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions + """ + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True): + super(Conv2dSame, self).__init__( + in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) + + def forward(self, x): + return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): + padding = kwargs.pop('padding', '') + kwargs.setdefault('bias', False) + padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) + if is_dynamic: + return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) + else: + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) + + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/conv_bn_act.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/conv_bn_act.py new file mode 100644 index 0000000000..33005c37b7 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/conv_bn_act.py @@ -0,0 +1,40 @@ +""" Conv2d + BN + Act + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .create_conv2d import create_conv2d +from .create_norm_act import convert_norm_act + + +class ConvBnAct(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, + bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, aa_layer=None, + drop_block=None): + super(ConvBnAct, self).__init__() + use_aa = aa_layer is not None + + self.conv = create_conv2d( + in_channels, out_channels, kernel_size, stride=1 if use_aa else stride, + 
padding=padding, dilation=dilation, groups=groups, bias=bias) + + # NOTE for backwards compatibility with models that use separate norm and act layer definitions + norm_act_layer = convert_norm_act(norm_layer, act_layer) + self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block) + self.aa = aa_layer(channels=out_channels) if stride == 2 and use_aa else None + + @property + def in_channels(self): + return self.conv.in_channels + + @property + def out_channels(self): + return self.conv.out_channels + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.aa is not None: + x = self.aa(x) + return x diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_act.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_act.py new file mode 100644 index 0000000000..aa557692ac --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_act.py @@ -0,0 +1,153 @@ +""" Activation Factory +Hacked together by / Copyright 2020 Ross Wightman +""" +from typing import Union, Callable, Type + +from .activations import * +from .activations_jit import * +from .activations_me import * +from .config import is_exportable, is_scriptable, is_no_jit + +# PyTorch has an optimized, native 'silu' (aka 'swish') operator as of PyTorch 1.7. +# Also hardsigmoid, hardswish, and soon mish. This code will use native version if present. +# Eventually, the custom SiLU, Mish, Hard*, layers will be removed and only native variants will be used. +_has_silu = 'silu' in dir(torch.nn.functional) +_has_hardswish = 'hardswish' in dir(torch.nn.functional) +_has_hardsigmoid = 'hardsigmoid' in dir(torch.nn.functional) +_has_mish = 'mish' in dir(torch.nn.functional) + + +_ACT_FN_DEFAULT = dict( + silu=F.silu if _has_silu else swish, + swish=F.silu if _has_silu else swish, + mish=F.mish if _has_mish else mish, + relu=F.relu, + relu6=F.relu6, + leaky_relu=F.leaky_relu, + elu=F.elu, + celu=F.celu, + selu=F.selu, + gelu=gelu, + sigmoid=sigmoid, + tanh=tanh, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid, + hard_swish=F.hardswish if _has_hardswish else hard_swish, + hard_mish=hard_mish, +) + +_ACT_FN_JIT = dict( + silu=F.silu if _has_silu else swish_jit, + swish=F.silu if _has_silu else swish_jit, + mish=F.mish if _has_mish else mish_jit, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_jit, + hard_swish=F.hardswish if _has_hardswish else hard_swish_jit, + hard_mish=hard_mish_jit +) + +_ACT_FN_ME = dict( + silu=F.silu if _has_silu else swish_me, + swish=F.silu if _has_silu else swish_me, + mish=F.mish if _has_mish else mish_me, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_me, + hard_swish=F.hardswish if _has_hardswish else hard_swish_me, + hard_mish=hard_mish_me, +) + +_ACT_FNS = (_ACT_FN_ME, _ACT_FN_JIT, _ACT_FN_DEFAULT) +for a in _ACT_FNS: + a.setdefault('hardsigmoid', a.get('hard_sigmoid')) + a.setdefault('hardswish', a.get('hard_swish')) + + +_ACT_LAYER_DEFAULT = dict( + silu=nn.SiLU if _has_silu else Swish, + swish=nn.SiLU if _has_silu else Swish, + mish=nn.Mish if _has_mish else Mish, + relu=nn.ReLU, + relu6=nn.ReLU6, + leaky_relu=nn.LeakyReLU, + elu=nn.ELU, + prelu=PReLU, + celu=nn.CELU, + selu=nn.SELU, + gelu=GELU, + sigmoid=Sigmoid, + tanh=Tanh, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoid, + hard_swish=nn.Hardswish if _has_hardswish else HardSwish, + hard_mish=HardMish, +) + +_ACT_LAYER_JIT = dict( + silu=nn.SiLU if _has_silu else SwishJit, + 
swish=nn.SiLU if _has_silu else SwishJit, + mish=nn.Mish if _has_mish else MishJit, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidJit, + hard_swish=nn.Hardswish if _has_hardswish else HardSwishJit, + hard_mish=HardMishJit +) + +_ACT_LAYER_ME = dict( + silu=nn.SiLU if _has_silu else SwishMe, + swish=nn.SiLU if _has_silu else SwishMe, + mish=nn.Mish if _has_mish else MishMe, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidMe, + hard_swish=nn.Hardswish if _has_hardswish else HardSwishMe, + hard_mish=HardMishMe, +) + +_ACT_LAYERS = (_ACT_LAYER_ME, _ACT_LAYER_JIT, _ACT_LAYER_DEFAULT) +for a in _ACT_LAYERS: + a.setdefault('hardsigmoid', a.get('hard_sigmoid')) + a.setdefault('hardswish', a.get('hard_swish')) + + +def get_act_fn(name: Union[Callable, str] = 'relu'): + """ Activation Function Factory + Fetching activation fns by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. + """ + if not name: + return None + if isinstance(name, Callable): + return name + if not (is_no_jit() or is_exportable() or is_scriptable()): + # If not exporting or scripting the model, first look for a memory-efficient version with + # custom autograd, then fallback + if name in _ACT_FN_ME: + return _ACT_FN_ME[name] + if is_exportable() and name in ('silu', 'swish'): + # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack + return swish + if not (is_no_jit() or is_exportable()): + if name in _ACT_FN_JIT: + return _ACT_FN_JIT[name] + return _ACT_FN_DEFAULT[name] + + +def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'): + """ Activation Layer Factory + Fetching activation layers by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. 
+ """ + if not name: + return None + if isinstance(name, type): + return name + if not (is_no_jit() or is_exportable() or is_scriptable()): + if name in _ACT_LAYER_ME: + return _ACT_LAYER_ME[name] + if is_exportable() and name in ('silu', 'swish'): + # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack + return Swish + if not (is_no_jit() or is_exportable()): + if name in _ACT_LAYER_JIT: + return _ACT_LAYER_JIT[name] + return _ACT_LAYER_DEFAULT[name] + + +def create_act_layer(name: Union[nn.Module, str], inplace=None, **kwargs): + act_layer = get_act_layer(name) + if act_layer is None: + return None + return act_layer(**kwargs) if inplace is None else act_layer(inplace=inplace, **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_attn.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_attn.py new file mode 100644 index 0000000000..028c0f7596 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_attn.py @@ -0,0 +1,89 @@ +""" Attention Factory + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +from functools import partial + +from .bottleneck_attn import BottleneckAttn +from .cbam import CbamModule, LightCbamModule +from .eca import EcaModule, CecaModule +from .gather_excite import GatherExcite +from .global_context import GlobalContext +from .halo_attn import HaloAttn +from .lambda_layer import LambdaLayer +from .non_local_attn import NonLocalAttn, BatNonLocalAttn +from .selective_kernel import SelectiveKernel +from .split_attn import SplitAttn +from .squeeze_excite import SEModule, EffectiveSEModule + + +def get_attn(attn_type): + if isinstance(attn_type, torch.nn.Module): + return attn_type + module_cls = None + if attn_type is not None: + if isinstance(attn_type, str): + attn_type = attn_type.lower() + # Lightweight attention modules (channel and/or coarse spatial). + # Typically added to existing network architecture blocks in addition to existing convolutions. + if attn_type == 'se': + module_cls = SEModule + elif attn_type == 'ese': + module_cls = EffectiveSEModule + elif attn_type == 'eca': + module_cls = EcaModule + elif attn_type == 'ecam': + module_cls = partial(EcaModule, use_mlp=True) + elif attn_type == 'ceca': + module_cls = CecaModule + elif attn_type == 'ge': + module_cls = GatherExcite + elif attn_type == 'gc': + module_cls = GlobalContext + elif attn_type == 'gca': + module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False) + elif attn_type == 'cbam': + module_cls = CbamModule + elif attn_type == 'lcbam': + module_cls = LightCbamModule + + # Attention / attention-like modules w/ significant params + # Typically replace some of the existing workhorse convs in a network architecture. + # All of these accept a stride argument and can spatially downsample the input. + elif attn_type == 'sk': + module_cls = SelectiveKernel + elif attn_type == 'splat': + module_cls = SplitAttn + + # Self-attention / attention-like modules w/ significant compute and/or params + # Typically replace some of the existing workhorse convs in a network architecture. + # All of these accept a stride argument and can spatially downsample the input. + elif attn_type == 'lambda': + return LambdaLayer + elif attn_type == 'bottleneck': + return BottleneckAttn + elif attn_type == 'halo': + return HaloAttn + elif attn_type == 'nl': + module_cls = NonLocalAttn + elif attn_type == 'bat': + module_cls = BatNonLocalAttn + + # Woops! 
+ else: + assert False, "Invalid attn module (%s)" % attn_type + elif isinstance(attn_type, bool): + if attn_type: + module_cls = SEModule + else: + module_cls = attn_type + return module_cls + + +def create_attn(attn_type, channels, **kwargs): + module_cls = get_attn(attn_type) + if module_cls is not None: + # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels + return module_cls(channels, **kwargs) + return None diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_conv2d.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_conv2d.py new file mode 100644 index 0000000000..3a0cc03a5c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_conv2d.py @@ -0,0 +1,31 @@ +""" Create Conv2d Factory Method + +Hacked together by / Copyright 2020 Ross Wightman +""" + +from .mixed_conv2d import MixedConv2d +from .cond_conv2d import CondConv2d +from .conv2d_same import create_conv2d_pad + + +def create_conv2d(in_channels, out_channels, kernel_size, **kwargs): + """ Select a 2d convolution implementation based on arguments + Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. + + Used extensively by EfficientNet, MobileNetv3 and related networks. + """ + if isinstance(kernel_size, list): + assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently + assert 'groups' not in kwargs # MixedConv groups are defined by kernel list + # We're going to use only lists for defining the MixedConv2d kernel groups, + # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. + m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs) + else: + depthwise = kwargs.pop('depthwise', False) + # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0 + groups = in_channels if depthwise else kwargs.pop('groups', 1) + if 'num_experts' in kwargs and kwargs['num_experts'] > 0: + m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs) + else: + m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs) + return m diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_norm_act.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_norm_act.py new file mode 100644 index 0000000000..5b5629457d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/create_norm_act.py @@ -0,0 +1,83 @@ +""" NormAct (Normalizaiton + Activation Layer) Factory + +Create norm + act combo modules that attempt to be backwards compatible with separate norm + act +isntances in models. Where these are used it will be possible to swap separate BN + act layers with +combined modules like IABN or EvoNorms. 
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +import types +import functools + +import torch +import torch.nn as nn + +from .evo_norm import EvoNormBatch2d, EvoNormSample2d +from .norm_act import BatchNormAct2d, GroupNormAct +from .inplace_abn import InplaceAbn + +_NORM_ACT_TYPES = {BatchNormAct2d, GroupNormAct, EvoNormBatch2d, EvoNormSample2d, InplaceAbn} +_NORM_ACT_REQUIRES_ARG = {BatchNormAct2d, GroupNormAct, InplaceAbn} # requires act_layer arg to define act type + + +def get_norm_act_layer(layer_class): + layer_class = layer_class.replace('_', '').lower() + if layer_class.startswith("batchnorm"): + layer = BatchNormAct2d + elif layer_class.startswith("groupnorm"): + layer = GroupNormAct + elif layer_class == "evonormbatch": + layer = EvoNormBatch2d + elif layer_class == "evonormsample": + layer = EvoNormSample2d + elif layer_class == "iabn" or layer_class == "inplaceabn": + layer = InplaceAbn + else: + assert False, "Invalid norm_act layer (%s)" % layer_class + return layer + + +def create_norm_act(layer_type, num_features, apply_act=True, jit=False, **kwargs): + layer_parts = layer_type.split('-') # e.g. batchnorm-leaky_relu + assert len(layer_parts) in (1, 2) + layer = get_norm_act_layer(layer_parts[0]) + #activation_class = layer_parts[1].lower() if len(layer_parts) > 1 else '' # FIXME support string act selection? + layer_instance = layer(num_features, apply_act=apply_act, **kwargs) + if jit: + layer_instance = torch.jit.script(layer_instance) + return layer_instance + + +def convert_norm_act(norm_layer, act_layer): + assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) + assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial)) + norm_act_kwargs = {} + + # unbind partial fn, so args can be rebound later + if isinstance(norm_layer, functools.partial): + norm_act_kwargs.update(norm_layer.keywords) + norm_layer = norm_layer.func + + if isinstance(norm_layer, str): + norm_act_layer = get_norm_act_layer(norm_layer) + elif norm_layer in _NORM_ACT_TYPES: + norm_act_layer = norm_layer + elif isinstance(norm_layer, types.FunctionType): + # if function type, must be a lambda/fn that creates a norm_act layer + norm_act_layer = norm_layer + else: + type_name = norm_layer.__name__.lower() + if type_name.startswith('batchnorm'): + norm_act_layer = BatchNormAct2d + elif type_name.startswith('groupnorm'): + norm_act_layer = GroupNormAct + else: + assert False, f"No equivalent norm_act layer for {type_name}" + + if norm_act_layer in _NORM_ACT_REQUIRES_ARG: + # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation. + # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types + norm_act_kwargs.setdefault('act_layer', act_layer) + if norm_act_kwargs: + norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args + return norm_act_layer diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/drop.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/drop.py new file mode 100644 index 0000000000..6de9e3f729 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/drop.py @@ -0,0 +1,168 @@ +""" DropBlock, DropPath + +PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers. 
+
+Papers:
+DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
+
+Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
+
+Code:
+DropBlock impl inspired by two Tensorflow impl that I liked:
+ - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
+ - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+def drop_block_2d(
+        x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0,
+        with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
+    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
+
+    DropBlock with an experimental Gaussian noise option. This layer has been tested on a few training
+    runs with success, but needs further validation and possibly optimization for lower runtime impact.
+    """
+    B, C, H, W = x.shape
+    total_size = W * H
+    clipped_block_size = min(block_size, min(W, H))
+    # seed_drop_rate, the gamma parameter
+    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
+        (W - block_size + 1) * (H - block_size + 1))
+
+    # Forces the block to be inside the feature map.
+    w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device))
+    valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
+                  ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
+    valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)
+
+    if batchwise:
+        # one mask for whole batch, quite a bit faster
+        uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
+    else:
+        uniform_noise = torch.rand_like(x)
+    block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
+    block_mask = -F.max_pool2d(
+        -block_mask,
+        kernel_size=clipped_block_size,  # block_size,
+        stride=1,
+        padding=clipped_block_size // 2)
+
+    if with_noise:
+        normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
+        if inplace:
+            x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
+        else:
+            x = x * block_mask + normal_noise * (1 - block_mask)
+    else:
+        normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
+        if inplace:
+            x.mul_(block_mask * normalize_scale)
+        else:
+            x = x * block_mask * normalize_scale
+    return x
+
+
+def drop_block_fast_2d(
+        x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7,
+        gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
+    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
+
+    DropBlock with an experimental Gaussian noise option. Simplified from above without concern for valid
+    block mask at edges.
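+
+    The per-location seed probability (``gamma`` below) is scaled so that, once each
+    seed is grown into a ``clipped_block_size x clipped_block_size`` block by the
+    max-pool, the expected fraction of dropped activations is roughly ``drop_prob``:
+    ``gamma = gamma_scale * drop_prob * H * W / clipped_block_size**2 / ((W - block_size + 1) * (H - block_size + 1))``.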
+ """ + B, C, H, W = x.shape + total_size = W * H + clipped_block_size = min(block_size, min(W, H)) + gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( + (W - block_size + 1) * (H - block_size + 1)) + + if batchwise: + # one mask for whole batch, quite a bit faster + block_mask = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma + else: + # mask per batch element + block_mask = torch.rand_like(x) < gamma + block_mask = F.max_pool2d( + block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2) + + if with_noise: + normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) + if inplace: + x.mul_(1. - block_mask).add_(normal_noise * block_mask) + else: + x = x * (1. - block_mask) + normal_noise * block_mask + else: + block_mask = 1 - block_mask + normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(dtype=x.dtype) + if inplace: + x.mul_(block_mask * normalize_scale) + else: + x = x * block_mask * normalize_scale + return x + + +class DropBlock2d(nn.Module): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + """ + def __init__(self, + drop_prob=0.1, + block_size=7, + gamma_scale=1.0, + with_noise=False, + inplace=False, + batchwise=False, + fast=True): + super(DropBlock2d, self).__init__() + self.drop_prob = drop_prob + self.gamma_scale = gamma_scale + self.block_size = block_size + self.with_noise = with_noise + self.inplace = inplace + self.batchwise = batchwise + self.fast = fast # FIXME finish comparisons of fast vs not + + def forward(self, x): + if not self.training or not self.drop_prob: + return x + if self.fast: + return drop_block_fast_2d( + x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise) + else: + return drop_block_2d( + x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise) + + +def drop_path(x, drop_prob: float = 0., training: bool = False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for + changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use + 'survival rate' as the argument. + + """ + if drop_prob == 0. or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) + random_tensor.floor_() # binarize + output = x.div(keep_prob) * random_tensor + return output + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
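+
+    During training each sample has its residual-branch output zeroed with probability drop_prob, and the
+    surviving samples are scaled by 1 / (1 - drop_prob) so the expected output is unchanged (see
+    drop_path() above).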
+ """ + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/eca.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/eca.py new file mode 100644 index 0000000000..e29be6ac3c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/eca.py @@ -0,0 +1,145 @@ +""" +ECA module from ECAnet + +paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks +https://arxiv.org/abs/1910.03151 + +Original ECA model borrowed from https://github.com/BangguWu/ECANet + +Modified circular ECA implementation and adaption for use in timm package +by Chris Ha https://github.com/VRandme + +Original License: + +MIT License + +Copyright (c) 2019 BangguWu, Qilong Wang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" +import math +from torch import nn +import torch.nn.functional as F + + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class EcaModule(nn.Module): + """Constructs an ECA module. + + Args: + channels: Number of channels of the input feature map for use in adaptive kernel sizes + for actual calculations according to channel. + gamma, beta: when channel is given parameters of mapping function + refer to original paper https://arxiv.org/pdf/1910.03151.pdf + (default=None. if channel size not given, use k_size given for kernel size.) 
+        kernel_size: Adaptive selection of kernel size (default=3)
+        gamma: used in kernel_size calc, see above
+        beta: used in kernel_size calc, see above
+        act_layer: optional non-linearity after conv, enables conv bias, this is an experiment
+        gate_layer: gating non-linearity to use
+    """
+    def __init__(
+            self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid',
+            rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False):
+        super(EcaModule, self).__init__()
+        if channels is not None:
+            t = int(abs(math.log(channels, 2) + beta) / gamma)
+            kernel_size = max(t if t % 2 else t + 1, 3)
+        assert kernel_size % 2 == 1
+        padding = (kernel_size - 1) // 2
+        if use_mlp:
+            # NOTE 'mlp' mode is a timm experiment, not in paper
+            assert channels is not None
+            if rd_channels is None:
+                rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor)
+            act_layer = act_layer or nn.ReLU
+            self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True)
+            self.act = create_act_layer(act_layer)
+            self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True)
+        else:
+            self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False)
+            self.act = None
+            self.conv2 = None
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        y = x.mean((2, 3)).view(x.shape[0], 1, -1)  # view for 1d conv
+        y = self.conv(y)
+        if self.conv2 is not None:
+            y = self.act(y)
+            y = self.conv2(y)
+        y = self.gate(y).view(x.shape[0], -1, 1, 1)
+        return x * y.expand_as(x)
+
+
+EfficientChannelAttn = EcaModule  # alias
+
+
+class CecaModule(nn.Module):
+    """Constructs a circular ECA module.
+
+    ECA module where the conv uses circular padding rather than zero padding.
+    Unlike the spatial dimension, the channels do not have inherent ordering nor
+    locality. Although this module, in essence, applies such an assumption, it is unnecessary
+    to limit the channels on either "edge" from being circularly adapted to each other.
+    This will fundamentally increase connectivity and possibly increase performance metrics
+    (accuracy, robustness), without significantly impacting resource metrics
+    (parameter size, throughput, latency, etc.)
+
+    Args:
+        channels: Number of channels of the input feature map for use in adaptive kernel sizes
+            for actual calculations according to channel.
+        gamma, beta: when channel is given parameters of mapping function
+            refer to original paper https://arxiv.org/pdf/1910.03151.pdf
+            (default=None. if channel size not given, use k_size given for kernel size.)
+        kernel_size: Adaptive selection of kernel size (default=3)
+        gamma: used in kernel_size calc, see above
+        beta: used in kernel_size calc, see above
+        act_layer: optional non-linearity after conv, enables conv bias, this is an experiment
+        gate_layer: gating non-linearity to use
+    """
+
+    def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'):
+        super(CecaModule, self).__init__()
+        if channels is not None:
+            t = int(abs(math.log(channels, 2) + beta) / gamma)
+            kernel_size = max(t if t % 2 else t + 1, 3)
+        has_act = act_layer is not None
+        assert kernel_size % 2 == 1
+
+        # PyTorch circular padding mode is buggy as of pytorch 1.4
+        # see https://github.com/pytorch/pytorch/pull/17240
+        # implement manual circular padding
+        self.padding = (kernel_size - 1) // 2
+        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act)
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        y = x.mean((2, 3)).view(x.shape[0], 1, -1)
+        # Manually implement circular padding; F.pad's 'circular' mode does not seem to be bugged
+        y = F.pad(y, (self.padding, self.padding), mode='circular')
+        y = self.conv(y)
+        y = self.gate(y).view(x.shape[0], -1, 1, 1)
+        return x * y.expand_as(x)
+
+
+CircularEfficientChannelAttn = CecaModule
diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/evo_norm.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/evo_norm.py
new file mode 100644
index 0000000000..9023afd0e8
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/evo_norm.py
@@ -0,0 +1,83 @@
+"""EvoNormB0 (Batched) and EvoNormS0 (Sample) in PyTorch
+
+An attempt at getting decent performing EvoNorms running in PyTorch.
+While currently faster than other impl, still quite a ways off the built-in BN
+in terms of memory usage and throughput (roughly 5x mem, 1/2 - 1/3x speed).
+
+Still very much a WIP, fiddling with buffer usage, in-place/jit optimizations, and layouts.
+ +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +import torch.nn as nn + + +class EvoNormBatch2d(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, drop_block=None): + super(EvoNormBatch2d, self).__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.momentum = momentum + self.eps = eps + param_shape = (1, num_features, 1, 1) + self.weight = nn.Parameter(torch.ones(param_shape), requires_grad=True) + self.bias = nn.Parameter(torch.zeros(param_shape), requires_grad=True) + if apply_act: + self.v = nn.Parameter(torch.ones(param_shape), requires_grad=True) + self.register_buffer('running_var', torch.ones(1, num_features, 1, 1)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.apply_act: + nn.init.ones_(self.v) + + def forward(self, x): + assert x.dim() == 4, 'expected 4D input' + x_type = x.dtype + if self.training: + var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True) + n = x.numel() / x.shape[1] + self.running_var.copy_( + var.detach() * self.momentum * (n / (n - 1)) + self.running_var * (1 - self.momentum)) + else: + var = self.running_var + + if self.apply_act: + v = self.v.to(dtype=x_type) + d = x * v + (x.var(dim=(2, 3), unbiased=False, keepdim=True) + self.eps).sqrt().to(dtype=x_type) + d = d.max((var + self.eps).sqrt().to(dtype=x_type)) + x = x / d + return x * self.weight + self.bias + + +class EvoNormSample2d(nn.Module): + def __init__(self, num_features, apply_act=True, groups=8, eps=1e-5, drop_block=None): + super(EvoNormSample2d, self).__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.groups = groups + self.eps = eps + param_shape = (1, num_features, 1, 1) + self.weight = nn.Parameter(torch.ones(param_shape), requires_grad=True) + self.bias = nn.Parameter(torch.zeros(param_shape), requires_grad=True) + if apply_act: + self.v = nn.Parameter(torch.ones(param_shape), requires_grad=True) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.apply_act: + nn.init.ones_(self.v) + + def forward(self, x): + assert x.dim() == 4, 'expected 4D input' + B, C, H, W = x.shape + assert C % self.groups == 0 + if self.apply_act: + n = x * (x * self.v).sigmoid() + x = x.reshape(B, self.groups, -1) + x = n.reshape(B, self.groups, -1) / (x.var(dim=-1, unbiased=False, keepdim=True) + self.eps).sqrt() + x = x.reshape(B, C, H, W) + return x * self.weight + self.bias diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/gather_excite.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/gather_excite.py new file mode 100644 index 0000000000..2d60dc961e --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/gather_excite.py @@ -0,0 +1,90 @@ +""" Gather-Excite Attention Block + +Paper: `Gather-Excite: Exploiting Feature Context in CNNs` - https://arxiv.org/abs/1810.12348 + +Official code here, but it's only partial impl in Caffe: https://github.com/hujie-frank/GENet + +I've tried to support all of the extent both w/ and w/o params. I don't believe I've seen another +impl that covers all of the cases. 
+ +NOTE: extent=0 + extra_params=False is equivalent to Squeeze-and-Excitation + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math + +from torch import nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer, get_act_layer +from .create_conv2d import create_conv2d +from .helpers import make_divisible +from .mlp import ConvMlp + + +class GatherExcite(nn.Module): + """ Gather-Excite Attention Module + """ + def __init__( + self, channels, feat_size=None, extra_params=False, extent=0, use_mlp=True, + rd_ratio=1./16, rd_channels=None, rd_divisor=1, add_maxpool=False, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, gate_layer='sigmoid'): + super(GatherExcite, self).__init__() + self.add_maxpool = add_maxpool + act_layer = get_act_layer(act_layer) + self.extent = extent + if extra_params: + self.gather = nn.Sequential() + if extent == 0: + assert feat_size is not None, 'spatial feature size must be specified for global extent w/ params' + self.gather.add_module( + 'conv1', create_conv2d(channels, channels, kernel_size=feat_size, stride=1, depthwise=True)) + if norm_layer: + self.gather.add_module(f'norm1', nn.BatchNorm2d(channels)) + else: + assert extent % 2 == 0 + num_conv = int(math.log2(extent)) + for i in range(num_conv): + self.gather.add_module( + f'conv{i + 1}', + create_conv2d(channels, channels, kernel_size=3, stride=2, depthwise=True)) + if norm_layer: + self.gather.add_module(f'norm{i + 1}', nn.BatchNorm2d(channels)) + if i != num_conv - 1: + self.gather.add_module(f'act{i + 1}', act_layer(inplace=True)) + else: + self.gather = None + if self.extent == 0: + self.gk = 0 + self.gs = 0 + else: + assert extent % 2 == 0 + self.gk = self.extent * 2 - 1 + self.gs = self.extent + + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
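+        # e.g. channels=256 with the default rd_ratio=1/16 and rd_divisor=1 gives rd_channels=16,
+        # a squeeze-excite style bottleneck for the excite MLP below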
+ self.mlp = ConvMlp(channels, rd_channels, act_layer=act_layer) if use_mlp else nn.Identity() + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + size = x.shape[-2:] + if self.gather is not None: + x_ge = self.gather(x) + else: + if self.extent == 0: + # global extent + x_ge = x.mean(dim=(2, 3), keepdims=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_ge = 0.5 * x_ge + 0.5 * x.amax((2, 3), keepdim=True) + else: + x_ge = F.avg_pool2d( + x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2, count_include_pad=False) + if self.add_maxpool: + # experimental codepath, may remove or change + x_ge = 0.5 * x_ge + 0.5 * F.max_pool2d(x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2) + x_ge = self.mlp(x_ge) + if x_ge.shape[-1] != 1 or x_ge.shape[-2] != 1: + x_ge = F.interpolate(x_ge, size=size) + return x * self.gate(x_ge) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/global_context.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/global_context.py new file mode 100644 index 0000000000..de7fb5c15f --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/global_context.py @@ -0,0 +1,67 @@ +""" Global Context Attention Block + +Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond` + - https://arxiv.org/abs/1904.11492 + +Official code consulted as reference: https://github.com/xvjiarui/GCNet + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer, get_act_layer +from .helpers import make_divisible +from .mlp import ConvMlp +from .norm import LayerNorm2d + + +class GlobalContext(nn.Module): + + def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False, + rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'): + super(GlobalContext, self).__init__() + act_layer = get_act_layer(act_layer) + + self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None + + if rd_channels is None: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
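+        # e.g. channels=512 with the default rd_ratio=1/8 and rd_divisor=1 gives rd_channels=64 for the
+        # 1x1-conv MLPs; 'scale' fuses the context multiplicatively via the gate, 'add' fuses additively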
+ if fuse_add: + self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_add = None + if fuse_scale: + self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_scale = None + + self.gate = create_act_layer(gate_layer) + self.init_last_zero = init_last_zero + self.reset_parameters() + + def reset_parameters(self): + if self.conv_attn is not None: + nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu') + if self.mlp_add is not None: + nn.init.zeros_(self.mlp_add.fc2.weight) + + def forward(self, x): + B, C, H, W = x.shape + + if self.conv_attn is not None: + attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W) + attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1) + context = x.reshape(B, C, H * W).unsqueeze(1) @ attn + context = context.view(B, C, 1, 1) + else: + context = x.mean(dim=(2, 3), keepdim=True) + + if self.mlp_scale is not None: + mlp_x = self.mlp_scale(context) + x = x * self.gate(mlp_x) + if self.mlp_add is not None: + mlp_x = self.mlp_add(context) + x = x + mlp_x + + return x diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/halo_attn.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/halo_attn.py new file mode 100644 index 0000000000..034c66a85e --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/halo_attn.py @@ -0,0 +1,185 @@ +""" Halo Self Attention + +Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + +@misc{2103.12731, +Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and + Jonathon Shlens}, +Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones}, +Year = {2021}, +} + +Status: +This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me. +The attention mechanism works but it's slow as implemented. 
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import Tuple, List + +import torch +from torch import nn +import torch.nn.functional as F + +from .weight_init import trunc_normal_ + + +def rel_logits_1d(q, rel_k, permute_mask: List[int]): + """ Compute relative logits along one dimension + + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + Args: + q: (batch, height, width, dim) + rel_k: (2 * window - 1, dim) + permute_mask: permute output dim according to this + """ + B, H, W, dim = q.shape + rel_size = rel_k.shape[0] + win_size = (rel_size + 1) // 2 + + x = (q @ rel_k.transpose(-1, -2)) + x = x.reshape(-1, W, rel_size) + + # pad to shift from relative to absolute indexing + x_pad = F.pad(x, [0, 1]).flatten(1) + x_pad = F.pad(x_pad, [0, rel_size - W]) + + # reshape and slice out the padded elements + x_pad = x_pad.reshape(-1, W + 1, rel_size) + x = x_pad[:, :W, win_size - 1:] + + # reshape and tile + x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1) + return x.permute(permute_mask) + + +class PosEmbedRel(nn.Module): + """ Relative Position Embedding + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + """ + def __init__(self, block_size, win_size, dim_head, scale): + """ + Args: + block_size (int): block size + win_size (int): neighbourhood window size + dim_head (int): attention head dim + scale (float): scale factor (for init) + """ + super().__init__() + self.block_size = block_size + self.dim_head = dim_head + self.scale = scale + self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * self.scale) + self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * self.scale) + + def forward(self, q): + B, BB, HW, _ = q.shape + + # relative logits in width dimension. + q = q.reshape(-1, self.block_size, self.block_size, self.dim_head) + rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) + + # relative logits in height dimension. + q = q.transpose(1, 2) + rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) + + rel_logits = rel_logits_h + rel_logits_w + rel_logits = rel_logits.reshape(B, BB, HW, -1) + return rel_logits + + +class HaloAttn(nn.Module): + """ Halo Attention + + Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + """ + def __init__( + self, dim, dim_out=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3, qkv_bias=False): + super().__init__() + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + self.stride = stride + self.num_heads = num_heads + self.dim_head_qk = dim_head or dim_out // num_heads + self.dim_head_v = dim_out // self.num_heads + self.dim_out_qk = num_heads * self.dim_head_qk + self.dim_out_v = num_heads * self.dim_head_v + self.block_size = block_size + self.halo_size = halo_size + self.win_size = block_size + halo_size * 2 # neighbourhood window size + self.scale = self.dim_head_qk ** -0.5 + + # FIXME not clear if this stride behaviour is what the paper intended + # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving + # data in unfolded block form. I haven't wrapped my head around how that'd look. 
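+        # NOTE q is computed from the (optionally strided) input and split into block_size x block_size
+        # query blocks; k/v are gathered in forward() into overlapping windows of win_size = block_size
+        # + 2 * halo_size, e.g. block_size=8, halo_size=3 -> 14x14 key/value windows per 8x8 query block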
+ self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.stride, bias=qkv_bias) + self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias) + + self.pos_embed = PosEmbedRel( + block_size=block_size // self.stride, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale) + + self.reset_parameters() + + def reset_parameters(self): + std = self.q.weight.shape[1] ** -0.5 # fan-in + trunc_normal_(self.q.weight, std=std) + trunc_normal_(self.kv.weight, std=std) + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + assert H % self.block_size == 0 + assert W % self.block_size == 0 + num_h_blocks = H // self.block_size + num_w_blocks = W // self.block_size + num_blocks = num_h_blocks * num_w_blocks + bs_stride = self.block_size // self.stride + + q = self.q(x) + # unfold + q = q.reshape(-1, self.dim_head_qk, num_h_blocks, bs_stride, num_w_blocks, bs_stride).permute(0, 1, 3, 5, 2, 4) + # B, num_heads * dim_head * block_size ** 2, num_blocks + q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3) + # B * num_heads, num_blocks, block_size ** 2, dim_head + + kv = self.kv(x) + # generate overlapping windows for kv + kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]) + kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape( + B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1) + k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1) + # B * num_heads, num_blocks, win_size ** 2, dim_head_qk or dim_head_v + + attn = (q @ k.transpose(-1, -2)) * self.scale + attn = attn + self.pos_embed(q) # B * num_heads, num_blocks, block_size ** 2, win_size ** 2 + attn = attn.softmax(dim=-1) + + out = (attn @ v).transpose(1, 3) # B * num_heads, dim_head_v, block_size ** 2, num_blocks + # fold + out = out.reshape(-1, bs_stride, bs_stride, num_h_blocks, num_w_blocks) + out = out.permute(0, 3, 1, 4, 2).contiguous().view(B, self.dim_out_v, H // self.stride, W // self.stride) + # B, dim_out, H // stride, W // stride + return out + + +""" Two alternatives for overlapping windows. 
+ +`.unfold().unfold()` is same speed as stride tricks with similar clarity as F.unfold() + + if self.stride_tricks: + kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous() + kv = kv.as_strided(( + B, self.dim_out_qk + self.dim_out_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks), + stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size)) + else: + kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size) + kv = kv.reshape( + B * self.num_heads, self.dim_head_qk + self.dim_head_v, -1, num_blocks).transpose(1, 3) +""" diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/helpers.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/helpers.py new file mode 100644 index 0000000000..cc54ca7f8a --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/helpers.py @@ -0,0 +1,31 @@ +""" Layer/Module Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +from itertools import repeat +import collections.abc + + +# From PyTorch internals +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) +to_ntuple = _ntuple + + +def make_divisible(v, divisor=8, min_value=None, round_limit=.9): + min_value = min_value or divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < round_limit * v: + new_v += divisor + return new_v diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/inplace_abn.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/inplace_abn.py new file mode 100644 index 0000000000..3aae7cf563 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/inplace_abn.py @@ -0,0 +1,87 @@ +import torch +from torch import nn as nn + +try: + from inplace_abn.functions import inplace_abn, inplace_abn_sync + has_iabn = True +except ImportError: + has_iabn = False + + def inplace_abn(x, weight, bias, running_mean, running_var, + training=True, momentum=0.1, eps=1e-05, activation="leaky_relu", activation_param=0.01): + raise ImportError( + "Please install InplaceABN:'pip install git+https://github.com/mapillary/inplace_abn.git@v1.0.12'") + + def inplace_abn_sync(**kwargs): + inplace_abn(**kwargs) + + +class InplaceAbn(nn.Module): + """Activated Batch Normalization + + This gathers a BatchNorm and an activation function in a single module + + Parameters + ---------- + num_features : int + Number of feature channels in the input and output. + eps : float + Small constant to prevent numerical issues. + momentum : float + Momentum factor applied to compute running statistics. + affine : bool + If `True` apply learned scale and shift transformation after normalization. + act_layer : str or nn.Module type + Name or type of the activation functions, one of: `leaky_relu`, `elu` + act_param : float + Negative slope for the `leaky_relu` activation. 
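+    apply_act : bool
+        If `False` skip the activation entirely, equivalent to `act_layer='identity'`.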
+ """ + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, apply_act=True, + act_layer="leaky_relu", act_param=0.01, drop_block=None): + super(InplaceAbn, self).__init__() + self.num_features = num_features + self.affine = affine + self.eps = eps + self.momentum = momentum + if apply_act: + if isinstance(act_layer, str): + assert act_layer in ('leaky_relu', 'elu', 'identity', '') + self.act_name = act_layer if act_layer else 'identity' + else: + # convert act layer passed as type to string + if act_layer == nn.ELU: + self.act_name = 'elu' + elif act_layer == nn.LeakyReLU: + self.act_name = 'leaky_relu' + elif act_layer == nn.Identity: + self.act_name = 'identity' + else: + assert False, f'Invalid act layer {act_layer.__name__} for IABN' + else: + self.act_name = 'identity' + self.act_param = act_param + if self.affine: + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + else: + self.register_parameter('weight', None) + self.register_parameter('bias', None) + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.constant_(self.running_mean, 0) + nn.init.constant_(self.running_var, 1) + if self.affine: + nn.init.constant_(self.weight, 1) + nn.init.constant_(self.bias, 0) + + def forward(self, x): + output = inplace_abn( + x, self.weight, self.bias, self.running_mean, self.running_var, + self.training, self.momentum, self.eps, self.act_name, self.act_param) + if isinstance(output, tuple): + output = output[0] + return output diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/lambda_layer.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/lambda_layer.py new file mode 100644 index 0000000000..eeb77e45f7 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/lambda_layer.py @@ -0,0 +1,115 @@ +""" Lambda Layer + +Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` + - https://arxiv.org/abs/2102.08602 + +@misc{2102.08602, +Author = {Irwan Bello}, +Title = {LambdaNetworks: Modeling Long-Range Interactions Without Attention}, +Year = {2021}, +} + +Status: +This impl is a WIP. Code snippets in the paper were used as reference but +good chance some details are missing/wrong. + +I've only implemented local lambda conv based pos embeddings. + +For a PyTorch impl that includes other embedding options checkout +https://github.com/lucidrains/lambda-networks + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +from torch import nn +import torch.nn.functional as F + +from .helpers import to_2tuple +from .weight_init import trunc_normal_ + + +def rel_pos_indices(size): + size = to_2tuple(size) + pos = torch.stack(torch.meshgrid(torch.arange(size[0]), torch.arange(size[1]))).flatten(1) + rel_pos = pos[:, None, :] - pos[:, :, None] + rel_pos[0] += size[0] - 1 + rel_pos[1] += size[1] - 1 + return rel_pos # 2, H * W, H * W + + +class LambdaLayer(nn.Module): + """Lambda Layer + + Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` + - https://arxiv.org/abs/2102.08602 + + NOTE: intra-depth parameter 'u' is fixed at 1. It did not appear worth the complexity to add. 
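+
+    With the default local path (r set), position interactions come from an r x r lambda convolution;
+    with r=None a full (2 * H - 1, 2 * W - 1) relative position embedding is used instead and feat_size
+    must be specified.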
+ """ + def __init__( + self, + dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=16, r=7, qkv_bias=False): + super().__init__() + self.dim = dim + self.dim_out = dim_out or dim + self.dim_k = dim_head # query depth 'k' + self.num_heads = num_heads + assert self.dim_out % num_heads == 0, ' should be divided by num_heads' + self.dim_v = self.dim_out // num_heads # value depth 'v' + + self.qkv = nn.Conv2d( + dim, + num_heads * dim_head + dim_head + self.dim_v, + kernel_size=1, bias=qkv_bias) + self.norm_q = nn.BatchNorm2d(num_heads * dim_head) + self.norm_v = nn.BatchNorm2d(self.dim_v) + + if r is not None: + # local lambda convolution for pos + self.conv_lambda = nn.Conv3d(1, dim_head, (r, r, 1), padding=(r // 2, r // 2, 0)) + self.pos_emb = None + self.rel_pos_indices = None + else: + # relative pos embedding + assert feat_size is not None + feat_size = to_2tuple(feat_size) + rel_size = [2 * s - 1 for s in feat_size] + self.conv_lambda = None + self.pos_emb = nn.Parameter(torch.zeros(rel_size[0], rel_size[1], self.dim_k)) + self.register_buffer('rel_pos_indices', rel_pos_indices(feat_size), persistent=False) + + self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + trunc_normal_(self.qkv.weight, std=self.dim ** -0.5) + if self.conv_lambda is not None: + trunc_normal_(self.conv_lambda.weight, std=self.dim_k ** -0.5) + if self.pos_emb is not None: + trunc_normal_(self.pos_emb, std=.02) + + def forward(self, x): + B, C, H, W = x.shape + M = H * W + qkv = self.qkv(x) + q, k, v = torch.split(qkv, [ + self.num_heads * self.dim_k, self.dim_k, self.dim_v], dim=1) + q = self.norm_q(q).reshape(B, self.num_heads, self.dim_k, M).transpose(-1, -2) # B, num_heads, M, K + v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2) # B, M, V + k = F.softmax(k.reshape(B, self.dim_k, M), dim=-1) # B, K, M + + content_lam = k @ v # B, K, V + content_out = q @ content_lam.unsqueeze(1) # B, num_heads, M, V + + if self.pos_emb is None: + position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v)) # B, H, W, V, K + position_lam = position_lam.reshape(B, 1, self.dim_k, H * W, self.dim_v).transpose(2, 3) # B, 1, M, K, V + else: + # FIXME relative pos embedding path not fully verified + pos_emb = self.pos_emb[self.rel_pos_indices[0], self.rel_pos_indices[1]].expand(B, -1, -1, -1) + position_lam = (pos_emb.transpose(-1, -2) @ v.unsqueeze(1)).unsqueeze(1) # B, 1, M, K, V + position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) # B, num_heads, M, V + + out = (content_out + position_out).transpose(-1, -2).reshape(B, C, H, W) # B, C (num_heads * V), H, W + out = self.pool(out) + return out diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/linear.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/linear.py new file mode 100644 index 0000000000..38fe3380b0 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/linear.py @@ -0,0 +1,19 @@ +""" Linear layer (alternate definition) +""" +import torch +import torch.nn.functional as F +from torch import nn as nn + + +class Linear(nn.Linear): + r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` + + Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting + weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. 
+ """ + def forward(self, input: torch.Tensor) -> torch.Tensor: + if torch.jit.is_scripting(): + bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None + return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) + else: + return F.linear(input, self.weight, self.bias) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/median_pool.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/median_pool.py new file mode 100644 index 0000000000..40bd71a7a3 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/median_pool.py @@ -0,0 +1,49 @@ +""" Median Pool +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch.nn as nn +import torch.nn.functional as F +from .helpers import to_2tuple, to_4tuple + + +class MedianPool2d(nn.Module): + """ Median pool (usable as median filter when stride=1) module. + + Args: + kernel_size: size of pooling kernel, int or 2-tuple + stride: pool stride, int or 2-tuple + padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad + same: override padding and enforce same padding, boolean + """ + def __init__(self, kernel_size=3, stride=1, padding=0, same=False): + super(MedianPool2d, self).__init__() + self.k = to_2tuple(kernel_size) + self.stride = to_2tuple(stride) + self.padding = to_4tuple(padding) # convert to l, r, t, b + self.same = same + + def _padding(self, x): + if self.same: + ih, iw = x.size()[2:] + if ih % self.stride[0] == 0: + ph = max(self.k[0] - self.stride[0], 0) + else: + ph = max(self.k[0] - (ih % self.stride[0]), 0) + if iw % self.stride[1] == 0: + pw = max(self.k[1] - self.stride[1], 0) + else: + pw = max(self.k[1] - (iw % self.stride[1]), 0) + pl = pw // 2 + pr = pw - pl + pt = ph // 2 + pb = ph - pt + padding = (pl, pr, pt, pb) + else: + padding = self.padding + return padding + + def forward(self, x): + x = F.pad(x, self._padding(x), mode='reflect') + x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) + x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] + return x diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/mixed_conv2d.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/mixed_conv2d.py new file mode 100644 index 0000000000..fa0ce565c0 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/mixed_conv2d.py @@ -0,0 +1,51 @@ +""" PyTorch Mixed Convolution + +Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn + +from .conv2d_same import create_conv2d_pad + + +def _split_channels(num_chan, num_groups): + split = [num_chan // num_groups for _ in range(num_groups)] + split[0] += num_chan - sum(split) + return split + + +class MixedConv2d(nn.ModuleDict): + """ Mixed Grouped Convolution + + Based on MDConv and GroupedConv in MixNet impl: + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py + """ + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, depthwise=False, **kwargs): + super(MixedConv2d, self).__init__() + + kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] + num_groups = len(kernel_size) + in_splits = _split_channels(in_channels, num_groups) + out_splits = _split_channels(out_channels, num_groups) + self.in_channels = sum(in_splits) + self.out_channels = 
sum(out_splits) + for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): + conv_groups = in_ch if depthwise else 1 + # use add_module to keep key space clean + self.add_module( + str(idx), + create_conv2d_pad( + in_ch, out_ch, k, stride=stride, + padding=padding, dilation=dilation, groups=conv_groups, **kwargs) + ) + self.splits = in_splits + + def forward(self, x): + x_split = torch.split(x, self.splits, 1) + x_out = [c(x_split[i]) for i, c in enumerate(self.values())] + x = torch.cat(x_out, 1) + return x diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/mlp.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/mlp.py new file mode 100644 index 0000000000..05d076527c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/mlp.py @@ -0,0 +1,108 @@ +""" MLP module w/ dropout and configurable activation layer + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class GluMlp(nn.Module): + """ MLP w/ GLU style gating + See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + assert hidden_features % 2 == 0 + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features // 2, out_features) + self.drop = nn.Dropout(drop) + + def init_weights(self): + # override init of fc1 w/ gate portion set to weight near zero, bias=1 + fc1_mid = self.fc1.bias.shape[0] // 2 + nn.init.ones_(self.fc1.bias[fc1_mid:]) + nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6) + + def forward(self, x): + x = self.fc1(x) + x, gates = x.chunk(2, dim=-1) + x = x * self.act(gates) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class GatedMlp(nn.Module): + """ MLP as used in gMLP + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, + gate_layer=None, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + if gate_layer is not None: + assert hidden_features % 2 == 0 + self.gate = gate_layer(hidden_features) + hidden_features = hidden_features // 2 # FIXME base reduction on gate property? 
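+            # gate layers that chunk the features along the last dim (e.g. gMLP's spatial gating unit)
+            # return half as many features, so fc2 is sized for hidden_features // 2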
+ else: + self.gate = nn.Identity() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.gate(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class ConvMlp(nn.Module): + """ MLP using 1x1 convs that keeps spatial dims + """ + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True) + self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity() + self.act = act_layer() + self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.norm(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + return x diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/non_local_attn.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/non_local_attn.py new file mode 100644 index 0000000000..a537d60e6e --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/non_local_attn.py @@ -0,0 +1,143 @@ +""" Bilinear-Attention-Transform and Non-Local Attention + +Paper: `Non-Local Neural Networks With Grouped Bilinear Attentional Transforms` + - https://openaccess.thecvf.com/content_CVPR_2020/html/Chi_Non-Local_Neural_Networks_With_Grouped_Bilinear_Attentional_Transforms_CVPR_2020_paper.html +Adapted from original code: https://github.com/BA-Transform/BAT-Image-Classification +""" +import torch +from torch import nn +from torch.nn import functional as F + +from .conv_bn_act import ConvBnAct +from .helpers import make_divisible + + +class NonLocalAttn(nn.Module): + """Spatial NL block for image classification. + + This was adapted from https://github.com/BA-Transform/BAT-Image-Classification + Their NonLocal impl inspired by https://github.com/facebookresearch/video-nonlocal-net. 
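+
+    NOTE: attention is formed between all H * W spatial positions, so compute and memory for the
+    attention matrix grow quadratically with the spatial size of the input feature map.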
+ """ + + def __init__(self, in_channels, use_scale=True, rd_ratio=1/8, rd_channels=None, rd_divisor=8, **kwargs): + super(NonLocalAttn, self).__init__() + if rd_channels is None: + rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) + self.scale = in_channels ** -0.5 if use_scale else 1.0 + self.t = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.p = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.g = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.z = nn.Conv2d(rd_channels, in_channels, kernel_size=1, stride=1, bias=True) + self.norm = nn.BatchNorm2d(in_channels) + self.reset_parameters() + + def forward(self, x): + shortcut = x + + t = self.t(x) + p = self.p(x) + g = self.g(x) + + B, C, H, W = t.size() + t = t.view(B, C, -1).permute(0, 2, 1) + p = p.view(B, C, -1) + g = g.view(B, C, -1).permute(0, 2, 1) + + att = torch.bmm(t, p) * self.scale + att = F.softmax(att, dim=2) + x = torch.bmm(att, g) + + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.z(x) + x = self.norm(x) + shortcut + + return x + + def reset_parameters(self): + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + if len(list(m.parameters())) > 1: + nn.init.constant_(m.bias, 0.0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 0) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.GroupNorm): + nn.init.constant_(m.weight, 0) + nn.init.constant_(m.bias, 0) + + +class BilinearAttnTransform(nn.Module): + + def __init__(self, in_channels, block_size, groups, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + super(BilinearAttnTransform, self).__init__() + + self.conv1 = ConvBnAct(in_channels, groups, 1, act_layer=act_layer, norm_layer=norm_layer) + self.conv_p = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(block_size, 1)) + self.conv_q = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(1, block_size)) + self.conv2 = ConvBnAct(in_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.block_size = block_size + self.groups = groups + self.in_channels = in_channels + + def resize_mat(self, x, t: int): + B, C, block_size, block_size1 = x.shape + assert block_size == block_size1 + if t <= 1: + return x + x = x.view(B * C, -1, 1, 1) + x = x * torch.eye(t, t, dtype=x.dtype, device=x.device) + x = x.view(B * C, block_size, block_size, t, t) + x = torch.cat(torch.split(x, 1, dim=1), dim=3) + x = torch.cat(torch.split(x, 1, dim=2), dim=4) + x = x.view(B, C, block_size * t, block_size * t) + return x + + def forward(self, x): + assert x.shape[-1] % self.block_size == 0 and x.shape[-2] % self.block_size == 0 + B, C, H, W = x.shape + out = self.conv1(x) + rp = F.adaptive_max_pool2d(out, (self.block_size, 1)) + cp = F.adaptive_max_pool2d(out, (1, self.block_size)) + p = self.conv_p(rp).view(B, self.groups, self.block_size, self.block_size).sigmoid() + q = self.conv_q(cp).view(B, self.groups, self.block_size, self.block_size).sigmoid() + p = p / p.sum(dim=3, keepdim=True) + q = q / q.sum(dim=2, keepdim=True) + p = p.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( + 0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous() + p = p.view(B, C, self.block_size, self.block_size) + q = q.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( + 0), self.groups, C // self.groups, self.block_size, 
self.block_size).contiguous() + q = q.view(B, C, self.block_size, self.block_size) + p = self.resize_mat(p, H // self.block_size) + q = self.resize_mat(q, W // self.block_size) + y = p.matmul(x) + y = y.matmul(q) + + y = self.conv2(y) + return y + + +class BatNonLocalAttn(nn.Module): + """ BAT + Adapted from: https://github.com/BA-Transform/BAT-Image-Classification + """ + + def __init__( + self, in_channels, block_size=7, groups=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, + drop_rate=0.2, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, **_): + super().__init__() + if rd_channels is None: + rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) + self.conv1 = ConvBnAct(in_channels, rd_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.ba = BilinearAttnTransform(rd_channels, block_size, groups, act_layer=act_layer, norm_layer=norm_layer) + self.conv2 = ConvBnAct(rd_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.dropout = nn.Dropout2d(p=drop_rate) + + def forward(self, x): + xl = self.conv1(x) + y = self.ba(xl) + y = self.conv2(y) + y = self.dropout(y) + return y + x diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/norm.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/norm.py new file mode 100644 index 0000000000..aace107b08 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/norm.py @@ -0,0 +1,24 @@ +""" Normalization layers and wrappers +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class GroupNorm(nn.GroupNorm): + def __init__(self, num_channels, num_groups, eps=1e-5, affine=True): + # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN + super().__init__(num_groups, num_channels, eps=eps, affine=affine) + + def forward(self, x): + return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + + +class LayerNorm2d(nn.LayerNorm): + """ LayerNorm for channels of '2D' spatial BCHW tensors """ + def __init__(self, num_channels): + super().__init__(num_channels) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return F.layer_norm( + x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/norm_act.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/norm_act.py new file mode 100644 index 0000000000..02cabe8886 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/norm_act.py @@ -0,0 +1,85 @@ +""" Normalization + Activation Layers +""" +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .create_act import get_act_layer + + +class BatchNormAct2d(nn.BatchNorm2d): + """BatchNorm + Activation + + This module performs BatchNorm + Activation in a manner that will remain backwards + compatible with weights trained with separate bn, act. This is why we inherit from BN + instead of composing it as a .bn member. 
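+
+    The activation (act_layer, default nn.ReLU) is applied inside forward() after the normalization;
+    with apply_act=False an nn.Identity is used instead, giving plain BatchNorm2d behaviour.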
+ """ + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None): + super(BatchNormAct2d, self).__init__( + num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats) + if isinstance(act_layer, str): + act_layer = get_act_layer(act_layer) + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def _forward_jit(self, x): + """ A cut & paste of the contents of the PyTorch BatchNorm2d forward function + """ + # exponential_average_factor is self.momentum set to + # (when it is available) only so that if gets updated + # in ONNX graph when this node is exported to ONNX. + if self.momentum is None: + exponential_average_factor = 0.0 + else: + exponential_average_factor = self.momentum + + if self.training and self.track_running_stats: + # TODO: if statement only here to tell the jit to skip emitting this when it is None + if self.num_batches_tracked is not None: + self.num_batches_tracked += 1 + if self.momentum is None: # use cumulative moving average + exponential_average_factor = 1.0 / float(self.num_batches_tracked) + else: # use exponential moving average + exponential_average_factor = self.momentum + + x = F.batch_norm( + x, self.running_mean, self.running_var, self.weight, self.bias, + self.training or not self.track_running_stats, + exponential_average_factor, self.eps) + return x + + @torch.jit.ignore + def _forward_python(self, x): + return super(BatchNormAct2d, self).forward(x) + + def forward(self, x): + # FIXME cannot call parent forward() and maintain jit.script compatibility? + if torch.jit.is_scripting(): + x = self._forward_jit(x) + else: + x = self._forward_python(x) + x = self.act(x) + return x + + +class GroupNormAct(nn.GroupNorm): + # NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args + def __init__(self, num_channels, num_groups, eps=1e-5, affine=True, + apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None): + super(GroupNormAct, self).__init__(num_groups, num_channels, eps=eps, affine=affine) + if isinstance(act_layer, str): + act_layer = get_act_layer(act_layer) + if act_layer is not None and apply_act: + act_args = dict(inplace=True) if inplace else {} + self.act = act_layer(**act_args) + else: + self.act = nn.Identity() + + def forward(self, x): + x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + x = self.act(x) + return x diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/padding.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/padding.py new file mode 100644 index 0000000000..34afc37c6c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/padding.py @@ -0,0 +1,56 @@ +""" Padding Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +from typing import List, Tuple + +import torch.nn.functional as F + + +# Calculate symmetric padding for a convolution +def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int: + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution +def get_same_padding(x: int, k: int, s: int, d: int): + return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0) + + +# Can SAME padding for given args 
be done statically? +def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): + return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 + + +# Dynamically pad input x with 'SAME' padding for conv with specified args +def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0): + ih, iw = x.size()[-2:] + pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1]) + if pad_h > 0 or pad_w > 0: + x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value) + return x + + +def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: + dynamic = False + if isinstance(padding, str): + # for any string padding, the padding will be calculated for you, one of three ways + padding = padding.lower() + if padding == 'same': + # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact + if is_static_pad(kernel_size, **kwargs): + # static case, no extra overhead + padding = get_padding(kernel_size, **kwargs) + else: + # dynamic 'SAME' padding, has runtime/GPU memory overhead + padding = 0 + dynamic = True + elif padding == 'valid': + # 'VALID' padding, same as padding=0 + padding = 0 + else: + # Default to PyTorch style 'same'-ish symmetric padding + padding = get_padding(kernel_size, **kwargs) + return padding, dynamic diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/patch_embed.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/patch_embed.py new file mode 100644 index 0000000000..42997fb89f --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/patch_embed.py @@ -0,0 +1,39 @@ +""" Image to Patch Embedding using Conv2d + +A convolution based approach to patchifying a 2D image w/ embedding projection. + +Based on the impl in https://github.com/google-research/vision_transformer + +Hacked together by / Copyright 2020 Ross Wightman +""" + +from torch import nn as nn + +from .helpers import to_2tuple + + +class PatchEmbed(nn.Module): + """ 2D Image to Patch Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.flatten = flatten + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
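+        # proj is a patch_size-strided conv, so e.g. img_size=224, patch_size=16 yields a 14x14 grid of
+        # 196 patch tokens, each projected to embed_dim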
+ x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + x = self.norm(x) + return x diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/pool2d_same.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/pool2d_same.py new file mode 100644 index 0000000000..4c2a1c4471 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/pool2d_same.py @@ -0,0 +1,73 @@ +""" AvgPool2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import List, Tuple, Optional + +from .helpers import to_2tuple +from .padding import pad_same, get_padding_value + + +def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), + ceil_mode: bool = False, count_include_pad: bool = True): + # FIXME how to deal with count_include_pad vs not for external padding? + x = pad_same(x, kernel_size, stride) + return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad) + + +class AvgPool2dSame(nn.AvgPool2d): + """ Tensorflow like 'SAME' wrapper for 2D average pooling + """ + def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True): + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad) + + def forward(self, x): + x = pad_same(x, self.kernel_size, self.stride) + return F.avg_pool2d( + x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad) + + +def max_pool2d_same( + x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), + dilation: List[int] = (1, 1), ceil_mode: bool = False): + x = pad_same(x, kernel_size, stride, value=-float('inf')) + return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode) + + +class MaxPool2dSame(nn.MaxPool2d): + """ Tensorflow like 'SAME' wrapper for 2D max pooling + """ + def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False): + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode) + + def forward(self, x): + x = pad_same(x, self.kernel_size, self.stride, value=-float('inf')) + return F.max_pool2d(x, self.kernel_size, self.stride, (0, 0), self.dilation, self.ceil_mode) + + +def create_pool2d(pool_type, kernel_size, stride=None, **kwargs): + stride = stride or kernel_size + padding = kwargs.pop('padding', '') + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs) + if is_dynamic: + if pool_type == 'avg': + return AvgPool2dSame(kernel_size, stride=stride, **kwargs) + elif pool_type == 'max': + return MaxPool2dSame(kernel_size, stride=stride, **kwargs) + else: + assert False, f'Unsupported pool type {pool_type}' + else: + if pool_type == 'avg': + return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs) + elif pool_type == 'max': + return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs) + else: + assert False, f'Unsupported pool type {pool_type}' diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/selective_kernel.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/selective_kernel.py new file mode 100644 index 0000000000..f28b8d2e9a --- /dev/null +++ 
b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/selective_kernel.py @@ -0,0 +1,119 @@ +""" Selective Kernel Convolution/Attention + +Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import nn as nn + +from .conv_bn_act import ConvBnAct +from .helpers import make_divisible + + +def _kernel_valid(k): + if isinstance(k, (list, tuple)): + for ki in k: + return _kernel_valid(ki) + assert k >= 3 and k % 2 + + +class SelectiveKernelAttn(nn.Module): + def __init__(self, channels, num_paths=2, attn_channels=32, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + """ Selective Kernel Attention Module + + Selective Kernel attention mechanism factored out into its own module. + + """ + super(SelectiveKernelAttn, self).__init__() + self.num_paths = num_paths + self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False) + self.bn = norm_layer(attn_channels) + self.act = act_layer(inplace=True) + self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False) + + def forward(self, x): + assert x.shape[1] == self.num_paths + x = x.sum(1).mean((2, 3), keepdim=True) + x = self.fc_reduce(x) + x = self.bn(x) + x = self.act(x) + x = self.fc_select(x) + B, C, H, W = x.shape + x = x.view(B, self.num_paths, C // self.num_paths, H, W) + x = torch.softmax(x, dim=1) + return x + + +class SelectiveKernel(nn.Module): + + def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1, + rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True, + drop_block=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None): + """ Selective Kernel Convolution Module + + As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications. + + Largest change is the input split, which divides the input channels across each convolution path, this can + be viewed as a grouping of sorts, but the output channel counts expand to the module level value. This keeps + the parameter count from ballooning when the convolutions themselves don't have groups, but still provides + a noteworthy increase in performance over similar param count models without this attention layer. -Ross W + + Args: + in_channels (int): module input (feature) channel count + out_channels (int): module output (feature) channel count + kernel_size (int, list): kernel size for each convolution branch + stride (int): stride for convolutions + dilation (int): dilation for module as a whole, impacts dilation of each branch + groups (int): number of groups for each branch + rd_ratio (int, float): reduction factor for attention features + keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations + split_input (bool): split input channels evenly across each convolution branch, keeps param count lower, + can be viewed as grouping by path, output expands to module out_channels count + drop_block (nn.Module): drop block module + act_layer (nn.Module): activation layer to use + norm_layer (nn.Module): batchnorm/norm layer to use + """ + super(SelectiveKernel, self).__init__() + out_channels = out_channels or in_channels + kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 
5x5 -> 3x3 + dilation + _kernel_valid(kernel_size) + if not isinstance(kernel_size, list): + kernel_size = [kernel_size] * 2 + if keep_3x3: + dilation = [dilation * (k - 1) // 2 for k in kernel_size] + kernel_size = [3] * len(kernel_size) + else: + dilation = [dilation] * len(kernel_size) + self.num_paths = len(kernel_size) + self.in_channels = in_channels + self.out_channels = out_channels + self.split_input = split_input + if self.split_input: + assert in_channels % self.num_paths == 0 + in_channels = in_channels // self.num_paths + groups = min(out_channels, groups) + + conv_kwargs = dict( + stride=stride, groups=groups, drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, + aa_layer=aa_layer) + self.paths = nn.ModuleList([ + ConvBnAct(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs) + for k, d in zip(kernel_size, dilation)]) + + attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor) + self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels) + self.drop_block = drop_block + + def forward(self, x): + if self.split_input: + x_split = torch.split(x, self.in_channels // self.num_paths, 1) + x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)] + else: + x_paths = [op(x) for op in self.paths] + x = torch.stack(x_paths, dim=1) + x_attn = self.attn(x) + x = x * x_attn + x = torch.sum(x, dim=1) + return x diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/separable_conv.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/separable_conv.py new file mode 100644 index 0000000000..1ddcb4e624 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/separable_conv.py @@ -0,0 +1,73 @@ +""" Depthwise Separable Conv Modules + +Basic DWS convs. Other variations of DWS exist with batch norm or activations between the +DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception. 
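+
+As a rough illustration (not from the original text): a k x k depthwise conv over C channels
+followed by a 1x1 pointwise conv back to C channels uses about C*k*k + C*C weights, versus
+C*C*k*k for a standard conv, so with k=3 and large C the separable form needs roughly 1/9
+of the parameters.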
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .create_conv2d import create_conv2d +from .create_norm_act import convert_norm_act + + +class SeparableConvBnAct(nn.Module): + """ Separable Conv w/ trailing Norm and Activation + """ + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, + channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, + apply_act=True, drop_block=None): + super(SeparableConvBnAct, self).__init__() + + self.conv_dw = create_conv2d( + in_channels, int(in_channels * channel_multiplier), kernel_size, + stride=stride, dilation=dilation, padding=padding, depthwise=True) + + self.conv_pw = create_conv2d( + int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) + + norm_act_layer = convert_norm_act(norm_layer, act_layer) + self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block) + + @property + def in_channels(self): + return self.conv_dw.in_channels + + @property + def out_channels(self): + return self.conv_pw.out_channels + + def forward(self, x): + x = self.conv_dw(x) + x = self.conv_pw(x) + if self.bn is not None: + x = self.bn(x) + return x + + +class SeparableConv2d(nn.Module): + """ Separable Conv + """ + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, + channel_multiplier=1.0, pw_kernel_size=1): + super(SeparableConv2d, self).__init__() + + self.conv_dw = create_conv2d( + in_channels, int(in_channels * channel_multiplier), kernel_size, + stride=stride, dilation=dilation, padding=padding, depthwise=True) + + self.conv_pw = create_conv2d( + int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) + + @property + def in_channels(self): + return self.conv_dw.in_channels + + @property + def out_channels(self): + return self.conv_pw.out_channels + + def forward(self, x): + x = self.conv_dw(x) + x = self.conv_pw(x) + return x diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/space_to_depth.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/space_to_depth.py new file mode 100644 index 0000000000..a7e8e0b2a4 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/space_to_depth.py @@ -0,0 +1,53 @@ +import torch +import torch.nn as nn + + +class SpaceToDepth(nn.Module): + def __init__(self, block_size=4): + super().__init__() + assert block_size == 4 + self.bs = block_size + + def forward(self, x): + N, C, H, W = x.size() + x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs) + return x + + +@torch.jit.script +class SpaceToDepthJit(object): + def __call__(self, x: torch.Tensor): + # assuming hard-coded that block_size==4 for acceleration + N, C, H, W = x.size() + x = x.view(N, C, H // 4, 4, W // 4, 4) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * 16, H // 4, W // 4) # (N, C*bs^2, H//bs, W//bs) + return x + + +class SpaceToDepthModule(nn.Module): + def __init__(self, no_jit=False): + super().__init__() + if not no_jit: + self.op = SpaceToDepthJit() + else: + self.op = SpaceToDepth() + + def forward(self, x): + return 
self.op(x) + + +class DepthToSpace(nn.Module): + + def __init__(self, block_size): + super().__init__() + self.bs = block_size + + def forward(self, x): + N, C, H, W = x.size() + x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs) + x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs) + return x diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/split_attn.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/split_attn.py new file mode 100644 index 0000000000..dde601befa --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/split_attn.py @@ -0,0 +1,85 @@ +""" Split Attention Conv2d (for ResNeSt Models) + +Paper: `ResNeSt: Split-Attention Networks` - /https://arxiv.org/abs/2004.08955 + +Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt + +Modified for torchscript compat, performance, and consistency with timm by Ross Wightman +""" +import torch +import torch.nn.functional as F +from torch import nn + +from .helpers import make_divisible + + +class RadixSoftmax(nn.Module): + def __init__(self, radix, cardinality): + super(RadixSoftmax, self).__init__() + self.radix = radix + self.cardinality = cardinality + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttn(nn.Module): + """Split-Attention (aka Splat) + """ + def __init__(self, in_channels, out_channels=None, kernel_size=3, stride=1, padding=None, + dilation=1, groups=1, bias=False, radix=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, + act_layer=nn.ReLU, norm_layer=None, drop_block=None, **kwargs): + super(SplitAttn, self).__init__() + out_channels = out_channels or in_channels + self.radix = radix + self.drop_block = drop_block + mid_chs = out_channels * radix + if rd_channels is None: + attn_chs = make_divisible(in_channels * radix * rd_ratio, min_value=32, divisor=rd_divisor) + else: + attn_chs = rd_channels * radix + + padding = kernel_size // 2 if padding is None else padding + self.conv = nn.Conv2d( + in_channels, mid_chs, kernel_size, stride, padding, dilation, + groups=groups * radix, bias=bias, **kwargs) + self.bn0 = norm_layer(mid_chs) if norm_layer else nn.Identity() + self.act0 = act_layer(inplace=True) + self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups) + self.bn1 = norm_layer(attn_chs) if norm_layer else nn.Identity() + self.act1 = act_layer(inplace=True) + self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups) + self.rsoftmax = RadixSoftmax(radix, groups) + + def forward(self, x): + x = self.conv(x) + x = self.bn0(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act0(x) + + B, RC, H, W = x.shape + if self.radix > 1: + x = x.reshape((B, self.radix, RC // self.radix, H, W)) + x_gap = x.sum(dim=1) + else: + x_gap = x + x_gap = x_gap.mean((2, 3), keepdim=True) + x_gap = self.fc1(x_gap) + x_gap = self.bn1(x_gap) + x_gap = self.act1(x_gap) + x_attn = self.fc2(x_gap) + + x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1) + if self.radix > 1: + out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1) + else: + out = x * x_attn + return out.contiguous() diff --git 
a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/split_batchnorm.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/split_batchnorm.py new file mode 100644 index 0000000000..830781b335 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/split_batchnorm.py @@ -0,0 +1,75 @@ +""" Split BatchNorm + +A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through +a separate BN layer. The first split is passed through the parent BN layers with weight/bias +keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn' +namespace. + +This allows easily removing the auxiliary BN layers after training to efficiently +achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2, +'Disentangled Learning via An Auxiliary BN' + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn + + +class SplitBatchNorm2d(torch.nn.BatchNorm2d): + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, + track_running_stats=True, num_splits=2): + super().__init__(num_features, eps, momentum, affine, track_running_stats) + assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)' + self.num_splits = num_splits + self.aux_bn = nn.ModuleList([ + nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)]) + + def forward(self, input: torch.Tensor): + if self.training: # aux BN only relevant while training + split_size = input.shape[0] // self.num_splits + assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits" + split_input = input.split(split_size) + x = [super().forward(split_input[0])] + for i, a in enumerate(self.aux_bn): + x.append(a(split_input[i + 1])) + return torch.cat(x, dim=0) + else: + return super().forward(input) + + +def convert_splitbn_model(module, num_splits=2): + """ + Recursively traverse module and its children to replace all instances of + ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`. 
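+    The converted layer takes over the original BN's running stats and affine parameters for the
+    primary split and clones them into each auxiliary BN, so all splits start from identical
+    statistics at the moment of conversion.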
+ Args: + module (torch.nn.Module): input module + num_splits: number of separate batchnorm layers to split input across + Example:: + >>> # model is an instance of torch.nn.Module + >>> model = timm.models.convert_splitbn_model(model, num_splits=2) + """ + mod = module + if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm): + return module + if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): + mod = SplitBatchNorm2d( + module.num_features, module.eps, module.momentum, module.affine, + module.track_running_stats, num_splits=num_splits) + mod.running_mean = module.running_mean + mod.running_var = module.running_var + mod.num_batches_tracked = module.num_batches_tracked + if module.affine: + mod.weight.data = module.weight.data.clone().detach() + mod.bias.data = module.bias.data.clone().detach() + for aux in mod.aux_bn: + aux.running_mean = module.running_mean.clone() + aux.running_var = module.running_var.clone() + aux.num_batches_tracked = module.num_batches_tracked.clone() + if module.affine: + aux.weight.data = module.weight.data.clone().detach() + aux.bias.data = module.bias.data.clone().detach() + for name, child in module.named_children(): + mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits)) + del module + return mod diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/squeeze_excite.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/squeeze_excite.py new file mode 100644 index 0000000000..e5da29ef16 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/squeeze_excite.py @@ -0,0 +1,74 @@ +""" Squeeze-and-Excitation Channel Attention + +An SE implementation originally based on PyTorch SE-Net impl. +Has since evolved with additional functionality / configuration. + +Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 + +Also included is Effective Squeeze-Excitation (ESE). +Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class SEModule(nn.Module): + """ SE Module as defined in original SE-Nets with a few additions + Additions include: + * divisor can be specified to keep channels % div == 0 (default: 8) + * reduction channels can be specified directly by arg (if rd_channels is set) + * reduction channels can be specified by float rd_ratio (default: 1/16) + * global max pooling can be added to the squeeze aggregation + * customizable activation, normalization, and gate layer + """ + def __init__( + self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, add_maxpool=False, + act_layer=nn.ReLU, norm_layer=None, gate_layer='sigmoid'): + super(SEModule, self).__init__() + self.add_maxpool = add_maxpool + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
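+        # Excitation path sketch: globally pooled features are squeezed to rd_channels by a 1x1
+        # conv, passed through the activation, expanded back to `channels`, and turned into
+        # per-channel gates by the gate layer in forward().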
+ self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=True) + self.bn = norm_layer(rd_channels) if norm_layer else nn.Identity() + self.act = create_act_layer(act_layer, inplace=True) + self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=True) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) + x_se = self.fc1(x_se) + x_se = self.act(self.bn(x_se)) + x_se = self.fc2(x_se) + return x * self.gate(x_se) + + +SqueezeExcite = SEModule # alias + + +class EffectiveSEModule(nn.Module): + """ 'Effective Squeeze-Excitation + From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + """ + def __init__(self, channels, add_maxpool=False, gate_layer='hard_sigmoid', **_): + super(EffectiveSEModule, self).__init__() + self.add_maxpool = add_maxpool + self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) + x_se = self.fc(x_se) + return x * self.gate(x_se) + + +EffectiveSqueezeExcite = EffectiveSEModule # alias diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/std_conv.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/std_conv.py new file mode 100644 index 0000000000..d896ba5c2f --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/std_conv.py @@ -0,0 +1,133 @@ +""" Convolution with Weight Standardization (StdConv and ScaledStdConv) + +StdConv: +@article{weightstandardization, + author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille}, + title = {Weight Standardization}, + journal = {arXiv preprint arXiv:1903.10520}, + year = {2019}, +} +Code: https://github.com/joe-siyuan-qiao/WeightStandardization + +ScaledStdConv: +Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 +Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets + +Hacked together by / copyright Ross Wightman, 2021. +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .padding import get_padding, get_padding_value, pad_same + + +class StdConv2d(nn.Conv2d): + """Conv2d with Weight Standardization. Used for BiT ResNet-V2 models. + + Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - + https://arxiv.org/abs/1903.10520v2 + """ + def __init__( + self, in_channel, out_channels, kernel_size, stride=1, padding=None, + dilation=1, groups=1, bias=False, eps=1e-6): + if padding is None: + padding = get_padding(kernel_size, stride, dilation) + super().__init__( + in_channel, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + self.eps = eps + + def forward(self, x): + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return x + + +class StdConv2dSame(nn.Conv2d): + """Conv2d with Weight Standardization. TF compatible SAME padding. 
Used for ViT Hybrid model. + + Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - + https://arxiv.org/abs/1903.10520v2 + """ + def __init__( + self, in_channel, out_channels, kernel_size, stride=1, padding='SAME', + dilation=1, groups=1, bias=False, eps=1e-6): + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) + super().__init__( + in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.same_pad = is_dynamic + self.eps = eps + + def forward(self, x): + if self.same_pad: + x = pad_same(x, self.kernel_size, self.stride, self.dilation) + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return x + + +class ScaledStdConv2d(nn.Conv2d): + """Conv2d layer with Scaled Weight Standardization. + + Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - + https://arxiv.org/abs/2101.08692 + + NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. + """ + + def __init__( + self, in_channels, out_channels, kernel_size, stride=1, padding=None, + dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): + if padding is None: + padding = get_padding(kernel_size, stride, dilation) + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) + self.scale = gamma * self.weight[0].numel() ** -0.5 # gamma * 1 / sqrt(fan-in) + self.eps = eps + + def forward(self, x): + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + weight=(self.gain * self.scale).view(-1), + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +class ScaledStdConv2dSame(nn.Conv2d): + """Conv2d layer with Scaled Weight Standardization and Tensorflow-like SAME padding support + + Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - + https://arxiv.org/abs/2101.08692 + + NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. 
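+
+    Sketch of the weight transform applied in forward():
+        w_hat = gain * gamma * (w - mean(w)) / (sqrt(fan_in) * sqrt(var(w) + eps))
+    with statistics taken per output channel; the F.batch_norm call over the reshaped weight is
+    simply an efficient way of computing this.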
+ """ + + def __init__( + self, in_channels, out_channels, kernel_size, stride=1, padding='SAME', + dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) + self.scale = gamma * self.weight[0].numel() ** -0.5 + self.same_pad = is_dynamic + self.eps = eps + + def forward(self, x): + if self.same_pad: + x = pad_same(x, self.kernel_size, self.stride, self.dilation) + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + weight=(self.gain * self.scale).view(-1), + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/test_time_pool.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/test_time_pool.py new file mode 100644 index 0000000000..98c0bf53a7 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/test_time_pool.py @@ -0,0 +1,52 @@ +""" Test Time Pooling (Average-Max Pool) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import logging +from torch import nn +import torch.nn.functional as F + +from .adaptive_avgmax_pool import adaptive_avgmax_pool2d + + +_logger = logging.getLogger(__name__) + + +class TestTimePoolHead(nn.Module): + def __init__(self, base, original_pool=7): + super(TestTimePoolHead, self).__init__() + self.base = base + self.original_pool = original_pool + base_fc = self.base.get_classifier() + if isinstance(base_fc, nn.Conv2d): + self.fc = base_fc + else: + self.fc = nn.Conv2d( + self.base.num_features, self.base.num_classes, kernel_size=1, bias=True) + self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size())) + self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size())) + self.base.reset_classifier(0) # delete original fc layer + + def forward(self, x): + x = self.base.forward_features(x) + x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1) + x = self.fc(x) + x = adaptive_avgmax_pool2d(x, 1) + return x.view(x.size(0), -1) + + +def apply_test_time_pool(model, config, use_test_size=True): + test_time_pool = False + if not hasattr(model, 'default_cfg') or not model.default_cfg: + return model, False + if use_test_size and 'test_input_size' in model.default_cfg: + df_input_size = model.default_cfg['test_input_size'] + else: + df_input_size = model.default_cfg['input_size'] + if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]: + _logger.info('Target input size %s > pretrained default %s, using test time pooling' % + (str(config['input_size'][-2:]), str(df_input_size[-2:]))) + model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size']) + test_time_pool = True + return model, test_time_pool diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/weight_init.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/weight_init.py new file mode 100644 index 0000000000..305a2fd067 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/layers/weight_init.py @@ -0,0 +1,89 @@ +import torch +import math +import warnings + +from 
torch.nn.init import _calculate_fan_in_and_fan_out + + +def _no_grad_trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1. + math.erf(x / math.sqrt(2.))) / 2. + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): + # type: (Tensor, float, float, float, float) -> Tensor + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. 
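+    Rather than clamping ordinary normal samples, the helper draws uniform values and maps them
+    through the inverse CDF of the truncated distribution, so the result genuinely follows the
+    truncated normal density (a final clamp only guards against numerical edge cases).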
+ Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + a: the minimum cutoff value + b: the maximum cutoff value + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.trunc_normal_(w) + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) + + +def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'): + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) + if mode == 'fan_in': + denom = fan_in + elif mode == 'fan_out': + denom = fan_out + elif mode == 'fan_avg': + denom = (fan_in + fan_out) / 2 + + variance = scale / denom + + if distribution == "truncated_normal": + # constant is stddev of standard normal truncated to (-2, 2) + trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978) + elif distribution == "normal": + tensor.normal_(std=math.sqrt(variance)) + elif distribution == "uniform": + bound = math.sqrt(3 * variance) + tensor.uniform_(-bound, bound) + else: + raise ValueError(f"invalid distribution {distribution}") + + +def lecun_normal_(tensor): + variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal') diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/levit.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/levit.py new file mode 100644 index 0000000000..9987e4ba98 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/levit.py @@ -0,0 +1,563 @@ +""" LeViT + +Paper: `LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference` + - https://arxiv.org/abs/2104.01136 + +@article{graham2021levit, + title={LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference}, + author={Benjamin Graham and Alaaeldin El-Nouby and Hugo Touvron and Pierre Stock and Armand Joulin and Herv\'e J\'egou and Matthijs Douze}, + journal={arXiv preprint arXiv:22104.01136}, + year={2021} +} + +Adapted from official impl at https://github.com/facebookresearch/LeViT, original copyright bellow. + +This version combines both conv/linear models and fixes torchscript compatibility. + +Modifications by/coyright Copyright 2021 Ross Wightman +""" + +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. 
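+
+# Usage sketch (illustrative; assumes the registrations below are importable as `timm`):
+#   import torch, timm
+#   model = timm.create_model('levit_256', pretrained=False).eval()
+#   logits = model(torch.randn(1, 3, 224, 224))  # distillation head is averaged with the main head in eval mode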
+ +# Modified from +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +# Copyright 2020 Ross Wightman, Apache-2.0 License +import itertools +from copy import deepcopy +from functools import partial +from typing import Dict + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import to_ntuple, get_act_layer +from .vision_transformer import trunc_normal_ +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.0.c', 'classifier': ('head.l', 'head_dist.l'), + **kwargs + } + + +default_cfgs = dict( + levit_128s=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-128S-96703c44.pth' + ), + levit_128=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-128-b88c2750.pth' + ), + levit_192=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-192-92712e41.pth' + ), + levit_256=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-256-13b5763e.pth' + ), + levit_384=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-384-9bdaf2e2.pth' + ), +) + +model_cfgs = dict( + levit_128s=dict( + embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 6, 8), depth=(2, 3, 4)), + levit_128=dict( + embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 8, 12), depth=(4, 4, 4)), + levit_192=dict( + embed_dim=(192, 288, 384), key_dim=32, num_heads=(3, 5, 6), depth=(4, 4, 4)), + levit_256=dict( + embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 4, 4)), + levit_384=dict( + embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4)), +) + +__all__ = ['Levit'] + + +@register_model +def levit_128s(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_128s', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_128(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_128', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_192(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_192', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_256(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_256', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_384(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_384', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +class ConvNorm(nn.Sequential): + def __init__( + self, a, b, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1, resolution=-10000): + super().__init__() + self.add_module('c', nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False)) + bn = nn.BatchNorm2d(b) + nn.init.constant_(bn.weight, bn_weight_init) + nn.init.constant_(bn.bias, 0) + self.add_module('bn', bn) + + @torch.no_grad() + def fuse(self): + c, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.Conv2d( + w.size(1), w.size(0), w.shape[2:], stride=self.c.stride, + padding=self.c.padding, 
dilation=self.c.dilation, groups=self.c.groups) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +class LinearNorm(nn.Sequential): + def __init__(self, a, b, bn_weight_init=1, resolution=-100000): + super().__init__() + self.add_module('c', nn.Linear(a, b, bias=False)) + bn = nn.BatchNorm1d(b) + nn.init.constant_(bn.weight, bn_weight_init) + nn.init.constant_(bn.bias, 0) + self.add_module('bn', bn) + + @torch.no_grad() + def fuse(self): + l, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = l.weight * w[:, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.Linear(w.size(1), w.size(0)) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + def forward(self, x): + x = self.c(x) + return self.bn(x.flatten(0, 1)).reshape_as(x) + + +class NormLinear(nn.Sequential): + def __init__(self, a, b, bias=True, std=0.02): + super().__init__() + self.add_module('bn', nn.BatchNorm1d(a)) + l = nn.Linear(a, b, bias=bias) + trunc_normal_(l.weight, std=std) + if bias: + nn.init.constant_(l.bias, 0) + self.add_module('l', l) + + @torch.no_grad() + def fuse(self): + bn, l = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = l.weight * w[None, :] + if l.bias is None: + b = b @ self.l.weight.T + else: + b = (l.weight @ b[:, None]).view(-1) + self.l.bias + m = nn.Linear(w.size(1), w.size(0)) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +def stem_b16(in_chs, out_chs, activation, resolution=224): + return nn.Sequential( + ConvNorm(in_chs, out_chs // 8, 3, 2, 1, resolution=resolution), + activation(), + ConvNorm(out_chs // 8, out_chs // 4, 3, 2, 1, resolution=resolution // 2), + activation(), + ConvNorm(out_chs // 4, out_chs // 2, 3, 2, 1, resolution=resolution // 4), + activation(), + ConvNorm(out_chs // 2, out_chs, 3, 2, 1, resolution=resolution // 8)) + + +class Residual(nn.Module): + def __init__(self, m, drop): + super().__init__() + self.m = m + self.drop = drop + + def forward(self, x): + if self.training and self.drop > 0: + return x + self.m(x) * torch.rand( + x.size(0), 1, 1, device=x.device).ge_(self.drop).div(1 - self.drop).detach() + else: + return x + self.m(x) + + +class Subsample(nn.Module): + def __init__(self, stride, resolution): + super().__init__() + self.stride = stride + self.resolution = resolution + + def forward(self, x): + B, N, C = x.shape + x = x.view(B, self.resolution, self.resolution, C)[:, ::self.stride, ::self.stride] + return x.reshape(B, -1, C) + + +class Attention(nn.Module): + ab: Dict[str, torch.Tensor] + + def __init__( + self, dim, key_dim, num_heads=8, attn_ratio=4, act_layer=None, resolution=14, use_conv=False): + super().__init__() + + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + self.attn_ratio = attn_ratio + self.use_conv = use_conv + ln_layer = ConvNorm if self.use_conv else LinearNorm + h = self.dh + nh_kd * 2 + self.qkv = ln_layer(dim, h, resolution=resolution) + self.proj = nn.Sequential( + act_layer(), + ln_layer(self.dh, dim, bn_weight_init=0, resolution=resolution)) + + points = list(itertools.product(range(resolution), range(resolution))) + N = len(points) + attention_offsets = {} + idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), 
abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N)) + self.ab = {} + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.ab: + self.ab = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.ab: + self.ab[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.ab[device_key] + + def forward(self, x): # x (B,C,H,W) + if self.use_conv: + B, C, H, W = x.shape + q, k, v = self.qkv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.key_dim, self.d], dim=2) + + attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W) + else: + B, N, C = x.shape + qkv = self.qkv(x) + q, k, v = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.d], dim=3) + q = q.permute(0, 2, 1, 3) + k = k.permute(0, 2, 1, 3) + v = v.permute(0, 2, 1, 3) + + attn = q @ k.transpose(-2, -1) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh) + x = self.proj(x) + return x + + +class AttentionSubsample(nn.Module): + ab: Dict[str, torch.Tensor] + + def __init__( + self, in_dim, out_dim, key_dim, num_heads=8, attn_ratio=2, + act_layer=None, stride=2, resolution=14, resolution_=7, use_conv=False): + super().__init__() + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = self.d * self.num_heads + self.attn_ratio = attn_ratio + self.resolution_ = resolution_ + self.resolution_2 = resolution_ ** 2 + self.use_conv = use_conv + if self.use_conv: + ln_layer = ConvNorm + sub_layer = partial(nn.AvgPool2d, kernel_size=1, padding=0) + else: + ln_layer = LinearNorm + sub_layer = partial(Subsample, resolution=resolution) + + h = self.dh + nh_kd + self.kv = ln_layer(in_dim, h, resolution=resolution) + self.q = nn.Sequential( + sub_layer(stride=stride), + ln_layer(in_dim, nh_kd, resolution=resolution_)) + self.proj = nn.Sequential( + act_layer(), + ln_layer(self.dh, out_dim, resolution=resolution_)) + + self.stride = stride + self.resolution = resolution + points = list(itertools.product(range(resolution), range(resolution))) + points_ = list(itertools.product(range(resolution_), range(resolution_))) + N = len(points) + N_ = len(points_) + attention_offsets = {} + idxs = [] + for p1 in points_: + for p2 in points: + size = 1 + offset = ( + abs(p1[0] * stride - p2[0] + (size - 1) / 2), + abs(p1[1] * stride - p2[1] + (size - 1) / 2)) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N_, N)) + self.ab = {} # per-device attention_biases cache + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.ab: + self.ab = {} # clear ab 
cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.ab: + self.ab[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.ab[device_key] + + def forward(self, x): + if self.use_conv: + B, C, H, W = x.shape + k, v = self.kv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.d], dim=2) + q = self.q(x).view(B, self.num_heads, self.key_dim, self.resolution_2) + + attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (v @ attn.transpose(-2, -1)).reshape(B, -1, self.resolution_, self.resolution_) + else: + B, N, C = x.shape + k, v = self.kv(x).view(B, N, self.num_heads, -1).split([self.key_dim, self.d], dim=3) + k = k.permute(0, 2, 1, 3) # BHNC + v = v.permute(0, 2, 1, 3) # BHNC + q = self.q(x).view(B, self.resolution_2, self.num_heads, self.key_dim).permute(0, 2, 1, 3) + + attn = q @ k.transpose(-2, -1) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, -1, self.dh) + x = self.proj(x) + return x + + +class Levit(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + + NOTE: distillation is defaulted to True since pretrained weights use it, will cause problems + w/ train scripts that don't take tuple outputs, + """ + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + embed_dim=(192,), + key_dim=64, + depth=(12,), + num_heads=(3,), + attn_ratio=2, + mlp_ratio=2, + hybrid_backbone=None, + down_ops=None, + act_layer='hard_swish', + attn_act_layer='hard_swish', + distillation=True, + use_conv=False, + drop_rate=0., + drop_path_rate=0.): + super().__init__() + act_layer = get_act_layer(act_layer) + attn_act_layer = get_act_layer(attn_act_layer) + if isinstance(img_size, tuple): + # FIXME origin impl passes single img/res dim through whole hierarchy, + # not sure this model will be used enough to spend time fixing it. 
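+            # only square inputs are handled here; a single int resolution is threaded through
+            # the attention-bias and downsample bookkeeping below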
+ assert img_size[0] == img_size[1] + img_size = img_size[0] + self.num_classes = num_classes + self.num_features = embed_dim[-1] + self.embed_dim = embed_dim + N = len(embed_dim) + assert len(depth) == len(num_heads) == N + key_dim = to_ntuple(N)(key_dim) + attn_ratio = to_ntuple(N)(attn_ratio) + mlp_ratio = to_ntuple(N)(mlp_ratio) + down_ops = down_ops or ( + # ('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride) + ('Subsample', key_dim[0], embed_dim[0] // key_dim[0], 4, 2, 2), + ('Subsample', key_dim[0], embed_dim[1] // key_dim[1], 4, 2, 2), + ('',) + ) + self.distillation = distillation + self.use_conv = use_conv + ln_layer = ConvNorm if self.use_conv else LinearNorm + + self.patch_embed = hybrid_backbone or stem_b16(in_chans, embed_dim[0], activation=act_layer) + + self.blocks = [] + resolution = img_size // patch_size + for i, (ed, kd, dpth, nh, ar, mr, do) in enumerate( + zip(embed_dim, key_dim, depth, num_heads, attn_ratio, mlp_ratio, down_ops)): + for _ in range(dpth): + self.blocks.append( + Residual( + Attention( + ed, kd, nh, attn_ratio=ar, act_layer=attn_act_layer, + resolution=resolution, use_conv=use_conv), + drop_path_rate)) + if mr > 0: + h = int(ed * mr) + self.blocks.append( + Residual(nn.Sequential( + ln_layer(ed, h, resolution=resolution), + act_layer(), + ln_layer(h, ed, bn_weight_init=0, resolution=resolution), + ), drop_path_rate)) + if do[0] == 'Subsample': + # ('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride) + resolution_ = (resolution - 1) // do[5] + 1 + self.blocks.append( + AttentionSubsample( + *embed_dim[i:i + 2], key_dim=do[1], num_heads=do[2], + attn_ratio=do[3], act_layer=attn_act_layer, stride=do[5], + resolution=resolution, resolution_=resolution_, use_conv=use_conv)) + resolution = resolution_ + if do[4] > 0: # mlp_ratio + h = int(embed_dim[i + 1] * do[4]) + self.blocks.append( + Residual(nn.Sequential( + ln_layer(embed_dim[i + 1], h, resolution=resolution), + act_layer(), + ln_layer(h, embed_dim[i + 1], bn_weight_init=0, resolution=resolution), + ), drop_path_rate)) + self.blocks = nn.Sequential(*self.blocks) + + # Classifier head + self.head = NormLinear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = None + if distillation: + self.head_dist = NormLinear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def no_weight_decay(self): + return {x for x in self.state_dict().keys() if 'attention_biases' in x} + + def get_classifier(self): + if self.head_dist is None: + return self.head + else: + return self.head, self.head_dist + + def reset_classifier(self, num_classes, global_pool='', distillation=None): + self.num_classes = num_classes + self.head = NormLinear(self.embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + if distillation is not None: + self.distillation = distillation + if self.distillation: + self.head_dist = NormLinear(self.embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + else: + self.head_dist = None + + def forward_features(self, x): + x = self.patch_embed(x) + if not self.use_conv: + x = x.flatten(2).transpose(1, 2) + x = self.blocks(x) + x = x.mean((-2, -1)) if self.use_conv else x.mean(1) + return x + + def forward(self, x): + x = self.forward_features(x) + if self.head_dist is not None: + x, x_dist = self.head(x), self.head_dist(x) + if self.training and not torch.jit.is_scripting(): + return x, x_dist + else: + # during inference, return the average of both classifier predictions + return (x + 
x_dist) / 2 + else: + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + if 'model' in state_dict: + # For deit models + state_dict = state_dict['model'] + D = model.state_dict() + for k in state_dict.keys(): + if k in D and D[k].ndim == 4 and state_dict[k].ndim == 2: + state_dict[k] = state_dict[k][:, :, None, None] + return state_dict + + +def create_levit(variant, pretrained=False, default_cfg=None, fuse=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model_cfg = dict(**model_cfgs[variant], **kwargs) + model = build_model_with_cfg( + Levit, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **model_cfg) + #if fuse: + # utils.replace_batchnorm(model) + return model + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/mlp_mixer.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/mlp_mixer.py new file mode 100644 index 0000000000..f128b9c916 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/mlp_mixer.py @@ -0,0 +1,625 @@ +""" MLP-Mixer, ResMLP, and gMLP in PyTorch + +This impl originally based on MLP-Mixer paper. + +Official JAX impl: https://github.com/google-research/vision_transformer/blob/linen/vit_jax/models_mixer.py + +Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + +@article{tolstikhin2021, + title={MLP-Mixer: An all-MLP Architecture for Vision}, + author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner, + Thomas and Yung, Jessica and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey}, + journal={arXiv preprint arXiv:2105.01601}, + year={2021} +} + +Also supporting ResMlp, and a preliminary (not verified) implementations of gMLP + +Code: https://github.com/facebookresearch/deit +Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 +@misc{touvron2021resmlp, + title={ResMLP: Feedforward networks for image classification with data-efficient training}, + author={Hugo Touvron and Piotr Bojanowski and Mathilde Caron and Matthieu Cord and Alaaeldin El-Nouby and + Edouard Grave and Armand Joulin and Gabriel Synnaeve and Jakob Verbeek and Hervé Jégou}, + year={2021}, + eprint={2105.03404}, +} + +Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 +@misc{liu2021pay, + title={Pay Attention to MLPs}, + author={Hanxiao Liu and Zihang Dai and David R. So and Quoc V. Le}, + year={2021}, + eprint={2105.08050}, +} + +A thank you to paper authors for releasing code and weights. 
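+
+A minimal usage sketch (illustrative; assumes these models are reachable through the timm registry):
+
+    import torch, timm
+    model = timm.create_model('mixer_b16_224', pretrained=False)
+    logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 1000)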
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +from copy import deepcopy +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg, named_apply +from .layers import PatchEmbed, Mlp, GluMlp, GatedMlp, DropPath, lecun_normal_, to_2tuple +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': 0.875, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'stem.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = dict( + mixer_s32_224=_cfg(), + mixer_s16_224=_cfg(), + mixer_b32_224=_cfg(), + mixer_b16_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth', + ), + mixer_b16_224_in21k=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth', + num_classes=21843 + ), + mixer_l32_224=_cfg(), + mixer_l16_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224-92f9adc4.pth', + ), + mixer_l16_224_in21k=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224_in21k-846aa33c.pth', + num_classes=21843 + ), + + # Mixer ImageNet-21K-P pretraining + mixer_b16_224_miil_in21k=_cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mixer_b16_224_miil_in21k.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', num_classes=11221, + ), + mixer_b16_224_miil=_cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mixer_b16_224_miil.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', + ), + + gmixer_12_224=_cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + gmixer_24_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmixer_24_224_raa-7daf7ae6.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_12_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_no_dist.pth', + #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resmlp_24_224_raa-a8256759.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_36_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_36_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_big_24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_12_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_24_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_36_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_36_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_big_24_distilled_224=_cfg( 
+ url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_big_24_224_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_22k.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + gmlp_ti16_224=_cfg(), + gmlp_s16_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmlp_s16_224_raa-10536d42.pth', + ), + gmlp_b16_224=_cfg(), +) + + +class MixerBlock(nn.Module): + """ Residual Block w/ token mixing and channel MLPs + Based on: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + def __init__( + self, dim, seq_len, mlp_ratio=(0.5, 4.0), mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0.): + super().__init__() + tokens_dim, channels_dim = [int(x * dim) for x in to_2tuple(mlp_ratio)] + self.norm1 = norm_layer(dim) + self.mlp_tokens = mlp_layer(seq_len, tokens_dim, act_layer=act_layer, drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.mlp_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) + x = x + self.drop_path(self.mlp_channels(self.norm2(x))) + return x + + +class Affine(nn.Module): + def __init__(self, dim): + super().__init__() + self.alpha = nn.Parameter(torch.ones((1, 1, dim))) + self.beta = nn.Parameter(torch.zeros((1, 1, dim))) + + def forward(self, x): + return torch.addcmul(self.beta, self.alpha, x) + + +class ResBlock(nn.Module): + """ Residual MLP block w/ LayerScale and Affine 'norm' + + Based on: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + def __init__( + self, dim, seq_len, mlp_ratio=4, mlp_layer=Mlp, norm_layer=Affine, + act_layer=nn.GELU, init_values=1e-4, drop=0., drop_path=0.): + super().__init__() + channel_dim = int(dim * mlp_ratio) + self.norm1 = norm_layer(dim) + self.linear_tokens = nn.Linear(seq_len, seq_len) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, drop=drop) + self.ls1 = nn.Parameter(init_values * torch.ones(dim)) + self.ls2 = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + x = x + self.drop_path(self.ls1 * self.linear_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) + x = x + self.drop_path(self.ls2 * self.mlp_channels(self.norm2(x))) + return x + + +class SpatialGatingUnit(nn.Module): + """ Spatial Gating Unit + + Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + def __init__(self, dim, seq_len, norm_layer=nn.LayerNorm): + super().__init__() + gate_dim = dim // 2 + self.norm = norm_layer(gate_dim) + self.proj = nn.Linear(seq_len, seq_len) + + def init_weights(self): + # special init for the projection gate, called as override by base model init + nn.init.normal_(self.proj.weight, std=1e-6) + nn.init.ones_(self.proj.bias) + + def forward(self, x): + u, v = x.chunk(2, dim=-1) + v = self.norm(v) + v = self.proj(v.transpose(-1, -2)) + return u * v.transpose(-1, -2) + + +class SpatialGatingBlock(nn.Module): + """ Residual Block w/ Spatial Gating + + Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + def __init__( + self, dim, seq_len, mlp_ratio=4, mlp_layer=GatedMlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0.): + super().__init__() + channel_dim = int(dim * mlp_ratio) + self.norm = norm_layer(dim) + sgu = partial(SpatialGatingUnit, seq_len=seq_len) + self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, gate_layer=sgu, drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + x = x + self.drop_path(self.mlp_channels(self.norm(x))) + return x + + +class MlpMixer(nn.Module): + + def __init__( + self, + num_classes=1000, + img_size=224, + in_chans=3, + patch_size=16, + num_blocks=8, + embed_dim=512, + mlp_ratio=(0.5, 4.0), + block_layer=MixerBlock, + mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + drop_rate=0., + drop_path_rate=0., + nlhb=False, + stem_norm=False, + ): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.stem = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dim, norm_layer=norm_layer if stem_norm else None) + # FIXME drop_path (stochastic depth scaling rule or all the same?) + self.blocks = nn.Sequential(*[ + block_layer( + embed_dim, self.stem.num_patches, mlp_ratio, mlp_layer=mlp_layer, norm_layer=norm_layer, + act_layer=act_layer, drop=drop_rate, drop_path=drop_path_rate) + for _ in range(num_blocks)]) + self.norm = norm_layer(embed_dim) + self.head = nn.Linear(embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + self.init_weights(nlhb=nlhb) + + def init_weights(self, nlhb=False): + head_bias = -math.log(self.num_classes) if nlhb else 0. 
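+        # nlhb = "negative log head bias": starting the classifier bias at -log(num_classes)
+        # makes the initial softmax output roughly uniform, so the initial loss is ~log(num_classes).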
+ named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.stem(x) + x = self.blocks(x) + x = self.norm(x) + x = x.mean(dim=1) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): + """ Mixer weight initialization (trying to match Flax defaults) + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + else: + if flax: + # Flax defaults + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + else: + # like MLP init in vit (my original init) + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + # NOTE if a parent module contains init_weights method, it can override the init of the + # child modules as this will be called in depth-first order. + module.init_weights() + + +def checkpoint_filter_fn(state_dict, model): + """ Remap checkpoints if needed """ + if 'patch_embed.proj.weight' in state_dict: + # Remap FB ResMlp models -> timm + out_dict = {} + for k, v in state_dict.items(): + k = k.replace('patch_embed.', 'stem.') + k = k.replace('attn.', 'linear_tokens.') + k = k.replace('mlp.', 'mlp_channels.') + k = k.replace('gamma_', 'ls') + if k.endswith('.alpha') or k.endswith('.beta'): + v = v.reshape(1, 1, -1) + out_dict[k] = v + return out_dict + return state_dict + + +def _create_mixer(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for MLP-Mixer models.') + + model = build_model_with_cfg( + MlpMixer, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def mixer_s32_224(pretrained=False, **kwargs): + """ Mixer-S/32 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=8, embed_dim=512, **kwargs) + model = _create_mixer('mixer_s32_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_s16_224(pretrained=False, **kwargs): + """ Mixer-S/16 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=8, embed_dim=512, **kwargs) + model = _create_mixer('mixer_s16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b32_224(pretrained=False, **kwargs): + """ Mixer-B/32 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b32_224', 
pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_b16_224(pretrained=False, **kwargs):
+    """ Mixer-B/16 224x224. ImageNet-1k pretrained weights.
+    Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
+    """
+    model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
+    model = _create_mixer('mixer_b16_224', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_b16_224_in21k(pretrained=False, **kwargs):
+    """ Mixer-B/16 224x224. ImageNet-21k pretrained weights.
+    Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
+    """
+    model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
+    model = _create_mixer('mixer_b16_224_in21k', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_l32_224(pretrained=False, **kwargs):
+    """ Mixer-L/32 224x224.
+    Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
+    """
+    model_args = dict(patch_size=32, num_blocks=24, embed_dim=1024, **kwargs)
+    model = _create_mixer('mixer_l32_224', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_l16_224(pretrained=False, **kwargs):
+    """ Mixer-L/16 224x224. ImageNet-1k pretrained weights.
+    Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
+    """
+    model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs)
+    model = _create_mixer('mixer_l16_224', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_l16_224_in21k(pretrained=False, **kwargs):
+    """ Mixer-L/16 224x224. ImageNet-21k pretrained weights.
+    Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
+    """
+    model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs)
+    model = _create_mixer('mixer_l16_224_in21k', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_b16_224_miil(pretrained=False, **kwargs):
+    """ Mixer-B/16 224x224. ImageNet-1k pretrained weights.
+    Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
+    """
+    model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
+    model = _create_mixer('mixer_b16_224_miil', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_b16_224_miil_in21k(pretrained=False, **kwargs):
+    """ Mixer-B/16 224x224. ImageNet-21k pretrained weights.
+ Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b16_224_miil_in21k', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmixer_12_224(pretrained=False, **kwargs): + """ Glu-Mixer-12 224x224 + Experiment by Ross Wightman, adding (Si)GLU to MLP-Mixer + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=(1.0, 4.0), + mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) + model = _create_mixer('gmixer_12_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmixer_24_224(pretrained=False, **kwargs): + """ Glu-Mixer-24 224x224 + Experiment by Ross Wightman, adding (Si)GLU to MLP-Mixer + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=(1.0, 4.0), + mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) + model = _create_mixer('gmixer_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_12_224(pretrained=False, **kwargs): + """ ResMLP-12 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_12_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_24_224(pretrained=False, **kwargs): + """ ResMLP-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_36_224(pretrained=False, **kwargs): + """ ResMLP-36 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_36_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_224(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_12_distilled_224(pretrained=False, **kwargs): + """ ResMLP-12 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_12_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_24_distilled_224(pretrained=False, **kwargs): + """ ResMLP-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, + 
block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_24_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_36_distilled_224(pretrained=False, **kwargs): + """ ResMLP-36 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_36_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_distilled_224(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_224_in22ft1k(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_224_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_ti16_224(pretrained=False, **kwargs): + """ gMLP-Tiny + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=128, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_ti16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_s16_224(pretrained=False, **kwargs): + """ gMLP-Small + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=256, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_s16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_b16_224(pretrained=False, **kwargs): + """ gMLP-Base + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=512, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_b16_224', pretrained=pretrained, **model_args) + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/mobilenetv3.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/mobilenetv3.py new file mode 100644 index 0000000000..f810eb8281 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/mobilenetv3.py @@ -0,0 +1,562 @@ + +""" MobileNet V3 + +A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl. 
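+
+All model variants below are registered with timm, so they are normally created via
+timm.create_model(), e.g. timm.create_model('mobilenetv3_large_100', pretrained=True).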
+ +Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244 + +Hacked together by / Copyright 2021 Ross Wightman +""" +from functools import partial +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .efficientnet_blocks import SqueezeExcite +from .efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights,\ + round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT +from .features import FeatureInfo, FeatureHooks +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import SelectAdaptivePool2d, Linear, create_conv2d, get_act_fn, hard_sigmoid +from .registry import register_model + +__all__ = ['MobileNetV3', 'MobileNetV3Features'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'mobilenetv3_large_075': _cfg(url=''), + 'mobilenetv3_large_100': _cfg( + interpolation='bicubic', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth'), + 'mobilenetv3_large_100_miil': _cfg( + interpolation='bilinear', mean=(0, 0, 0), std=(1, 1, 1), + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mobilenetv3_large_100_1k_miil_78_0.pth'), + 'mobilenetv3_large_100_miil_in21k': _cfg( + interpolation='bilinear', mean=(0, 0, 0), std=(1, 1, 1), + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mobilenetv3_large_100_in21k_miil.pth', num_classes=11221), + 'mobilenetv3_small_075': _cfg(url=''), + 'mobilenetv3_small_100': _cfg(url=''), + + 'mobilenetv3_rw': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth', + interpolation='bicubic'), + + 'tf_mobilenetv3_large_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_minimal_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_100': _cfg( + url= 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_minimal_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth', + 
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + + 'fbnetv3_b': _cfg(), + 'fbnetv3_d': _cfg(), + 'fbnetv3_g': _cfg(), +} + + +class MobileNetV3(nn.Module): + """ MobiletNet-V3 + + Based on my EfficientNet implementation and building blocks, this model utilizes the MobileNet-v3 specific + 'efficient head', where global pooling is done before the head convolution without a final batch-norm + layer before the classifier. + + Paper: https://arxiv.org/abs/1905.02244 + """ + + def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=16, num_features=1280, head_bias=True, + pad_type='', act_layer=None, norm_layer=None, se_layer=None, se_from_exp=True, + round_chs_fn=round_channels, drop_rate=0., drop_path_rate=0., global_pool='avg'): + super(MobileNetV3, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + + # Stem + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=32, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = builder.features + head_chs = builder.in_chs + + # Head + Pooling + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + num_pooled_chs = head_chs * self.global_pool.feat_mult() + self.conv_head = create_conv2d(num_pooled_chs, self.num_features, 1, padding=pad_type, bias=head_bias) + self.act2 = act_layer(inplace=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + efficientnet_init_weights(self) + + def as_sequential(self): + layers = [self.conv_stem, self.bn1, self.act1] + layers.extend(self.blocks) + layers.extend([self.global_pool, self.conv_head, self.act2]) + layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier]) + return nn.Sequential(*layers) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + # cannot meaningfully change pooling of efficient head after creation + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.flatten(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + + +class MobileNetV3Features(nn.Module): + """ MobileNetV3 Feature Extractor + + A work-in-progress feature extraction module for MobileNet-V3 to use as a backbone for segmentation + and object detection models. 
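+
+    Usually obtained indirectly, e.g. timm.create_model('mobilenetv3_large_100', features_only=True)
+    builds this class via _create_mnv3 below; forward() then returns a list of intermediate feature maps
+    taken at the stages selected by out_indices, rather than classification logits.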
+ """ + + def __init__(self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', in_chans=3, + stem_size=16, output_stride=32, pad_type='', round_chs_fn=round_channels, se_from_exp=True, + act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0.): + super(MobileNetV3Features, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.drop_rate = drop_rate + + # Stem + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, + drop_path_rate=drop_path_rate, feature_location=feature_location) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = FeatureInfo(builder.features, out_indices) + self._stage_out_idx = {v['stage']: i for i, v in enumerate(self.feature_info) if i in out_indices} + + efficientnet_init_weights(self) + + # Register feature extraction hooks with FeatureHooks helper + self.feature_hooks = None + if feature_location != 'bottleneck': + hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) + self.feature_hooks = FeatureHooks(hooks, self.named_modules()) + + def forward(self, x) -> List[torch.Tensor]: + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if self.feature_hooks is None: + features = [] + if 0 in self._stage_out_idx: + features.append(x) # add stem out + for i, b in enumerate(self.blocks): + x = b(x) + if i + 1 in self._stage_out_idx: + features.append(x) + return features + else: + self.blocks(x) + out = self.feature_hooks.get_output(x.device) + return list(out.values()) + + +def _create_mnv3(variant, pretrained=False, **kwargs): + features_only = False + model_cls = MobileNetV3 + kwargs_filter = None + if kwargs.pop('features_only', False): + features_only = True + kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'head_bias', 'global_pool') + model_cls = MobileNetV3Features + model = build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +def _gen_mobilenet_v3_rw(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MobileNet-V3 model. + + Ref impl: ? + Paper: https://arxiv.org/abs/1905.02244 + + Args: + channel_multiplier: multiplier to number of channels per layer. 
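+
+    Note: each arch_def entry below is a block spec string decoded by decode_arch_def; read it roughly as,
+    e.g., 'ir_r1_k3_s2_e4_c24_nre' -> inverted-residual block, 1 repeat, 3x3 kernel, stride 2, 4x expansion,
+    24 output channels, ReLU activation, and an 'se0.25' token adds squeeze-excitation at ratio 0.25.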
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_nre_noskip'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], # hard-swish + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + head_bias=False, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid'), + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_v3(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MobileNet-V3 model. + + Ref impl: ? + Paper: https://arxiv.org/abs/1905.02244 + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + if 'small' in variant: + num_features = 1024 + if 'minimal' in variant: + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s2_e1_c16'], + # stage 1, 56x56 in + ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'], + # stage 2, 28x28 in + ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'], + # stage 3, 14x14 in + ['ir_r2_k3_s1_e3_c48'], + # stage 4, 14x14in + ['ir_r3_k3_s2_e6_c96'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c576'], + ] + else: + act_layer = resolve_act_layer(kwargs, 'hard_swish') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s2_e1_c16_se0.25_nre'], # relu + # stage 1, 56x56 in + ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], # relu + # stage 2, 28x28 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], # hard-swish + # stage 3, 14x14 in + ['ir_r2_k5_s1_e3_c48_se0.25'], # hard-swish + # stage 4, 14x14in + ['ir_r3_k5_s2_e6_c96_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c576'], # hard-swish + ] + else: + num_features = 1280 + if 'minimal' in variant: + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'], + # stage 2, 56x56 in + ['ir_r3_k3_s2_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112'], + # stage 5, 14x14in + ['ir_r3_k3_s2_e6_c160'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], + ] + else: + act_layer = resolve_act_layer(kwargs, 'hard_swish') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_nre'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], # hard-swish + ] + se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + 
num_features=num_features, + stem_size=16, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=act_layer, + se_layer=se_layer, + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _gen_fbnetv3(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """ FBNetV3 + Paper: `FBNetV3: Joint Architecture-Recipe Search using Predictor Pretraining` + - https://arxiv.org/abs/2006.02049 + FIXME untested, this is a preliminary impl of some FBNet-V3 variants. + """ + vl = variant.split('_')[-1] + if vl in ('a', 'b'): + stem_size = 16 + arch_def = [ + ['ds_r2_k3_s1_e1_c16'], + ['ir_r1_k5_s2_e4_c24', 'ir_r3_k5_s1_e2_c24'], + ['ir_r1_k5_s2_e5_c40_se0.25', 'ir_r4_k5_s1_e3_c40_se0.25'], + ['ir_r1_k5_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'], + ['ir_r1_k3_s1_e5_c120_se0.25', 'ir_r5_k5_s1_e3_c120_se0.25'], + ['ir_r1_k3_s2_e6_c184_se0.25', 'ir_r5_k5_s1_e4_c184_se0.25', 'ir_r1_k5_s1_e6_c224_se0.25'], + ['cn_r1_k1_s1_c1344'], + ] + elif vl == 'd': + stem_size = 24 + arch_def = [ + ['ds_r2_k3_s1_e1_c16'], + ['ir_r1_k3_s2_e5_c24', 'ir_r5_k3_s1_e2_c24'], + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r4_k3_s1_e3_c40_se0.25'], + ['ir_r1_k3_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'], + ['ir_r1_k3_s1_e5_c128_se0.25', 'ir_r6_k5_s1_e3_c128_se0.25'], + ['ir_r1_k3_s2_e6_c208_se0.25', 'ir_r5_k5_s1_e5_c208_se0.25', 'ir_r1_k5_s1_e6_c240_se0.25'], + ['cn_r1_k1_s1_c1440'], + ] + elif vl == 'g': + stem_size = 32 + arch_def = [ + ['ds_r3_k3_s1_e1_c24'], + ['ir_r1_k5_s2_e4_c40', 'ir_r4_k5_s1_e2_c40'], + ['ir_r1_k5_s2_e4_c56_se0.25', 'ir_r4_k5_s1_e3_c56_se0.25'], + ['ir_r1_k5_s2_e5_c104', 'ir_r4_k3_s1_e3_c104'], + ['ir_r1_k3_s1_e5_c160_se0.25', 'ir_r8_k5_s1_e3_c160_se0.25'], + ['ir_r1_k3_s2_e6_c264_se0.25', 'ir_r6_k5_s1_e5_c264_se0.25', 'ir_r2_k5_s1_e6_c288_se0.25'], + ['cn_r1_k1_s1_c1728'], + ] + else: + raise NotImplemented + round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.95) + se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=round_chs_fn) + act_layer = resolve_act_layer(kwargs, 'hard_swish') + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=1984, + head_bias=False, + stem_size=stem_size, + round_chs_fn=round_chs_fn, + se_from_exp=False, + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=act_layer, + se_layer=se_layer, + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +@register_model +def mobilenetv3_large_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_large_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_large_100_miil(pretrained=False, **kwargs): + """ MobileNet V3 + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model = _gen_mobilenet_v3('mobilenetv3_large_100_miil', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_large_100_miil_in21k(pretrained=False, **kwargs): + """ MobileNet V3, 21k pretraining + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model = _gen_mobilenet_v3('mobilenetv3_large_100_miil_in21k', 1.0, pretrained=pretrained, **kwargs) + return 
model + + +@register_model +def mobilenetv3_small_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_small_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_rw(pretrained=False, **kwargs): + """ MobileNet V3 """ + if pretrained: + # pretrained model trained with non-default BN epsilon + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_minimal_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_minimal_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_b(pretrained=False, **kwargs): + """ FBNetV3-B """ + model = _gen_fbnetv3('fbnetv3_b', pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_d(pretrained=False, **kwargs): + """ FBNetV3-D """ + model = _gen_fbnetv3('fbnetv3_d', pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_g(pretrained=False, **kwargs): + """ FBNetV3-G """ + model = _gen_fbnetv3('fbnetv3_g', pretrained=pretrained, **kwargs) + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/nasnet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/nasnet.py new file mode 100644 index 0000000000..2afe82c3f3 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/nasnet.py @@ -0,0 +1,567 @@ +""" NasNet-A (Large) + nasnetalarge implementation grabbed from Cadene's pretrained models + https://github.com/Cadene/pretrained-models.pytorch +""" +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg 
+from .layers import ConvBnAct, create_conv2d, create_pool2d, create_classifier +from .registry import register_model + +__all__ = ['NASNetALarge'] + +default_cfgs = { + 'nasnetalarge': { + 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth', + 'input_size': (3, 331, 331), + 'pool_size': (11, 11), + 'crop_pct': 0.911, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv0.conv', + 'classifier': 'last_linear', + 'label_offset': 1, # 1001 classes in pretrained weights + }, +} + + +class ActConvBn(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): + super(ActConvBn, self).__init__() + self.act = nn.ReLU() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) + + def forward(self, x): + x = self.act(x) + x = self.conv(x) + x = self.bn(x) + return x + + +class SeparableConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): + super(SeparableConv2d, self).__init__() + self.depthwise_conv2d = create_conv2d( + in_channels, in_channels, kernel_size=kernel_size, + stride=stride, padding=padding, groups=in_channels) + self.pointwise_conv2d = create_conv2d( + in_channels, out_channels, kernel_size=1, padding=0) + + def forward(self, x): + x = self.depthwise_conv2d(x) + x = self.pointwise_conv2d(x) + return x + + +class BranchSeparables(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad_type='', stem_cell=False): + super(BranchSeparables, self).__init__() + middle_channels = out_channels if stem_cell else in_channels + self.act_1 = nn.ReLU() + self.separable_1 = SeparableConv2d( + in_channels, middle_channels, kernel_size, stride=stride, padding=pad_type) + self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001, momentum=0.1) + self.act_2 = nn.ReLU(inplace=True) + self.separable_2 = SeparableConv2d( + middle_channels, out_channels, kernel_size, stride=1, padding=pad_type) + self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) + + def forward(self, x): + x = self.act_1(x) + x = self.separable_1(x) + x = self.bn_sep_1(x) + x = self.act_2(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class CellStem0(nn.Module): + def __init__(self, stem_size, num_channels=42, pad_type=''): + super(CellStem0, self).__init__() + self.num_channels = num_channels + self.stem_size = stem_size + self.conv_1x1 = ActConvBn(self.stem_size, self.num_channels, 1, stride=1) + + self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(self.stem_size, self.num_channels, 5, 2, pad_type, stem_cell=True) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def 
forward(self, x): + x1 = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x1) + x_comb_iter_0_right = self.comb_iter_0_right(x) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x1) + x_comb_iter_1_right = self.comb_iter_1_right(x) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x1) + x_comb_iter_2_right = self.comb_iter_2_right(x) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x1) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class CellStem1(nn.Module): + + def __init__(self, stem_size, num_channels, pad_type=''): + super(CellStem1, self).__init__() + self.num_channels = num_channels + self.stem_size = stem_size + self.conv_1x1 = ActConvBn(2 * self.num_channels, self.num_channels, 1, stride=1) + + self.act = nn.ReLU() + self.path_1 = nn.Sequential() + self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_1.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) + + self.path_2 = nn.Sequential() + self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) + self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_2.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) + + self.final_path_bn = nn.BatchNorm2d(self.num_channels, eps=0.001, momentum=0.1) + + self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x_conv0, x_stem_0): + x_left = self.conv_1x1(x_stem_0) + + x_relu = self.act(x_conv0) + # path 1 + x_path1 = self.path_1(x_relu) + # path 2 + x_path2 = self.path_2(x_relu) + # final path + x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + + x_comb_iter_0_left = self.comb_iter_0_left(x_left) + x_comb_iter_0_right = self.comb_iter_0_right(x_right) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_right) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_left) + x_comb_iter_2_right = self.comb_iter_2_right(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + 
x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_left) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class FirstCell(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(FirstCell, self).__init__() + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1) + + self.act = nn.ReLU() + self.path_1 = nn.Sequential() + self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_1.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) + + self.path_2 = nn.Sequential() + self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) + self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_2.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) + + self.final_path_bn = nn.BatchNorm2d(out_chs_left * 2, eps=0.001, momentum=0.1) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + def forward(self, x, x_prev): + x_relu = self.act(x_prev) + x_path1 = self.path_1(x_relu) + x_path2 = self.path_2(x_relu) + x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_left + + x_comb_iter_3_left = self.comb_iter_3_left(x_left) + x_comb_iter_3_right = self.comb_iter_3_right(x_left) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_right + + x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class NormalCell(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(NormalCell, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) + + self.comb_iter_1_left = BranchSeparables(out_chs_left, out_chs_left, 5, 1, pad_type) + 
self.comb_iter_1_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_left + + x_comb_iter_3_left = self.comb_iter_3_left(x_left) + x_comb_iter_3_right = self.comb_iter_3_right(x_left) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_right + + x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class ReductionCell0(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(ReductionCell0, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_left) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + 
return x_out + + +class ReductionCell1(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(ReductionCell1, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_left) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class NASNetALarge(nn.Module): + """NASNetALarge (6 @ 4032) """ + + def __init__(self, num_classes=1000, in_chans=3, stem_size=96, channel_multiplier=2, + num_features=4032, output_stride=32, drop_rate=0., global_pool='avg', pad_type='same'): + super(NASNetALarge, self).__init__() + self.num_classes = num_classes + self.stem_size = stem_size + self.num_features = num_features + self.channel_multiplier = channel_multiplier + self.drop_rate = drop_rate + assert output_stride == 32 + + channels = self.num_features // 24 + # 24 is default value for the architecture + + self.conv0 = ConvBnAct( + in_channels=in_chans, out_channels=self.stem_size, kernel_size=3, padding=0, stride=2, + norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) + + self.cell_stem_0 = CellStem0( + self.stem_size, num_channels=channels // (channel_multiplier ** 2), pad_type=pad_type) + self.cell_stem_1 = CellStem1( + self.stem_size, num_channels=channels // channel_multiplier, pad_type=pad_type) + + self.cell_0 = FirstCell( + in_chs_left=channels, out_chs_left=channels // 2, + in_chs_right=2 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_1 = NormalCell( + in_chs_left=2 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_2 = NormalCell( + in_chs_left=6 * channels, 
out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_3 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_4 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_5 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + + self.reduction_cell_0 = ReductionCell0( + in_chs_left=6 * channels, out_chs_left=2 * channels, + in_chs_right=6 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_6 = FirstCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=8 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_7 = NormalCell( + in_chs_left=8 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_8 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_9 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_10 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_11 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + + self.reduction_cell_1 = ReductionCell1( + in_chs_left=12 * channels, out_chs_left=4 * channels, + in_chs_right=12 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_12 = FirstCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=16 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_13 = NormalCell( + in_chs_left=16 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_14 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_15 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_16 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_17 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.act = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=96, reduction=2, module='conv0'), + dict(num_chs=168, reduction=4, module='cell_stem_1.conv_1x1.act'), + dict(num_chs=1008, reduction=8, module='reduction_cell_0.conv_1x1.act'), + dict(num_chs=2016, reduction=16, module='reduction_cell_1.conv_1x1.act'), + dict(num_chs=4032, reduction=32, module='act'), + ] + + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + 
self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x_conv0 = self.conv0(x) + + x_stem_0 = self.cell_stem_0(x_conv0) + x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0) + + x_cell_0 = self.cell_0(x_stem_1, x_stem_0) + x_cell_1 = self.cell_1(x_cell_0, x_stem_1) + x_cell_2 = self.cell_2(x_cell_1, x_cell_0) + x_cell_3 = self.cell_3(x_cell_2, x_cell_1) + x_cell_4 = self.cell_4(x_cell_3, x_cell_2) + x_cell_5 = self.cell_5(x_cell_4, x_cell_3) + + x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4) + x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4) + x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0) + x_cell_8 = self.cell_8(x_cell_7, x_cell_6) + x_cell_9 = self.cell_9(x_cell_8, x_cell_7) + x_cell_10 = self.cell_10(x_cell_9, x_cell_8) + x_cell_11 = self.cell_11(x_cell_10, x_cell_9) + + x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10) + x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10) + x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1) + x_cell_14 = self.cell_14(x_cell_13, x_cell_12) + x_cell_15 = self.cell_15(x_cell_14, x_cell_13) + x_cell_16 = self.cell_16(x_cell_15, x_cell_14) + x_cell_17 = self.cell_17(x_cell_16, x_cell_15) + x = self.act(x_cell_17) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + +def _create_nasnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + NASNetALarge, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model + **kwargs) + + +@register_model +def nasnetalarge(pretrained=False, **kwargs): + """NASNet-A large model architecture. + """ + model_kwargs = dict(pad_type='same', **kwargs) + return _create_nasnet('nasnetalarge', pretrained, **model_kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/nest.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/nest.py new file mode 100644 index 0000000000..fe0645ccb5 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/nest.py @@ -0,0 +1,462 @@ +""" Nested Transformer (NesT) in PyTorch + +A PyTorch implement of Aggregating Nested Transformers as described in: + +'Aggregating Nested Transformers' + - https://arxiv.org/abs/2105.12723 + +The official Jax code is released and available at https://github.com/google-research/nested-transformer. 
The weights +have been converted with convert/convert_nest_flax.py + +Acknowledgments: +* The paper authors for sharing their research, code, and model weights +* Ross Wightman's existing code off which I based this + +Copyright 2021 Alexander Soare +""" + +import collections.abc +import logging +import math +from functools import partial + +import torch +import torch.nn.functional as F +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, named_apply +from .layers import PatchEmbed, Mlp, DropPath, create_classifier, trunc_normal_ +from .layers import create_conv2d, create_pool2d, to_ntuple +from .registry import register_model + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': [14, 14], + 'crop_pct': .875, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # (weights from official Google JAX impl) + 'nest_base': _cfg(), + 'nest_small': _cfg(), + 'nest_tiny': _cfg(), + 'jx_nest_base': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_base-8bc41011.pth'), + 'jx_nest_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_small-422eaded.pth'), + 'jx_nest_tiny': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_tiny-e3428fb9.pth'), +} + + +class Attention(nn.Module): + """ + This is much like `.vision_transformer.Attention` but uses *localised* self attention by accepting an input with + an extra "image block" dim + """ + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, 3*dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + """ + x is shape: B (batch_size), T (image blocks), N (seq length per image block), C (embed dim) + """ + B, T, N, C = x.shape + # result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head) + qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale # (B, H, T, N, N) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + # (B, H, T, N, C'), permute -> (B, T, N, C', H) + x = (attn @ v).permute(0, 2, 3, 4, 1).reshape(B, T, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x # (B, T, N, C) + + +class TransformerLayer(nn.Module): + """ + This is much like `.vision_transformer.Block` but: + - Called TransformerLayer here to allow for "block" as defined in the paper ("non-overlapping image blocks") + - Uses modified Attention layer that handles the "block" dimension + """ + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, 
proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + y = self.norm1(x) + x = x + self.drop_path(self.attn(y)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ConvPool(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer, pad_type=''): + super().__init__() + self.conv = create_conv2d(in_channels, out_channels, kernel_size=3, padding=pad_type, bias=True) + self.norm = norm_layer(out_channels) + self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=pad_type) + + def forward(self, x): + """ + x is expected to have shape (B, C, H, W) + """ + assert x.shape[-2] % 2 == 0, 'BlockAggregation requires even input spatial dims' + assert x.shape[-1] % 2 == 0, 'BlockAggregation requires even input spatial dims' + x = self.conv(x) + # Layer norm done over channel dim only + x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + x = self.pool(x) + return x # (B, C, H//2, W//2) + + +def blockify(x, block_size: int): + """image to blocks + Args: + x (Tensor): with shape (B, H, W, C) + block_size (int): edge length of a single square block in units of H, W + """ + B, H, W, C = x.shape + assert H % block_size == 0, '`block_size` must divide input height evenly' + assert W % block_size == 0, '`block_size` must divide input width evenly' + grid_height = H // block_size + grid_width = W // block_size + x = x.reshape(B, grid_height, block_size, grid_width, block_size, C) + x = x.transpose(2, 3).reshape(B, grid_height * grid_width, -1, C) + return x # (B, T, N, C) + + +def deblockify(x, block_size: int): + """blocks to image + Args: + x (Tensor): with shape (B, T, N, C) where T is number of blocks and N is sequence size per block + block_size (int): edge length of a single square block in units of desired H, W + """ + B, T, _, C = x.shape + grid_size = int(math.sqrt(T)) + height = width = grid_size * block_size + x = x.reshape(B, grid_size, grid_size, block_size, block_size, C) + x = x.transpose(2, 3).reshape(B, height, width, C) + return x # (B, H, W, C) + + +class NestLevel(nn.Module): + """ Single hierarchical level of a Nested Transformer + """ + def __init__( + self, num_blocks, block_size, seq_length, num_heads, depth, embed_dim, prev_embed_dim=None, + mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rates=[], + norm_layer=None, act_layer=None, pad_type=''): + super().__init__() + self.block_size = block_size + self.pos_embed = nn.Parameter(torch.zeros(1, num_blocks, seq_length, embed_dim)) + + if prev_embed_dim is not None: + self.pool = ConvPool(prev_embed_dim, embed_dim, norm_layer=norm_layer, pad_type=pad_type) + else: + self.pool = nn.Identity() + + # Transformer encoder + if len(drop_path_rates): + assert len(drop_path_rates) == depth, 'Must provide as many drop path rates as there are transformer layers' + self.transformer_encoder = nn.Sequential(*[ + TransformerLayer( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=drop_path_rates[i], + norm_layer=norm_layer, act_layer=act_layer) + for i in range(depth)]) + + def forward(self, x): + """ + expects x as (B, C, H, W) + """ + x = self.pool(x) + x = x.permute(0, 2, 3, 1) # (B, H', W', C), switch to channels last for transformer + x = blockify(x, self.block_size) 
# (B, T, N, C') + x = x + self.pos_embed + x = self.transformer_encoder(x) # (B, T, N, C') + x = deblockify(x, self.block_size) # (B, H', W', C') + # Channel-first for block aggregation, and generally to replicate convnet feature map at each stage + return x.permute(0, 3, 1, 2) # (B, C, H', W') + + +class Nest(nn.Module): + """ Nested Transformer (NesT) + + A PyTorch impl of : `Aggregating Nested Transformers` + - https://arxiv.org/abs/2105.12723 + """ + + def __init__(self, img_size=224, in_chans=3, patch_size=4, num_levels=3, embed_dims=(128, 256, 512), + num_heads=(4, 8, 16), depths=(2, 2, 20), num_classes=1000, mlp_ratio=4., qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.5, norm_layer=None, act_layer=None, + pad_type='', weight_init='', global_pool='avg'): + """ + Args: + img_size (int, tuple): input image size + in_chans (int): number of input channels + patch_size (int): patch size + num_levels (int): number of block hierarchies (T_d in the paper) + embed_dims (int, tuple): embedding dimensions of each level + num_heads (int, tuple): number of attention heads for each level + depths (int, tuple): number of transformer layers for each level + num_classes (int): number of classes for classification head + mlp_ratio (int): ratio of mlp hidden dim to embedding dim for MLP of transformer layers + qkv_bias (bool): enable bias for qkv if True + drop_rate (float): dropout rate for MLP of transformer layers, MSA final projection layer, and classifier + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + norm_layer: (nn.Module): normalization layer for transformer layers + act_layer: (nn.Module): activation layer in MLP of transformer layers + pad_type: str: Type of padding to use '' for PyTorch symmetric, 'same' for TF SAME + weight_init: (str): weight init scheme + global_pool: (str): type of pooling operation to apply to final feature map + + Notes: + - Default values follow NesT-B from the original Jax code. + - `embed_dims`, `num_heads`, `depths` should be ints or tuples with length `num_levels`. + - For those following the paper, Table A1 may have errors! + - https://github.com/google-research/nested-transformer/issues/2 + """ + super().__init__() + + for param_name in ['embed_dims', 'num_heads', 'depths']: + param_value = locals()[param_name] + if isinstance(param_value, collections.abc.Sequence): + assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`' + + embed_dims = to_ntuple(num_levels)(embed_dims) + num_heads = to_ntuple(num_levels)(num_heads) + depths = to_ntuple(num_levels)(depths) + self.num_classes = num_classes + self.num_features = embed_dims[-1] + self.feature_info = [] + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + self.drop_rate = drop_rate + self.num_levels = num_levels + if isinstance(img_size, collections.abc.Sequence): + assert img_size[0] == img_size[1], 'Model only handles square inputs' + img_size = img_size[0] + assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly' + self.patch_size = patch_size + + # Number of blocks at each level + self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist() + assert (img_size // patch_size) % math.sqrt(self.num_blocks[0]) == 0, \ + 'First level blocks don\'t fit evenly. Check `img_size`, `patch_size`, and `num_levels`' + + # Block edge size in units of patches + # Hint: (img_size // patch_size) gives number of patches along edge of image. 
sqrt(self.num_blocks[0]) is the + # number of blocks along edge of image + self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0])) + + # Patch embedding + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dims[0], flatten=False) + self.num_patches = self.patch_embed.num_patches + self.seq_length = self.num_patches // self.num_blocks[0] + + # Build up each hierarchical level + levels = [] + dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + prev_dim = None + curr_stride = 4 + for i in range(len(self.num_blocks)): + dim = embed_dims[i] + levels.append(NestLevel( + self.num_blocks[i], self.block_size, self.seq_length, num_heads[i], depths[i], dim, prev_dim, + mlp_ratio, qkv_bias, drop_rate, attn_drop_rate, dp_rates[i], norm_layer, act_layer, pad_type=pad_type)) + self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')] + prev_dim = dim + curr_stride *= 2 + self.levels = nn.Sequential(*levels) + + # Final normalization layer + self.norm = norm_layer(embed_dims[-1]) + + # Classifier + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + self.init_weights(weight_init) + + def init_weights(self, mode=''): + assert mode in ('nlhb', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. + for level in self.levels: + trunc_normal_(level.pos_embed, std=.02, a=-2, b=2) + named_apply(partial(_init_nest_weights, head_bias=head_bias), self) + + @torch.jit.ignore + def no_weight_decay(self): + return {f'level.{i}.pos_embed' for i in range(len(self.levels))} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.head = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + """ x shape (B, C, H, W) + """ + x = self.patch_embed(x) + x = self.levels(x) + # Layer norm done over channel dim only (to NHWC and back) + x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + return x + + def forward(self, x): + """ x shape (B, C, H, W) + """ + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.head(x) + + +def _init_nest_weights(module: nn.Module, name: str = '', head_bias: float = 0.): + """ NesT weight initialization + Can replicate Jax implementation. 
Otherwise follows vision_transformer.py + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + trunc_normal_(module.weight, std=.02, a=-2, b=2) + nn.init.constant_(module.bias, head_bias) + else: + trunc_normal_(module.weight, std=.02, a=-2, b=2) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + trunc_normal_(module.weight, std=.02, a=-2, b=2) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)): + nn.init.zeros_(module.bias) + nn.init.ones_(module.weight) + + +def resize_pos_embed(posemb, posemb_new): + """ + Rescale the grid of position embeddings when loading from state_dict + Expected shape of position embeddings is (1, T, N, C), and considers only square images + """ + _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) + seq_length_old = posemb.shape[2] + num_blocks_new, seq_length_new = posemb_new.shape[1:3] + size_new = int(math.sqrt(num_blocks_new*seq_length_new)) + # First change to (1, C, H, W) + posemb = deblockify(posemb, int(math.sqrt(seq_length_old))).permute(0, 3, 1, 2) + posemb = F.interpolate(posemb, size=[size_new, size_new], mode='bicubic', align_corners=False) + # Now change to new (1, T, N, C) + posemb = blockify(posemb.permute(0, 2, 3, 1), int(math.sqrt(seq_length_new))) + return posemb + + +def checkpoint_filter_fn(state_dict, model): + """ resize positional embeddings of pretrained weights """ + pos_embed_keys = [k for k in state_dict.keys() if k.startswith('pos_embed_')] + for k in pos_embed_keys: + if state_dict[k].shape != getattr(model, k).shape: + state_dict[k] = resize_pos_embed(state_dict[k], getattr(model, k)) + return state_dict + + +def _create_nest(variant, pretrained=False, default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + model = build_model_with_cfg( + Nest, variant, pretrained, + default_cfg=default_cfg, + feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True), + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + + return model + + +@register_model +def nest_base(pretrained=False, **kwargs): + """ Nest-B @ 224x224 + """ + model_kwargs = dict( + embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) + model = _create_nest('nest_base', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def nest_small(pretrained=False, **kwargs): + """ Nest-S @ 224x224 + """ + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) + model = _create_nest('nest_small', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def nest_tiny(pretrained=False, **kwargs): + """ Nest-T @ 224x224 + """ + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) + model = _create_nest('nest_tiny', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_base(pretrained=False, **kwargs): + """ Nest-B @ 224x224, Pretrained weights converted from official Jax impl. + """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) + model = _create_nest('jx_nest_base', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_small(pretrained=False, **kwargs): + """ Nest-S @ 224x224, Pretrained weights converted from official Jax impl. 
+ """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) + model = _create_nest('jx_nest_small', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_tiny(pretrained=False, **kwargs): + """ Nest-T @ 224x224, Pretrained weights converted from official Jax impl. + """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) + model = _create_nest('jx_nest_tiny', pretrained=pretrained, **model_kwargs) + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/nfnet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/nfnet.py new file mode 100644 index 0000000000..4e0f2b2111 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/nfnet.py @@ -0,0 +1,966 @@ +""" Normalization Free Nets. NFNet, NF-RegNet, NF-ResNet (pre-activation) Models + +Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + +Paper: `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + +Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets + +Status: +* These models are a work in progress, experiments ongoing. +* Pretrained weights for two models so far, more to come. +* Model details updated to closer match official JAX code now that it's released +* NF-ResNet, NF-RegNet-B, and NFNet-F models supported + +Hacked together by / copyright Ross Wightman, 2021. +""" +import math +from dataclasses import dataclass, field +from collections import OrderedDict +from typing import Tuple, Optional +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .registry import register_model +from .layers import ClassifierHead, DropPath, AvgPool2dSame, ScaledStdConv2d, ScaledStdConv2dSame,\ + get_act_layer, get_act_fn, get_attn, make_divisible + + +def _dcfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + dm_nfnet_f0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f0-604f9c3a.pth', + pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), crop_pct=.9), + dm_nfnet_f1=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f1-fc540f82.pth', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320), crop_pct=0.91), + dm_nfnet_f2=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f2-89875923.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352), crop_pct=0.92), + dm_nfnet_f3=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f3-d74ab3aa.pth', + pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416), crop_pct=0.94), + dm_nfnet_f4=_dcfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f4-0ac5b10b.pth', + pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512), crop_pct=0.951), + dm_nfnet_f5=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f5-ecb20ab1.pth', + pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544), crop_pct=0.954), + dm_nfnet_f6=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f6-e0f12116.pth', + pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576), crop_pct=0.956), + + nfnet_f0=_dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)), + nfnet_f1=_dcfg( + url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)), + nfnet_f2=_dcfg( + url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)), + nfnet_f3=_dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)), + nfnet_f4=_dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)), + nfnet_f5=_dcfg( + url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)), + nfnet_f6=_dcfg( + url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)), + nfnet_f7=_dcfg( + url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)), + + nfnet_f0s=_dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)), + nfnet_f1s=_dcfg( + url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)), + nfnet_f2s=_dcfg( + url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)), + nfnet_f3s=_dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)), + nfnet_f4s=_dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)), + nfnet_f5s=_dcfg( + url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)), + nfnet_f6s=_dcfg( + url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)), + nfnet_f7s=_dcfg( + url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)), + + nfnet_l0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nfnet_l0_ra2-45c6688d.pth', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), crop_pct=1.0), + eca_nfnet_l0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l0_ra2-e3e9ac50.pth', + hf_hub='timm/eca_nfnet_l0', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), crop_pct=1.0), + eca_nfnet_l1=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l1_ra2-7dce93cd.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 320, 320), crop_pct=1.0), + eca_nfnet_l2=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l2_ra3-da781a61.pth', + pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), crop_pct=1.0), + eca_nfnet_l3=_dcfg( + url='', + pool_size=(11, 11), input_size=(3, 352, 352), test_input_size=(3, 448, 448), crop_pct=1.0), + + nf_regnet_b0=_dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), 
test_input_size=(3, 256, 256), first_conv='stem.conv'), + nf_regnet_b1=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_regnet_b1_256_ra2-ad85cfef.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), first_conv='stem.conv'), # NOT to paper spec + nf_regnet_b2=_dcfg( + url='', pool_size=(8, 8), input_size=(3, 240, 240), test_input_size=(3, 272, 272), first_conv='stem.conv'), + nf_regnet_b3=_dcfg( + url='', pool_size=(9, 9), input_size=(3, 288, 288), test_input_size=(3, 320, 320), first_conv='stem.conv'), + nf_regnet_b4=_dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), first_conv='stem.conv'), + nf_regnet_b5=_dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 456, 456), first_conv='stem.conv'), + + nf_resnet26=_dcfg(url='', first_conv='stem.conv'), + nf_resnet50=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_resnet50_ra2-9f236009.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), crop_pct=0.94, first_conv='stem.conv'), + nf_resnet101=_dcfg(url='', first_conv='stem.conv'), + + nf_seresnet26=_dcfg(url='', first_conv='stem.conv'), + nf_seresnet50=_dcfg(url='', first_conv='stem.conv'), + nf_seresnet101=_dcfg(url='', first_conv='stem.conv'), + + nf_ecaresnet26=_dcfg(url='', first_conv='stem.conv'), + nf_ecaresnet50=_dcfg(url='', first_conv='stem.conv'), + nf_ecaresnet101=_dcfg(url='', first_conv='stem.conv'), +) + + +@dataclass +class NfCfg: + depths: Tuple[int, int, int, int] + channels: Tuple[int, int, int, int] + alpha: float = 0.2 + stem_type: str = '3x3' + stem_chs: Optional[int] = None + group_size: Optional[int] = None + attn_layer: Optional[str] = None + attn_kwargs: dict = None + attn_gain: float = 2.0 # NF correction gain to apply if attn layer is used + width_factor: float = 1.0 + bottle_ratio: float = 0.5 + num_features: int = 0 # num out_channels for final conv, no final_conv if 0 + ch_div: int = 8 # round channels % 8 == 0 to keep tensor-core use optimal + reg: bool = False # enables EfficientNet-like options used in RegNet variants, expand from in_chs, se in middle + extra_conv: bool = False # extra 3x3 bottleneck convolution for NFNet models + gamma_in_act: bool = False + same_padding: bool = False + std_conv_eps: float = 1e-5 + skipinit: bool = False # disabled by default, non-trivial performance impact + zero_init_fc: bool = False + act_layer: str = 'silu' + + +def _nfres_cfg( + depths, channels=(256, 512, 1024, 2048), group_size=None, act_layer='relu', attn_layer=None, attn_kwargs=None): + attn_kwargs = attn_kwargs or {} + cfg = NfCfg( + depths=depths, channels=channels, stem_type='7x7_pool', stem_chs=64, bottle_ratio=0.25, + group_size=group_size, act_layer=act_layer, attn_layer=attn_layer, attn_kwargs=attn_kwargs) + return cfg + + +def _nfreg_cfg(depths, channels=(48, 104, 208, 440)): + num_features = 1280 * channels[-1] // 440 + attn_kwargs = dict(rd_ratio=0.5) + cfg = NfCfg( + depths=depths, channels=channels, stem_type='3x3', group_size=8, width_factor=0.75, bottle_ratio=2.25, + num_features=num_features, reg=True, attn_layer='se', attn_kwargs=attn_kwargs) + return cfg + + +def _nfnet_cfg( + depths, channels=(256, 512, 1536, 1536), group_size=128, bottle_ratio=0.5, feat_mult=2., + act_layer='gelu', attn_layer='se', attn_kwargs=None): + num_features = int(channels[-1] * feat_mult) + attn_kwargs = attn_kwargs if attn_kwargs is not None else 
dict(rd_ratio=0.5) + cfg = NfCfg( + depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=group_size, + bottle_ratio=bottle_ratio, extra_conv=True, num_features=num_features, act_layer=act_layer, + attn_layer=attn_layer, attn_kwargs=attn_kwargs) + return cfg + + +def _dm_nfnet_cfg(depths, channels=(256, 512, 1536, 1536), act_layer='gelu', skipinit=True): + cfg = NfCfg( + depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=128, + bottle_ratio=0.5, extra_conv=True, gamma_in_act=True, same_padding=True, skipinit=skipinit, + num_features=int(channels[-1] * 2.0), act_layer=act_layer, attn_layer='se', attn_kwargs=dict(rd_ratio=0.5)) + return cfg + + + +model_cfgs = dict( + # NFNet-F models w/ GELU compatible with DeepMind weights + dm_nfnet_f0=_dm_nfnet_cfg(depths=(1, 2, 6, 3)), + dm_nfnet_f1=_dm_nfnet_cfg(depths=(2, 4, 12, 6)), + dm_nfnet_f2=_dm_nfnet_cfg(depths=(3, 6, 18, 9)), + dm_nfnet_f3=_dm_nfnet_cfg(depths=(4, 8, 24, 12)), + dm_nfnet_f4=_dm_nfnet_cfg(depths=(5, 10, 30, 15)), + dm_nfnet_f5=_dm_nfnet_cfg(depths=(6, 12, 36, 18)), + dm_nfnet_f6=_dm_nfnet_cfg(depths=(7, 14, 42, 21)), + + # NFNet-F models w/ GELU (I will likely deprecate/remove these models and just keep dm_ ver for GELU) + nfnet_f0=_nfnet_cfg(depths=(1, 2, 6, 3)), + nfnet_f1=_nfnet_cfg(depths=(2, 4, 12, 6)), + nfnet_f2=_nfnet_cfg(depths=(3, 6, 18, 9)), + nfnet_f3=_nfnet_cfg(depths=(4, 8, 24, 12)), + nfnet_f4=_nfnet_cfg(depths=(5, 10, 30, 15)), + nfnet_f5=_nfnet_cfg(depths=(6, 12, 36, 18)), + nfnet_f6=_nfnet_cfg(depths=(7, 14, 42, 21)), + nfnet_f7=_nfnet_cfg(depths=(8, 16, 48, 24)), + + # NFNet-F models w/ SiLU (much faster in PyTorch) + nfnet_f0s=_nfnet_cfg(depths=(1, 2, 6, 3), act_layer='silu'), + nfnet_f1s=_nfnet_cfg(depths=(2, 4, 12, 6), act_layer='silu'), + nfnet_f2s=_nfnet_cfg(depths=(3, 6, 18, 9), act_layer='silu'), + nfnet_f3s=_nfnet_cfg(depths=(4, 8, 24, 12), act_layer='silu'), + nfnet_f4s=_nfnet_cfg(depths=(5, 10, 30, 15), act_layer='silu'), + nfnet_f5s=_nfnet_cfg(depths=(6, 12, 36, 18), act_layer='silu'), + nfnet_f6s=_nfnet_cfg(depths=(7, 14, 42, 21), act_layer='silu'), + nfnet_f7s=_nfnet_cfg(depths=(8, 16, 48, 24), act_layer='silu'), + + # Experimental 'light' versions of NFNet-F that are little leaner + nfnet_l0=_nfnet_cfg( + depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, + attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'), + eca_nfnet_l0=_nfnet_cfg( + depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l1=_nfnet_cfg( + depths=(2, 4, 12, 6), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l2=_nfnet_cfg( + depths=(3, 6, 18, 9), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + eca_nfnet_l3=_nfnet_cfg( + depths=(4, 8, 24, 12), feat_mult=2, group_size=64, bottle_ratio=0.25, + attn_layer='eca', attn_kwargs=dict(), act_layer='silu'), + + # EffNet influenced RegNet defs. + # NOTE: These aren't quite the official ver, ch_div=1 must be set for exact ch counts. I round to ch_div=8. 
+ nf_regnet_b0=_nfreg_cfg(depths=(1, 3, 6, 6)), + nf_regnet_b1=_nfreg_cfg(depths=(2, 4, 7, 7)), + nf_regnet_b2=_nfreg_cfg(depths=(2, 4, 8, 8), channels=(56, 112, 232, 488)), + nf_regnet_b3=_nfreg_cfg(depths=(2, 5, 9, 9), channels=(56, 128, 248, 528)), + nf_regnet_b4=_nfreg_cfg(depths=(2, 6, 11, 11), channels=(64, 144, 288, 616)), + nf_regnet_b5=_nfreg_cfg(depths=(3, 7, 14, 14), channels=(80, 168, 336, 704)), + # FIXME add B6-B8 + + # ResNet (preact, D style deep stem/avg down) defs + nf_resnet26=_nfres_cfg(depths=(2, 2, 2, 2)), + nf_resnet50=_nfres_cfg(depths=(3, 4, 6, 3)), + nf_resnet101=_nfres_cfg(depths=(3, 4, 23, 3)), + + nf_seresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + nf_seresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + nf_seresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + + nf_ecaresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='eca', attn_kwargs=dict()), + nf_ecaresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='eca', attn_kwargs=dict()), + nf_ecaresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='eca', attn_kwargs=dict()), + +) + + +class GammaAct(nn.Module): + def __init__(self, act_type='relu', gamma: float = 1.0, inplace=False): + super().__init__() + self.act_fn = get_act_fn(act_type) + self.gamma = gamma + self.inplace = inplace + + def forward(self, x): + return self.act_fn(x, inplace=self.inplace).mul_(self.gamma) + + +def act_with_gamma(act_type, gamma: float = 1.): + def _create(inplace=False): + return GammaAct(act_type, gamma=gamma, inplace=inplace) + return _create + + +class DownsampleAvg(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, conv_layer=ScaledStdConv2d): + """ AvgPool Downsampling as in 'D' ResNet variants. Support for dilation.""" + super(DownsampleAvg, self).__init__() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = conv_layer(in_chs, out_chs, 1, stride=1) + + def forward(self, x): + return self.conv(self.pool(x)) + + +class NormFreeBlock(nn.Module): + """Normalization-Free pre-activation block. 
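+
+    A brief summary of the forward pass defined below: ``out = shortcut + alpha * f(beta * act1(x))``,
+    where ``beta = 1 / expected_std`` of the incoming activations, ``alpha`` controls per-block variance
+    growth, and any SE/ECA attention output is rescaled by ``attn_gain`` to preserve signal propagation.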
+ """ + + def __init__( + self, in_chs, out_chs=None, stride=1, dilation=1, first_dilation=None, + alpha=1.0, beta=1.0, bottle_ratio=0.25, group_size=None, ch_div=1, reg=True, extra_conv=False, + skipinit=False, attn_layer=None, attn_gain=2.0, act_layer=None, conv_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + out_chs = out_chs or in_chs + # RegNet variants scale bottleneck from in_chs, otherwise scale from out_chs like ResNet + mid_chs = make_divisible(in_chs * bottle_ratio if reg else out_chs * bottle_ratio, ch_div) + groups = 1 if not group_size else mid_chs // group_size + if group_size and group_size % ch_div == 0: + mid_chs = group_size * groups # correct mid_chs if group_size divisible by ch_div, otherwise error + self.alpha = alpha + self.beta = beta + self.attn_gain = attn_gain + + if in_chs != out_chs or stride != 1 or dilation != first_dilation: + self.downsample = DownsampleAvg( + in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, conv_layer=conv_layer) + else: + self.downsample = None + + self.act1 = act_layer() + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.act2 = act_layer(inplace=True) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + if extra_conv: + self.act2b = act_layer(inplace=True) + self.conv2b = conv_layer(mid_chs, mid_chs, 3, stride=1, dilation=dilation, groups=groups) + else: + self.act2b = None + self.conv2b = None + if reg and attn_layer is not None: + self.attn = attn_layer(mid_chs) # RegNet blocks apply attn btw conv2 & 3 + else: + self.attn = None + self.act3 = act_layer() + self.conv3 = conv_layer(mid_chs, out_chs, 1, gain_init=1. if skipinit else 0.) + if not reg and attn_layer is not None: + self.attn_last = attn_layer(out_chs) # ResNet blocks apply attn after conv3 + else: + self.attn_last = None + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.skipinit_gain = nn.Parameter(torch.tensor(0.)) if skipinit else None + + def forward(self, x): + out = self.act1(x) * self.beta + + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(out) + + # residual branch + out = self.conv1(out) + out = self.conv2(self.act2(out)) + if self.conv2b is not None: + out = self.conv2b(self.act2b(out)) + if self.attn is not None: + out = self.attn_gain * self.attn(out) + out = self.conv3(self.act3(out)) + if self.attn_last is not None: + out = self.attn_gain * self.attn_last(out) + out = self.drop_path(out) + + if self.skipinit_gain is not None: + out.mul_(self.skipinit_gain) # this slows things down more than expected, TBD + out = out * self.alpha + shortcut + return out + + +def create_stem(in_chs, out_chs, stem_type='', conv_layer=None, act_layer=None, preact_feature=True): + stem_stride = 2 + stem_feature = dict(num_chs=out_chs, reduction=2, module='stem.conv') + stem = OrderedDict() + assert stem_type in ('', 'deep', 'deep_tiered', 'deep_quad', '3x3', '7x7', 'deep_pool', '3x3_pool', '7x7_pool') + if 'deep' in stem_type: + if 'quad' in stem_type: + # 4 deep conv stack as in NFNet-F models + assert not 'pool' in stem_type + stem_chs = (out_chs // 8, out_chs // 4, out_chs // 2, out_chs) + strides = (2, 1, 1, 2) + stem_stride = 4 + stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv3') + else: + if 'tiered' in stem_type: + stem_chs = (3 * out_chs // 8, out_chs // 2, out_chs) # 'T' resnets in resnet.py + else: + stem_chs = (out_chs // 
2, out_chs // 2, out_chs) # 'D' ResNets + strides = (2, 1, 1) + stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv2') + last_idx = len(stem_chs) - 1 + for i, (c, s) in enumerate(zip(stem_chs, strides)): + stem[f'conv{i + 1}'] = conv_layer(in_chs, c, kernel_size=3, stride=s) + if i != last_idx: + stem[f'act{i + 2}'] = act_layer(inplace=True) + in_chs = c + elif '3x3' in stem_type: + # 3x3 stem conv as in RegNet + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=3, stride=2) + else: + # 7x7 stem conv as in ResNet + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) + + if 'pool' in stem_type: + stem['pool'] = nn.MaxPool2d(3, stride=2, padding=1) + stem_stride = 4 + + return nn.Sequential(stem), stem_stride, stem_feature + + +# from https://github.com/deepmind/deepmind-research/tree/master/nfnets +_nonlin_gamma = dict( + identity=1.0, + celu=1.270926833152771, + elu=1.2716004848480225, + gelu=1.7015043497085571, + leaky_relu=1.70590341091156, + log_sigmoid=1.9193484783172607, + log_softmax=1.0002083778381348, + relu=1.7139588594436646, + relu6=1.7131484746932983, + selu=1.0008515119552612, + sigmoid=4.803835391998291, + silu=1.7881293296813965, + softsign=2.338853120803833, + softplus=1.9203323125839233, + tanh=1.5939117670059204, +) + + +class NormFreeNet(nn.Module): + """ Normalization-Free Network + + As described in : + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + and + `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 + + This model aims to cover both the NFRegNet-Bx models as detailed in the paper's code snippets and + the (preact) ResNet models described earlier in the paper. + + There are a few differences: + * channels are rounded to be divisible by 8 by default (keep tensor core kernels happy), + this changes channel dim and param counts slightly from the paper models + * activation correcting gamma constants are moved into the ScaledStdConv as it has less performance + impact in PyTorch when done with the weight scaling there. This likely wasn't a concern in the JAX impl. + * a config option `gamma_in_act` can be enabled to not apply gamma in StdConv as described above, but + apply it in each activation. This is slightly slower, numerically different, but matches official impl. + * skipinit is disabled by default, it seems to have a rather drastic impact on GPU memory use and throughput + for what it is/does. Approx 8-10% throughput loss. + """ + def __init__(self, cfg: NfCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, + drop_rate=0., drop_path_rate=0.): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert cfg.act_layer in _nonlin_gamma, f"Please add non-linearity constants for activation ({cfg.act_layer})." 
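+        # The activation-specific gain comes from _nonlin_gamma; with cfg.gamma_in_act it is applied inside
+        # the activation (GammaAct), otherwise it is folded into the ScaledStdConv weight standardization.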
+ conv_layer = ScaledStdConv2dSame if cfg.same_padding else ScaledStdConv2d + if cfg.gamma_in_act: + act_layer = act_with_gamma(cfg.act_layer, gamma=_nonlin_gamma[cfg.act_layer]) + conv_layer = partial(conv_layer, eps=cfg.std_conv_eps) + else: + act_layer = get_act_layer(cfg.act_layer) + conv_layer = partial(conv_layer, gamma=_nonlin_gamma[cfg.act_layer], eps=cfg.std_conv_eps) + attn_layer = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None + + stem_chs = make_divisible((cfg.stem_chs or cfg.channels[0]) * cfg.width_factor, cfg.ch_div) + self.stem, stem_stride, stem_feat = create_stem( + in_chans, stem_chs, cfg.stem_type, conv_layer=conv_layer, act_layer=act_layer) + + self.feature_info = [stem_feat] + drop_path_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] + prev_chs = stem_chs + net_stride = stem_stride + dilation = 1 + expected_var = 1.0 + stages = [] + for stage_idx, stage_depth in enumerate(cfg.depths): + stride = 1 if stage_idx == 0 and stem_stride > 2 else 2 + if net_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + net_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + + blocks = [] + for block_idx in range(cfg.depths[stage_idx]): + first_block = block_idx == 0 and stage_idx == 0 + out_chs = make_divisible(cfg.channels[stage_idx] * cfg.width_factor, cfg.ch_div) + blocks += [NormFreeBlock( + in_chs=prev_chs, out_chs=out_chs, + alpha=cfg.alpha, + beta=1. / expected_var ** 0.5, + stride=stride if block_idx == 0 else 1, + dilation=dilation, + first_dilation=first_dilation, + group_size=cfg.group_size, + bottle_ratio=1. if cfg.reg and first_block else cfg.bottle_ratio, + ch_div=cfg.ch_div, + reg=cfg.reg, + extra_conv=cfg.extra_conv, + skipinit=cfg.skipinit, + attn_layer=attn_layer, + attn_gain=cfg.attn_gain, + act_layer=act_layer, + conv_layer=conv_layer, + drop_path_rate=drop_path_rates[stage_idx][block_idx], + )] + if block_idx == 0: + expected_var = 1. # expected var is reset after first block of each stage + expected_var += cfg.alpha ** 2 # Even if reset occurs, increment expected variance + first_dilation = dilation + prev_chs = out_chs + self.feature_info += [dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')] + stages += [nn.Sequential(*blocks)] + self.stages = nn.Sequential(*stages) + + if cfg.num_features: + # The paper NFRegNet models have an EfficientNet-like final head convolution. 
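+            # Scale the requested feature width by width_factor and round to a multiple of ch_div so the
+            # 1x1 final_conv keeps channel counts tensor-core friendly.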
+ self.num_features = make_divisible(cfg.width_factor * cfg.num_features, cfg.ch_div) + self.final_conv = conv_layer(prev_chs, self.num_features, 1) + self.feature_info[-1] = dict(num_chs=self.num_features, reduction=net_stride, module=f'final_conv') + else: + self.num_features = prev_chs + self.final_conv = nn.Identity() + self.final_act = act_layer(inplace=cfg.num_features > 0) + + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + for n, m in self.named_modules(): + if 'fc' in n and isinstance(m, nn.Linear): + if cfg.zero_init_fc: + nn.init.zeros_(m.weight) + else: + nn.init.normal_(m.weight, 0., .01) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='linear') + if m.bias is not None: + nn.init.zeros_(m.bias) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.final_conv(x) + x = self.final_act(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_normfreenet(variant, pretrained=False, **kwargs): + model_cfg = model_cfgs[variant] + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + NormFreeNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfg, + feature_cfg=feature_cfg, + **kwargs) + + +@register_model +def dm_nfnet_f0(pretrained=False, **kwargs): + """ NFNet-F0 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f0', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f1(pretrained=False, **kwargs): + """ NFNet-F1 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f1', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f2(pretrained=False, **kwargs): + """ NFNet-F2 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f2', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f3(pretrained=False, **kwargs): + """ NFNet-F3 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f3', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f4(pretrained=False, **kwargs): + """ NFNet-F4 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f4', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f5(pretrained=False, **kwargs): + """ NFNet-F5 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f5', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f6(pretrained=False, **kwargs): + """ NFNet-F6 
(DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f6', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f0(pretrained=False, **kwargs): + """ NFNet-F0 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f0', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f1(pretrained=False, **kwargs): + """ NFNet-F1 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f1', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f2(pretrained=False, **kwargs): + """ NFNet-F2 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f2', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f3(pretrained=False, **kwargs): + """ NFNet-F3 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f3', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f4(pretrained=False, **kwargs): + """ NFNet-F4 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f4', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f5(pretrained=False, **kwargs): + """ NFNet-F5 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f5', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f6(pretrained=False, **kwargs): + """ NFNet-F6 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f6', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f7(pretrained=False, **kwargs): + """ NFNet-F7 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f7', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f0s(pretrained=False, **kwargs): + """ NFNet-F0 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f0s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f1s(pretrained=False, **kwargs): + """ NFNet-F1 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f1s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f2s(pretrained=False, **kwargs): + """ NFNet-F2 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f2s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f3s(pretrained=False, **kwargs): + """ NFNet-F3 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f3s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f4s(pretrained=False, **kwargs): 
+ """ NFNet-F4 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f4s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f5s(pretrained=False, **kwargs): + """ NFNet-F5 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f5s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f6s(pretrained=False, **kwargs): + """ NFNet-F6 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f6s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f7s(pretrained=False, **kwargs): + """ NFNet-F7 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f7s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_l0(pretrained=False, **kwargs): + """ NFNet-L0b w/ SiLU + My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & SE ratio + """ + return _create_normfreenet('nfnet_l0', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l0(pretrained=False, **kwargs): + """ ECA-NFNet-L0 w/ SiLU + My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l0', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l1(pretrained=False, **kwargs): + """ ECA-NFNet-L1 w/ SiLU + My experimental 'light' model w/ F1 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l1', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l2(pretrained=False, **kwargs): + """ ECA-NFNet-L2 w/ SiLU + My experimental 'light' model w/ F2 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l2', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l3(pretrained=False, **kwargs): + """ ECA-NFNet-L3 w/ SiLU + My experimental 'light' model w/ F3 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l3', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b0(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B0 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b0', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b1(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B1 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b1', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b2(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B2 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b2', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b3(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B3 + `Characterizing 
signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b3', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b4(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B4 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b5(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B5 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b5', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet26(pretrained=False, **kwargs): + """ Normalization-Free ResNet-26 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_resnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet50(pretrained=False, **kwargs): + """ Normalization-Free ResNet-50 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_resnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_resnet101(pretrained=False, **kwargs): + """ Normalization-Free ResNet-101 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_resnet101', pretrained=pretrained, **kwargs) + + +@register_model +def nf_seresnet26(pretrained=False, **kwargs): + """ Normalization-Free SE-ResNet26 + """ + return _create_normfreenet('nf_seresnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_seresnet50(pretrained=False, **kwargs): + """ Normalization-Free SE-ResNet50 + """ + return _create_normfreenet('nf_seresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_seresnet101(pretrained=False, **kwargs): + """ Normalization-Free SE-ResNet101 + """ + return _create_normfreenet('nf_seresnet101', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet26(pretrained=False, **kwargs): + """ Normalization-Free ECA-ResNet26 + """ + return _create_normfreenet('nf_ecaresnet26', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet50(pretrained=False, **kwargs): + """ Normalization-Free ECA-ResNet50 + """ + return _create_normfreenet('nf_ecaresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def nf_ecaresnet101(pretrained=False, **kwargs): + """ Normalization-Free ECA-ResNet101 + """ + return _create_normfreenet('nf_ecaresnet101', pretrained=pretrained, **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/pit.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/pit.py new file mode 100644 index 0000000000..460824e2d6 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/pit.py @@ -0,0 +1,384 @@ +""" Pooling-based Vision Transformer (PiT) in PyTorch + +A PyTorch implement of Pooling-based Vision Transformers as described in +'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302 + +This code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below. 
+ +Modifications for timm by / Copyright 2020 Ross Wightman +""" +# PiT +# Copyright 2021-present NAVER Corp. +# Apache License v2.0 + +import math +import re +from copy import deepcopy +from functools import partial +from typing import Tuple + +import torch +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import trunc_normal_, to_2tuple +from .registry import register_model +from .vision_transformer import Block + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.conv', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # deit models (FB weights) + 'pit_ti_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_730.pth'), + 'pit_xs_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_781.pth'), + 'pit_s_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_809.pth'), + 'pit_b_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_820.pth'), + 'pit_ti_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_distill_746.pth', + classifier=('head', 'head_dist')), + 'pit_xs_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_distill_791.pth', + classifier=('head', 'head_dist')), + 'pit_s_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_distill_819.pth', + classifier=('head', 'head_dist')), + 'pit_b_distilled_224': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_distill_840.pth', + classifier=('head', 'head_dist')), +} + + +class SequentialTuple(nn.Sequential): + """ This module exists to work around torchscript typing issues list -> list""" + def __init__(self, *args): + super(SequentialTuple, self).__init__(*args) + + def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: + for module in self: + x = module(x) + return x + + +class Transformer(nn.Module): + def __init__( + self, base_dim, depth, heads, mlp_ratio, pool=None, drop_rate=.0, attn_drop_rate=.0, drop_path_prob=None): + super(Transformer, self).__init__() + self.layers = nn.ModuleList([]) + embed_dim = base_dim * heads + + self.blocks = nn.Sequential(*[ + Block( + dim=embed_dim, + num_heads=heads, + mlp_ratio=mlp_ratio, + qkv_bias=True, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=drop_path_prob[i], + norm_layer=partial(nn.LayerNorm, eps=1e-6) + ) + for i in range(depth)]) + + self.pool = pool + + def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: + x, cls_tokens = x + B, C, H, W = x.shape + token_length = cls_tokens.shape[1] + + x = x.flatten(2).transpose(1, 2) + x = torch.cat((cls_tokens, x), dim=1) + + x = self.blocks(x) + + cls_tokens = x[:, :token_length] + x = x[:, token_length:] + x = x.transpose(1, 2).reshape(B, C, H, W) + + if self.pool is not None: + x, cls_tokens = self.pool(x, 
cls_tokens) + return x, cls_tokens + + +class ConvHeadPooling(nn.Module): + def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'): + super(ConvHeadPooling, self).__init__() + + self.conv = nn.Conv2d( + in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride, + padding_mode=padding_mode, groups=in_feature) + self.fc = nn.Linear(in_feature, out_feature) + + def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]: + + x = self.conv(x) + cls_token = self.fc(cls_token) + + return x, cls_token + + +class ConvEmbedding(nn.Module): + def __init__(self, in_channels, out_channels, patch_size, stride, padding): + super(ConvEmbedding, self).__init__() + self.conv = nn.Conv2d( + in_channels, out_channels, kernel_size=patch_size, stride=stride, padding=padding, bias=True) + + def forward(self, x): + x = self.conv(x) + return x + + +class PoolingVisionTransformer(nn.Module): + """ Pooling-based Vision Transformer + + A PyTorch implement of 'Rethinking Spatial Dimensions of Vision Transformers' + - https://arxiv.org/abs/2103.16302 + """ + def __init__(self, img_size, patch_size, stride, base_dims, depth, heads, + mlp_ratio, num_classes=1000, in_chans=3, distilled=False, + attn_drop_rate=.0, drop_rate=.0, drop_path_rate=.0): + super(PoolingVisionTransformer, self).__init__() + + padding = 0 + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + height = math.floor((img_size[0] + 2 * padding - patch_size[0]) / stride + 1) + width = math.floor((img_size[1] + 2 * padding - patch_size[1]) / stride + 1) + + self.base_dims = base_dims + self.heads = heads + self.num_classes = num_classes + self.num_tokens = 2 if distilled else 1 + + self.patch_size = patch_size + self.pos_embed = nn.Parameter(torch.randn(1, base_dims[0] * heads[0], height, width)) + self.patch_embed = ConvEmbedding(in_chans, base_dims[0] * heads[0], patch_size, stride, padding) + + self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, base_dims[0] * heads[0])) + self.pos_drop = nn.Dropout(p=drop_rate) + + transformers = [] + # stochastic depth decay rule + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)] + for stage in range(len(depth)): + pool = None + if stage < len(heads) - 1: + pool = ConvHeadPooling( + base_dims[stage] * heads[stage], base_dims[stage + 1] * heads[stage + 1], stride=2) + transformers += [Transformer( + base_dims[stage], depth[stage], heads[stage], mlp_ratio, pool=pool, + drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_prob=dpr[stage]) + ] + self.transformers = SequentialTuple(*transformers) + self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6) + self.num_features = self.embed_dim = base_dims[-1] * heads[-1] + + # Classifier head + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = None + if distilled: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + if self.head_dist is not None: + return self.head, self.head_dist + else: + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + 
self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + if self.head_dist is not None: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + x = self.pos_drop(x + self.pos_embed) + cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) + x, cls_tokens = self.transformers((x, cls_tokens)) + cls_tokens = self.norm(cls_tokens) + if self.head_dist is not None: + return cls_tokens[:, 0], cls_tokens[:, 1] + else: + return cls_tokens[:, 0] + + def forward(self, x): + x = self.forward_features(x) + if self.head_dist is not None: + x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple + if self.training and not torch.jit.is_scripting(): + return x, x_dist + else: + return (x + x_dist) / 2 + else: + return self.head(x) + + +def checkpoint_filter_fn(state_dict, model): + """ preprocess checkpoints """ + out_dict = {} + p_blocks = re.compile(r'pools\.(\d)\.') + for k, v in state_dict.items(): + # FIXME need to update resize for PiT impl + # if k == 'pos_embed' and v.shape != model.pos_embed.shape: + # # To resize pos embedding when using model at different size from pretrained weights + # v = resize_pos_embed(v, model.pos_embed) + k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1))}.pool.', k) + out_dict[k] = v + return out_dict + + +def _create_pit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + PoolingVisionTransformer, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def pit_b_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=14, + stride=7, + base_dims=[64, 64, 64], + depth=[3, 6, 4], + heads=[4, 8, 16], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_b_224', pretrained, **model_kwargs) + + +@register_model +def pit_s_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[3, 6, 12], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_s_224', pretrained, **model_kwargs) + + +@register_model +def pit_xs_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_xs_224', pretrained, **model_kwargs) + + +@register_model +def pit_ti_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[32, 32, 32], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_ti_224', pretrained, **model_kwargs) + + +@register_model +def pit_b_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=14, + stride=7, + base_dims=[64, 64, 64], + depth=[3, 6, 4], + heads=[4, 8, 16], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_b_distilled_224', pretrained, **model_kwargs) + + +@register_model +def pit_s_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[3, 6, 12], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_s_distilled_224', pretrained, **model_kwargs) + + +@register_model +def 
pit_xs_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_xs_distilled_224', pretrained, **model_kwargs) + + +@register_model +def pit_ti_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[32, 32, 32], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_ti_distilled_224', pretrained, **model_kwargs) \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/pnasnet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/pnasnet.py new file mode 100644 index 0000000000..999181563a --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/pnasnet.py @@ -0,0 +1,350 @@ +""" + pnasnet5large implementation grabbed from Cadene's pretrained models + Additional credit to https://github.com/creafz + + https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/pnasnet.py + +""" +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import ConvBnAct, create_conv2d, create_pool2d, create_classifier +from .registry import register_model + +__all__ = ['PNASNet5Large'] + +default_cfgs = { + 'pnasnet5large': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/pnasnet5large-bf079911.pth', + 'input_size': (3, 331, 331), + 'pool_size': (11, 11), + 'crop_pct': 0.911, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv_0.conv', + 'classifier': 'last_linear', + 'label_offset': 1, # 1001 classes in pretrained weights + }, +} + + +class SeparableConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): + super(SeparableConv2d, self).__init__() + self.depthwise_conv2d = create_conv2d( + in_channels, in_channels, kernel_size=kernel_size, + stride=stride, padding=padding, groups=in_channels) + self.pointwise_conv2d = create_conv2d( + in_channels, out_channels, kernel_size=1, padding=padding) + + def forward(self, x): + x = self.depthwise_conv2d(x) + x = self.pointwise_conv2d(x) + return x + + +class BranchSeparables(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, stem_cell=False, padding=''): + super(BranchSeparables, self).__init__() + middle_channels = out_channels if stem_cell else in_channels + self.act_1 = nn.ReLU() + self.separable_1 = SeparableConv2d( + in_channels, middle_channels, kernel_size, stride=stride, padding=padding) + self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001) + self.act_2 = nn.ReLU() + self.separable_2 = SeparableConv2d( + middle_channels, out_channels, kernel_size, stride=1, padding=padding) + self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act_1(x) + x = self.separable_1(x) + x = self.bn_sep_1(x) + x = self.act_2(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class ActConvBn(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): + super(ActConvBn, self).__init__() + self.act = nn.ReLU() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size=kernel_size, 
stride=stride, padding=padding) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act(x) + x = self.conv(x) + x = self.bn(x) + return x + + +class FactorizedReduction(nn.Module): + + def __init__(self, in_channels, out_channels, padding=''): + super(FactorizedReduction, self).__init__() + self.act = nn.ReLU() + self.path_1 = nn.Sequential(OrderedDict([ + ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), + ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)), + ])) + self.path_2 = nn.Sequential(OrderedDict([ + ('pad', nn.ZeroPad2d((-1, 1, -1, 1))), # shift + ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), + ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)), + ])) + self.final_path_bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act(x) + x_path1 = self.path_1(x) + x_path2 = self.path_2(x) + out = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + return out + + +class CellBase(nn.Module): + + def cell_forward(self, x_left, x_right): + x_comb_iter_0_left = self.comb_iter_0_left(x_left) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_right) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_left = self.comb_iter_3_left(x_comb_iter_2) + x_comb_iter_3_right = self.comb_iter_3_right(x_right) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_left) + if self.comb_iter_4_right is not None: + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + else: + x_comb_iter_4_right = x_right + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class CellStem0(CellBase): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(CellStem0, self).__init__() + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables( + in_chs_left, out_chs_left, kernel_size=5, stride=2, stem_cell=True, padding=pad_type) + self.comb_iter_0_right = nn.Sequential(OrderedDict([ + ('max_pool', create_pool2d('max', 3, stride=2, padding=pad_type)), + ('conv', create_conv2d(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)), + ('bn', nn.BatchNorm2d(out_chs_left, eps=0.001)), + ])) + + self.comb_iter_1_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=7, stride=2, padding=pad_type) + self.comb_iter_1_right = create_pool2d('max', 3, stride=2, padding=pad_type) + + self.comb_iter_2_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=5, stride=2, padding=pad_type) + self.comb_iter_2_right = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, stride=2, padding=pad_type) + + self.comb_iter_3_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, padding=pad_type) + self.comb_iter_3_right = create_pool2d('max', 3, stride=2, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables( + in_chs_right, out_chs_right, 
kernel_size=3, stride=2, stem_cell=True, padding=pad_type) + self.comb_iter_4_right = ActConvBn( + out_chs_right, out_chs_right, kernel_size=1, stride=2, padding=pad_type) + + def forward(self, x_left): + x_right = self.conv_1x1(x_left) + x_out = self.cell_forward(x_left, x_right) + return x_out + + +class Cell(CellBase): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type='', + is_reduction=False, match_prev_layer_dims=False): + super(Cell, self).__init__() + + # If `is_reduction` is set to `True` stride 2 is used for + # convolution and pooling layers to reduce the spatial size of + # the output of a cell approximately by a factor of 2. + stride = 2 if is_reduction else 1 + + # If `match_prev_layer_dimensions` is set to `True` + # `FactorizedReduction` is used to reduce the spatial size + # of the left input of a cell approximately by a factor of 2. + self.match_prev_layer_dimensions = match_prev_layer_dims + if match_prev_layer_dims: + self.conv_prev_1x1 = FactorizedReduction(in_chs_left, out_chs_left, padding=pad_type) + else: + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables( + out_chs_left, out_chs_left, kernel_size=5, stride=stride, padding=pad_type) + self.comb_iter_0_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_1_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=7, stride=stride, padding=pad_type) + self.comb_iter_1_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_2_left = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=5, stride=stride, padding=pad_type) + self.comb_iter_2_right = BranchSeparables( + out_chs_right, out_chs_right, kernel_size=3, stride=stride, padding=pad_type) + + self.comb_iter_3_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3) + self.comb_iter_3_right = create_pool2d('max', 3, stride=stride, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables( + out_chs_left, out_chs_left, kernel_size=3, stride=stride, padding=pad_type) + if is_reduction: + self.comb_iter_4_right = ActConvBn( + out_chs_right, out_chs_right, kernel_size=1, stride=stride, padding=pad_type) + else: + self.comb_iter_4_right = None + + def forward(self, x_left, x_right): + x_left = self.conv_prev_1x1(x_left) + x_right = self.conv_1x1(x_right) + x_out = self.cell_forward(x_left, x_right) + return x_out + + +class PNASNet5Large(nn.Module): + def __init__(self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0., global_pool='avg', pad_type=''): + super(PNASNet5Large, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.num_features = 4320 + assert output_stride == 32 + + self.conv_0 = ConvBnAct( + in_chans, 96, kernel_size=3, stride=2, padding=0, + norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) + + self.cell_stem_0 = CellStem0( + in_chs_left=96, out_chs_left=54, in_chs_right=96, out_chs_right=54, pad_type=pad_type) + + self.cell_stem_1 = Cell( + in_chs_left=96, out_chs_left=108, in_chs_right=270, out_chs_right=108, pad_type=pad_type, + match_prev_layer_dims=True, is_reduction=True) + self.cell_0 = Cell( + in_chs_left=270, out_chs_left=216, in_chs_right=540, out_chs_right=216, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_1 = Cell( + in_chs_left=540, out_chs_left=216, 
in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + self.cell_2 = Cell( + in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + self.cell_3 = Cell( + in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) + + self.cell_4 = Cell( + in_chs_left=1080, out_chs_left=432, in_chs_right=1080, out_chs_right=432, pad_type=pad_type, + is_reduction=True) + self.cell_5 = Cell( + in_chs_left=1080, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_6 = Cell( + in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) + self.cell_7 = Cell( + in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) + + self.cell_8 = Cell( + in_chs_left=2160, out_chs_left=864, in_chs_right=2160, out_chs_right=864, pad_type=pad_type, + is_reduction=True) + self.cell_9 = Cell( + in_chs_left=2160, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type, + match_prev_layer_dims=True) + self.cell_10 = Cell( + in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) + self.cell_11 = Cell( + in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) + self.act = nn.ReLU() + self.feature_info = [ + dict(num_chs=96, reduction=2, module='conv_0'), + dict(num_chs=270, reduction=4, module='cell_stem_1.conv_1x1.act'), + dict(num_chs=1080, reduction=8, module='cell_4.conv_1x1.act'), + dict(num_chs=2160, reduction=16, module='cell_8.conv_1x1.act'), + dict(num_chs=4320, reduction=32, module='act'), + ] + + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x_conv_0 = self.conv_0(x) + x_stem_0 = self.cell_stem_0(x_conv_0) + x_stem_1 = self.cell_stem_1(x_conv_0, x_stem_0) + x_cell_0 = self.cell_0(x_stem_0, x_stem_1) + x_cell_1 = self.cell_1(x_stem_1, x_cell_0) + x_cell_2 = self.cell_2(x_cell_0, x_cell_1) + x_cell_3 = self.cell_3(x_cell_1, x_cell_2) + x_cell_4 = self.cell_4(x_cell_2, x_cell_3) + x_cell_5 = self.cell_5(x_cell_3, x_cell_4) + x_cell_6 = self.cell_6(x_cell_4, x_cell_5) + x_cell_7 = self.cell_7(x_cell_5, x_cell_6) + x_cell_8 = self.cell_8(x_cell_6, x_cell_7) + x_cell_9 = self.cell_9(x_cell_7, x_cell_8) + x_cell_10 = self.cell_10(x_cell_8, x_cell_9) + x_cell_11 = self.cell_11(x_cell_9, x_cell_10) + x = self.act(x_cell_11) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + +def _create_pnasnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + PNASNet5Large, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model + **kwargs) + + +@register_model +def pnasnet5large(pretrained=False, **kwargs): + r"""PNASNet-5 model architecture from the + `"Progressive Neural Architecture Search" + `_ paper. 
+ """ + model_kwargs = dict(pad_type='same', **kwargs) + return _create_pnasnet('pnasnet5large', pretrained, **model_kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/ecaresnet101d_pruned.txt b/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/ecaresnet101d_pruned.txt new file mode 100644 index 0000000000..2589b2f9dd --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/ecaresnet101d_pruned.txt @@ -0,0 +1 @@ +conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[45, 64, 1, 1]***layer1.0.bn1.weight:[45]***layer1.0.conv2.weight:[25, 45, 3, 3]***layer1.0.bn2.weight:[25]***layer1.0.conv3.weight:[26, 25, 1, 1]***layer1.0.bn3.weight:[26]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[26, 64, 1, 1]***layer1.0.downsample.2.weight:[26]***layer1.1.conv1.weight:[53, 26, 1, 1]***layer1.1.bn1.weight:[53]***layer1.1.conv2.weight:[20, 53, 3, 3]***layer1.1.bn2.weight:[20]***layer1.1.conv3.weight:[26, 20, 1, 1]***layer1.1.bn3.weight:[26]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[60, 26, 1, 1]***layer1.2.bn1.weight:[60]***layer1.2.conv2.weight:[27, 60, 3, 3]***layer1.2.bn2.weight:[27]***layer1.2.conv3.weight:[26, 27, 1, 1]***layer1.2.bn3.weight:[26]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[81, 26, 1, 1]***layer2.0.bn1.weight:[81]***layer2.0.conv2.weight:[24, 81, 3, 3]***layer2.0.bn2.weight:[24]***layer2.0.conv3.weight:[142, 24, 1, 1]***layer2.0.bn3.weight:[142]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[142, 26, 1, 1]***layer2.0.downsample.2.weight:[142]***layer2.1.conv1.weight:[93, 142, 1, 1]***layer2.1.bn1.weight:[93]***layer2.1.conv2.weight:[49, 93, 3, 3]***layer2.1.bn2.weight:[49]***layer2.1.conv3.weight:[142, 49, 1, 1]***layer2.1.bn3.weight:[142]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[102, 142, 1, 1]***layer2.2.bn1.weight:[102]***layer2.2.conv2.weight:[54, 102, 3, 3]***layer2.2.bn2.weight:[54]***layer2.2.conv3.weight:[142, 54, 1, 1]***layer2.2.bn3.weight:[142]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[122, 142, 1, 1]***layer2.3.bn1.weight:[122]***layer2.3.conv2.weight:[78, 122, 3, 3]***layer2.3.bn2.weight:[78]***layer2.3.conv3.weight:[142, 78, 1, 1]***layer2.3.bn3.weight:[142]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[101, 142, 1, 1]***layer3.0.bn1.weight:[101]***layer3.0.conv2.weight:[25, 101, 3, 3]***layer3.0.bn2.weight:[25]***layer3.0.conv3.weight:[278, 25, 1, 1]***layer3.0.bn3.weight:[278]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[278, 142, 1, 1]***layer3.0.downsample.2.weight:[278]***layer3.1.conv1.weight:[239, 278, 1, 1]***layer3.1.bn1.weight:[239]***layer3.1.conv2.weight:[160, 239, 3, 3]***layer3.1.bn2.weight:[160]***layer3.1.conv3.weight:[278, 160, 1, 1]***layer3.1.bn3.weight:[278]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[234, 278, 1, 1]***layer3.2.bn1.weight:[234]***layer3.2.conv2.weight:[156, 234, 3, 3]***layer3.2.bn2.weight:[156]***layer3.2.conv3.weight:[278, 156, 1, 1]***layer3.2.bn3.weight:[278]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[250, 278, 1, 1]***layer3.3.bn1.weight:[250]***layer3.3.conv2.weight:[176, 250, 3, 3]***layer3.3.bn2.weight:[176]***layer3.3.conv3.weight:[278, 176, 1, 1]***layer3.3.bn3.weight:[278]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[253, 278, 
1, 1]***layer3.4.bn1.weight:[253]***layer3.4.conv2.weight:[191, 253, 3, 3]***layer3.4.bn2.weight:[191]***layer3.4.conv3.weight:[278, 191, 1, 1]***layer3.4.bn3.weight:[278]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[251, 278, 1, 1]***layer3.5.bn1.weight:[251]***layer3.5.conv2.weight:[175, 251, 3, 3]***layer3.5.bn2.weight:[175]***layer3.5.conv3.weight:[278, 175, 1, 1]***layer3.5.bn3.weight:[278]***layer3.5.se.conv.weight:[1, 1, 5]***layer3.6.conv1.weight:[230, 278, 1, 1]***layer3.6.bn1.weight:[230]***layer3.6.conv2.weight:[128, 230, 3, 3]***layer3.6.bn2.weight:[128]***layer3.6.conv3.weight:[278, 128, 1, 1]***layer3.6.bn3.weight:[278]***layer3.6.se.conv.weight:[1, 1, 5]***layer3.7.conv1.weight:[244, 278, 1, 1]***layer3.7.bn1.weight:[244]***layer3.7.conv2.weight:[154, 244, 3, 3]***layer3.7.bn2.weight:[154]***layer3.7.conv3.weight:[278, 154, 1, 1]***layer3.7.bn3.weight:[278]***layer3.7.se.conv.weight:[1, 1, 5]***layer3.8.conv1.weight:[244, 278, 1, 1]***layer3.8.bn1.weight:[244]***layer3.8.conv2.weight:[159, 244, 3, 3]***layer3.8.bn2.weight:[159]***layer3.8.conv3.weight:[278, 159, 1, 1]***layer3.8.bn3.weight:[278]***layer3.8.se.conv.weight:[1, 1, 5]***layer3.9.conv1.weight:[238, 278, 1, 1]***layer3.9.bn1.weight:[238]***layer3.9.conv2.weight:[97, 238, 3, 3]***layer3.9.bn2.weight:[97]***layer3.9.conv3.weight:[278, 97, 1, 1]***layer3.9.bn3.weight:[278]***layer3.9.se.conv.weight:[1, 1, 5]***layer3.10.conv1.weight:[244, 278, 1, 1]***layer3.10.bn1.weight:[244]***layer3.10.conv2.weight:[149, 244, 3, 3]***layer3.10.bn2.weight:[149]***layer3.10.conv3.weight:[278, 149, 1, 1]***layer3.10.bn3.weight:[278]***layer3.10.se.conv.weight:[1, 1, 5]***layer3.11.conv1.weight:[253, 278, 1, 1]***layer3.11.bn1.weight:[253]***layer3.11.conv2.weight:[181, 253, 3, 3]***layer3.11.bn2.weight:[181]***layer3.11.conv3.weight:[278, 181, 1, 1]***layer3.11.bn3.weight:[278]***layer3.11.se.conv.weight:[1, 1, 5]***layer3.12.conv1.weight:[245, 278, 1, 1]***layer3.12.bn1.weight:[245]***layer3.12.conv2.weight:[119, 245, 3, 3]***layer3.12.bn2.weight:[119]***layer3.12.conv3.weight:[278, 119, 1, 1]***layer3.12.bn3.weight:[278]***layer3.12.se.conv.weight:[1, 1, 5]***layer3.13.conv1.weight:[255, 278, 1, 1]***layer3.13.bn1.weight:[255]***layer3.13.conv2.weight:[216, 255, 3, 3]***layer3.13.bn2.weight:[216]***layer3.13.conv3.weight:[278, 216, 1, 1]***layer3.13.bn3.weight:[278]***layer3.13.se.conv.weight:[1, 1, 5]***layer3.14.conv1.weight:[256, 278, 1, 1]***layer3.14.bn1.weight:[256]***layer3.14.conv2.weight:[201, 256, 3, 3]***layer3.14.bn2.weight:[201]***layer3.14.conv3.weight:[278, 201, 1, 1]***layer3.14.bn3.weight:[278]***layer3.14.se.conv.weight:[1, 1, 5]***layer3.15.conv1.weight:[253, 278, 1, 1]***layer3.15.bn1.weight:[253]***layer3.15.conv2.weight:[149, 253, 3, 3]***layer3.15.bn2.weight:[149]***layer3.15.conv3.weight:[278, 149, 1, 1]***layer3.15.bn3.weight:[278]***layer3.15.se.conv.weight:[1, 1, 5]***layer3.16.conv1.weight:[254, 278, 1, 1]***layer3.16.bn1.weight:[254]***layer3.16.conv2.weight:[141, 254, 3, 3]***layer3.16.bn2.weight:[141]***layer3.16.conv3.weight:[278, 141, 1, 1]***layer3.16.bn3.weight:[278]***layer3.16.se.conv.weight:[1, 1, 5]***layer3.17.conv1.weight:[256, 278, 1, 1]***layer3.17.bn1.weight:[256]***layer3.17.conv2.weight:[190, 256, 3, 3]***layer3.17.bn2.weight:[190]***layer3.17.conv3.weight:[278, 190, 1, 1]***layer3.17.bn3.weight:[278]***layer3.17.se.conv.weight:[1, 1, 5]***layer3.18.conv1.weight:[256, 278, 1, 1]***layer3.18.bn1.weight:[256]***layer3.18.conv2.weight:[217, 256, 3, 
3]***layer3.18.bn2.weight:[217]***layer3.18.conv3.weight:[278, 217, 1, 1]***layer3.18.bn3.weight:[278]***layer3.18.se.conv.weight:[1, 1, 5]***layer3.19.conv1.weight:[255, 278, 1, 1]***layer3.19.bn1.weight:[255]***layer3.19.conv2.weight:[156, 255, 3, 3]***layer3.19.bn2.weight:[156]***layer3.19.conv3.weight:[278, 156, 1, 1]***layer3.19.bn3.weight:[278]***layer3.19.se.conv.weight:[1, 1, 5]***layer3.20.conv1.weight:[256, 278, 1, 1]***layer3.20.bn1.weight:[256]***layer3.20.conv2.weight:[155, 256, 3, 3]***layer3.20.bn2.weight:[155]***layer3.20.conv3.weight:[278, 155, 1, 1]***layer3.20.bn3.weight:[278]***layer3.20.se.conv.weight:[1, 1, 5]***layer3.21.conv1.weight:[256, 278, 1, 1]***layer3.21.bn1.weight:[256]***layer3.21.conv2.weight:[232, 256, 3, 3]***layer3.21.bn2.weight:[232]***layer3.21.conv3.weight:[278, 232, 1, 1]***layer3.21.bn3.weight:[278]***layer3.21.se.conv.weight:[1, 1, 5]***layer3.22.conv1.weight:[256, 278, 1, 1]***layer3.22.bn1.weight:[256]***layer3.22.conv2.weight:[214, 256, 3, 3]***layer3.22.bn2.weight:[214]***layer3.22.conv3.weight:[278, 214, 1, 1]***layer3.22.bn3.weight:[278]***layer3.22.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[499, 278, 1, 1]***layer4.0.bn1.weight:[499]***layer4.0.conv2.weight:[289, 499, 3, 3]***layer4.0.bn2.weight:[289]***layer4.0.conv3.weight:[2042, 289, 1, 1]***layer4.0.bn3.weight:[2042]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2042, 278, 1, 1]***layer4.0.downsample.2.weight:[2042]***layer4.1.conv1.weight:[512, 2042, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[512, 512, 3, 3]***layer4.1.bn2.weight:[512]***layer4.1.conv3.weight:[2042, 512, 1, 1]***layer4.1.bn3.weight:[2042]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2042, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[502, 512, 3, 3]***layer4.2.bn2.weight:[502]***layer4.2.conv3.weight:[2042, 502, 1, 1]***layer4.2.bn3.weight:[2042]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2042]***layer1_2_conv3_M.weight:[256, 26]***layer2_3_conv3_M.weight:[512, 142]***layer3_22_conv3_M.weight:[1024, 278]***layer4_2_conv3_M.weight:[2048, 2042] \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/ecaresnet50d_pruned.txt b/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/ecaresnet50d_pruned.txt new file mode 100644 index 0000000000..9a8b2bf50e --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/ecaresnet50d_pruned.txt @@ -0,0 +1 @@ +conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[47, 64, 1, 1]***layer1.0.bn1.weight:[47]***layer1.0.conv2.weight:[18, 47, 3, 3]***layer1.0.bn2.weight:[18]***layer1.0.conv3.weight:[19, 18, 1, 1]***layer1.0.bn3.weight:[19]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[19, 64, 1, 1]***layer1.0.downsample.2.weight:[19]***layer1.1.conv1.weight:[52, 19, 1, 1]***layer1.1.bn1.weight:[52]***layer1.1.conv2.weight:[22, 52, 3, 3]***layer1.1.bn2.weight:[22]***layer1.1.conv3.weight:[19, 22, 1, 1]***layer1.1.bn3.weight:[19]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[64, 19, 1, 1]***layer1.2.bn1.weight:[64]***layer1.2.conv2.weight:[35, 64, 3, 3]***layer1.2.bn2.weight:[35]***layer1.2.conv3.weight:[19, 35, 1, 1]***layer1.2.bn3.weight:[19]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[85, 19, 1, 
1]***layer2.0.bn1.weight:[85]***layer2.0.conv2.weight:[37, 85, 3, 3]***layer2.0.bn2.weight:[37]***layer2.0.conv3.weight:[171, 37, 1, 1]***layer2.0.bn3.weight:[171]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[171, 19, 1, 1]***layer2.0.downsample.2.weight:[171]***layer2.1.conv1.weight:[107, 171, 1, 1]***layer2.1.bn1.weight:[107]***layer2.1.conv2.weight:[80, 107, 3, 3]***layer2.1.bn2.weight:[80]***layer2.1.conv3.weight:[171, 80, 1, 1]***layer2.1.bn3.weight:[171]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[120, 171, 1, 1]***layer2.2.bn1.weight:[120]***layer2.2.conv2.weight:[85, 120, 3, 3]***layer2.2.bn2.weight:[85]***layer2.2.conv3.weight:[171, 85, 1, 1]***layer2.2.bn3.weight:[171]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[125, 171, 1, 1]***layer2.3.bn1.weight:[125]***layer2.3.conv2.weight:[87, 125, 3, 3]***layer2.3.bn2.weight:[87]***layer2.3.conv3.weight:[171, 87, 1, 1]***layer2.3.bn3.weight:[171]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[198, 171, 1, 1]***layer3.0.bn1.weight:[198]***layer3.0.conv2.weight:[126, 198, 3, 3]***layer3.0.bn2.weight:[126]***layer3.0.conv3.weight:[818, 126, 1, 1]***layer3.0.bn3.weight:[818]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[818, 171, 1, 1]***layer3.0.downsample.2.weight:[818]***layer3.1.conv1.weight:[255, 818, 1, 1]***layer3.1.bn1.weight:[255]***layer3.1.conv2.weight:[232, 255, 3, 3]***layer3.1.bn2.weight:[232]***layer3.1.conv3.weight:[818, 232, 1, 1]***layer3.1.bn3.weight:[818]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[256, 818, 1, 1]***layer3.2.bn1.weight:[256]***layer3.2.conv2.weight:[233, 256, 3, 3]***layer3.2.bn2.weight:[233]***layer3.2.conv3.weight:[818, 233, 1, 1]***layer3.2.bn3.weight:[818]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[253, 818, 1, 1]***layer3.3.bn1.weight:[253]***layer3.3.conv2.weight:[235, 253, 3, 3]***layer3.3.bn2.weight:[235]***layer3.3.conv3.weight:[818, 235, 1, 1]***layer3.3.bn3.weight:[818]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[256, 818, 1, 1]***layer3.4.bn1.weight:[256]***layer3.4.conv2.weight:[225, 256, 3, 3]***layer3.4.bn2.weight:[225]***layer3.4.conv3.weight:[818, 225, 1, 1]***layer3.4.bn3.weight:[818]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[256, 818, 1, 1]***layer3.5.bn1.weight:[256]***layer3.5.conv2.weight:[239, 256, 3, 3]***layer3.5.bn2.weight:[239]***layer3.5.conv3.weight:[818, 239, 1, 1]***layer3.5.bn3.weight:[818]***layer3.5.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[492, 818, 1, 1]***layer4.0.bn1.weight:[492]***layer4.0.conv2.weight:[237, 492, 3, 3]***layer4.0.bn2.weight:[237]***layer4.0.conv3.weight:[2022, 237, 1, 1]***layer4.0.bn3.weight:[2022]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2022, 818, 1, 1]***layer4.0.downsample.2.weight:[2022]***layer4.1.conv1.weight:[512, 2022, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[500, 512, 3, 3]***layer4.1.bn2.weight:[500]***layer4.1.conv3.weight:[2022, 500, 1, 1]***layer4.1.bn3.weight:[2022]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2022, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[490, 512, 3, 3]***layer4.2.bn2.weight:[490]***layer4.2.conv3.weight:[2022, 490, 1, 1]***layer4.2.bn3.weight:[2022]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2022]***layer1_2_conv3_M.weight:[256, 19]***layer2_3_conv3_M.weight:[512, 171]***layer3_5_conv3_M.weight:[1024, 818]***layer4_2_conv3_M.weight:[2048, 2022] \ No 
newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/efficientnet_b1_pruned.txt b/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/efficientnet_b1_pruned.txt new file mode 100644 index 0000000000..0972b52761 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/efficientnet_b1_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[48, 16, 1, 1]***blocks.1.0.bn1.weight:[48]***blocks.1.0.bn1.bias:[48]***blocks.1.0.bn1.running_mean:[48]***blocks.1.0.bn1.running_var:[48]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[48, 1, 3, 3]***blocks.1.0.bn2.weight:[48]***blocks.1.0.bn2.bias:[48]***blocks.1.0.bn2.running_mean:[48]***blocks.1.0.bn2.running_var:[48]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 48, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[48, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[48]***blocks.1.0.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[62, 12, 1, 1]***blocks.1.1.bn1.weight:[62]***blocks.1.1.bn1.bias:[62]***blocks.1.1.bn1.running_mean:[62]***blocks.1.1.bn1.running_var:[62]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[62, 1, 3, 3]***blocks.1.1.bn2.weight:[62]***blocks.1.1.bn2.bias:[62]***blocks.1.1.bn2.running_mean:[62]***blocks.1.1.bn2.running_var:[62]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 62, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[62, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[62]***blocks.1.1.conv_pwl.weight:[12, 62, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 
3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[48, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[70, 12, 1, 1]***blocks.2.0.bn1.weight:[70]***blocks.2.0.bn1.bias:[70]***blocks.2.0.bn1.running_mean:[70]***blocks.2.0.bn1.running_var:[70]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[70, 1, 5, 5]***blocks.2.0.bn2.weight:[70]***blocks.2.0.bn2.bias:[70]***blocks.2.0.bn2.running_mean:[70]***blocks.2.0.bn2.running_var:[70]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 70, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[70, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[70]***blocks.2.0.conv_pwl.weight:[35, 70, 1, 1]***blocks.2.0.bn3.weight:[35]***blocks.2.0.bn3.bias:[35]***blocks.2.0.bn3.running_mean:[35]***blocks.2.0.bn3.running_var:[35]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[61, 35, 1, 1]***blocks.2.1.bn1.weight:[61]***blocks.2.1.bn1.bias:[61]***blocks.2.1.bn1.running_mean:[61]***blocks.2.1.bn1.running_var:[61]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[61, 1, 5, 5]***blocks.2.1.bn2.weight:[61]***blocks.2.1.bn2.bias:[61]***blocks.2.1.bn2.running_mean:[61]***blocks.2.1.bn2.running_var:[61]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[10, 61, 1, 1]***blocks.2.1.se.conv_reduce.bias:[10]***blocks.2.1.se.conv_expand.weight:[61, 10, 1, 1]***blocks.2.1.se.conv_expand.bias:[61]***blocks.2.1.conv_pwl.weight:[35, 61, 1, 1]***blocks.2.1.bn3.weight:[35]***blocks.2.1.bn3.bias:[35]***blocks.2.1.bn3.running_mean:[35]***blocks.2.1.bn3.running_var:[35]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[51, 35, 1, 1]***blocks.2.2.bn1.weight:[51]***blocks.2.2.bn1.bias:[51]***blocks.2.2.bn1.running_mean:[51]***blocks.2.2.bn1.running_var:[51]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[51, 1, 5, 5]***blocks.2.2.bn2.weight:[51]***blocks.2.2.bn2.bias:[51]***blocks.2.2.bn2.running_mean:[51]***blocks.2.2.bn2.running_var:[51]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[10, 51, 1, 1]***blocks.2.2.se.conv_reduce.bias:[10]***blocks.2.2.se.conv_expand.weight:[51, 10, 1, 1]***blocks.2.2.se.conv_expand.bias:[51]***blocks.2.2.conv_pwl.weight:[35, 51, 1, 1]***blocks.2.2.bn3.weight:[35]***blocks.2.2.bn3.bias:[35]***blocks.2.2.bn3.running_mean:[35]***blocks.2.2.bn3.running_var:[35]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[175, 35, 1, 1]***blocks.3.0.bn1.weight:[175]***blocks.3.0.bn1.bias:[175]***blocks.3.0.bn1.running_mean:[175]***blocks.3.0.bn1.running_var:[175]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[175, 1, 3, 3]***blocks.3.0.bn2.weight:[175]***blocks.3.0.bn2.bias:[175]***blocks.3.0.bn2.running_mean:[175]***blocks.3.0.bn2.running_var:[175]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[10, 175, 1, 1]***blocks.3.0.se.conv_reduce.bias:[10]***blocks.3.0.se.conv_expand.weight:[175, 10, 1, 
1]***blocks.3.0.se.conv_expand.bias:[175]***blocks.3.0.conv_pwl.weight:[74, 175, 1, 1]***blocks.3.0.bn3.weight:[74]***blocks.3.0.bn3.bias:[74]***blocks.3.0.bn3.running_mean:[74]***blocks.3.0.bn3.running_var:[74]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[188, 74, 1, 1]***blocks.3.1.bn1.weight:[188]***blocks.3.1.bn1.bias:[188]***blocks.3.1.bn1.running_mean:[188]***blocks.3.1.bn1.running_var:[188]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[188, 1, 3, 3]***blocks.3.1.bn2.weight:[188]***blocks.3.1.bn2.bias:[188]***blocks.3.1.bn2.running_mean:[188]***blocks.3.1.bn2.running_var:[188]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[20, 188, 1, 1]***blocks.3.1.se.conv_reduce.bias:[20]***blocks.3.1.se.conv_expand.weight:[188, 20, 1, 1]***blocks.3.1.se.conv_expand.bias:[188]***blocks.3.1.conv_pwl.weight:[74, 188, 1, 1]***blocks.3.1.bn3.weight:[74]***blocks.3.1.bn3.bias:[74]***blocks.3.1.bn3.running_mean:[74]***blocks.3.1.bn3.running_var:[74]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[137, 74, 1, 1]***blocks.3.2.bn1.weight:[137]***blocks.3.2.bn1.bias:[137]***blocks.3.2.bn1.running_mean:[137]***blocks.3.2.bn1.running_var:[137]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[137, 1, 3, 3]***blocks.3.2.bn2.weight:[137]***blocks.3.2.bn2.bias:[137]***blocks.3.2.bn2.running_mean:[137]***blocks.3.2.bn2.running_var:[137]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[20, 137, 1, 1]***blocks.3.2.se.conv_reduce.bias:[20]***blocks.3.2.se.conv_expand.weight:[137, 20, 1, 1]***blocks.3.2.se.conv_expand.bias:[137]***blocks.3.2.conv_pwl.weight:[74, 137, 1, 1]***blocks.3.2.bn3.weight:[74]***blocks.3.2.bn3.bias:[74]***blocks.3.2.bn3.running_mean:[74]***blocks.3.2.bn3.running_var:[74]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[164, 74, 1, 1]***blocks.3.3.bn1.weight:[164]***blocks.3.3.bn1.bias:[164]***blocks.3.3.bn1.running_mean:[164]***blocks.3.3.bn1.running_var:[164]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[164, 1, 3, 3]***blocks.3.3.bn2.weight:[164]***blocks.3.3.bn2.bias:[164]***blocks.3.3.bn2.running_mean:[164]***blocks.3.3.bn2.running_var:[164]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[20, 164, 1, 1]***blocks.3.3.se.conv_reduce.bias:[20]***blocks.3.3.se.conv_expand.weight:[164, 20, 1, 1]***blocks.3.3.se.conv_expand.bias:[164]***blocks.3.3.conv_pwl.weight:[74, 164, 1, 1]***blocks.3.3.bn3.weight:[74]***blocks.3.3.bn3.bias:[74]***blocks.3.3.bn3.running_mean:[74]***blocks.3.3.bn3.running_var:[74]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[399, 74, 1, 1]***blocks.4.0.bn1.weight:[399]***blocks.4.0.bn1.bias:[399]***blocks.4.0.bn1.running_mean:[399]***blocks.4.0.bn1.running_var:[399]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[399, 1, 5, 5]***blocks.4.0.bn2.weight:[399]***blocks.4.0.bn2.bias:[399]***blocks.4.0.bn2.running_mean:[399]***blocks.4.0.bn2.running_var:[399]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[20, 399, 1, 1]***blocks.4.0.se.conv_reduce.bias:[20]***blocks.4.0.se.conv_expand.weight:[399, 20, 1, 1]***blocks.4.0.se.conv_expand.bias:[399]***blocks.4.0.conv_pwl.weight:[67, 399, 1, 1]***blocks.4.0.bn3.weight:[67]***blocks.4.0.bn3.bias:[67]***blocks.4.0.bn3.running_mean:[67]***blocks.4.0.bn3.running_var:[67]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[201, 67, 1, 
1]***blocks.4.1.bn1.weight:[201]***blocks.4.1.bn1.bias:[201]***blocks.4.1.bn1.running_mean:[201]***blocks.4.1.bn1.running_var:[201]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[201, 1, 5, 5]***blocks.4.1.bn2.weight:[201]***blocks.4.1.bn2.bias:[201]***blocks.4.1.bn2.running_mean:[201]***blocks.4.1.bn2.running_var:[201]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[28, 201, 1, 1]***blocks.4.1.se.conv_reduce.bias:[28]***blocks.4.1.se.conv_expand.weight:[201, 28, 1, 1]***blocks.4.1.se.conv_expand.bias:[201]***blocks.4.1.conv_pwl.weight:[67, 201, 1, 1]***blocks.4.1.bn3.weight:[67]***blocks.4.1.bn3.bias:[67]***blocks.4.1.bn3.running_mean:[67]***blocks.4.1.bn3.running_var:[67]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[160, 67, 1, 1]***blocks.4.2.bn1.weight:[160]***blocks.4.2.bn1.bias:[160]***blocks.4.2.bn1.running_mean:[160]***blocks.4.2.bn1.running_var:[160]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[160, 1, 5, 5]***blocks.4.2.bn2.weight:[160]***blocks.4.2.bn2.bias:[160]***blocks.4.2.bn2.running_mean:[160]***blocks.4.2.bn2.running_var:[160]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[28, 160, 1, 1]***blocks.4.2.se.conv_reduce.bias:[28]***blocks.4.2.se.conv_expand.weight:[160, 28, 1, 1]***blocks.4.2.se.conv_expand.bias:[160]***blocks.4.2.conv_pwl.weight:[67, 160, 1, 1]***blocks.4.2.bn3.weight:[67]***blocks.4.2.bn3.bias:[67]***blocks.4.2.bn3.running_mean:[67]***blocks.4.2.bn3.running_var:[67]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[213, 67, 1, 1]***blocks.4.3.bn1.weight:[213]***blocks.4.3.bn1.bias:[213]***blocks.4.3.bn1.running_mean:[213]***blocks.4.3.bn1.running_var:[213]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[213, 1, 5, 5]***blocks.4.3.bn2.weight:[213]***blocks.4.3.bn2.bias:[213]***blocks.4.3.bn2.running_mean:[213]***blocks.4.3.bn2.running_var:[213]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[28, 213, 1, 1]***blocks.4.3.se.conv_reduce.bias:[28]***blocks.4.3.se.conv_expand.weight:[213, 28, 1, 1]***blocks.4.3.se.conv_expand.bias:[213]***blocks.4.3.conv_pwl.weight:[67, 213, 1, 1]***blocks.4.3.bn3.weight:[67]***blocks.4.3.bn3.bias:[67]***blocks.4.3.bn3.running_mean:[67]***blocks.4.3.bn3.running_var:[67]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[637, 67, 1, 1]***blocks.5.0.bn1.weight:[637]***blocks.5.0.bn1.bias:[637]***blocks.5.0.bn1.running_mean:[637]***blocks.5.0.bn1.running_var:[637]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[637, 1, 5, 5]***blocks.5.0.bn2.weight:[637]***blocks.5.0.bn2.bias:[637]***blocks.5.0.bn2.running_mean:[637]***blocks.5.0.bn2.running_var:[637]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[27, 637, 1, 1]***blocks.5.0.se.conv_reduce.bias:[27]***blocks.5.0.se.conv_expand.weight:[637, 27, 1, 1]***blocks.5.0.se.conv_expand.bias:[637]***blocks.5.0.conv_pwl.weight:[192, 637, 1, 1]***blocks.5.0.bn3.weight:[192]***blocks.5.0.bn3.bias:[192]***blocks.5.0.bn3.running_mean:[192]***blocks.5.0.bn3.running_var:[192]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[806, 192, 1, 1]***blocks.5.1.bn1.weight:[806]***blocks.5.1.bn1.bias:[806]***blocks.5.1.bn1.running_mean:[806]***blocks.5.1.bn1.running_var:[806]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[806, 1, 5, 
5]***blocks.5.1.bn2.weight:[806]***blocks.5.1.bn2.bias:[806]***blocks.5.1.bn2.running_mean:[806]***blocks.5.1.bn2.running_var:[806]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[48, 806, 1, 1]***blocks.5.1.se.conv_reduce.bias:[48]***blocks.5.1.se.conv_expand.weight:[806, 48, 1, 1]***blocks.5.1.se.conv_expand.bias:[806]***blocks.5.1.conv_pwl.weight:[192, 806, 1, 1]***blocks.5.1.bn3.weight:[192]***blocks.5.1.bn3.bias:[192]***blocks.5.1.bn3.running_mean:[192]***blocks.5.1.bn3.running_var:[192]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[798, 192, 1, 1]***blocks.5.2.bn1.weight:[798]***blocks.5.2.bn1.bias:[798]***blocks.5.2.bn1.running_mean:[798]***blocks.5.2.bn1.running_var:[798]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[798, 1, 5, 5]***blocks.5.2.bn2.weight:[798]***blocks.5.2.bn2.bias:[798]***blocks.5.2.bn2.running_mean:[798]***blocks.5.2.bn2.running_var:[798]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[48, 798, 1, 1]***blocks.5.2.se.conv_reduce.bias:[48]***blocks.5.2.se.conv_expand.weight:[798, 48, 1, 1]***blocks.5.2.se.conv_expand.bias:[798]***blocks.5.2.conv_pwl.weight:[192, 798, 1, 1]***blocks.5.2.bn3.weight:[192]***blocks.5.2.bn3.bias:[192]***blocks.5.2.bn3.running_mean:[192]***blocks.5.2.bn3.running_var:[192]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[891, 192, 1, 1]***blocks.5.3.bn1.weight:[891]***blocks.5.3.bn1.bias:[891]***blocks.5.3.bn1.running_mean:[891]***blocks.5.3.bn1.running_var:[891]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[891, 1, 5, 5]***blocks.5.3.bn2.weight:[891]***blocks.5.3.bn2.bias:[891]***blocks.5.3.bn2.running_mean:[891]***blocks.5.3.bn2.running_var:[891]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[48, 891, 1, 1]***blocks.5.3.se.conv_reduce.bias:[48]***blocks.5.3.se.conv_expand.weight:[891, 48, 1, 1]***blocks.5.3.se.conv_expand.bias:[891]***blocks.5.3.conv_pwl.weight:[192, 891, 1, 1]***blocks.5.3.bn3.weight:[192]***blocks.5.3.bn3.bias:[192]***blocks.5.3.bn3.running_mean:[192]***blocks.5.3.bn3.running_var:[192]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[990, 192, 1, 1]***blocks.5.4.bn1.weight:[990]***blocks.5.4.bn1.bias:[990]***blocks.5.4.bn1.running_mean:[990]***blocks.5.4.bn1.running_var:[990]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[990, 1, 5, 5]***blocks.5.4.bn2.weight:[990]***blocks.5.4.bn2.bias:[990]***blocks.5.4.bn2.running_mean:[990]***blocks.5.4.bn2.running_var:[990]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[48, 990, 1, 1]***blocks.5.4.se.conv_reduce.bias:[48]***blocks.5.4.se.conv_expand.weight:[990, 48, 1, 1]***blocks.5.4.se.conv_expand.bias:[990]***blocks.5.4.conv_pwl.weight:[192, 990, 1, 1]***blocks.5.4.bn3.weight:[192]***blocks.5.4.bn3.bias:[192]***blocks.5.4.bn3.running_mean:[192]***blocks.5.4.bn3.running_var:[192]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1152, 192, 1, 1]***blocks.6.0.bn1.weight:[1152]***blocks.6.0.bn1.bias:[1152]***blocks.6.0.bn1.running_mean:[1152]***blocks.6.0.bn1.running_var:[1152]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1152, 1, 3, 3]***blocks.6.0.bn2.weight:[1152]***blocks.6.0.bn2.bias:[1152]***blocks.6.0.bn2.running_mean:[1152]***blocks.6.0.bn2.running_var:[1152]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[48, 1152, 1, 
1]***blocks.6.0.se.conv_reduce.bias:[48]***blocks.6.0.se.conv_expand.weight:[1152, 48, 1, 1]***blocks.6.0.se.conv_expand.bias:[1152]***blocks.6.0.conv_pwl.weight:[320, 1152, 1, 1]***blocks.6.0.bn3.weight:[320]***blocks.6.0.bn3.bias:[320]***blocks.6.0.bn3.running_mean:[320]***blocks.6.0.bn3.running_var:[320]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[1912, 320, 1, 1]***blocks.6.1.bn1.weight:[1912]***blocks.6.1.bn1.bias:[1912]***blocks.6.1.bn1.running_mean:[1912]***blocks.6.1.bn1.running_var:[1912]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[1912, 1, 3, 3]***blocks.6.1.bn2.weight:[1912]***blocks.6.1.bn2.bias:[1912]***blocks.6.1.bn2.running_mean:[1912]***blocks.6.1.bn2.running_var:[1912]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[80, 1912, 1, 1]***blocks.6.1.se.conv_reduce.bias:[80]***blocks.6.1.se.conv_expand.weight:[1912, 80, 1, 1]***blocks.6.1.se.conv_expand.bias:[1912]***blocks.6.1.conv_pwl.weight:[320, 1912, 1, 1]***blocks.6.1.bn3.weight:[320]***blocks.6.1.bn3.bias:[320]***blocks.6.1.bn3.running_mean:[320]***blocks.6.1.bn3.running_var:[320]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1280, 320, 1, 1]***bn2.weight:[1280]***bn2.bias:[1280]***bn2.running_mean:[1280]***bn2.running_var:[1280]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1280]***classifier.bias:[1000] \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/efficientnet_b2_pruned.txt b/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/efficientnet_b2_pruned.txt new file mode 100644 index 0000000000..6e3fadee3e --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/efficientnet_b2_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[54, 16, 1, 1]***blocks.1.0.bn1.weight:[54]***blocks.1.0.bn1.bias:[54]***blocks.1.0.bn1.running_mean:[54]***blocks.1.0.bn1.running_var:[54]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[54, 1, 3, 
3]***blocks.1.0.bn2.weight:[54]***blocks.1.0.bn2.bias:[54]***blocks.1.0.bn2.running_mean:[54]***blocks.1.0.bn2.running_var:[54]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 54, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[54, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[54]***blocks.1.0.conv_pwl.weight:[17, 54, 1, 1]***blocks.1.0.bn3.weight:[17]***blocks.1.0.bn3.bias:[17]***blocks.1.0.bn3.running_mean:[17]***blocks.1.0.bn3.running_var:[17]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[69, 17, 1, 1]***blocks.1.1.bn1.weight:[69]***blocks.1.1.bn1.bias:[69]***blocks.1.1.bn1.running_mean:[69]***blocks.1.1.bn1.running_var:[69]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[69, 1, 3, 3]***blocks.1.1.bn2.weight:[69]***blocks.1.1.bn2.bias:[69]***blocks.1.1.bn2.running_mean:[69]***blocks.1.1.bn2.running_var:[69]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 69, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[69, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[69]***blocks.1.1.conv_pwl.weight:[17, 69, 1, 1]***blocks.1.1.bn3.weight:[17]***blocks.1.1.bn3.bias:[17]***blocks.1.1.bn3.running_mean:[17]***blocks.1.1.bn3.running_var:[17]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[61, 17, 1, 1]***blocks.1.2.bn1.weight:[61]***blocks.1.2.bn1.bias:[61]***blocks.1.2.bn1.running_mean:[61]***blocks.1.2.bn1.running_var:[61]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[61, 1, 3, 3]***blocks.1.2.bn2.weight:[61]***blocks.1.2.bn2.bias:[61]***blocks.1.2.bn2.running_mean:[61]***blocks.1.2.bn2.running_var:[61]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 61, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[61, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[61]***blocks.1.2.conv_pwl.weight:[17, 61, 1, 1]***blocks.1.2.bn3.weight:[17]***blocks.1.2.bn3.bias:[17]***blocks.1.2.bn3.running_mean:[17]***blocks.1.2.bn3.running_var:[17]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[86, 17, 1, 1]***blocks.2.0.bn1.weight:[86]***blocks.2.0.bn1.bias:[86]***blocks.2.0.bn1.running_mean:[86]***blocks.2.0.bn1.running_var:[86]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[86, 1, 5, 5]***blocks.2.0.bn2.weight:[86]***blocks.2.0.bn2.bias:[86]***blocks.2.0.bn2.running_mean:[86]***blocks.2.0.bn2.running_var:[86]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 86, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[86, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[86]***blocks.2.0.conv_pwl.weight:[42, 86, 1, 1]***blocks.2.0.bn3.weight:[42]***blocks.2.0.bn3.bias:[42]***blocks.2.0.bn3.running_mean:[42]***blocks.2.0.bn3.running_var:[42]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[72, 42, 1, 1]***blocks.2.1.bn1.weight:[72]***blocks.2.1.bn1.bias:[72]***blocks.2.1.bn1.running_mean:[72]***blocks.2.1.bn1.running_var:[72]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[72, 1, 5, 5]***blocks.2.1.bn2.weight:[72]***blocks.2.1.bn2.bias:[72]***blocks.2.1.bn2.running_mean:[72]***blocks.2.1.bn2.running_var:[72]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 72, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[72, 12, 1, 
1]***blocks.2.1.se.conv_expand.bias:[72]***blocks.2.1.conv_pwl.weight:[42, 72, 1, 1]***blocks.2.1.bn3.weight:[42]***blocks.2.1.bn3.bias:[42]***blocks.2.1.bn3.running_mean:[42]***blocks.2.1.bn3.running_var:[42]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[98, 42, 1, 1]***blocks.2.2.bn1.weight:[98]***blocks.2.2.bn1.bias:[98]***blocks.2.2.bn1.running_mean:[98]***blocks.2.2.bn1.running_var:[98]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[98, 1, 5, 5]***blocks.2.2.bn2.weight:[98]***blocks.2.2.bn2.bias:[98]***blocks.2.2.bn2.running_mean:[98]***blocks.2.2.bn2.running_var:[98]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 98, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[98, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[98]***blocks.2.2.conv_pwl.weight:[42, 98, 1, 1]***blocks.2.2.bn3.weight:[42]***blocks.2.2.bn3.bias:[42]***blocks.2.2.bn3.running_mean:[42]***blocks.2.2.bn3.running_var:[42]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[245, 42, 1, 1]***blocks.3.0.bn1.weight:[245]***blocks.3.0.bn1.bias:[245]***blocks.3.0.bn1.running_mean:[245]***blocks.3.0.bn1.running_var:[245]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[245, 1, 3, 3]***blocks.3.0.bn2.weight:[245]***blocks.3.0.bn2.bias:[245]***blocks.3.0.bn2.running_mean:[245]***blocks.3.0.bn2.running_var:[245]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 245, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[245, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[245]***blocks.3.0.conv_pwl.weight:[85, 245, 1, 1]***blocks.3.0.bn3.weight:[85]***blocks.3.0.bn3.bias:[85]***blocks.3.0.bn3.running_mean:[85]***blocks.3.0.bn3.running_var:[85]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[274, 85, 1, 1]***blocks.3.1.bn1.weight:[274]***blocks.3.1.bn1.bias:[274]***blocks.3.1.bn1.running_mean:[274]***blocks.3.1.bn1.running_var:[274]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[274, 1, 3, 3]***blocks.3.1.bn2.weight:[274]***blocks.3.1.bn2.bias:[274]***blocks.3.1.bn2.running_mean:[274]***blocks.3.1.bn2.running_var:[274]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[22, 274, 1, 1]***blocks.3.1.se.conv_reduce.bias:[22]***blocks.3.1.se.conv_expand.weight:[274, 22, 1, 1]***blocks.3.1.se.conv_expand.bias:[274]***blocks.3.1.conv_pwl.weight:[85, 274, 1, 1]***blocks.3.1.bn3.weight:[85]***blocks.3.1.bn3.bias:[85]***blocks.3.1.bn3.running_mean:[85]***blocks.3.1.bn3.running_var:[85]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[254, 85, 1, 1]***blocks.3.2.bn1.weight:[254]***blocks.3.2.bn1.bias:[254]***blocks.3.2.bn1.running_mean:[254]***blocks.3.2.bn1.running_var:[254]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[254, 1, 3, 3]***blocks.3.2.bn2.weight:[254]***blocks.3.2.bn2.bias:[254]***blocks.3.2.bn2.running_mean:[254]***blocks.3.2.bn2.running_var:[254]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[22, 254, 1, 1]***blocks.3.2.se.conv_reduce.bias:[22]***blocks.3.2.se.conv_expand.weight:[254, 22, 1, 1]***blocks.3.2.se.conv_expand.bias:[254]***blocks.3.2.conv_pwl.weight:[85, 254, 1, 1]***blocks.3.2.bn3.weight:[85]***blocks.3.2.bn3.bias:[85]***blocks.3.2.bn3.running_mean:[85]***blocks.3.2.bn3.running_var:[85]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[292, 85, 1, 
1]***blocks.3.3.bn1.weight:[292]***blocks.3.3.bn1.bias:[292]***blocks.3.3.bn1.running_mean:[292]***blocks.3.3.bn1.running_var:[292]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[292, 1, 3, 3]***blocks.3.3.bn2.weight:[292]***blocks.3.3.bn2.bias:[292]***blocks.3.3.bn2.running_mean:[292]***blocks.3.3.bn2.running_var:[292]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[22, 292, 1, 1]***blocks.3.3.se.conv_reduce.bias:[22]***blocks.3.3.se.conv_expand.weight:[292, 22, 1, 1]***blocks.3.3.se.conv_expand.bias:[292]***blocks.3.3.conv_pwl.weight:[85, 292, 1, 1]***blocks.3.3.bn3.weight:[85]***blocks.3.3.bn3.bias:[85]***blocks.3.3.bn3.running_mean:[85]***blocks.3.3.bn3.running_var:[85]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[502, 85, 1, 1]***blocks.4.0.bn1.weight:[502]***blocks.4.0.bn1.bias:[502]***blocks.4.0.bn1.running_mean:[502]***blocks.4.0.bn1.running_var:[502]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[502, 1, 5, 5]***blocks.4.0.bn2.weight:[502]***blocks.4.0.bn2.bias:[502]***blocks.4.0.bn2.running_mean:[502]***blocks.4.0.bn2.running_var:[502]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[22, 502, 1, 1]***blocks.4.0.se.conv_reduce.bias:[22]***blocks.4.0.se.conv_expand.weight:[502, 22, 1, 1]***blocks.4.0.se.conv_expand.bias:[502]***blocks.4.0.conv_pwl.weight:[116, 502, 1, 1]***blocks.4.0.bn3.weight:[116]***blocks.4.0.bn3.bias:[116]***blocks.4.0.bn3.running_mean:[116]***blocks.4.0.bn3.running_var:[116]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[315, 116, 1, 1]***blocks.4.1.bn1.weight:[315]***blocks.4.1.bn1.bias:[315]***blocks.4.1.bn1.running_mean:[315]***blocks.4.1.bn1.running_var:[315]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[315, 1, 5, 5]***blocks.4.1.bn2.weight:[315]***blocks.4.1.bn2.bias:[315]***blocks.4.1.bn2.running_mean:[315]***blocks.4.1.bn2.running_var:[315]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[30, 315, 1, 1]***blocks.4.1.se.conv_reduce.bias:[30]***blocks.4.1.se.conv_expand.weight:[315, 30, 1, 1]***blocks.4.1.se.conv_expand.bias:[315]***blocks.4.1.conv_pwl.weight:[116, 315, 1, 1]***blocks.4.1.bn3.weight:[116]***blocks.4.1.bn3.bias:[116]***blocks.4.1.bn3.running_mean:[116]***blocks.4.1.bn3.running_var:[116]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[354, 116, 1, 1]***blocks.4.2.bn1.weight:[354]***blocks.4.2.bn1.bias:[354]***blocks.4.2.bn1.running_mean:[354]***blocks.4.2.bn1.running_var:[354]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[354, 1, 5, 5]***blocks.4.2.bn2.weight:[354]***blocks.4.2.bn2.bias:[354]***blocks.4.2.bn2.running_mean:[354]***blocks.4.2.bn2.running_var:[354]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[30, 354, 1, 1]***blocks.4.2.se.conv_reduce.bias:[30]***blocks.4.2.se.conv_expand.weight:[354, 30, 1, 1]***blocks.4.2.se.conv_expand.bias:[354]***blocks.4.2.conv_pwl.weight:[116, 354, 1, 1]***blocks.4.2.bn3.weight:[116]***blocks.4.2.bn3.bias:[116]***blocks.4.2.bn3.running_mean:[116]***blocks.4.2.bn3.running_var:[116]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[443, 116, 1, 1]***blocks.4.3.bn1.weight:[443]***blocks.4.3.bn1.bias:[443]***blocks.4.3.bn1.running_mean:[443]***blocks.4.3.bn1.running_var:[443]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[443, 1, 5, 
5]***blocks.4.3.bn2.weight:[443]***blocks.4.3.bn2.bias:[443]***blocks.4.3.bn2.running_mean:[443]***blocks.4.3.bn2.running_var:[443]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[30, 443, 1, 1]***blocks.4.3.se.conv_reduce.bias:[30]***blocks.4.3.se.conv_expand.weight:[443, 30, 1, 1]***blocks.4.3.se.conv_expand.bias:[443]***blocks.4.3.conv_pwl.weight:[116, 443, 1, 1]***blocks.4.3.bn3.weight:[116]***blocks.4.3.bn3.bias:[116]***blocks.4.3.bn3.running_mean:[116]***blocks.4.3.bn3.running_var:[116]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[719, 116, 1, 1]***blocks.5.0.bn1.weight:[719]***blocks.5.0.bn1.bias:[719]***blocks.5.0.bn1.running_mean:[719]***blocks.5.0.bn1.running_var:[719]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[719, 1, 5, 5]***blocks.5.0.bn2.weight:[719]***blocks.5.0.bn2.bias:[719]***blocks.5.0.bn2.running_mean:[719]***blocks.5.0.bn2.running_var:[719]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[30, 719, 1, 1]***blocks.5.0.se.conv_reduce.bias:[30]***blocks.5.0.se.conv_expand.weight:[719, 30, 1, 1]***blocks.5.0.se.conv_expand.bias:[719]***blocks.5.0.conv_pwl.weight:[208, 719, 1, 1]***blocks.5.0.bn3.weight:[208]***blocks.5.0.bn3.bias:[208]***blocks.5.0.bn3.running_mean:[208]***blocks.5.0.bn3.running_var:[208]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1148, 208, 1, 1]***blocks.5.1.bn1.weight:[1148]***blocks.5.1.bn1.bias:[1148]***blocks.5.1.bn1.running_mean:[1148]***blocks.5.1.bn1.running_var:[1148]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1148, 1, 5, 5]***blocks.5.1.bn2.weight:[1148]***blocks.5.1.bn2.bias:[1148]***blocks.5.1.bn2.running_mean:[1148]***blocks.5.1.bn2.running_var:[1148]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[52, 1148, 1, 1]***blocks.5.1.se.conv_reduce.bias:[52]***blocks.5.1.se.conv_expand.weight:[1148, 52, 1, 1]***blocks.5.1.se.conv_expand.bias:[1148]***blocks.5.1.conv_pwl.weight:[208, 1148, 1, 1]***blocks.5.1.bn3.weight:[208]***blocks.5.1.bn3.bias:[208]***blocks.5.1.bn3.running_mean:[208]***blocks.5.1.bn3.running_var:[208]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[1160, 208, 1, 1]***blocks.5.2.bn1.weight:[1160]***blocks.5.2.bn1.bias:[1160]***blocks.5.2.bn1.running_mean:[1160]***blocks.5.2.bn1.running_var:[1160]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[1160, 1, 5, 5]***blocks.5.2.bn2.weight:[1160]***blocks.5.2.bn2.bias:[1160]***blocks.5.2.bn2.running_mean:[1160]***blocks.5.2.bn2.running_var:[1160]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[52, 1160, 1, 1]***blocks.5.2.se.conv_reduce.bias:[52]***blocks.5.2.se.conv_expand.weight:[1160, 52, 1, 1]***blocks.5.2.se.conv_expand.bias:[1160]***blocks.5.2.conv_pwl.weight:[208, 1160, 1, 1]***blocks.5.2.bn3.weight:[208]***blocks.5.2.bn3.bias:[208]***blocks.5.2.bn3.running_mean:[208]***blocks.5.2.bn3.running_var:[208]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1182, 208, 1, 1]***blocks.5.3.bn1.weight:[1182]***blocks.5.3.bn1.bias:[1182]***blocks.5.3.bn1.running_mean:[1182]***blocks.5.3.bn1.running_var:[1182]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1182, 1, 5, 5]***blocks.5.3.bn2.weight:[1182]***blocks.5.3.bn2.bias:[1182]***blocks.5.3.bn2.running_mean:[1182]***blocks.5.3.bn2.running_var:[1182]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[52, 1182, 1, 
1]***blocks.5.3.se.conv_reduce.bias:[52]***blocks.5.3.se.conv_expand.weight:[1182, 52, 1, 1]***blocks.5.3.se.conv_expand.bias:[1182]***blocks.5.3.conv_pwl.weight:[208, 1182, 1, 1]***blocks.5.3.bn3.weight:[208]***blocks.5.3.bn3.bias:[208]***blocks.5.3.bn3.running_mean:[208]***blocks.5.3.bn3.running_var:[208]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1228, 208, 1, 1]***blocks.5.4.bn1.weight:[1228]***blocks.5.4.bn1.bias:[1228]***blocks.5.4.bn1.running_mean:[1228]***blocks.5.4.bn1.running_var:[1228]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1228, 1, 5, 5]***blocks.5.4.bn2.weight:[1228]***blocks.5.4.bn2.bias:[1228]***blocks.5.4.bn2.running_mean:[1228]***blocks.5.4.bn2.running_var:[1228]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[52, 1228, 1, 1]***blocks.5.4.se.conv_reduce.bias:[52]***blocks.5.4.se.conv_expand.weight:[1228, 52, 1, 1]***blocks.5.4.se.conv_expand.bias:[1228]***blocks.5.4.conv_pwl.weight:[208, 1228, 1, 1]***blocks.5.4.bn3.weight:[208]***blocks.5.4.bn3.bias:[208]***blocks.5.4.bn3.running_mean:[208]***blocks.5.4.bn3.running_var:[208]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1248, 208, 1, 1]***blocks.6.0.bn1.weight:[1248]***blocks.6.0.bn1.bias:[1248]***blocks.6.0.bn1.running_mean:[1248]***blocks.6.0.bn1.running_var:[1248]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1248, 1, 3, 3]***blocks.6.0.bn2.weight:[1248]***blocks.6.0.bn2.bias:[1248]***blocks.6.0.bn2.running_mean:[1248]***blocks.6.0.bn2.running_var:[1248]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[52, 1248, 1, 1]***blocks.6.0.se.conv_reduce.bias:[52]***blocks.6.0.se.conv_expand.weight:[1248, 52, 1, 1]***blocks.6.0.se.conv_expand.bias:[1248]***blocks.6.0.conv_pwl.weight:[352, 1248, 1, 1]***blocks.6.0.bn3.weight:[352]***blocks.6.0.bn3.bias:[352]***blocks.6.0.bn3.running_mean:[352]***blocks.6.0.bn3.running_var:[352]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2112, 352, 1, 1]***blocks.6.1.bn1.weight:[2112]***blocks.6.1.bn1.bias:[2112]***blocks.6.1.bn1.running_mean:[2112]***blocks.6.1.bn1.running_var:[2112]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2112, 1, 3, 3]***blocks.6.1.bn2.weight:[2112]***blocks.6.1.bn2.bias:[2112]***blocks.6.1.bn2.running_mean:[2112]***blocks.6.1.bn2.running_var:[2112]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[88, 2112, 1, 1]***blocks.6.1.se.conv_reduce.bias:[88]***blocks.6.1.se.conv_expand.weight:[2112, 88, 1, 1]***blocks.6.1.se.conv_expand.bias:[2112]***blocks.6.1.conv_pwl.weight:[352, 2112, 1, 1]***blocks.6.1.bn3.weight:[352]***blocks.6.1.bn3.bias:[352]***blocks.6.1.bn3.running_mean:[352]***blocks.6.1.bn3.running_var:[352]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1408, 352, 1, 1]***bn2.weight:[1408]***bn2.bias:[1408]***bn2.running_mean:[1408]***bn2.running_var:[1408]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1408]***classifier.bias:[1000] \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/efficientnet_b3_pruned.txt b/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/efficientnet_b3_pruned.txt new file mode 100644 index 0000000000..489781736d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/pruned/efficientnet_b3_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[40, 3, 3, 
3]***bn1.weight:[40]***bn1.bias:[40]***bn1.running_mean:[40]***bn1.running_var:[40]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[40, 1, 3, 3]***blocks.0.0.bn1.weight:[40]***blocks.0.0.bn1.bias:[40]***blocks.0.0.bn1.running_mean:[40]***blocks.0.0.bn1.running_var:[40]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[10, 40, 1, 1]***blocks.0.0.se.conv_reduce.bias:[10]***blocks.0.0.se.conv_expand.weight:[40, 10, 1, 1]***blocks.0.0.se.conv_expand.bias:[40]***blocks.0.0.conv_pw.weight:[24, 40, 1, 1]***blocks.0.0.bn2.weight:[24]***blocks.0.0.bn2.bias:[24]***blocks.0.0.bn2.running_mean:[24]***blocks.0.0.bn2.running_var:[24]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[24, 1, 3, 3]***blocks.0.1.bn1.weight:[24]***blocks.0.1.bn1.bias:[24]***blocks.0.1.bn1.running_mean:[24]***blocks.0.1.bn1.running_var:[24]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[6, 24, 1, 1]***blocks.0.1.se.conv_reduce.bias:[6]***blocks.0.1.se.conv_expand.weight:[24, 6, 1, 1]***blocks.0.1.se.conv_expand.bias:[24]***blocks.0.1.conv_pw.weight:[24, 24, 1, 1]***blocks.0.1.bn2.weight:[24]***blocks.0.1.bn2.bias:[24]***blocks.0.1.bn2.running_mean:[24]***blocks.0.1.bn2.running_var:[24]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[27, 24, 1, 1]***blocks.1.0.bn1.weight:[27]***blocks.1.0.bn1.bias:[27]***blocks.1.0.bn1.running_mean:[27]***blocks.1.0.bn1.running_var:[27]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[27, 1, 3, 3]***blocks.1.0.bn2.weight:[27]***blocks.1.0.bn2.bias:[27]***blocks.1.0.bn2.running_mean:[27]***blocks.1.0.bn2.running_var:[27]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[6, 27, 1, 1]***blocks.1.0.se.conv_reduce.bias:[6]***blocks.1.0.se.conv_expand.weight:[27, 6, 1, 1]***blocks.1.0.se.conv_expand.bias:[27]***blocks.1.0.conv_pwl.weight:[12, 27, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[49, 12, 1, 1]***blocks.1.1.bn1.weight:[49]***blocks.1.1.bn1.bias:[49]***blocks.1.1.bn1.running_mean:[49]***blocks.1.1.bn1.running_var:[49]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[49, 1, 3, 3]***blocks.1.1.bn2.weight:[49]***blocks.1.1.bn2.bias:[49]***blocks.1.1.bn2.running_mean:[49]***blocks.1.1.bn2.running_var:[49]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[8, 49, 1, 1]***blocks.1.1.se.conv_reduce.bias:[8]***blocks.1.1.se.conv_expand.weight:[49, 8, 1, 1]***blocks.1.1.se.conv_expand.bias:[49]***blocks.1.1.conv_pwl.weight:[12, 49, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[8, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[8]***blocks.1.2.se.conv_expand.weight:[48, 8, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 
1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[83, 12, 1, 1]***blocks.2.0.bn1.weight:[83]***blocks.2.0.bn1.bias:[83]***blocks.2.0.bn1.running_mean:[83]***blocks.2.0.bn1.running_var:[83]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[83, 1, 5, 5]***blocks.2.0.bn2.weight:[83]***blocks.2.0.bn2.bias:[83]***blocks.2.0.bn2.running_mean:[83]***blocks.2.0.bn2.running_var:[83]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[8, 83, 1, 1]***blocks.2.0.se.conv_reduce.bias:[8]***blocks.2.0.se.conv_expand.weight:[83, 8, 1, 1]***blocks.2.0.se.conv_expand.bias:[83]***blocks.2.0.conv_pwl.weight:[40, 83, 1, 1]***blocks.2.0.bn3.weight:[40]***blocks.2.0.bn3.bias:[40]***blocks.2.0.bn3.running_mean:[40]***blocks.2.0.bn3.running_var:[40]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[90, 40, 1, 1]***blocks.2.1.bn1.weight:[90]***blocks.2.1.bn1.bias:[90]***blocks.2.1.bn1.running_mean:[90]***blocks.2.1.bn1.running_var:[90]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[90, 1, 5, 5]***blocks.2.1.bn2.weight:[90]***blocks.2.1.bn2.bias:[90]***blocks.2.1.bn2.running_mean:[90]***blocks.2.1.bn2.running_var:[90]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 90, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[90, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[90]***blocks.2.1.conv_pwl.weight:[40, 90, 1, 1]***blocks.2.1.bn3.weight:[40]***blocks.2.1.bn3.bias:[40]***blocks.2.1.bn3.running_mean:[40]***blocks.2.1.bn3.running_var:[40]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[85, 40, 1, 1]***blocks.2.2.bn1.weight:[85]***blocks.2.2.bn1.bias:[85]***blocks.2.2.bn1.running_mean:[85]***blocks.2.2.bn1.running_var:[85]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[85, 1, 5, 5]***blocks.2.2.bn2.weight:[85]***blocks.2.2.bn2.bias:[85]***blocks.2.2.bn2.running_mean:[85]***blocks.2.2.bn2.running_var:[85]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 85, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[85, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[85]***blocks.2.2.conv_pwl.weight:[40, 85, 1, 1]***blocks.2.2.bn3.weight:[40]***blocks.2.2.bn3.bias:[40]***blocks.2.2.bn3.running_mean:[40]***blocks.2.2.bn3.running_var:[40]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[215, 40, 1, 1]***blocks.3.0.bn1.weight:[215]***blocks.3.0.bn1.bias:[215]***blocks.3.0.bn1.running_mean:[215]***blocks.3.0.bn1.running_var:[215]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[215, 1, 3, 3]***blocks.3.0.bn2.weight:[215]***blocks.3.0.bn2.bias:[215]***blocks.3.0.bn2.running_mean:[215]***blocks.3.0.bn2.running_var:[215]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 215, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[215, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[215]***blocks.3.0.conv_pwl.weight:[93, 215, 1, 1]***blocks.3.0.bn3.weight:[93]***blocks.3.0.bn3.bias:[93]***blocks.3.0.bn3.running_mean:[93]***blocks.3.0.bn3.running_var:[93]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[261, 93, 1, 
1]***blocks.3.1.bn1.weight:[261]***blocks.3.1.bn1.bias:[261]***blocks.3.1.bn1.running_mean:[261]***blocks.3.1.bn1.running_var:[261]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[261, 1, 3, 3]***blocks.3.1.bn2.weight:[261]***blocks.3.1.bn2.bias:[261]***blocks.3.1.bn2.running_mean:[261]***blocks.3.1.bn2.running_var:[261]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[24, 261, 1, 1]***blocks.3.1.se.conv_reduce.bias:[24]***blocks.3.1.se.conv_expand.weight:[261, 24, 1, 1]***blocks.3.1.se.conv_expand.bias:[261]***blocks.3.1.conv_pwl.weight:[93, 261, 1, 1]***blocks.3.1.bn3.weight:[93]***blocks.3.1.bn3.bias:[93]***blocks.3.1.bn3.running_mean:[93]***blocks.3.1.bn3.running_var:[93]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[219, 93, 1, 1]***blocks.3.2.bn1.weight:[219]***blocks.3.2.bn1.bias:[219]***blocks.3.2.bn1.running_mean:[219]***blocks.3.2.bn1.running_var:[219]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[219, 1, 3, 3]***blocks.3.2.bn2.weight:[219]***blocks.3.2.bn2.bias:[219]***blocks.3.2.bn2.running_mean:[219]***blocks.3.2.bn2.running_var:[219]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[24, 219, 1, 1]***blocks.3.2.se.conv_reduce.bias:[24]***blocks.3.2.se.conv_expand.weight:[219, 24, 1, 1]***blocks.3.2.se.conv_expand.bias:[219]***blocks.3.2.conv_pwl.weight:[93, 219, 1, 1]***blocks.3.2.bn3.weight:[93]***blocks.3.2.bn3.bias:[93]***blocks.3.2.bn3.running_mean:[93]***blocks.3.2.bn3.running_var:[93]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[254, 93, 1, 1]***blocks.3.3.bn1.weight:[254]***blocks.3.3.bn1.bias:[254]***blocks.3.3.bn1.running_mean:[254]***blocks.3.3.bn1.running_var:[254]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[254, 1, 3, 3]***blocks.3.3.bn2.weight:[254]***blocks.3.3.bn2.bias:[254]***blocks.3.3.bn2.running_mean:[254]***blocks.3.3.bn2.running_var:[254]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[24, 254, 1, 1]***blocks.3.3.se.conv_reduce.bias:[24]***blocks.3.3.se.conv_expand.weight:[254, 24, 1, 1]***blocks.3.3.se.conv_expand.bias:[254]***blocks.3.3.conv_pwl.weight:[93, 254, 1, 1]***blocks.3.3.bn3.weight:[93]***blocks.3.3.bn3.bias:[93]***blocks.3.3.bn3.running_mean:[93]***blocks.3.3.bn3.running_var:[93]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.3.4.conv_pw.weight:[236, 93, 1, 1]***blocks.3.4.bn1.weight:[236]***blocks.3.4.bn1.bias:[236]***blocks.3.4.bn1.running_mean:[236]***blocks.3.4.bn1.running_var:[236]***blocks.3.4.bn1.num_batches_tracked:[]***blocks.3.4.conv_dw.weight:[236, 1, 3, 3]***blocks.3.4.bn2.weight:[236]***blocks.3.4.bn2.bias:[236]***blocks.3.4.bn2.running_mean:[236]***blocks.3.4.bn2.running_var:[236]***blocks.3.4.bn2.num_batches_tracked:[]***blocks.3.4.se.conv_reduce.weight:[24, 236, 1, 1]***blocks.3.4.se.conv_reduce.bias:[24]***blocks.3.4.se.conv_expand.weight:[236, 24, 1, 1]***blocks.3.4.se.conv_expand.bias:[236]***blocks.3.4.conv_pwl.weight:[93, 236, 1, 1]***blocks.3.4.bn3.weight:[93]***blocks.3.4.bn3.bias:[93]***blocks.3.4.bn3.running_mean:[93]***blocks.3.4.bn3.running_var:[93]***blocks.3.4.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[480, 93, 1, 1]***blocks.4.0.bn1.weight:[480]***blocks.4.0.bn1.bias:[480]***blocks.4.0.bn1.running_mean:[480]***blocks.4.0.bn1.running_var:[480]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[480, 1, 5, 
5]***blocks.4.0.bn2.weight:[480]***blocks.4.0.bn2.bias:[480]***blocks.4.0.bn2.running_mean:[480]***blocks.4.0.bn2.running_var:[480]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[24, 480, 1, 1]***blocks.4.0.se.conv_reduce.bias:[24]***blocks.4.0.se.conv_expand.weight:[480, 24, 1, 1]***blocks.4.0.se.conv_expand.bias:[480]***blocks.4.0.conv_pwl.weight:[120, 480, 1, 1]***blocks.4.0.bn3.weight:[120]***blocks.4.0.bn3.bias:[120]***blocks.4.0.bn3.running_mean:[120]***blocks.4.0.bn3.running_var:[120]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[235, 120, 1, 1]***blocks.4.1.bn1.weight:[235]***blocks.4.1.bn1.bias:[235]***blocks.4.1.bn1.running_mean:[235]***blocks.4.1.bn1.running_var:[235]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[235, 1, 5, 5]***blocks.4.1.bn2.weight:[235]***blocks.4.1.bn2.bias:[235]***blocks.4.1.bn2.running_mean:[235]***blocks.4.1.bn2.running_var:[235]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[34, 235, 1, 1]***blocks.4.1.se.conv_reduce.bias:[34]***blocks.4.1.se.conv_expand.weight:[235, 34, 1, 1]***blocks.4.1.se.conv_expand.bias:[235]***blocks.4.1.conv_pwl.weight:[120, 235, 1, 1]***blocks.4.1.bn3.weight:[120]***blocks.4.1.bn3.bias:[120]***blocks.4.1.bn3.running_mean:[120]***blocks.4.1.bn3.running_var:[120]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[217, 120, 1, 1]***blocks.4.2.bn1.weight:[217]***blocks.4.2.bn1.bias:[217]***blocks.4.2.bn1.running_mean:[217]***blocks.4.2.bn1.running_var:[217]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[217, 1, 5, 5]***blocks.4.2.bn2.weight:[217]***blocks.4.2.bn2.bias:[217]***blocks.4.2.bn2.running_mean:[217]***blocks.4.2.bn2.running_var:[217]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[34, 217, 1, 1]***blocks.4.2.se.conv_reduce.bias:[34]***blocks.4.2.se.conv_expand.weight:[217, 34, 1, 1]***blocks.4.2.se.conv_expand.bias:[217]***blocks.4.2.conv_pwl.weight:[120, 217, 1, 1]***blocks.4.2.bn3.weight:[120]***blocks.4.2.bn3.bias:[120]***blocks.4.2.bn3.running_mean:[120]***blocks.4.2.bn3.running_var:[120]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[226, 120, 1, 1]***blocks.4.3.bn1.weight:[226]***blocks.4.3.bn1.bias:[226]***blocks.4.3.bn1.running_mean:[226]***blocks.4.3.bn1.running_var:[226]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[226, 1, 5, 5]***blocks.4.3.bn2.weight:[226]***blocks.4.3.bn2.bias:[226]***blocks.4.3.bn2.running_mean:[226]***blocks.4.3.bn2.running_var:[226]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[33, 226, 1, 1]***blocks.4.3.se.conv_reduce.bias:[33]***blocks.4.3.se.conv_expand.weight:[226, 33, 1, 1]***blocks.4.3.se.conv_expand.bias:[226]***blocks.4.3.conv_pwl.weight:[120, 226, 1, 1]***blocks.4.3.bn3.weight:[120]***blocks.4.3.bn3.bias:[120]***blocks.4.3.bn3.running_mean:[120]***blocks.4.3.bn3.running_var:[120]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.4.4.conv_pw.weight:[340, 120, 1, 1]***blocks.4.4.bn1.weight:[340]***blocks.4.4.bn1.bias:[340]***blocks.4.4.bn1.running_mean:[340]***blocks.4.4.bn1.running_var:[340]***blocks.4.4.bn1.num_batches_tracked:[]***blocks.4.4.conv_dw.weight:[340, 1, 5, 5]***blocks.4.4.bn2.weight:[340]***blocks.4.4.bn2.bias:[340]***blocks.4.4.bn2.running_mean:[340]***blocks.4.4.bn2.running_var:[340]***blocks.4.4.bn2.num_batches_tracked:[]***blocks.4.4.se.conv_reduce.weight:[34, 340, 1, 
1]***blocks.4.4.se.conv_reduce.bias:[34]***blocks.4.4.se.conv_expand.weight:[340, 34, 1, 1]***blocks.4.4.se.conv_expand.bias:[340]***blocks.4.4.conv_pwl.weight:[120, 340, 1, 1]***blocks.4.4.bn3.weight:[120]***blocks.4.4.bn3.bias:[120]***blocks.4.4.bn3.running_mean:[120]***blocks.4.4.bn3.running_var:[120]***blocks.4.4.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[802, 120, 1, 1]***blocks.5.0.bn1.weight:[802]***blocks.5.0.bn1.bias:[802]***blocks.5.0.bn1.running_mean:[802]***blocks.5.0.bn1.running_var:[802]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[802, 1, 5, 5]***blocks.5.0.bn2.weight:[802]***blocks.5.0.bn2.bias:[802]***blocks.5.0.bn2.running_mean:[802]***blocks.5.0.bn2.running_var:[802]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[34, 802, 1, 1]***blocks.5.0.se.conv_reduce.bias:[34]***blocks.5.0.se.conv_expand.weight:[802, 34, 1, 1]***blocks.5.0.se.conv_expand.bias:[802]***blocks.5.0.conv_pwl.weight:[232, 802, 1, 1]***blocks.5.0.bn3.weight:[232]***blocks.5.0.bn3.bias:[232]***blocks.5.0.bn3.running_mean:[232]***blocks.5.0.bn3.running_var:[232]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1030, 232, 1, 1]***blocks.5.1.bn1.weight:[1030]***blocks.5.1.bn1.bias:[1030]***blocks.5.1.bn1.running_mean:[1030]***blocks.5.1.bn1.running_var:[1030]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1030, 1, 5, 5]***blocks.5.1.bn2.weight:[1030]***blocks.5.1.bn2.bias:[1030]***blocks.5.1.bn2.running_mean:[1030]***blocks.5.1.bn2.running_var:[1030]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[58, 1030, 1, 1]***blocks.5.1.se.conv_reduce.bias:[58]***blocks.5.1.se.conv_expand.weight:[1030, 58, 1, 1]***blocks.5.1.se.conv_expand.bias:[1030]***blocks.5.1.conv_pwl.weight:[232, 1030, 1, 1]***blocks.5.1.bn3.weight:[232]***blocks.5.1.bn3.bias:[232]***blocks.5.1.bn3.running_mean:[232]***blocks.5.1.bn3.running_var:[232]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[924, 232, 1, 1]***blocks.5.2.bn1.weight:[924]***blocks.5.2.bn1.bias:[924]***blocks.5.2.bn1.running_mean:[924]***blocks.5.2.bn1.running_var:[924]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[924, 1, 5, 5]***blocks.5.2.bn2.weight:[924]***blocks.5.2.bn2.bias:[924]***blocks.5.2.bn2.running_mean:[924]***blocks.5.2.bn2.running_var:[924]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[58, 924, 1, 1]***blocks.5.2.se.conv_reduce.bias:[58]***blocks.5.2.se.conv_expand.weight:[924, 58, 1, 1]***blocks.5.2.se.conv_expand.bias:[924]***blocks.5.2.conv_pwl.weight:[232, 924, 1, 1]***blocks.5.2.bn3.weight:[232]***blocks.5.2.bn3.bias:[232]***blocks.5.2.bn3.running_mean:[232]***blocks.5.2.bn3.running_var:[232]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1016, 232, 1, 1]***blocks.5.3.bn1.weight:[1016]***blocks.5.3.bn1.bias:[1016]***blocks.5.3.bn1.running_mean:[1016]***blocks.5.3.bn1.running_var:[1016]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1016, 1, 5, 5]***blocks.5.3.bn2.weight:[1016]***blocks.5.3.bn2.bias:[1016]***blocks.5.3.bn2.running_mean:[1016]***blocks.5.3.bn2.running_var:[1016]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[58, 1016, 1, 1]***blocks.5.3.se.conv_reduce.bias:[58]***blocks.5.3.se.conv_expand.weight:[1016, 58, 1, 1]***blocks.5.3.se.conv_expand.bias:[1016]***blocks.5.3.conv_pwl.weight:[232, 1016, 1, 
1]***blocks.5.3.bn3.weight:[232]***blocks.5.3.bn3.bias:[232]***blocks.5.3.bn3.running_mean:[232]***blocks.5.3.bn3.running_var:[232]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1130, 232, 1, 1]***blocks.5.4.bn1.weight:[1130]***blocks.5.4.bn1.bias:[1130]***blocks.5.4.bn1.running_mean:[1130]***blocks.5.4.bn1.running_var:[1130]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1130, 1, 5, 5]***blocks.5.4.bn2.weight:[1130]***blocks.5.4.bn2.bias:[1130]***blocks.5.4.bn2.running_mean:[1130]***blocks.5.4.bn2.running_var:[1130]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[58, 1130, 1, 1]***blocks.5.4.se.conv_reduce.bias:[58]***blocks.5.4.se.conv_expand.weight:[1130, 58, 1, 1]***blocks.5.4.se.conv_expand.bias:[1130]***blocks.5.4.conv_pwl.weight:[232, 1130, 1, 1]***blocks.5.4.bn3.weight:[232]***blocks.5.4.bn3.bias:[232]***blocks.5.4.bn3.running_mean:[232]***blocks.5.4.bn3.running_var:[232]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.5.5.conv_pw.weight:[1266, 232, 1, 1]***blocks.5.5.bn1.weight:[1266]***blocks.5.5.bn1.bias:[1266]***blocks.5.5.bn1.running_mean:[1266]***blocks.5.5.bn1.running_var:[1266]***blocks.5.5.bn1.num_batches_tracked:[]***blocks.5.5.conv_dw.weight:[1266, 1, 5, 5]***blocks.5.5.bn2.weight:[1266]***blocks.5.5.bn2.bias:[1266]***blocks.5.5.bn2.running_mean:[1266]***blocks.5.5.bn2.running_var:[1266]***blocks.5.5.bn2.num_batches_tracked:[]***blocks.5.5.se.conv_reduce.weight:[58, 1266, 1, 1]***blocks.5.5.se.conv_reduce.bias:[58]***blocks.5.5.se.conv_expand.weight:[1266, 58, 1, 1]***blocks.5.5.se.conv_expand.bias:[1266]***blocks.5.5.conv_pwl.weight:[232, 1266, 1, 1]***blocks.5.5.bn3.weight:[232]***blocks.5.5.bn3.bias:[232]***blocks.5.5.bn3.running_mean:[232]***blocks.5.5.bn3.running_var:[232]***blocks.5.5.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1392, 232, 1, 1]***blocks.6.0.bn1.weight:[1392]***blocks.6.0.bn1.bias:[1392]***blocks.6.0.bn1.running_mean:[1392]***blocks.6.0.bn1.running_var:[1392]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1392, 1, 3, 3]***blocks.6.0.bn2.weight:[1392]***blocks.6.0.bn2.bias:[1392]***blocks.6.0.bn2.running_mean:[1392]***blocks.6.0.bn2.running_var:[1392]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[58, 1392, 1, 1]***blocks.6.0.se.conv_reduce.bias:[58]***blocks.6.0.se.conv_expand.weight:[1392, 58, 1, 1]***blocks.6.0.se.conv_expand.bias:[1392]***blocks.6.0.conv_pwl.weight:[384, 1392, 1, 1]***blocks.6.0.bn3.weight:[384]***blocks.6.0.bn3.bias:[384]***blocks.6.0.bn3.running_mean:[384]***blocks.6.0.bn3.running_var:[384]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2301, 384, 1, 1]***blocks.6.1.bn1.weight:[2301]***blocks.6.1.bn1.bias:[2301]***blocks.6.1.bn1.running_mean:[2301]***blocks.6.1.bn1.running_var:[2301]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2301, 1, 3, 3]***blocks.6.1.bn2.weight:[2301]***blocks.6.1.bn2.bias:[2301]***blocks.6.1.bn2.running_mean:[2301]***blocks.6.1.bn2.running_var:[2301]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[96, 2301, 1, 1]***blocks.6.1.se.conv_reduce.bias:[96]***blocks.6.1.se.conv_expand.weight:[2301, 96, 1, 1]***blocks.6.1.se.conv_expand.bias:[2301]***blocks.6.1.conv_pwl.weight:[384, 2301, 1, 1]***blocks.6.1.bn3.weight:[384]***blocks.6.1.bn3.bias:[384]***blocks.6.1.bn3.running_mean:[384]***blocks.6.1.bn3.running_var:[384]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1536, 384, 1, 
1]***bn2.weight:[1536]***bn2.bias:[1536]***bn2.running_mean:[1536]***bn2.running_var:[1536]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1536]***classifier.bias:[1000] \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/registry.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/registry.py new file mode 100644 index 0000000000..f92219b218 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/registry.py @@ -0,0 +1,149 @@ +""" Model Registry +Hacked together by / Copyright 2020 Ross Wightman +""" + +import sys +import re +import fnmatch +from collections import defaultdict +from copy import deepcopy + +__all__ = ['list_models', 'is_model', 'model_entrypoint', 'list_modules', 'is_model_in_modules', + 'is_model_default_key', 'has_model_default_key', 'get_model_default_value', 'is_model_pretrained'] + +_module_to_models = defaultdict(set) # dict of sets to check membership of model in module +_model_to_module = {} # mapping of model names to module names +_model_entrypoints = {} # mapping of model names to entrypoint fns +_model_has_pretrained = set() # set of model names that have pretrained weight url present +_model_default_cfgs = dict() # central repo for model default_cfgs + + +def register_model(fn): + # lookup containing module + mod = sys.modules[fn.__module__] + module_name_split = fn.__module__.split('.') + module_name = module_name_split[-1] if len(module_name_split) else '' + + # add model to __all__ in module + model_name = fn.__name__ + if hasattr(mod, '__all__'): + mod.__all__.append(model_name) + else: + mod.__all__ = [model_name] + + # add entries to registry dict/sets + _model_entrypoints[model_name] = fn + _model_to_module[model_name] = module_name + _module_to_models[module_name].add(model_name) + has_pretrained = False # check if model has a pretrained url to allow filtering on this + if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs: + # this will catch all models that have entrypoint matching cfg key, but miss any aliasing + # entrypoints or non-matching combos + has_pretrained = 'url' in mod.default_cfgs[model_name] and 'http' in mod.default_cfgs[model_name]['url'] + _model_default_cfgs[model_name] = deepcopy(mod.default_cfgs[model_name]) + if has_pretrained: + _model_has_pretrained.add(model_name) + return fn + + +def _natural_key(string_): + return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] + + +def list_models(filter='', module='', pretrained=False, exclude_filters='', name_matches_cfg=False): + """ Return list of available model names, sorted alphabetically + + Args: + filter (str) - Wildcard filter string that works with fnmatch + module (str) - Limit model selection to a specific sub-module (ie 'gen_efficientnet') + pretrained (bool) - Include only models with pretrained weights if True + exclude_filters (str or list[str]) - Wildcard filters to exclude models after including them with filter + name_matches_cfg (bool) - Include only models w/ model_name matching default_cfg name (excludes some aliases) + + Example: + model_list('gluon_resnet*') -- returns all models starting with 'gluon_resnet' + model_list('*resnext*, 'resnet') -- returns all models with 'resnext' in 'resnet' module + """ + if module: + all_models = list(_module_to_models[module]) + else: + all_models = _model_entrypoints.keys() + if filter: + models = [] + include_filters = filter if isinstance(filter, (tuple, list)) else [filter] + for f in include_filters: + 
include_models = fnmatch.filter(all_models, f) # include these models + if len(include_models): + models = set(models).union(include_models) + else: + models = all_models + if exclude_filters: + if not isinstance(exclude_filters, (tuple, list)): + exclude_filters = [exclude_filters] + for xf in exclude_filters: + exclude_models = fnmatch.filter(models, xf) # exclude these models + if len(exclude_models): + models = set(models).difference(exclude_models) + if pretrained: + models = _model_has_pretrained.intersection(models) + if name_matches_cfg: + models = set(_model_default_cfgs).intersection(models) + return list(sorted(models, key=_natural_key)) + + +def is_model(model_name): + """ Check if a model name exists + """ + return model_name in _model_entrypoints + + +def model_entrypoint(model_name): + """Fetch a model entrypoint for specified model name + """ + return _model_entrypoints[model_name] + + +def list_modules(): + """ Return list of module names that contain models / model entrypoints + """ + modules = _module_to_models.keys() + return list(sorted(modules)) + + +def is_model_in_modules(model_name, module_names): + """Check if a model exists within a subset of modules + Args: + model_name (str) - name of model to check + module_names (tuple, list, set) - names of modules to search in + """ + assert isinstance(module_names, (tuple, list, set)) + return any(model_name in _module_to_models[n] for n in module_names) + + +def has_model_default_key(model_name, cfg_key): + """ Query model default_cfgs for existence of a specific key. + """ + if model_name in _model_default_cfgs and cfg_key in _model_default_cfgs[model_name]: + return True + return False + + +def is_model_default_key(model_name, cfg_key): + """ Return truthy value for specified model default_cfg key, False if does not exist. + """ + if model_name in _model_default_cfgs and _model_default_cfgs[model_name].get(cfg_key, False): + return True + return False + + +def get_model_default_value(model_name, cfg_key): + """ Get a specific model default_cfg value by key. None if it doesn't exist. + """ + if model_name in _model_default_cfgs: + return _model_default_cfgs[model_name].get(cfg_key, None) + else: + return None + + +def is_model_pretrained(model_name): + return model_name in _model_has_pretrained diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/regnet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/regnet.py new file mode 100644 index 0000000000..6a38107467 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/regnet.py @@ -0,0 +1,494 @@ +"""RegNet + +Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678 +Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py + +Based on original PyTorch impl linked above, but re-wrote to use my own blocks (adapted from ResNet here) +and cleaned up with more descriptive variable names. 
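The registry module above is the lookup layer used throughout this package: `@register_model` records each entrypoint function, and `list_models` / `model_entrypoint` resolve names back to their factories. A minimal usage sketch, assuming the bundled `timm` package is importable (importing it pulls in the model modules, each of which runs `@register_model`; the top-level re-export of `list_models` is assumed from the standard timm layout):

import timm  # importing the package imports the model modules, registering their entrypoints

print(timm.list_models('regnet*')[:5])                 # fnmatch-style include filter
print(timm.list_models('regnety_*', pretrained=True))  # only names whose default_cfg carries a weight URL

from timm.models.registry import is_model, model_entrypoint
assert is_model('regnetx_002')
create_fn = model_entrypoint('regnetx_002')  # returns the registered factory function
model = create_fn(pretrained=False)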
+ +Weights from original impl have been modified +* first layer from BGR -> RGB as most PyTorch models are +* removed training specific dict entries from checkpoints and keep model state_dict only +* remap names to match the ones here + +Hacked together by / Copyright 2020 Ross Wightman +""" +import numpy as np +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, AvgPool2dSame, ConvBnAct, SEModule, DropPath +from .registry import register_model + + +def _mcfg(**kwargs): + cfg = dict(se_ratio=0., bottle_ratio=1., stem_width=32) + cfg.update(**kwargs) + return cfg + + +# Model FLOPS = three trailing digits * 10^8 +model_cfgs = dict( + regnetx_002=_mcfg(w0=24, wa=36.44, wm=2.49, group_w=8, depth=13), + regnetx_004=_mcfg(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22), + regnetx_006=_mcfg(w0=48, wa=36.97, wm=2.24, group_w=24, depth=16), + regnetx_008=_mcfg(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16), + regnetx_016=_mcfg(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18), + regnetx_032=_mcfg(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25), + regnetx_040=_mcfg(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23), + regnetx_064=_mcfg(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17), + regnetx_080=_mcfg(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23), + regnetx_120=_mcfg(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19), + regnetx_160=_mcfg(w0=216, wa=55.59, wm=2.1, group_w=128, depth=22), + regnetx_320=_mcfg(w0=320, wa=69.86, wm=2.0, group_w=168, depth=23), + regnety_002=_mcfg(w0=24, wa=36.44, wm=2.49, group_w=8, depth=13, se_ratio=0.25), + regnety_004=_mcfg(w0=48, wa=27.89, wm=2.09, group_w=8, depth=16, se_ratio=0.25), + regnety_006=_mcfg(w0=48, wa=32.54, wm=2.32, group_w=16, depth=15, se_ratio=0.25), + regnety_008=_mcfg(w0=56, wa=38.84, wm=2.4, group_w=16, depth=14, se_ratio=0.25), + regnety_016=_mcfg(w0=48, wa=20.71, wm=2.65, group_w=24, depth=27, se_ratio=0.25), + regnety_032=_mcfg(w0=80, wa=42.63, wm=2.66, group_w=24, depth=21, se_ratio=0.25), + regnety_040=_mcfg(w0=96, wa=31.41, wm=2.24, group_w=64, depth=22, se_ratio=0.25), + regnety_064=_mcfg(w0=112, wa=33.22, wm=2.27, group_w=72, depth=25, se_ratio=0.25), + regnety_080=_mcfg(w0=192, wa=76.82, wm=2.19, group_w=56, depth=17, se_ratio=0.25), + regnety_120=_mcfg(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, se_ratio=0.25), + regnety_160=_mcfg(w0=200, wa=106.23, wm=2.48, group_w=112, depth=18, se_ratio=0.25), + regnety_320=_mcfg(w0=232, wa=115.89, wm=2.53, group_w=232, depth=20, se_ratio=0.25), +) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + regnetx_002=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_002-e7e85e5c.pth'), + regnetx_004=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_004-7d0e9424.pth'), + regnetx_006=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_006-85ec1baa.pth'), + regnetx_008=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_008-d8b470eb.pth'), + 
regnetx_016=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_016-65ca972a.pth'), + regnetx_032=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_032-ed0c7f7e.pth'), + regnetx_040=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_040-73c2a654.pth'), + regnetx_064=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_064-29278baa.pth'), + regnetx_080=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_080-7c7fcab1.pth'), + regnetx_120=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_120-65d5521e.pth'), + regnetx_160=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_160-c98c4112.pth'), + regnetx_320=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_320-8ea38b93.pth'), + regnety_002=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_002-e68ca334.pth'), + regnety_004=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_004-0db870e6.pth'), + regnety_006=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_006-c67e57ec.pth'), + regnety_008=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_008-dc900dbe.pth'), + regnety_016=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_016-54367f74.pth'), + regnety_032=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth', + crop_pct=1.0, test_input_size=(3, 288, 288)), + regnety_040=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_040-f0d569f9.pth'), + regnety_064=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_064-0a48325c.pth'), + regnety_080=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_080-e7f3eb93.pth'), + regnety_120=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_120-721ba79a.pth'), + regnety_160=_cfg( + url='https://dl.fbaipublicfiles.com/deit/regnety_160-a5fe301d.pth', # from Facebook DeiT GitHub repository + crop_pct=1.0, test_input_size=(3, 288, 288)), + regnety_320=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_320-ba464b29.pth'), +) + + +def quantize_float(f, q): + """Converts a float to closest non-zero int divisible by q.""" + return int(round(f / q) * q) + + +def adjust_widths_groups_comp(widths, bottle_ratios, groups): + """Adjusts the compatibility of widths and groups.""" + bottleneck_widths = [int(w * b) for w, b in zip(widths, bottle_ratios)] + groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_widths)] + bottleneck_widths = [quantize_float(w_bot, g) for w_bot, g in zip(bottleneck_widths, groups)] + widths = [int(w_bot / b) for w_bot, b in zip(bottleneck_widths, bottle_ratios)] + return widths, groups + + +def generate_regnet(width_slope, width_initial, width_mult, depth, q=8): + """Generates per block widths from RegNet parameters.""" + assert width_slope >= 0 and width_initial > 0 and width_mult > 1 and 
width_initial % q == 0 + widths_cont = np.arange(depth) * width_slope + width_initial + width_exps = np.round(np.log(widths_cont / width_initial) / np.log(width_mult)) + widths = width_initial * np.power(width_mult, width_exps) + widths = np.round(np.divide(widths, q)) * q + num_stages, max_stage = len(np.unique(widths)), width_exps.max() + 1 + widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() + return widths, num_stages, max_stage, widths_cont + + +class Bottleneck(nn.Module): + """ RegNet Bottleneck + + This is almost exactly the same as a ResNet Bottlneck. The main difference is the SE block is moved from + after conv3 to after conv2. Otherwise, it's just redefining the arguments for groups/bottleneck channels. + """ + + def __init__(self, in_chs, out_chs, stride=1, dilation=1, bottleneck_ratio=1, group_width=1, se_ratio=0.25, + downsample=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, + drop_block=None, drop_path=None): + super(Bottleneck, self).__init__() + bottleneck_chs = int(round(out_chs * bottleneck_ratio)) + groups = bottleneck_chs // group_width + + cargs = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block=drop_block) + self.conv1 = ConvBnAct(in_chs, bottleneck_chs, kernel_size=1, **cargs) + self.conv2 = ConvBnAct( + bottleneck_chs, bottleneck_chs, kernel_size=3, stride=stride, dilation=dilation, + groups=groups, **cargs) + if se_ratio: + se_channels = int(round(in_chs * se_ratio)) + self.se = SEModule(bottleneck_chs, rd_channels=se_channels) + else: + self.se = None + cargs['act_layer'] = None + self.conv3 = ConvBnAct(bottleneck_chs, out_chs, kernel_size=1, **cargs) + self.act3 = act_layer(inplace=True) + self.downsample = downsample + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.se is not None: + x = self.se(x) + x = self.conv3(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act3(x) + return x + + +def downsample_conv( + in_chs, out_chs, kernel_size, stride=1, dilation=1, norm_layer=None): + norm_layer = norm_layer or nn.BatchNorm2d + kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size + dilation = dilation if kernel_size > 1 else 1 + return ConvBnAct( + in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, norm_layer=norm_layer, act_layer=None) + + +def downsample_avg( + in_chs, out_chs, kernel_size, stride=1, dilation=1, norm_layer=None): + """ AvgPool Downsampling as in 'D' ResNet variants. 
This is not in RegNet space but I might experiment.""" + norm_layer = norm_layer or nn.BatchNorm2d + avg_stride = stride if dilation == 1 else 1 + pool = nn.Identity() + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + return nn.Sequential(*[ + pool, ConvBnAct(in_chs, out_chs, 1, stride=1, norm_layer=norm_layer, act_layer=None)]) + + +class RegStage(nn.Module): + """Stage (sequence of blocks w/ the same output shape).""" + + def __init__(self, in_chs, out_chs, stride, dilation, depth, bottle_ratio, group_width, + block_fn=Bottleneck, se_ratio=0., drop_path_rates=None, drop_block=None): + super(RegStage, self).__init__() + block_kwargs = {} # FIXME setup to pass various aa, norm, act layer common args + first_dilation = 1 if dilation in (1, 2) else 2 + for i in range(depth): + block_stride = stride if i == 0 else 1 + block_in_chs = in_chs if i == 0 else out_chs + block_dilation = first_dilation if i == 0 else dilation + if drop_path_rates is not None and drop_path_rates[i] > 0.: + drop_path = DropPath(drop_path_rates[i]) + else: + drop_path = None + if (block_in_chs != out_chs) or (block_stride != 1): + proj_block = downsample_conv(block_in_chs, out_chs, 1, block_stride, block_dilation) + else: + proj_block = None + + name = "b{}".format(i + 1) + self.add_module( + name, block_fn( + block_in_chs, out_chs, block_stride, block_dilation, bottle_ratio, group_width, se_ratio, + downsample=proj_block, drop_block=drop_block, drop_path=drop_path, **block_kwargs) + ) + + def forward(self, x): + for block in self.children(): + x = block(x) + return x + + +class RegNet(nn.Module): + """RegNet model. + + Paper: https://arxiv.org/abs/2003.13678 + Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py + """ + + def __init__(self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0., + drop_path_rate=0., zero_init_last_bn=True): + super().__init__() + # TODO add drop block, drop path, anti-aliasing, custom bn/act args + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + + # Construct the stem + stem_width = cfg['stem_width'] + self.stem = ConvBnAct(in_chans, stem_width, 3, stride=2) + self.feature_info = [dict(num_chs=stem_width, reduction=2, module='stem')] + + # Construct the stages + prev_width = stem_width + curr_stride = 2 + stage_params = self._get_stage_params(cfg, output_stride=output_stride, drop_path_rate=drop_path_rate) + se_ratio = cfg['se_ratio'] + for i, stage_args in enumerate(stage_params): + stage_name = "s{}".format(i + 1) + self.add_module(stage_name, RegStage(prev_width, **stage_args, se_ratio=se_ratio)) + prev_width = stage_args['out_chs'] + curr_stride *= stage_args['stride'] + self.feature_info += [dict(num_chs=prev_width, reduction=curr_stride, module=stage_name)] + + # Construct the head + self.num_features = prev_width + self.head = ClassifierHead( + in_chs=prev_width, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, mean=0.0, std=0.01) + nn.init.zeros_(m.bias) + if zero_init_last_bn: + for m in self.modules(): + if 
hasattr(m, 'zero_init_last_bn'): + m.zero_init_last_bn() + + def _get_stage_params(self, cfg, default_stride=2, output_stride=32, drop_path_rate=0.): + # Generate RegNet ws per block + w_a, w_0, w_m, d = cfg['wa'], cfg['w0'], cfg['wm'], cfg['depth'] + widths, num_stages, _, _ = generate_regnet(w_a, w_0, w_m, d) + + # Convert to per stage format + stage_widths, stage_depths = np.unique(widths, return_counts=True) + + # Use the same group width, bottleneck mult and stride for each stage + stage_groups = [cfg['group_w'] for _ in range(num_stages)] + stage_bottle_ratios = [cfg['bottle_ratio'] for _ in range(num_stages)] + stage_strides = [] + stage_dilations = [] + net_stride = 2 + dilation = 1 + for _ in range(num_stages): + if net_stride >= output_stride: + dilation *= default_stride + stride = 1 + else: + stride = default_stride + net_stride *= stride + stage_strides.append(stride) + stage_dilations.append(dilation) + stage_dpr = np.split(np.linspace(0, drop_path_rate, d), np.cumsum(stage_depths[:-1])) + + # Adjust the compatibility of ws and gws + stage_widths, stage_groups = adjust_widths_groups_comp(stage_widths, stage_bottle_ratios, stage_groups) + param_names = ['out_chs', 'stride', 'dilation', 'depth', 'bottle_ratio', 'group_width', 'drop_path_rates'] + stage_params = [ + dict(zip(param_names, params)) for params in + zip(stage_widths, stage_strides, stage_dilations, stage_depths, stage_bottle_ratios, stage_groups, + stage_dpr)] + return stage_params + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + for block in list(self.children())[:-1]: + x = block(x) + return x + + def forward(self, x): + for block in self.children(): + x = block(x) + return x + + +def _filter_fn(state_dict): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + if 'model' in state_dict: + # For DeiT trained regnety_160 pretraiend model + state_dict = state_dict['model'] + return state_dict + + +def _create_regnet(variant, pretrained, **kwargs): + return build_model_with_cfg( + RegNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfgs[variant], + pretrained_filter_fn=_filter_fn, + **kwargs) + + +@register_model +def regnetx_002(pretrained=False, **kwargs): + """RegNetX-200MF""" + return _create_regnet('regnetx_002', pretrained, **kwargs) + + +@register_model +def regnetx_004(pretrained=False, **kwargs): + """RegNetX-400MF""" + return _create_regnet('regnetx_004', pretrained, **kwargs) + + +@register_model +def regnetx_006(pretrained=False, **kwargs): + """RegNetX-600MF""" + return _create_regnet('regnetx_006', pretrained, **kwargs) + + +@register_model +def regnetx_008(pretrained=False, **kwargs): + """RegNetX-800MF""" + return _create_regnet('regnetx_008', pretrained, **kwargs) + + +@register_model +def regnetx_016(pretrained=False, **kwargs): + """RegNetX-1.6GF""" + return _create_regnet('regnetx_016', pretrained, **kwargs) + + +@register_model +def regnetx_032(pretrained=False, **kwargs): + """RegNetX-3.2GF""" + return _create_regnet('regnetx_032', pretrained, **kwargs) + + +@register_model +def regnetx_040(pretrained=False, **kwargs): + """RegNetX-4.0GF""" + return _create_regnet('regnetx_040', pretrained, **kwargs) + + +@register_model +def regnetx_064(pretrained=False, **kwargs): + """RegNetX-6.4GF""" + return 
_create_regnet('regnetx_064', pretrained, **kwargs) + + +@register_model +def regnetx_080(pretrained=False, **kwargs): + """RegNetX-8.0GF""" + return _create_regnet('regnetx_080', pretrained, **kwargs) + + +@register_model +def regnetx_120(pretrained=False, **kwargs): + """RegNetX-12GF""" + return _create_regnet('regnetx_120', pretrained, **kwargs) + + +@register_model +def regnetx_160(pretrained=False, **kwargs): + """RegNetX-16GF""" + return _create_regnet('regnetx_160', pretrained, **kwargs) + + +@register_model +def regnetx_320(pretrained=False, **kwargs): + """RegNetX-32GF""" + return _create_regnet('regnetx_320', pretrained, **kwargs) + + +@register_model +def regnety_002(pretrained=False, **kwargs): + """RegNetY-200MF""" + return _create_regnet('regnety_002', pretrained, **kwargs) + + +@register_model +def regnety_004(pretrained=False, **kwargs): + """RegNetY-400MF""" + return _create_regnet('regnety_004', pretrained, **kwargs) + + +@register_model +def regnety_006(pretrained=False, **kwargs): + """RegNetY-600MF""" + return _create_regnet('regnety_006', pretrained, **kwargs) + + +@register_model +def regnety_008(pretrained=False, **kwargs): + """RegNetY-800MF""" + return _create_regnet('regnety_008', pretrained, **kwargs) + + +@register_model +def regnety_016(pretrained=False, **kwargs): + """RegNetY-1.6GF""" + return _create_regnet('regnety_016', pretrained, **kwargs) + + +@register_model +def regnety_032(pretrained=False, **kwargs): + """RegNetY-3.2GF""" + return _create_regnet('regnety_032', pretrained, **kwargs) + + +@register_model +def regnety_040(pretrained=False, **kwargs): + """RegNetY-4.0GF""" + return _create_regnet('regnety_040', pretrained, **kwargs) + + +@register_model +def regnety_064(pretrained=False, **kwargs): + """RegNetY-6.4GF""" + return _create_regnet('regnety_064', pretrained, **kwargs) + + +@register_model +def regnety_080(pretrained=False, **kwargs): + """RegNetY-8.0GF""" + return _create_regnet('regnety_080', pretrained, **kwargs) + + +@register_model +def regnety_120(pretrained=False, **kwargs): + """RegNetY-12GF""" + return _create_regnet('regnety_120', pretrained, **kwargs) + + +@register_model +def regnety_160(pretrained=False, **kwargs): + """RegNetY-16GF""" + return _create_regnet('regnety_160', pretrained, **kwargs) + + +@register_model +def regnety_320(pretrained=False, **kwargs): + """RegNetY-32GF""" + return _create_regnet('regnety_320', pretrained, **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/res2net.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/res2net.py new file mode 100644 index 0000000000..282baba3b0 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/res2net.py @@ -0,0 +1,216 @@ +""" Res2Net and Res2NeXt +Adapted from Official Pytorch impl at: https://github.com/gasvn/Res2Net/ +Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 +""" +import math + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .registry import register_model +from .resnet import ResNet + +__all__ = [] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'res2net50_26w_4s': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_4s-06e79181.pth'), + 'res2net50_48w_2s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_48w_2s-afed724a.pth'), + 'res2net50_14w_8s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_14w_8s-6527dddc.pth'), + 'res2net50_26w_6s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_6s-19041792.pth'), + 'res2net50_26w_8s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_8s-2c7c9f12.pth'), + 'res2net101_26w_4s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net101_26w_4s-02a759a1.pth'), + 'res2next50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next50_4s-6ef7e7bf.pth'), +} + + +class Bottle2neck(nn.Module): + """ Res2Net/Res2NeXT Bottleneck + Adapted from https://github.com/gasvn/Res2Net/blob/master/res2net.py + """ + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, + cardinality=1, base_width=26, scale=4, dilation=1, first_dilation=None, + act_layer=nn.ReLU, norm_layer=None, attn_layer=None, **_): + super(Bottle2neck, self).__init__() + self.scale = scale + self.is_first = stride > 1 or downsample is not None + self.num_scales = max(1, scale - 1) + width = int(math.floor(planes * (base_width / 64.0))) * cardinality + self.width = width + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False) + self.bn1 = norm_layer(width * scale) + + convs = [] + bns = [] + for i in range(self.num_scales): + convs.append(nn.Conv2d( + width, width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, bias=False)) + bns.append(norm_layer(width)) + self.convs = nn.ModuleList(convs) + self.bns = nn.ModuleList(bns) + if self.is_first: + # FIXME this should probably have count_include_pad=False, but hurts original weights + self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) + else: + self.pool = None + + self.conv3 = nn.Conv2d(width * scale, outplanes, kernel_size=1, bias=False) + self.bn3 = norm_layer(outplanes) + self.se = attn_layer(outplanes) if attn_layer is not None else None + + self.relu = act_layer(inplace=True) + self.downsample = downsample + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + spo = [] + sp = spx[0] # redundant, for torchscript + for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): + if i == 0 or self.is_first: + sp = spx[i] + else: + sp = sp + spx[i] + sp = conv(sp) + sp = bn(sp) + sp = self.relu(sp) + spo.append(sp) + if self.scale > 1: + if self.pool is not None: + # self.is_first == True, None check for torchscript + spo.append(self.pool(spx[-1])) + else: + spo.append(spx[-1]) + out = torch.cat(spo, 1) + + out = self.conv3(out) + out = self.bn3(out) + + if self.se is not None: + out = self.se(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out += shortcut + out = self.relu(out) + + return out + + +def _create_res2net(variant, pretrained=False, **kwargs): + return 
build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def res2net50_26w_4s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w4s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2net50_26w_4s', pretrained, **model_args) + + +@register_model +def res2net101_26w_4s(pretrained=False, **kwargs): + """Constructs a Res2Net-101 26w4s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2net101_26w_4s', pretrained, **model_args) + + +@register_model +def res2net50_26w_6s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w6s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=6), **kwargs) + return _create_res2net('res2net50_26w_6s', pretrained, **model_args) + + +@register_model +def res2net50_26w_8s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w8s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=8), **kwargs) + return _create_res2net('res2net50_26w_8s', pretrained, **model_args) + + +@register_model +def res2net50_48w_2s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 48w2s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=48, block_args=dict(scale=2), **kwargs) + return _create_res2net('res2net50_48w_2s', pretrained, **model_args) + + +@register_model +def res2net50_14w_8s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 14w8s model. 
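As with the other architectures registered in this package, the Res2Net entrypoints above are built through the model registry. A minimal usage sketch, assuming the bundled timm package is importable and no pretrained weights are required:

import torch
import timm

# Randomly initialized Res2Net-50 26w4s from the entrypoint registered above;
# extra kwargs such as num_classes are forwarded to the underlying ResNet.
model = timm.create_model('res2net50_26w_4s', pretrained=False, num_classes=10)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # expected: torch.Size([1, 10])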
+ Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=14, block_args=dict(scale=8), **kwargs) + return _create_res2net('res2net50_14w_8s', pretrained, **model_args) + + +@register_model +def res2next50(pretrained=False, **kwargs): + """Construct Res2NeXt-50 4s + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=4, cardinality=8, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2next50', pretrained, **model_args) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/resnest.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/resnest.py new file mode 100644 index 0000000000..31eebd8092 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/resnest.py @@ -0,0 +1,237 @@ +""" ResNeSt Models + +Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 + +Adapted from original PyTorch impl w/ weights at https://github.com/zhanghang1989/ResNeSt by Hang Zhang + +Modified for torchscript compat, and consistency with timm by Ross Wightman +""" +import torch +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import SplitAttn +from .registry import register_model +from .resnet import ResNet + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1.0', 'classifier': 'fc', + **kwargs + } + +default_cfgs = { + 'resnest14d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest14-9c8fe254.pth'), + 'resnest26d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest26-50eb607c.pth'), + 'resnest50d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50-528c19ca.pth'), + 'resnest101e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest101-22405ba7.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + 'resnest200e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest200-75117900.pth', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'), + 'resnest269e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest269-0cc87c48.pth', + input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'), + 'resnest50d_4s2x40d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_4s2x40d-41d14ed0.pth', + interpolation='bicubic'), + 'resnest50d_1s4x24d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_1s4x24d-d4a4f76f.pth', + interpolation='bicubic') +} + + +class ResNestBottleneck(nn.Module): + """ResNet Bottleneck + """ + # pylint: disable=unused-argument + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, + radix=1, cardinality=1, base_width=64, avd=False, avd_first=False, is_first=False, + reduce_first=1, dilation=1, 
first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(ResNestBottleneck, self).__init__() + assert reduce_first == 1 # not supported + assert attn_layer is None # not supported + assert aa_layer is None # TODO not yet supported + assert drop_path is None # TODO not yet supported + + group_width = int(planes * (base_width / 64.)) * cardinality + first_dilation = first_dilation or dilation + if avd and (stride > 1 or is_first): + avd_stride = stride + stride = 1 + else: + avd_stride = 0 + self.radix = radix + self.drop_block = drop_block + + self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False) + self.bn1 = norm_layer(group_width) + self.act1 = act_layer(inplace=True) + self.avd_first = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None + + if self.radix >= 1: + self.conv2 = SplitAttn( + group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_block=drop_block) + self.bn2 = nn.Identity() + self.act2 = nn.Identity() + else: + self.conv2 = nn.Conv2d( + group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, bias=False) + self.bn2 = norm_layer(group_width) + self.act2 = act_layer(inplace=True) + self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and not avd_first else None + + self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False) + self.bn3 = norm_layer(planes*4) + self.act3 = act_layer(inplace=True) + self.downsample = downsample + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + if self.drop_block is not None: + out = self.drop_block(out) + out = self.act1(out) + + if self.avd_first is not None: + out = self.avd_first(out) + + out = self.conv2(out) + out = self.bn2(out) + if self.drop_block is not None: + out = self.drop_block(out) + out = self.act2(out) + + if self.avd_last is not None: + out = self.avd_last(out) + + out = self.conv3(out) + out = self.bn3(out) + if self.drop_block is not None: + out = self.drop_block(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out += shortcut + out = self.act3(out) + return out + + +def _create_resnest(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def resnest14d(pretrained=False, **kwargs): + """ ResNeSt-14d model. Weights ported from GluonCV. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[1, 1, 1, 1], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest14d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest26d(pretrained=False, **kwargs): + """ ResNeSt-26d model. Weights ported from GluonCV. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[2, 2, 2, 2], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest26d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d(pretrained=False, **kwargs): + """ ResNeSt-50d model. 
Matches paper ResNeSt-50 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'd' for deep stem, stem_width 32, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest50d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest101e(pretrained=False, **kwargs): + """ ResNeSt-101e model. Matches paper ResNeSt-101 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 23, 3], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest101e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest200e(pretrained=False, **kwargs): + """ ResNeSt-200e model. Matches paper ResNeSt-200 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 24, 36, 3], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest200e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest269e(pretrained=False, **kwargs): + """ ResNeSt-269e model. Matches paper ResNeSt-269 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. 
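The ResNeSt entrypoints in this file share the same ResNet base and differ only in layer counts, stem width, and the radix/cardinality/base_width settings passed through block_args to ResNestBottleneck. A small hedged sketch for comparing a few of them, assuming the bundled timm registry:

import timm

# Same split-attention family, different depth/radix settings per the entrypoints above.
for name in ['resnest14d', 'resnest50d', 'resnest50d_4s2x40d']:
    m = timm.create_model(name, pretrained=False)
    print(name, m.num_features, sum(p.numel() for p in m.parameters()))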
+ """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 30, 48, 8], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest269e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d_4s2x40d(pretrained=False, **kwargs): + """ResNeSt-50 4s2x40d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2, + block_args=dict(radix=4, avd=True, avd_first=True), **kwargs) + return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d_1s4x24d(pretrained=False, **kwargs): + """ResNeSt-50 1s4x24d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4, + block_args=dict(radix=1, avd=True, avd_first=True), **kwargs) + return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **model_kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/resnet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/resnet.py new file mode 100644 index 0000000000..c1336458e0 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/resnet.py @@ -0,0 +1,1455 @@ +"""PyTorch ResNet + +This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with +additional dropout and dynamic global avg/max pool. + +ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman +Copyright 2020 Ross Wightman +""" +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, create_attn, get_attn, create_classifier +from .registry import register_model + +__all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + # ResNet and Wide ResNet + 'resnet18': _cfg(url='https://download.pytorch.org/models/resnet18-5c106cde.pth'), + 'resnet18d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet34': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'), + 'resnet34d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet26': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth', + interpolation='bicubic'), + 'resnet26d': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet26t_256_ra2-6f6fa748.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8)), + 'resnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1_0-14fe96d1.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnet50d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet50t': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet101': _cfg(url='', interpolation='bicubic'), + 'resnet101d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320)), + 'resnet152': _cfg(url='', interpolation='bicubic'), + 'resnet152d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320)), + 'resnet200': _cfg(url='', interpolation='bicubic'), + 'resnet200d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320)), + 'tv_resnet34': _cfg(url='https://download.pytorch.org/models/resnet34-333f7ec4.pth'), + 'tv_resnet50': _cfg(url='https://download.pytorch.org/models/resnet50-19c8e357.pth'), + 'tv_resnet101': _cfg(url='https://download.pytorch.org/models/resnet101-5d3b4d8f.pth'), + 'tv_resnet152': _cfg(url='https://download.pytorch.org/models/resnet152-b121ed2d.pth'), + 'wide_resnet50_2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth', + interpolation='bicubic'), + 'wide_resnet101_2': _cfg(url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth'), + + # ResNeXt + 'resnext50_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth', + interpolation='bicubic'), + 'resnext50d_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'resnext101_32x4d': _cfg(url=''), + 'resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth'), + 'resnext101_64x4d': _cfg(url=''), + 'tv_resnext50_32x4d': _cfg(url='https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth'), + + # ResNeXt models - Weakly Supervised Pretraining on Instagram Hashtags + # from https://github.com/facebookresearch/WSL-Images + # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. 
+ 'ig_resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth'), + 'ig_resnext101_32x16d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth'), + 'ig_resnext101_32x32d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth'), + 'ig_resnext101_32x48d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth'), + + # Semi-Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models + # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + 'ssl_resnet18': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth'), + 'ssl_resnet50': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth'), + 'ssl_resnext50_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth'), + 'ssl_resnext101_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth'), + 'ssl_resnext101_32x8d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth'), + 'ssl_resnext101_32x16d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth'), + + # Semi-Weakly Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models + # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. + 'swsl_resnet18': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth'), + 'swsl_resnet50': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth'), + 'swsl_resnext50_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth'), + 'swsl_resnext101_32x4d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth'), + 'swsl_resnext101_32x8d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth'), + 'swsl_resnext101_32x16d': _cfg( + url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth'), + + # Squeeze-Excitation ResNets, to eventually replace the models in senet.py + 'seresnet18': _cfg( + url='', + interpolation='bicubic'), + 'seresnet34': _cfg( + url='', + interpolation='bicubic'), + 'seresnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth', + interpolation='bicubic'), + 'seresnet50t': _cfg( + url='', + interpolation='bicubic', + first_conv='conv1.0'), + 'seresnet101': _cfg( + url='', + interpolation='bicubic'), + 'seresnet152': _cfg( + url='', + interpolation='bicubic'), + 'seresnet152d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320) + ), + 'seresnet200d': _cfg( + url='', + interpolation='bicubic', 
first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), + 'seresnet269d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), + + + # Squeeze-Excitation ResNeXts, to eventually replace the models in senet.py + 'seresnext26d_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'seresnext26t_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'seresnext50_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth', + interpolation='bicubic'), + 'seresnext101_32x4d': _cfg( + url='', + interpolation='bicubic'), + 'seresnext101_32x8d': _cfg( + url='', + interpolation='bicubic'), + 'senet154': _cfg( + url='', + interpolation='bicubic', + first_conv='conv1.0'), + + # Efficient Channel Attention ResNets + 'ecaresnet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet26t_ra2-46609757.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320)), + 'ecaresnetlight': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNetLight_4f34b35b.pth', + interpolation='bicubic'), + 'ecaresnet50d': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet50D_833caf58.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'ecaresnet50d_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45899/outputs/ECAResNet50D_P_9c67f710.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'ecaresnet50t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet50t_ra2-f7ac63c4.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320)), + 'ecaresnet101d': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet101D_281c5844.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'ecaresnet101d_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45610/outputs/ECAResNet101D_P_75a3370e.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'ecaresnet200d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), + 'ecaresnet269d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet269d_320_ra2-7baa55cb.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 320, 320), pool_size=(10, 10), + crop_pct=1.0, test_input_size=(3, 352, 352)), + + # Efficient Channel Attention ResNeXts + 'ecaresnext26t_32x4d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'ecaresnext50t_32x4d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + + # ResNets with anti-aliasing blur pool + 'resnetblur18': _cfg( + interpolation='bicubic'), + 'resnetblur50': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth', + interpolation='bicubic'), + + # ResNet-RS models + 'resnetrs50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs50_ema-6b53758b.pth', + input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.91, test_input_size=(3, 224, 224), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs101_i192_ema-1509bbf6.pth', + input_size=(3, 192, 192), pool_size=(6, 6), crop_pct=0.94, test_input_size=(3, 288, 288), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs152': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs152_i256_ema-a9aff7f9.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs200': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs200_ema-623d2f59.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs270': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs270_ema-b40e674c.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 352, 352), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs350': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs350_i256_ema-5a1aa8f1.pth', + input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, test_input_size=(3, 384, 384), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs420': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs420_ema-972dee69.pth', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, test_input_size=(3, 416, 416), + interpolation='bicubic', first_conv='conv1.0'), +} + + +def get_padding(kernel_size, stride, dilation=1): + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(BasicBlock, self).__init__() + + assert cardinality == 1, 'BasicBlock only supports cardinality of 1' + assert base_width == 64, 'BasicBlock does not support changing base width' + first_planes = planes // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) + + self.conv1 = nn.Conv2d( + inplanes, first_planes, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation, + dilation=first_dilation, bias=False) + self.bn1 = norm_layer(first_planes) + self.act1 = act_layer(inplace=True) + self.aa = aa_layer(channels=first_planes, stride=stride) if use_aa else None + + self.conv2 = nn.Conv2d( + first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False) + self.bn2 = norm_layer(outplanes) + + self.se = create_attn(attn_layer, outplanes) + + 
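        # Note: self.se may be None here (create_attn returns None when no attn_layer
        # is requested), which is why forward() below guards the SE branch with
        # `if self.se is not None`.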
self.act2 = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn2.weight) + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.bn1(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act1(x) + if self.aa is not None: + x = self.aa(x) + + x = self.conv2(x) + x = self.bn2(x) + if self.drop_block is not None: + x = self.drop_block(x) + + if self.se is not None: + x = self.se(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act2(x) + + return x + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(Bottleneck, self).__init__() + + width = int(math.floor(planes * (base_width / 64)) * cardinality) + first_planes = width // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) + + self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False) + self.bn1 = norm_layer(first_planes) + self.act1 = act_layer(inplace=True) + + self.conv2 = nn.Conv2d( + first_planes, width, kernel_size=3, stride=1 if use_aa else stride, + padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False) + self.bn2 = norm_layer(width) + self.act2 = act_layer(inplace=True) + self.aa = aa_layer(channels=width, stride=stride) if use_aa else None + + self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False) + self.bn3 = norm_layer(outplanes) + + self.se = create_attn(attn_layer, outplanes) + + self.act3 = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.bn1(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.bn2(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act2(x) + if self.aa is not None: + x = self.aa(x) + + x = self.conv3(x) + x = self.bn3(x) + if self.drop_block is not None: + x = self.drop_block(x) + + if self.se is not None: + x = self.se(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act3(x) + + return x + + +def downsample_conv( + in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None): + norm_layer = norm_layer or nn.BatchNorm2d + kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size + first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1 + p = get_padding(kernel_size, stride, first_dilation) + + return nn.Sequential(*[ + nn.Conv2d( + in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False), + norm_layer(out_channels) + ]) + + +def downsample_avg( + in_channels, out_channels, kernel_size, stride=1, 
dilation=1, first_dilation=None, norm_layer=None): + norm_layer = norm_layer or nn.BatchNorm2d + avg_stride = stride if dilation == 1 else 1 + if stride == 1 and dilation == 1: + pool = nn.Identity() + else: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + + return nn.Sequential(*[ + pool, + nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False), + norm_layer(out_channels) + ]) + + +def drop_blocks(drop_block_rate=0.): + return [ + None, None, + DropBlock2d(drop_block_rate, 5, 0.25) if drop_block_rate else None, + DropBlock2d(drop_block_rate, 3, 1.00) if drop_block_rate else None] + + +def make_blocks( + block_fn, channels, block_repeats, inplanes, reduce_first=1, output_stride=32, + down_kernel_size=1, avg_down=False, drop_block_rate=0., drop_path_rate=0., **kwargs): + stages = [] + feature_info = [] + net_num_blocks = sum(block_repeats) + net_block_idx = 0 + net_stride = 4 + dilation = prev_dilation = 1 + for stage_idx, (planes, num_blocks, db) in enumerate(zip(channels, block_repeats, drop_blocks(drop_block_rate))): + stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it + stride = 1 if stage_idx == 0 else 2 + if net_stride >= output_stride: + dilation *= stride + stride = 1 + else: + net_stride *= stride + + downsample = None + if stride != 1 or inplanes != planes * block_fn.expansion: + down_kwargs = dict( + in_channels=inplanes, out_channels=planes * block_fn.expansion, kernel_size=down_kernel_size, + stride=stride, dilation=dilation, first_dilation=prev_dilation, norm_layer=kwargs.get('norm_layer')) + downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs) + + block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs) + blocks = [] + for block_idx in range(num_blocks): + downsample = downsample if block_idx == 0 else None + stride = stride if block_idx == 0 else 1 + block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule + blocks.append(block_fn( + inplanes, planes, stride, downsample, first_dilation=prev_dilation, + drop_path=DropPath(block_dpr) if block_dpr > 0. else None, **block_kwargs)) + prev_dilation = dilation + inplanes = planes * block_fn.expansion + net_block_idx += 1 + + stages.append((stage_name, nn.Sequential(*blocks))) + feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name)) + + return stages, feature_info + + +class ResNet(nn.Module): + """ResNet / ResNeXt / SE-ResNeXt / SE-Net + + This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that + * have > 1 stride in the 3x3 conv layer of bottleneck + * have conv-bn-act ordering + + This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s + variants included in the MXNet Gluon ResNetV1b model. The C and D variants are also discussed in the + 'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to torchvision default. 
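    For example, the 'd' stem/downsample combination listed just below maps onto these constructor
    arguments roughly as follows (an illustrative sketch mirroring the resnet50d entrypoint defined
    later in this file):

        model = ResNet(block=Bottleneck, layers=[3, 4, 6, 3],
                       stem_width=32, stem_type='deep', avg_down=True)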
+ + ResNet variants (the same modifications can be used in SE/ResNeXt models as well): + * normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b + * c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64) + * d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample + * e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample + * s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128) + * t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample + * tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample + + ResNeXt + * normal - 7x7 stem, stem_width = 64, standard cardinality and base widths + * same c,d, e, s variants as ResNet can be enabled + + SE-ResNeXt + * normal - 7x7 stem, stem_width = 64 + * same c, d, e, s variants as ResNet can be enabled + + SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality=64, + reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block + + Parameters + ---------- + block : Block + Class for the residual block. Options are BasicBlockGl, BottleneckGl. + layers : list of int + Numbers of layers in each block + num_classes : int, default 1000 + Number of classification classes. + in_chans : int, default 3 + Number of input (color) channels. + cardinality : int, default 1 + Number of convolution groups for 3x3 conv in Bottleneck. + base_width : int, default 64 + Factor determining bottleneck channels. `planes * base_width / 64 * cardinality` + stem_width : int, default 64 + Number of channels in stem convolutions + stem_type : str, default '' + The type of stem: + * '', default - a single 7x7 conv with a width of stem_width + * 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2 + * 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2 + block_reduce_first: int, default 1 + Reduction factor for first convolution output width of residual blocks, + 1 for all archs except senets, where 2 + down_kernel_size: int, default 1 + Kernel size of residual block downsampling path, 1x1 for most archs, 3x3 for senets + avg_down : bool, default False + Whether to use average pooling for projection skip connection between stages/downsample. + output_stride : int, default 32 + Set the output stride of the network, 32, 16, or 8. Typically used in segmentation. + act_layer : nn.Module, activation layer + norm_layer : nn.Module, normalization layer + aa_layer : nn.Module, anti-aliasing layer + drop_rate : float, default 0. + Dropout probability before classifier, for training + global_pool : str, default 'avg' + Global pooling type. 
One of 'avg', 'max', 'avgmax', 'catavgmax' + """ + + def __init__(self, block, layers, num_classes=1000, in_chans=3, + cardinality=1, base_width=64, stem_width=64, stem_type='', replace_stem_pool=False, + output_stride=32, block_reduce_first=1, down_kernel_size=1, avg_down=False, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_rate=0.0, drop_path_rate=0., + drop_block_rate=0., global_pool='avg', zero_init_last_bn=True, block_args=None): + block_args = block_args or dict() + assert output_stride in (8, 16, 32) + self.num_classes = num_classes + self.drop_rate = drop_rate + super(ResNet, self).__init__() + + # Stem + deep_stem = 'deep' in stem_type + inplanes = stem_width * 2 if deep_stem else 64 + if deep_stem: + stem_chs = (stem_width, stem_width) + if 'tiered' in stem_type: + stem_chs = (3 * (stem_width // 4), stem_width) + self.conv1 = nn.Sequential(*[ + nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False), + norm_layer(stem_chs[0]), + act_layer(inplace=True), + nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False), + norm_layer(stem_chs[1]), + act_layer(inplace=True), + nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)]) + else: + self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False) + self.bn1 = norm_layer(inplanes) + self.act1 = act_layer(inplace=True) + self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')] + + # Stem Pooling + if replace_stem_pool: + self.maxpool = nn.Sequential(*filter(None, [ + nn.Conv2d(inplanes, inplanes, 3, stride=1 if aa_layer else 2, padding=1, bias=False), + aa_layer(channels=inplanes, stride=2) if aa_layer else None, + norm_layer(inplanes), + act_layer(inplace=True) + ])) + else: + if aa_layer is not None: + self.maxpool = nn.Sequential(*[ + nn.MaxPool2d(kernel_size=3, stride=1, padding=1), + aa_layer(channels=inplanes, stride=2)]) + else: + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + # Feature Blocks + channels = [64, 128, 256, 512] + stage_modules, stage_feature_info = make_blocks( + block, channels, layers, inplanes, cardinality=cardinality, base_width=base_width, + output_stride=output_stride, reduce_first=block_reduce_first, avg_down=avg_down, + down_kernel_size=down_kernel_size, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, + drop_block_rate=drop_block_rate, drop_path_rate=drop_path_rate, **block_args) + for stage in stage_modules: + self.add_module(*stage) # layer1, layer2, etc + self.feature_info.extend(stage_feature_info) + + # Head (Pooling and Classifier) + self.num_features = 512 * block.expansion + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + self.init_weights(zero_init_last_bn=zero_init_last_bn) + + def init_weights(self, zero_init_last_bn=True): + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + if zero_init_last_bn: + for m in self.modules(): + if hasattr(m, 'zero_init_last_bn'): + m.zero_init_last_bn() + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = 
self.act1(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate: + x = F.dropout(x, p=float(self.drop_rate), training=self.training) + x = self.fc(x) + return x + + +def _create_resnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('resnet18', pretrained, **model_args) + + +@register_model +def resnet18d(pretrained=False, **kwargs): + """Constructs a ResNet-18-D model. + """ + model_args = dict( + block=BasicBlock, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet18d', pretrained, **model_args) + + +@register_model +def resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('resnet34', pretrained, **model_args) + + +@register_model +def resnet34d(pretrained=False, **kwargs): + """Constructs a ResNet-34-D model. + """ + model_args = dict( + block=BasicBlock, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet34d', pretrained, **model_args) + + +@register_model +def resnet26(pretrained=False, **kwargs): + """Constructs a ResNet-26 model. + """ + model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('resnet26', pretrained, **model_args) + + +@register_model +def resnet26t(pretrained=False, **kwargs): + """Constructs a ResNet-26-T model. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs) + return _create_resnet('resnet26t', pretrained, **model_args) + + +@register_model +def resnet26d(pretrained=False, **kwargs): + """Constructs a ResNet-26-D model. + """ + model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet26d', pretrained, **model_args) + + +@register_model +def resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('resnet50', pretrained, **model_args) + + +@register_model +def resnet50d(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet50d', pretrained, **model_args) + + +@register_model +def resnet50t(pretrained=False, **kwargs): + """Constructs a ResNet-50-T model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs) + return _create_resnet('resnet50t', pretrained, **model_args) + + +@register_model +def resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('resnet101', pretrained, **model_args) + + +@register_model +def resnet101d(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model. 
+ """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet101d', pretrained, **model_args) + + +@register_model +def resnet152(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('resnet152', pretrained, **model_args) + + +@register_model +def resnet152d(pretrained=False, **kwargs): + """Constructs a ResNet-152-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet152d', pretrained, **model_args) + + +@register_model +def resnet200(pretrained=False, **kwargs): + """Constructs a ResNet-200 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 24, 36, 3], **kwargs) + return _create_resnet('resnet200', pretrained, **model_args) + + +@register_model +def resnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet200d', pretrained, **model_args) + + +@register_model +def tv_resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model with original Torchvision weights. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('tv_resnet34', pretrained, **model_args) + + +@register_model +def tv_resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model with original Torchvision weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('tv_resnet50', pretrained, **model_args) + + +@register_model +def tv_resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model w/ Torchvision pretrained weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('tv_resnet101', pretrained, **model_args) + + +@register_model +def tv_resnet152(pretrained=False, **kwargs): + """Constructs a ResNet-152 model w/ Torchvision pretrained weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('tv_resnet152', pretrained, **model_args) + + +@register_model +def wide_resnet50_2(pretrained=False, **kwargs): + """Constructs a Wide ResNet-50-2 model. + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], base_width=128, **kwargs) + return _create_resnet('wide_resnet50_2', pretrained, **model_args) + + +@register_model +def wide_resnet101_2(pretrained=False, **kwargs): + """Constructs a Wide ResNet-101-2 model. + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], base_width=128, **kwargs) + return _create_resnet('wide_resnet101_2', pretrained, **model_args) + + +@register_model +def resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model. 
+ """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('resnext50_32x4d', pretrained, **model_args) + + +@register_model +def resnext50d_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50d-32x4d model. ResNext50 w/ deep stem & avg pool downsample + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnext50d_32x4d', pretrained, **model_args) + + +@register_model +def resnext101_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 32x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('resnext101_32x4d', pretrained, **model_args) + + +@register_model +def resnext101_32x8d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 32x8d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('resnext101_32x8d', pretrained, **model_args) + + +@register_model +def resnext101_64x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt101-64x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, **kwargs) + return _create_resnet('resnext101_64x4d', pretrained, **model_args) + + +@register_model +def tv_resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model with original Torchvision weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('tv_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x8d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('ig_resnext101_32x8d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x16d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) + return _create_resnet('ig_resnext101_32x16d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x32d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=32, **kwargs) + return _create_resnet('ig_resnext101_32x32d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x48d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly 
Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=48, **kwargs) + return _create_resnet('ig_resnext101_32x48d', pretrained, **model_args) + + +@register_model +def ssl_resnet18(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNet-18 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('ssl_resnet18', pretrained, **model_args) + + +@register_model +def ssl_resnet50(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNet-50 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('ssl_resnet50', pretrained, **model_args) + + +@register_model +def ssl_resnext50_32x4d(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNeXt-50 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('ssl_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def ssl_resnext101_32x4d(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNeXt-101 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('ssl_resnext101_32x4d', pretrained, **model_args) + + +@register_model +def ssl_resnext101_32x8d(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNeXt-101 32x8 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('ssl_resnext101_32x8d', pretrained, **model_args) + + +@register_model +def ssl_resnext101_32x16d(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNeXt-101 32x16 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) + return _create_resnet('ssl_resnext101_32x16d', pretrained, **model_args) + + +@register_model +def swsl_resnet18(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised Resnet-18 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. 
+ `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('swsl_resnet18', pretrained, **model_args) + + +@register_model +def swsl_resnet50(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNet-50 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('swsl_resnet50', pretrained, **model_args) + + +@register_model +def swsl_resnext50_32x4d(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-50 32x4 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('swsl_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def swsl_resnext101_32x4d(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-101 32x4 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('swsl_resnext101_32x4d', pretrained, **model_args) + + +@register_model +def swsl_resnext101_32x8d(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-101 32x8 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('swsl_resnext101_32x8d', pretrained, **model_args) + + +@register_model +def swsl_resnext101_32x16d(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-101 32x16 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) + return _create_resnet('swsl_resnext101_32x16d', pretrained, **model_args) + + +@register_model +def ecaresnet26t(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem and ECA attn. 
+ """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet26t', pretrained, **model_args) + + +@register_model +def ecaresnet50d(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model with eca. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50d', pretrained, **model_args) + + +@register_model +def resnetrs50(pretrained=False, **kwargs): + """Constructs a ResNet-RS-50 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs50', pretrained, **model_args) + + +@register_model +def resnetrs101(pretrained=False, **kwargs): + """Constructs a ResNet-RS-101 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs101', pretrained, **model_args) + + +@register_model +def resnetrs152(pretrained=False, **kwargs): + """Constructs a ResNet-RS-152 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs152', pretrained, **model_args) + + +@register_model +def resnetrs200(pretrained=False, **kwargs): + """Constructs a ResNet-RS-200 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs200', pretrained, **model_args) + + +@register_model +def resnetrs270(pretrained=False, **kwargs): + """Constructs a ResNet-RS-270 model. 
+ Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 29, 53, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs270', pretrained, **model_args) + + + +@register_model +def resnetrs350(pretrained=False, **kwargs): + """Constructs a ResNet-RS-350 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 36, 72, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs350', pretrained, **model_args) + + +@register_model +def resnetrs420(pretrained=False, **kwargs): + """Constructs a ResNet-RS-420 model + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 44, 87, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs420', pretrained, **model_args) + + +@register_model +def ecaresnet50d_pruned(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model pruned with eca. + The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50d_pruned', pretrained, pruned=True, **model_args) + + +@register_model +def ecaresnet50t(pretrained=False, **kwargs): + """Constructs an ECA-ResNet-50-T model. + Like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50t', pretrained, **model_args) + + +@register_model +def ecaresnetlight(pretrained=False, **kwargs): + """Constructs a ResNet-50-D light model with eca. + """ + model_args = dict( + block=Bottleneck, layers=[1, 1, 11, 3], stem_width=32, avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnetlight', pretrained, **model_args) + + +@register_model +def ecaresnet101d(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model with eca. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet101d', pretrained, **model_args) + + +@register_model +def ecaresnet101d_pruned(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model pruned with eca. 
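Editorial note (not part of the patched file): attn_layer='eca' attaches Efficient Channel Attention to each block. A minimal standalone sketch of the ECA idea (illustrative only, not timm's EcaModule): a 1D conv over the pooled channel descriptor replaces the usual FC squeeze/excite bottleneck.

import torch
import torch.nn as nn

class TinyECA(nn.Module):
    def __init__(self, kernel_size=3):
        super().__init__()
        self.conv = nn.Conv1d(1, 1, kernel_size, padding=kernel_size // 2, bias=False)

    def forward(self, x):
        y = x.mean(dim=(2, 3))                          # (N, C) global channel descriptor
        y = self.conv(y.unsqueeze(1)).squeeze(1)        # 1D conv across channels, no reduction
        return x * torch.sigmoid(y)[:, :, None, None]   # per-channel gate

assert TinyECA()(torch.randn(2, 64, 8, 8)).shape == (2, 64, 8, 8)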
+ The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet101d_pruned', pretrained, pruned=True, **model_args) + + +@register_model +def ecaresnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model with ECA. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet200d', pretrained, **model_args) + + +@register_model +def ecaresnet269d(pretrained=False, **kwargs): + """Constructs a ResNet-269-D model with ECA. + """ + model_args = dict( + block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet269d', pretrained, **model_args) + + +@register_model +def ecaresnext26t_32x4d(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. This model replaces SE module with the ECA module + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnext26t_32x4d', pretrained, **model_args) + + +@register_model +def ecaresnext50t_32x4d(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-50-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. 
This model replaces SE module with the ECA module + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnext50t_32x4d', pretrained, **model_args) + + +@register_model +def resnetblur18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model with blur anti-aliasing + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], aa_layer=BlurPool2d, **kwargs) + return _create_resnet('resnetblur18', pretrained, **model_args) + + +@register_model +def resnetblur50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model with blur anti-aliasing + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d, **kwargs) + return _create_resnet('resnetblur50', pretrained, **model_args) + + +@register_model +def seresnet18(pretrained=False, **kwargs): + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet18', pretrained, **model_args) + + +@register_model +def seresnet34(pretrained=False, **kwargs): + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet34', pretrained, **model_args) + + +@register_model +def seresnet50(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet50', pretrained, **model_args) + + +@register_model +def seresnet50t(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet50t', pretrained, **model_args) + + +@register_model +def seresnet101(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet101', pretrained, **model_args) + + +@register_model +def seresnet152(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet152', pretrained, **model_args) + + +@register_model +def seresnet152d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet152d', pretrained, **model_args) + + +@register_model +def seresnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model with SE attn. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet200d', pretrained, **model_args) + + +@register_model +def seresnet269d(pretrained=False, **kwargs): + """Constructs a ResNet-269-D model with SE attn. 
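Editorial note (not part of the patched file): the seresnet*/seresnext* entrypoints below pass attn_layer='se' to every block. A minimal standalone squeeze-and-excitation sketch for reference (illustrative only, not timm's SEModule):

import torch
import torch.nn as nn

class TinySE(nn.Module):
    def __init__(self, channels, rd_ratio=1 / 16):
        super().__init__()
        rd = max(1, int(channels * rd_ratio))
        self.fc1 = nn.Conv2d(channels, rd, 1)   # squeeze
        self.fc2 = nn.Conv2d(rd, channels, 1)   # excite

    def forward(self, x):
        s = x.mean(dim=(2, 3), keepdim=True)    # global average pool
        s = torch.sigmoid(self.fc2(torch.relu(self.fc1(s))))
        return x * s                            # per-channel reweighting

assert TinySE(64)(torch.randn(2, 64, 8, 8)).shape == (2, 64, 8, 8)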
+ """ + model_args = dict( + block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet269d', pretrained, **model_args) + + +@register_model +def seresnext26d_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNeXt-26-D model.` + This is technically a 28 layer ResNet, using the 'D' modifier from Gluon / bag-of-tricks for + combination of deep stem and avg_pool in downsample. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext26d_32x4d', pretrained, **model_args) + + +@register_model +def seresnext26t_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNet-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext26t_32x4d', pretrained, **model_args) + + +@register_model +def seresnext26tn_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNeXt-26-T model. + NOTE I deprecated previous 't' model defs and replaced 't' with 'tn', this was the only tn model of note + so keeping this def for backwards compat with any uses out there. Old 't' model is lost. + """ + return seresnext26t_32x4d(pretrained=pretrained, **kwargs) + + +@register_model +def seresnext50_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def seresnext101_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext101_32x4d', pretrained, **model_args) + + +@register_model +def seresnext101_32x8d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext101_32x8d', pretrained, **model_args) + + +@register_model +def senet154(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep', + down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('senet154', pretrained, **model_args) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/resnetv2.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/resnetv2.py new file mode 100644 index 0000000000..b1344cb2df --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/resnetv2.py @@ -0,0 +1,656 @@ +"""Pre-Activation ResNet v2 with GroupNorm and Weight Standardization. + +A PyTorch implementation of ResNetV2 adapted from the Google Big-Transfoer (BiT) source code +at https://github.com/google-research/big_transfer to match timm interfaces. The BiT weights have +been included here as pretrained models from their original .NPZ checkpoints. 
+ +Additionally, supports non pre-activation bottleneck for use as a backbone for Vision Transfomers (ViT) and +extra padding support to allow porting of official Hybrid ResNet pretrained weights from +https://github.com/google-research/vision_transformer + +Thanks to the Google team for the above two repositories and associated papers: +* Big Transfer (BiT): General Visual Representation Learning - https://arxiv.org/abs/1912.11370 +* An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale - https://arxiv.org/abs/2010.11929 +* Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + +Original copyright of Google code below, modifications by Ross Wightman, Copyright 2020. +""" +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import OrderedDict # pylint: disable=g-importing-member + +import torch +import torch.nn as nn +from functools import partial + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg, named_apply, adapt_input_conv +from .registry import register_model +from .layers import GroupNormAct, BatchNormAct2d, EvoNormBatch2d, EvoNormSample2d,\ + ClassifierHead, DropPath, AvgPool2dSame, create_pool2d, StdConv2d, create_conv2d + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + # pretrained on imagenet21k, finetuned on imagenet1k + 'resnetv2_50x1_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x1-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_50x3_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x3-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_101x1_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x1-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_101x3_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x3-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_152x2_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x2-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_152x4_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x4-ILSVRC2012.npz', + input_size=(3, 480, 480), pool_size=(15, 15), crop_pct=1.0), # only one at 480x480? 
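Editorial note (not part of the patched file): _cfg below simply layers per-model overrides onto a shared baseline dict; the BiT-M ILSVRC2012 entries, for example, swap in a 448x448 eval size with crop_pct=1.0. A trivial sketch of the merge:

base = dict(input_size=(3, 224, 224), crop_pct=0.875, interpolation='bilinear')
override = dict(input_size=(3, 448, 448), crop_pct=1.0)
assert {**base, **override}['input_size'] == (3, 448, 448)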
+ + # trained on imagenet-21k + 'resnetv2_50x1_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x1.npz', + num_classes=21843), + 'resnetv2_50x3_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x3.npz', + num_classes=21843), + 'resnetv2_101x1_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x1.npz', + num_classes=21843), + 'resnetv2_101x3_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x3.npz', + num_classes=21843), + 'resnetv2_152x2_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x2.npz', + num_classes=21843), + 'resnetv2_152x4_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x4.npz', + num_classes=21843), + + 'resnetv2_50x1_bit_distilled': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R50x1_224.npz', + interpolation='bicubic'), + 'resnetv2_152x2_bit_teacher': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R152x2_T_224.npz', + interpolation='bicubic'), + 'resnetv2_152x2_bit_teacher_384': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R152x2_T_384.npz', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic'), + + 'resnetv2_50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnetv2_50_a1_h-000cdf49.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnetv2_50d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_50t': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_101': _cfg( + interpolation='bicubic'), + 'resnetv2_101d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_152': _cfg( + interpolation='bicubic'), + 'resnetv2_152d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), +} + + +def make_div(v, divisor=8): + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class PreActBottleneck(nn.Module): + """Pre-activation (v2) bottleneck block. + + Follows the implementation of "Identity Mappings in Deep Residual Networks": + https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua + + Except it puts the stride on 3x3 conv when available. 
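Editorial note (not part of the patched file): PreActBottleneck (above) and the non-pre-activation Bottleneck (below) differ mainly in where normalization/activation sit relative to the convolutions and the residual add. A toy comparison of the two orderings, illustrative only:

import torch
import torch.nn as nn

norm = nn.GroupNorm(32, 64)
conv = nn.Conv2d(64, 64, 1, bias=False)
x = torch.randn(1, 64, 8, 8)

pre_act = x + conv(torch.relu(norm(x)))    # v2: norm/act before the conv, clean identity shortcut
post_act = torch.relu(norm(conv(x)) + x)   # v1-style: activation applied after the residual add
assert pre_act.shape == post_act.shape == x.shape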
+ """ + + def __init__( + self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, + act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + conv_layer = conv_layer or StdConv2d + norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) + out_chs = out_chs or in_chs + mid_chs = make_div(out_chs * bottle_ratio) + + if proj_layer is not None: + self.downsample = proj_layer( + in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, preact=True, + conv_layer=conv_layer, norm_layer=norm_layer) + else: + self.downsample = None + + self.norm1 = norm_layer(in_chs) + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.norm2 = norm_layer(mid_chs) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + self.norm3 = norm_layer(mid_chs) + self.conv3 = conv_layer(mid_chs, out_chs, 1) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + + def zero_init_last(self): + nn.init.zeros_(self.conv3.weight) + + def forward(self, x): + x_preact = self.norm1(x) + + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x_preact) + + # residual branch + x = self.conv1(x_preact) + x = self.conv2(self.norm2(x)) + x = self.conv3(self.norm3(x)) + x = self.drop_path(x) + return x + shortcut + + +class Bottleneck(nn.Module): + """Non Pre-activation bottleneck block, equiv to V1.5/V1b Bottleneck. Used for ViT. + """ + def __init__( + self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, + act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + act_layer = act_layer or nn.ReLU + conv_layer = conv_layer or StdConv2d + norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) + out_chs = out_chs or in_chs + mid_chs = make_div(out_chs * bottle_ratio) + + if proj_layer is not None: + self.downsample = proj_layer( + in_chs, out_chs, stride=stride, dilation=dilation, preact=False, + conv_layer=conv_layer, norm_layer=norm_layer) + else: + self.downsample = None + + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.norm1 = norm_layer(mid_chs) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + self.norm2 = norm_layer(mid_chs) + self.conv3 = conv_layer(mid_chs, out_chs, 1) + self.norm3 = norm_layer(out_chs, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.act3 = act_layer(inplace=True) + + def zero_init_last(self): + nn.init.zeros_(self.norm3.weight) + + def forward(self, x): + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x) + + # residual + x = self.conv1(x) + x = self.norm1(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.conv3(x) + x = self.norm3(x) + x = self.drop_path(x) + x = self.act3(x + shortcut) + return x + + +class DownsampleConv(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, preact=True, + conv_layer=None, norm_layer=None): + super(DownsampleConv, self).__init__() + self.conv = conv_layer(in_chs, out_chs, 1, stride=stride) + self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) + + def forward(self, x): + return self.norm(self.conv(x)) + + 
+class DownsampleAvg(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, + preact=True, conv_layer=None, norm_layer=None): + """ AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment.""" + super(DownsampleAvg, self).__init__() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = conv_layer(in_chs, out_chs, 1, stride=1) + self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) + + def forward(self, x): + return self.norm(self.conv(self.pool(x))) + + +class ResNetStage(nn.Module): + """ResNet Stage.""" + def __init__(self, in_chs, out_chs, stride, dilation, depth, bottle_ratio=0.25, groups=1, + avg_down=False, block_dpr=None, block_fn=PreActBottleneck, + act_layer=None, conv_layer=None, norm_layer=None, **block_kwargs): + super(ResNetStage, self).__init__() + first_dilation = 1 if dilation in (1, 2) else 2 + layer_kwargs = dict(act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer) + proj_layer = DownsampleAvg if avg_down else DownsampleConv + prev_chs = in_chs + self.blocks = nn.Sequential() + for block_idx in range(depth): + drop_path_rate = block_dpr[block_idx] if block_dpr else 0. + stride = stride if block_idx == 0 else 1 + self.blocks.add_module(str(block_idx), block_fn( + prev_chs, out_chs, stride=stride, dilation=dilation, bottle_ratio=bottle_ratio, groups=groups, + first_dilation=first_dilation, proj_layer=proj_layer, drop_path_rate=drop_path_rate, + **layer_kwargs, **block_kwargs)) + prev_chs = out_chs + first_dilation = dilation + proj_layer = None + + def forward(self, x): + x = self.blocks(x) + return x + + +def is_stem_deep(stem_type): + return any([s in stem_type for s in ('deep', 'tiered')]) + + +def create_resnetv2_stem( + in_chs, out_chs=64, stem_type='', preact=True, + conv_layer=StdConv2d, norm_layer=partial(GroupNormAct, num_groups=32)): + stem = OrderedDict() + assert stem_type in ('', 'fixed', 'same', 'deep', 'deep_fixed', 'deep_same', 'tiered') + + # NOTE conv padding mode can be changed by overriding the conv_layer def + if is_stem_deep(stem_type): + # A 3 deep 3x3 conv stack as in ResNet V1D models + if 'tiered' in stem_type: + stem_chs = (3 * out_chs // 8, out_chs // 2) # 'T' resnets in resnet.py + else: + stem_chs = (out_chs // 2, out_chs // 2) # 'D' ResNets + stem['conv1'] = conv_layer(in_chs, stem_chs[0], kernel_size=3, stride=2) + stem['norm1'] = norm_layer(stem_chs[0]) + stem['conv2'] = conv_layer(stem_chs[0], stem_chs[1], kernel_size=3, stride=1) + stem['norm2'] = norm_layer(stem_chs[1]) + stem['conv3'] = conv_layer(stem_chs[1], out_chs, kernel_size=3, stride=1) + if not preact: + stem['norm3'] = norm_layer(out_chs) + else: + # The usual 7x7 stem conv + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) + if not preact: + stem['norm'] = norm_layer(out_chs) + + if 'fixed' in stem_type: + # 'fixed' SAME padding approximation that is used in BiT models + stem['pad'] = nn.ConstantPad2d(1, 0.) 
+ stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=0) + elif 'same' in stem_type: + # full, input size based 'SAME' padding, used in ViT Hybrid model + stem['pool'] = create_pool2d('max', kernel_size=3, stride=2, padding='same') + else: + # the usual PyTorch symmetric padding + stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + return nn.Sequential(stem) + + +class ResNetV2(nn.Module): + """Implementation of Pre-activation (v2) ResNet mode. + """ + + def __init__( + self, layers, channels=(256, 512, 1024, 2048), + num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, + width_factor=1, stem_chs=64, stem_type='', avg_down=False, preact=True, + act_layer=nn.ReLU, conv_layer=StdConv2d, norm_layer=partial(GroupNormAct, num_groups=32), + drop_rate=0., drop_path_rate=0., zero_init_last=False): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + wf = width_factor + + self.feature_info = [] + stem_chs = make_div(stem_chs * wf) + self.stem = create_resnetv2_stem( + in_chans, stem_chs, stem_type, preact, conv_layer=conv_layer, norm_layer=norm_layer) + stem_feat = ('stem.conv3' if is_stem_deep(stem_type) else 'stem.conv') if preact else 'stem.norm' + self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=stem_feat)) + + prev_chs = stem_chs + curr_stride = 4 + dilation = 1 + block_dprs = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] + block_fn = PreActBottleneck if preact else Bottleneck + self.stages = nn.Sequential() + for stage_idx, (d, c, bdpr) in enumerate(zip(layers, channels, block_dprs)): + out_chs = make_div(c * wf) + stride = 1 if stage_idx == 0 else 2 + if curr_stride >= output_stride: + dilation *= stride + stride = 1 + stage = ResNetStage( + prev_chs, out_chs, stride=stride, dilation=dilation, depth=d, avg_down=avg_down, + act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer, block_dpr=bdpr, block_fn=block_fn) + prev_chs = out_chs + curr_stride *= stride + self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{stage_idx}')] + self.stages.add_module(str(stage_idx), stage) + + self.num_features = prev_chs + self.norm = norm_layer(self.num_features) if preact else nn.Identity() + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True) + + self.init_weights(zero_init_last=zero_init_last) + + def init_weights(self, zero_init_last=True): + named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path, prefix='resnet/'): + _load_weights(self, checkpoint_path, prefix) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.norm(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _init_weights(module: nn.Module, name: str = '', zero_init_last=True): + if isinstance(module, nn.Linear) or ('head.fc' in name and isinstance(module, nn.Conv2d)): + nn.init.normal_(module.weight, mean=0.0, std=0.01) + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + nn.init.kaiming_normal_(module.weight, mode='fan_out', 
nonlinearity='relu') + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.BatchNorm2d, nn.LayerNorm, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif zero_init_last and hasattr(module, 'zero_init_last'): + module.zero_init_last() + + +@torch.no_grad() +def _load_weights(model: nn.Module, checkpoint_path: str, prefix: str = 'resnet/'): + import numpy as np + + def t2p(conv_weights): + """Possibly convert HWIO to OIHW.""" + if conv_weights.ndim == 4: + conv_weights = conv_weights.transpose([3, 2, 0, 1]) + return torch.from_numpy(conv_weights) + + weights = np.load(checkpoint_path) + stem_conv_w = adapt_input_conv( + model.stem.conv.weight.shape[1], t2p(weights[f'{prefix}root_block/standardized_conv2d/kernel'])) + model.stem.conv.weight.copy_(stem_conv_w) + model.norm.weight.copy_(t2p(weights[f'{prefix}group_norm/gamma'])) + model.norm.bias.copy_(t2p(weights[f'{prefix}group_norm/beta'])) + if isinstance(getattr(model.head, 'fc', None), nn.Conv2d) and \ + model.head.fc.weight.shape[0] == weights[f'{prefix}head/conv2d/kernel'].shape[-1]: + model.head.fc.weight.copy_(t2p(weights[f'{prefix}head/conv2d/kernel'])) + model.head.fc.bias.copy_(t2p(weights[f'{prefix}head/conv2d/bias'])) + for i, (sname, stage) in enumerate(model.stages.named_children()): + for j, (bname, block) in enumerate(stage.blocks.named_children()): + cname = 'standardized_conv2d' + block_prefix = f'{prefix}block{i + 1}/unit{j + 1:02d}/' + block.conv1.weight.copy_(t2p(weights[f'{block_prefix}a/{cname}/kernel'])) + block.conv2.weight.copy_(t2p(weights[f'{block_prefix}b/{cname}/kernel'])) + block.conv3.weight.copy_(t2p(weights[f'{block_prefix}c/{cname}/kernel'])) + block.norm1.weight.copy_(t2p(weights[f'{block_prefix}a/group_norm/gamma'])) + block.norm2.weight.copy_(t2p(weights[f'{block_prefix}b/group_norm/gamma'])) + block.norm3.weight.copy_(t2p(weights[f'{block_prefix}c/group_norm/gamma'])) + block.norm1.bias.copy_(t2p(weights[f'{block_prefix}a/group_norm/beta'])) + block.norm2.bias.copy_(t2p(weights[f'{block_prefix}b/group_norm/beta'])) + block.norm3.bias.copy_(t2p(weights[f'{block_prefix}c/group_norm/beta'])) + if block.downsample is not None: + w = weights[f'{block_prefix}a/proj/{cname}/kernel'] + block.downsample.conv.weight.copy_(t2p(w)) + + +def _create_resnetv2(variant, pretrained=False, **kwargs): + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + ResNetV2, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=feature_cfg, + pretrained_custom_load='_bit' in variant, + **kwargs) + + +def _create_resnetv2_bit(variant, pretrained=False, **kwargs): + return _create_resnetv2( + variant, pretrained=pretrained, stem_type='fixed', conv_layer=partial(StdConv2d, eps=1e-8), **kwargs) + + +@register_model +def resnetv2_50x1_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x1_bitm', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_50x3_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x3_bitm', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_101x1_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x1_bitm', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_101x3_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x3_bitm', 
pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_152x2_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x2_bitm', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_152x4_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x4_bitm', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=4, **kwargs) + + +@register_model +def resnetv2_50x1_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x1_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 6, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_50x3_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x3_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 6, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_101x1_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_101x1_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 23, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_101x3_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x3_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 4, 23, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_152x2_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x2_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_152x4_bitm_in21k(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_152x4_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), + layers=[3, 8, 36, 3], width_factor=4, **kwargs) + + +@register_model +def resnetv2_50x1_bit_distilled(pretrained=False, **kwargs): + """ ResNetV2-50x1-BiT Distilled + Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + """ + return _create_resnetv2_bit( + 'resnetv2_50x1_bit_distilled', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_152x2_bit_teacher(pretrained=False, **kwargs): + """ ResNetV2-152x2-BiT Teacher + Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + """ + return _create_resnetv2_bit( + 'resnetv2_152x2_bit_teacher', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_152x2_bit_teacher_384(pretrained=False, **kwargs): + """ ResNetV2-152xx-BiT Teacher @ 384x384 + Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + """ + return _create_resnetv2_bit( + 'resnetv2_152x2_bit_teacher_384', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) + + +@register_model +def resnetv2_50(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs) + + +@register_model +def resnetv2_50d(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50d', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, 
norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_50t(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50t', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='tiered', avg_down=True, **kwargs) + + +@register_model +def resnetv2_101(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_101', pretrained=pretrained, + layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs) + + +@register_model +def resnetv2_101d(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_101d', pretrained=pretrained, + layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_152(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_152', pretrained=pretrained, + layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs) + + +@register_model +def resnetv2_152d(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_152d', pretrained=pretrained, + layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +# @register_model +# def resnetv2_50ebd(pretrained=False, **kwargs): +# # FIXME for testing w/ TPU + PyTorch XLA +# return _create_resnetv2( +# 'resnetv2_50d', pretrained=pretrained, +# layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNormBatch2d, +# stem_type='deep', avg_down=True, **kwargs) +# +# +# @register_model +# def resnetv2_50esd(pretrained=False, **kwargs): +# # FIXME for testing w/ TPU + PyTorch XLA +# return _create_resnetv2( +# 'resnetv2_50d', pretrained=pretrained, +# layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNormSample2d, +# stem_type='deep', avg_down=True, **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/rexnet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/rexnet.py new file mode 100644 index 0000000000..279780beb6 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/rexnet.py @@ -0,0 +1,238 @@ +""" ReXNet + +A PyTorch impl of `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` - +https://arxiv.org/abs/2007.00992 + +Adapted from original impl at https://github.com/clovaai/rexnet +Copyright (c) 2020-present NAVER Corp. 
MIT license + +Changes for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman +Copyright 2020 Ross Wightman +""" + +import torch.nn as nn +from functools import partial +from math import ceil + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, create_act_layer, ConvBnAct, DropPath, make_divisible, SEModule +from .registry import register_model +from .efficientnet_builder import efficientnet_init_weights + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + } + + +default_cfgs = dict( + rexnet_100=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_100-1b4dddf4.pth'), + rexnet_130=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_130-590d768e.pth'), + rexnet_150=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_150-bd1a6aa8.pth'), + rexnet_200=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_200-8c0b7f2d.pth'), + rexnetr_100=_cfg( + url=''), + rexnetr_130=_cfg( + url=''), + rexnetr_150=_cfg( + url=''), + rexnetr_200=_cfg( + url=''), +) + +SEWithNorm = partial(SEModule, norm_layer=nn.BatchNorm2d) + + +class LinearBottleneck(nn.Module): + def __init__(self, in_chs, out_chs, stride, exp_ratio=1.0, se_ratio=0., ch_div=1, + act_layer='swish', dw_act_layer='relu6', drop_path=None): + super(LinearBottleneck, self).__init__() + self.use_shortcut = stride == 1 and in_chs <= out_chs + self.in_channels = in_chs + self.out_channels = out_chs + + if exp_ratio != 1.: + dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div) + self.conv_exp = ConvBnAct(in_chs, dw_chs, act_layer=act_layer) + else: + dw_chs = in_chs + self.conv_exp = None + + self.conv_dw = ConvBnAct(dw_chs, dw_chs, 3, stride=stride, groups=dw_chs, apply_act=False) + if se_ratio > 0: + self.se = SEWithNorm(dw_chs, rd_channels=make_divisible(int(dw_chs * se_ratio), ch_div)) + else: + self.se = None + self.act_dw = create_act_layer(dw_act_layer) + + self.conv_pwl = ConvBnAct(dw_chs, out_chs, 1, apply_act=False) + self.drop_path = drop_path + + def feat_channels(self, exp=False): + return self.conv_dw.out_channels if exp else self.out_channels + + def forward(self, x): + shortcut = x + if self.conv_exp is not None: + x = self.conv_exp(x) + x = self.conv_dw(x) + if self.se is not None: + x = self.se(x) + x = self.act_dw(x) + x = self.conv_pwl(x) + if self.use_shortcut: + if self.drop_path is not None: + x = self.drop_path(x) + x[:, 0:self.in_channels] += shortcut + return x + + +def _block_cfg(width_mult=1.0, depth_mult=1.0, initial_chs=16, final_chs=180, se_ratio=0., ch_div=1): + layers = [1, 2, 2, 3, 3, 5] + strides = [1, 2, 2, 2, 1, 2] + layers = [ceil(element * depth_mult) for element in layers] + strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], []) + exp_ratios = [1] * layers[0] + [6] * sum(layers[1:]) + depth = sum(layers[:]) * 3 + base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs + + # The following channel configuration is a simple instance to make each layer become an expand layer. 
+ out_chs_list = [] + for i in range(depth // 3): + out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div)) + base_chs += final_chs / (depth // 3 * 1.0) + + se_ratios = [0.] * (layers[0] + layers[1]) + [se_ratio] * sum(layers[2:]) + + return list(zip(out_chs_list, exp_ratios, strides, se_ratios)) + + +def _build_blocks( + block_cfg, prev_chs, width_mult, ch_div=1, act_layer='swish', dw_act_layer='relu6', drop_path_rate=0.): + feat_chs = [prev_chs] + feature_info = [] + curr_stride = 2 + features = [] + num_blocks = len(block_cfg) + for block_idx, (chs, exp_ratio, stride, se_ratio) in enumerate(block_cfg): + if stride > 1: + fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}' + feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)] + curr_stride *= stride + block_dpr = drop_path_rate * block_idx / (num_blocks - 1) # stochastic depth linear decay rule + drop_path = DropPath(block_dpr) if block_dpr > 0. else None + features.append(LinearBottleneck( + in_chs=prev_chs, out_chs=chs, exp_ratio=exp_ratio, stride=stride, se_ratio=se_ratio, + ch_div=ch_div, act_layer=act_layer, dw_act_layer=dw_act_layer, drop_path=drop_path)) + prev_chs = chs + feat_chs += [features[-1].feat_channels()] + pen_chs = make_divisible(1280 * width_mult, divisor=ch_div) + feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=f'features.{len(features) - 1}')] + features.append(ConvBnAct(prev_chs, pen_chs, act_layer=act_layer)) + return features, feature_info + + +class ReXNetV1(nn.Module): + def __init__(self, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, + initial_chs=16, final_chs=180, width_mult=1.0, depth_mult=1.0, se_ratio=1/12., + ch_div=1, act_layer='swish', dw_act_layer='relu6', drop_rate=0.2, drop_path_rate=0.): + super(ReXNetV1, self).__init__() + self.drop_rate = drop_rate + self.num_classes = num_classes + + assert output_stride == 32 # FIXME support dilation + stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32 + stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div) + self.stem = ConvBnAct(in_chans, stem_chs, 3, stride=2, act_layer=act_layer) + + block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, se_ratio, ch_div) + features, self.feature_info = _build_blocks( + block_cfg, stem_chs, width_mult, ch_div, act_layer, dw_act_layer, drop_path_rate) + self.num_features = features[-1].out_channels + self.features = nn.Sequential(*features) + + self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate) + + efficientnet_init_weights(self) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.features(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_rexnet(variant, pretrained, **kwargs): + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + ReXNetV1, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=feature_cfg, + **kwargs) + + +@register_model +def rexnet_100(pretrained=False, **kwargs): + """ReXNet V1 1.0x""" + return _create_rexnet('rexnet_100', pretrained, **kwargs) + + +@register_model +def rexnet_130(pretrained=False, **kwargs): + """ReXNet V1 1.3x""" + return _create_rexnet('rexnet_130', 
pretrained, width_mult=1.3, **kwargs) + + +@register_model +def rexnet_150(pretrained=False, **kwargs): + """ReXNet V1 1.5x""" + return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs) + + +@register_model +def rexnet_200(pretrained=False, **kwargs): + """ReXNet V1 2.0x""" + return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs) + + +@register_model +def rexnetr_100(pretrained=False, **kwargs): + """ReXNet V1 1.0x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs) + + +@register_model +def rexnetr_130(pretrained=False, **kwargs): + """ReXNet V1 1.3x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs) + + +@register_model +def rexnetr_150(pretrained=False, **kwargs): + """ReXNet V1 1.5x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs) + + +@register_model +def rexnetr_200(pretrained=False, **kwargs): + """ReXNet V1 2.0x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/selecsls.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/selecsls.py new file mode 100644 index 0000000000..1f3379db3d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/selecsls.py @@ -0,0 +1,362 @@ +"""PyTorch SelecSLS Net example for ImageNet Classification +License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode) +Author: Dushyant Mehta (@mehtadushy) + +SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D +Human Pose Estimation with a Single RGB Camera, Mehta et al." 
+https://arxiv.org/abs/1907.00837 + +Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models +and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch +""" +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['SelecSLS'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'selecsls42': _cfg( + url='', + interpolation='bicubic'), + 'selecsls42b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth', + interpolation='bicubic'), + 'selecsls60': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth', + interpolation='bicubic'), + 'selecsls60b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth', + interpolation='bicubic'), + 'selecsls84': _cfg( + url='', + interpolation='bicubic'), +} + + +class SequentialList(nn.Sequential): + + def __init__(self, *args): + super(SequentialList, self).__init__(*args) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (List[torch.Tensor]) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (List[torch.Tensor]) + pass + + def forward(self, x) -> List[torch.Tensor]: + for module in self: + x = module(x) + return x + + +class SelectSeq(nn.Module): + def __init__(self, mode='index', index=0): + super(SelectSeq, self).__init__() + self.mode = mode + self.index = index + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor]) -> (torch.Tensor) + pass + + def forward(self, x) -> torch.Tensor: + if self.mode == 'index': + return x[self.index] + else: + return torch.cat(x, dim=1) + + +def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1): + if padding is None: + padding = ((stride - 1) + dilation * (k - 1)) // 2 + return nn.Sequential( + nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False), + nn.BatchNorm2d(out_chs), + nn.ReLU(inplace=True) + ) + + +class SelecSLSBlock(nn.Module): + def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1): + super(SelecSLSBlock, self).__init__() + self.stride = stride + self.is_first = is_first + assert stride in [1, 2] + + # Process input with 4 conv blocks with the same number of input and output channels + self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation) + self.conv2 = conv_bn(mid_chs, mid_chs, 1) + self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3) + self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1) + self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3) + self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1) + + def forward(self, x: 
List[torch.Tensor]) -> List[torch.Tensor]: + if not isinstance(x, list): + x = [x] + assert len(x) in [1, 2] + + d1 = self.conv1(x[0]) + d2 = self.conv3(self.conv2(d1)) + d3 = self.conv5(self.conv4(d2)) + if self.is_first: + out = self.conv6(torch.cat([d1, d2, d3], 1)) + return [out, out] + else: + return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]] + + +class SelecSLS(nn.Module): + """SelecSLS42 / SelecSLS60 / SelecSLS84 + + Parameters + ---------- + cfg : network config dictionary specifying block type, feature, and head args + num_classes : int, default 1000 + Number of classification classes. + in_chans : int, default 3 + Number of input (color) channels. + drop_rate : float, default 0. + Dropout probability before classifier, for training + global_pool : str, default 'avg' + Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' + """ + + def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(SelecSLS, self).__init__() + + self.stem = conv_bn(in_chans, 32, stride=2) + self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']]) + self.from_seq = SelectSeq() # from List[tensor] -> Tensor in module compatible way + self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']]) + self.num_features = cfg['num_features'] + self.feature_info = cfg['feature_info'] + + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) 
+ + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.stem(x) + x = self.features(x) + x = self.head(self.from_seq(x)) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + return x + + +def _create_selecsls(variant, pretrained, **kwargs): + cfg = {} + feature_info = [dict(num_chs=32, reduction=2, module='stem.2')] + if variant.startswith('selecsls42'): + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 128, False, 1), + (128, 0, 144, 144, True, 2), + (144, 144, 144, 288, False, 1), + (288, 0, 304, 304, True, 2), + (304, 304, 304, 480, False, 1), + ] + feature_info.extend([ + dict(num_chs=128, reduction=4, module='features.1'), + dict(num_chs=288, reduction=8, module='features.3'), + dict(num_chs=480, reduction=16, module='features.5'), + ]) + # Head can be replaced with alternative configurations depending on the problem + feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) + if variant == 'selecsls42b': + cfg['head'] = [ + (480, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1280, 3, 2), + (1280, 1024, 1, 1), + ] + feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) + cfg['num_features'] = 1024 + else: + cfg['head'] = [ + (480, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 1, 1), + ] + feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) + cfg['num_features'] = 1280 + + elif variant.startswith('selecsls60'): + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 128, False, 1), + (128, 0, 128, 128, True, 2), + (128, 128, 128, 128, False, 1), + (128, 128, 128, 288, False, 1), + (288, 0, 288, 288, True, 2), + (288, 288, 288, 288, False, 1), + (288, 288, 288, 288, False, 1), + (288, 288, 288, 416, False, 1), + ] + feature_info.extend([ + dict(num_chs=128, reduction=4, module='features.1'), + dict(num_chs=288, reduction=8, module='features.4'), + dict(num_chs=416, reduction=16, module='features.8'), + ]) + # Head can be replaced with alternative configurations depending on the problem + feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) + if variant == 'selecsls60b': + cfg['head'] = [ + (416, 756, 3, 2), + (756, 1024, 3, 1), + (1024, 1280, 3, 2), + (1280, 1024, 1, 1), + ] + feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) + cfg['num_features'] = 1024 + else: + cfg['head'] = [ + (416, 756, 3, 2), + (756, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 1, 1), + ] + feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) + cfg['num_features'] = 1280 + + elif variant == 'selecsls84': + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 144, False, 1), + (144, 0, 144, 144, True, 2), + (144, 144, 144, 
144, False, 1), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 304, False, 1), + (304, 0, 304, 304, True, 2), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 512, False, 1), + ] + feature_info.extend([ + dict(num_chs=144, reduction=4, module='features.1'), + dict(num_chs=304, reduction=8, module='features.6'), + dict(num_chs=512, reduction=16, module='features.12'), + ]) + # Head can be replaced with alternative configurations depending on the problem + cfg['head'] = [ + (512, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 3, 1), + ] + cfg['num_features'] = 1280 + feature_info.extend([ + dict(num_chs=1024, reduction=32, module='head.1'), + dict(num_chs=1280, reduction=64, module='head.3') + ]) + else: + raise ValueError('Invalid net configuration ' + variant + ' !!!') + cfg['feature_info'] = feature_info + + # this model can do 6 feature levels by default, unlike most others, leave as 0-4 to avoid surprises? + return build_model_with_cfg( + SelecSLS, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=cfg, + feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True), + **kwargs) + + +@register_model +def selecsls42(pretrained=False, **kwargs): + """Constructs a SelecSLS42 model. + """ + return _create_selecsls('selecsls42', pretrained, **kwargs) + + +@register_model +def selecsls42b(pretrained=False, **kwargs): + """Constructs a SelecSLS42_B model. + """ + return _create_selecsls('selecsls42b', pretrained, **kwargs) + + +@register_model +def selecsls60(pretrained=False, **kwargs): + """Constructs a SelecSLS60 model. + """ + return _create_selecsls('selecsls60', pretrained, **kwargs) + + +@register_model +def selecsls60b(pretrained=False, **kwargs): + """Constructs a SelecSLS60_B model. + """ + return _create_selecsls('selecsls60b', pretrained, **kwargs) + + +@register_model +def selecsls84(pretrained=False, **kwargs): + """Constructs a SelecSLS84 model. + """ + return _create_selecsls('selecsls84', pretrained, **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/senet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/senet.py new file mode 100644 index 0000000000..3d0ba7b3ee --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/senet.py @@ -0,0 +1,467 @@ +""" +SEResNet implementation from Cadene's pretrained models +https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py +Additional credit to https://github.com/creafz + +Original model: https://github.com/hujie-frank/SENet + +ResNet code gently borrowed from +https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py + +FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate +support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here. 
+""" +import math +from collections import OrderedDict + +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['SENet'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'layer0.conv1', 'classifier': 'last_linear', + **kwargs + } + + +default_cfgs = { + 'legacy_senet154': + _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth'), + 'legacy_seresnet18': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth', + interpolation='bicubic'), + 'legacy_seresnet34': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'), + 'legacy_seresnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'), + 'legacy_seresnet101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'), + 'legacy_seresnet152': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'), + 'legacy_seresnext26_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth', + interpolation='bicubic'), + 'legacy_seresnext50_32x4d': + _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth'), + 'legacy_seresnext101_32x4d': + _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth'), +} + + +def _weight_init(m): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) + + +class SEModule(nn.Module): + + def __init__(self, channels, reduction): + super(SEModule, self).__init__() + self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1) + self.relu = nn.ReLU(inplace=True) + self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + module_input = x + x = x.mean((2, 3), keepdim=True) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.sigmoid(x) + return module_input * x + + +class Bottleneck(nn.Module): + """ + Base class for bottlenecks that implements `forward()` method. + """ + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out = self.se_module(out) + shortcut + out = self.relu(out) + + return out + + +class SEBottleneck(Bottleneck): + """ + Bottleneck for SENet154. 
+ """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None): + super(SEBottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes * 2) + self.conv2 = nn.Conv2d( + planes * 2, planes * 4, kernel_size=3, stride=stride, + padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes * 4) + self.conv3 = nn.Conv2d( + planes * 4, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNetBottleneck(Bottleneck): + """ + ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe + implementation and uses `stride=stride` in `conv1` and not in `conv2` + (the latter is used in the torchvision implementation of ResNet). + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None): + super(SEResNetBottleneck, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=1, bias=False, stride=stride) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNeXtBottleneck(Bottleneck): + """ + ResNeXt bottleneck type C with a Squeeze-and-Excitation module. + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None, base_width=4): + super(SEResNeXtBottleneck, self).__init__() + width = math.floor(planes * (base_width / 64)) * groups + self.conv1 = nn.Conv2d( + inplanes, width, kernel_size=1, bias=False, stride=1) + self.bn1 = nn.BatchNorm2d(width) + self.conv2 = nn.Conv2d( + width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(width) + self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNetBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): + super(SEResNetBlock, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes, reduction=reduction) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out = self.se_module(out) + shortcut + out = self.relu(out) + + return out + + +class SENet(nn.Module): + + def __init__(self, block, layers, groups, reduction, drop_rate=0.2, + in_chans=3, 
inplanes=64, input_3x3=False, downsample_kernel_size=1, + downsample_padding=0, num_classes=1000, global_pool='avg'): + """ + Parameters + ---------- + block (nn.Module): Bottleneck class. + - For SENet154: SEBottleneck + - For SE-ResNet models: SEResNetBottleneck + - For SE-ResNeXt models: SEResNeXtBottleneck + layers (list of ints): Number of residual blocks for 4 layers of the + network (layer1...layer4). + groups (int): Number of groups for the 3x3 convolution in each + bottleneck block. + - For SENet154: 64 + - For SE-ResNet models: 1 + - For SE-ResNeXt models: 32 + reduction (int): Reduction ratio for Squeeze-and-Excitation modules. + - For all models: 16 + dropout_p (float or None): Drop probability for the Dropout layer. + If `None` the Dropout layer is not used. + - For SENet154: 0.2 + - For SE-ResNet models: None + - For SE-ResNeXt models: None + inplanes (int): Number of input channels for layer1. + - For SENet154: 128 + - For SE-ResNet models: 64 + - For SE-ResNeXt models: 64 + input_3x3 (bool): If `True`, use three 3x3 convolutions instead of + a single 7x7 convolution in layer0. + - For SENet154: True + - For SE-ResNet models: False + - For SE-ResNeXt models: False + downsample_kernel_size (int): Kernel size for downsampling convolutions + in layer2, layer3 and layer4. + - For SENet154: 3 + - For SE-ResNet models: 1 + - For SE-ResNeXt models: 1 + downsample_padding (int): Padding for downsampling convolutions in + layer2, layer3 and layer4. + - For SENet154: 1 + - For SE-ResNet models: 0 + - For SE-ResNeXt models: 0 + num_classes (int): Number of outputs in `last_linear` layer. + - For all models: 1000 + """ + super(SENet, self).__init__() + self.inplanes = inplanes + self.num_classes = num_classes + self.drop_rate = drop_rate + if input_3x3: + layer0_modules = [ + ('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)), + ('bn1', nn.BatchNorm2d(64)), + ('relu1', nn.ReLU(inplace=True)), + ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)), + ('bn2', nn.BatchNorm2d(64)), + ('relu2', nn.ReLU(inplace=True)), + ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)), + ('bn3', nn.BatchNorm2d(inplanes)), + ('relu3', nn.ReLU(inplace=True)), + ] + else: + layer0_modules = [ + ('conv1', nn.Conv2d( + in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)), + ('bn1', nn.BatchNorm2d(inplanes)), + ('relu1', nn.ReLU(inplace=True)), + ] + self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) + # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`. 
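+ # e.g. a 224x224 input is 112x112 after layer0; with ceil_mode the pool below gives 56x56 rather than 55x55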
+ self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')] + self.layer1 = self._make_layer( + block, + planes=64, + blocks=layers[0], + groups=groups, + reduction=reduction, + downsample_kernel_size=1, + downsample_padding=0 + ) + self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')] + self.layer2 = self._make_layer( + block, + planes=128, + blocks=layers[1], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')] + self.layer3 = self._make_layer( + block, + planes=256, + blocks=layers[2], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')] + self.layer4 = self._make_layer( + block, + planes=512, + blocks=layers[3], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')] + self.num_features = 512 * block.expansion + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + for m in self.modules(): + _weight_init(m) + + def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, + downsample_kernel_size=1, downsample_padding=0): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, + stride=stride, padding=downsample_padding, bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)] + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, groups, reduction)) + + return nn.Sequential(*layers) + + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.layer0(x) + x = self.pool0(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def logits(self, x): + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.logits(x) + return x + + +def _create_senet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + SENet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def legacy_seresnet18(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet18', pretrained, **model_args) + + +@register_model +def legacy_seresnet34(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) + return 
_create_senet('legacy_seresnet34', pretrained, **model_args) + + +@register_model +def legacy_seresnet50(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet50', pretrained, **model_args) + + +@register_model +def legacy_seresnet101(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet101', pretrained, **model_args) + + +@register_model +def legacy_seresnet152(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet152', pretrained, **model_args) + + +@register_model +def legacy_senet154(pretrained=False, **kwargs): + model_args = dict( + block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16, + downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs) + return _create_senet('legacy_senet154', pretrained, **model_args) + + +@register_model +def legacy_seresnext26_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args) + + +@register_model +def legacy_seresnext50_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def legacy_seresnext101_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/sknet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/sknet.py new file mode 100644 index 0000000000..4dc2aa534c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/sknet.py @@ -0,0 +1,215 @@ +""" Selective Kernel Networks (ResNet base) + +Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) + +This was inspired by reading 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268) +and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer +to the original paper with some modifications of my own to better balance param count vs accuracy. 
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +import math + +from torch import nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import SelectiveKernel, ConvBnAct, create_attn +from .registry import register_model +from .resnet import ResNet + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'skresnet18': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth'), + 'skresnet34': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth'), + 'skresnet50': _cfg(), + 'skresnet50d': _cfg( + first_conv='conv1.0'), + 'skresnext50_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth'), +} + + +class SelectiveKernelBasic(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, + sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(SelectiveKernelBasic, self).__init__() + + sk_kwargs = sk_kwargs or {} + conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer) + assert cardinality == 1, 'BasicBlock only supports cardinality of 1' + assert base_width == 64, 'BasicBlock doest not support changing base width' + first_planes = planes // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = SelectiveKernel( + inplanes, first_planes, stride=stride, dilation=first_dilation, **conv_kwargs, **sk_kwargs) + conv_kwargs['act_layer'] = None + self.conv2 = ConvBnAct( + first_planes, outplanes, kernel_size=3, dilation=dilation, **conv_kwargs) + self.se = create_attn(attn_layer, outplanes) + self.act = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv2.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.se is not None: + x = self.se(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act(x) + return x + + +class SelectiveKernelBottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, + cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, + drop_block=None, drop_path=None): + super(SelectiveKernelBottleneck, self).__init__() + + sk_kwargs = sk_kwargs or {} + conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer) + width = int(math.floor(planes * (base_width / 64)) * cardinality) + first_planes = width // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + 
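+ # block layout: 1x1 reduce -> selective-kernel 3x3 (stride/dilation applied here) -> 1x1 expand, then optional attention and stochastic depth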
+ self.conv1 = ConvBnAct(inplanes, first_planes, kernel_size=1, **conv_kwargs) + self.conv2 = SelectiveKernel( + first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality, + **conv_kwargs, **sk_kwargs) + conv_kwargs['act_layer'] = None + self.conv3 = ConvBnAct(width, outplanes, kernel_size=1, **conv_kwargs) + self.se = create_attn(attn_layer, outplanes) + self.act = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + if self.se is not None: + x = self.se(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act(x) + return x + + +def _create_skresnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def skresnet18(pretrained=False, **kwargs): + """Constructs a Selective Kernel ResNet-18 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) + model_args = dict( + block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet18', pretrained, **model_args) + + +@register_model +def skresnet34(pretrained=False, **kwargs): + """Constructs a Selective Kernel ResNet-34 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) + model_args = dict( + block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet34', pretrained, **model_args) + + +@register_model +def skresnet50(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNet-50 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(split_input=True) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet50', pretrained, **model_args) + + +@register_model +def skresnet50d(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNet-50-D model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. 
+ """ + sk_kwargs = dict(split_input=True) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet50d', pretrained, **model_args) + + +@register_model +def skresnext50_32x4d(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNeXt50-32x4d model. This should be equivalent to + the SKNet-50 model in the Select Kernel Paper + """ + sk_kwargs = dict(rd_ratio=1/16, rd_divisor=32, split_input=False) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnext50_32x4d', pretrained, **model_args) + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/swin_transformer.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/swin_transformer.py new file mode 100644 index 0000000000..2ee106d287 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/swin_transformer.py @@ -0,0 +1,652 @@ +""" Swin Transformer +A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` + - https://arxiv.org/pdf/2103.14030 + +Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below + +""" +# -------------------------------------------------------- +# Swin Transformer +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# -------------------------------------------------------- +import logging +import math +from copy import deepcopy +from typing import Optional + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_ +from .registry import register_model +from .vision_transformer import checkpoint_filter_fn, _init_vit_weights + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # patch models (my experiments) + 'swin_base_patch4_window12_384': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + + 'swin_base_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth', + ), + + 'swin_large_patch4_window12_384': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + + 'swin_large_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth', + ), + + 'swin_small_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth', + ), + + 
'swin_tiny_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth', + ), + + 'swin_base_patch4_window12_384_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth', + input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841), + + 'swin_base_patch4_window7_224_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth', + num_classes=21841), + + 'swin_large_patch4_window12_384_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth', + input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841), + + 'swin_large_patch4_window7_224_in22k': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth', + num_classes=21841), + +} + + +def window_partition(x, window_size: int): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, window_size: int, H: int, W: int): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 + """ + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask: Optional[torch.Tensor] = None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerBlock(nn.Module): + r""" Swin Transformer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resulotion. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. 
Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + if min(self.input_resolution) <= self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, + attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if self.shift_size > 0: + # calculate attention mask for SW-MSA + H, W = self.input_resolution + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + +class PatchMerging(nn.Module): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." + + x = x.view(B, H, W, C) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + def extra_repr(self) -> str: + return f"input_resolution={self.input_resolution}, dim={self.dim}" + + def flops(self): + H, W = self.input_resolution + flops = H * W * self.dim + flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim + return flops + + +class BasicLayer(nn.Module): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
+ """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock( + dim=dim, input_resolution=input_resolution, num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if not torch.jit.is_scripting() and self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + + +class SwinTransformer(nn.Module): + r""" Swin Transformer + A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + + Args: + img_size (int | tuple(int)): Input image size. Default 224 + patch_size (int | tuple(int)): Patch size. Default: 4 + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, + embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), + window_size=7, mlp_ratio=4., qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, + norm_layer=nn.LayerNorm, ape=False, patch_norm=True, + use_checkpoint=False, weight_init='', **kwargs): + super().__init__() + + self.num_classes = num_classes + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) + self.mlp_ratio = mlp_ratio + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + self.patch_grid = self.patch_embed.grid_size + + # absolute position embedding + if self.ape: + self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.absolute_pos_embed, std=.02) + else: + self.absolute_pos_embed = None + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build layers + layers = [] + for i_layer in range(self.num_layers): + layers += [BasicLayer( + dim=int(embed_dim * 2 ** i_layer), + input_resolution=(self.patch_grid[0] // (2 ** i_layer), self.patch_grid[1] // (2 ** i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint) + ] + self.layers = nn.Sequential(*layers) + + self.norm = norm_layer(self.num_features) + self.avgpool = nn.AdaptiveAvgPool1d(1) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0. 
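+ # the 'nlhb' option above sets the classifier bias to -log(num_classes) (negative-log head bias)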
+ if weight_init.startswith('jax'): + for n, m in self.named_modules(): + _init_vit_weights(m, n, head_bias=head_bias, jax_impl=True) + else: + self.apply(_init_vit_weights) + + @torch.jit.ignore + def no_weight_decay(self): + return {'absolute_pos_embed'} + + @torch.jit.ignore + def no_weight_decay_keywords(self): + return {'relative_position_bias_table'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + if self.absolute_pos_embed is not None: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + x = self.layers(x) + x = self.norm(x) # B L C + x = self.avgpool(x.transpose(1, 2)) # B C 1 + x = torch.flatten(x, 1) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_swin_transformer(variant, pretrained=False, default_cfg=None, **kwargs): + if default_cfg is None: + default_cfg = deepcopy(default_cfgs[variant]) + overlay_external_default_cfg(default_cfg, kwargs) + default_num_classes = default_cfg['num_classes'] + default_img_size = default_cfg['input_size'][-2:] + + num_classes = kwargs.pop('num_classes', default_num_classes) + img_size = kwargs.pop('img_size', default_img_size) + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + SwinTransformer, variant, pretrained, + default_cfg=default_cfg, + img_size=img_size, + num_classes=num_classes, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + + return model + + + +@register_model +def swin_base_patch4_window12_384(pretrained=False, **kwargs): + """ Swin-B @ 384x384, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window12_384', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_base_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-B @ 224x224, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window12_384(pretrained=False, **kwargs): + """ Swin-L @ 384x384, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window12_384', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-L @ 224x224, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_small_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-S @ 224x224, trained ImageNet-1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 
12, 24), **kwargs) + return _create_swin_transformer('swin_small_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_tiny_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-T @ 224x224, trained ImageNet-1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), **kwargs) + return _create_swin_transformer('swin_tiny_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_base_patch4_window12_384_in22k(pretrained=False, **kwargs): + """ Swin-B @ 384x384, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_base_patch4_window7_224_in22k(pretrained=False, **kwargs): + """ Swin-B @ 224x224, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window12_384_in22k(pretrained=False, **kwargs): + """ Swin-L @ 384x384, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window7_224_in22k(pretrained=False, **kwargs): + """ Swin-L @ 224x224, trained ImageNet-22k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs) \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/tnt.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/tnt.py new file mode 100644 index 0000000000..8186cc4aea --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/tnt.py @@ -0,0 +1,268 @@ +""" Transformer in Transformer (TNT) in PyTorch + +A PyTorch implement of TNT as described in +'Transformer in Transformer' - https://arxiv.org/abs/2103.00112 + +The official mindspore code is released and available at +https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT +""" +import math +import torch +import torch.nn as nn +from functools import partial + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.models.helpers import build_model_with_cfg +from timm.models.layers import Mlp, DropPath, trunc_normal_ +from timm.models.layers.helpers import to_2tuple +from timm.models.registry import register_model +from timm.models.vision_transformer import resize_pos_embed + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'pixel_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'tnt_s_patch16_224': _cfg( + 
url='https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + ), + 'tnt_b_patch16_224': _cfg( + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + ), +} + + +class Attention(nn.Module): + """ Multi-Head Attention + """ + def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.hidden_dim = hidden_dim + self.num_heads = num_heads + head_dim = hidden_dim // num_heads + self.head_dim = head_dim + self.scale = head_dim ** -0.5 + + self.qk = nn.Linear(dim, hidden_dim * 2, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop, inplace=True) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop, inplace=True) + + def forward(self, x): + B, N, C = x.shape + qk = self.qk(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k = qk[0], qk[1] # make torchscript happy (cannot use tensor as tuple) + v = self.v(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + """ TNT Block + """ + def __init__(self, dim, in_dim, num_pixel, num_heads=12, in_num_head=4, mlp_ratio=4., + qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + # Inner transformer + self.norm_in = norm_layer(in_dim) + self.attn_in = Attention( + in_dim, in_dim, num_heads=in_num_head, qkv_bias=qkv_bias, + attn_drop=attn_drop, proj_drop=drop) + + self.norm_mlp_in = norm_layer(in_dim) + self.mlp_in = Mlp(in_features=in_dim, hidden_features=int(in_dim * 4), + out_features=in_dim, act_layer=act_layer, drop=drop) + + self.norm1_proj = norm_layer(in_dim) + self.proj = nn.Linear(in_dim * num_pixel, dim, bias=True) + # Outer transformer + self.norm_out = norm_layer(dim) + self.attn_out = Attention( + dim, dim, num_heads=num_heads, qkv_bias=qkv_bias, + attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.norm_mlp = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), + out_features=dim, act_layer=act_layer, drop=drop) + + def forward(self, pixel_embed, patch_embed): + # inner + pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed))) + pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed))) + # outer + B, N, C = patch_embed.size() + patch_embed[:, 1:] = patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape(B, N - 1, -1)) + patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed))) + patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed))) + return pixel_embed, patch_embed + + +class PixelEmbed(nn.Module): + """ Image to Pixel Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + # grid_size property necessary for resizing positional embedding + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + num_patches = (self.grid_size[0]) * (self.grid_size[1]) + self.img_size = img_size + self.num_patches = num_patches + self.in_dim = in_dim + new_patch_size = [math.ceil(ps / stride) for ps in patch_size] + self.new_patch_size = new_patch_size + + self.proj = nn.Conv2d(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride) + self.unfold = nn.Unfold(kernel_size=new_patch_size, stride=new_patch_size) + + def forward(self, x, pixel_pos): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
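+ # conv-project to in_dim channels (stride shrinks the spatial map), unfold the
+ # result into num_patches non-overlapping windows, add the pixel position
+ # embedding, then flatten each window into a (pixels, in_dim) token sequence.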
+ x = self.proj(x) + x = self.unfold(x) + x = x.transpose(1, 2).reshape(B * self.num_patches, self.in_dim, self.new_patch_size[0], self.new_patch_size[1]) + x = x + pixel_pos + x = x.reshape(B * self.num_patches, self.in_dim, -1).transpose(1, 2) + return x + + +class TNT(nn.Module): + """ Transformer in Transformer - https://arxiv.org/abs/2103.00112 + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, in_dim=48, depth=12, + num_heads=12, in_num_head=4, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., norm_layer=nn.LayerNorm, first_stride=4): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.pixel_embed = PixelEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, in_dim=in_dim, stride=first_stride) + num_patches = self.pixel_embed.num_patches + self.num_patches = num_patches + new_patch_size = self.pixel_embed.new_patch_size + num_pixel = new_patch_size[0] * new_patch_size[1] + + self.norm1_proj = norm_layer(num_pixel * in_dim) + self.proj = nn.Linear(num_pixel * in_dim, embed_dim) + self.norm2_proj = norm_layer(embed_dim) + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.patch_pos = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + self.pixel_pos = nn.Parameter(torch.zeros(1, in_dim, new_patch_size[0], new_patch_size[1])) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + blocks = [] + for i in range(depth): + blocks.append(Block( + dim=embed_dim, in_dim=in_dim, num_pixel=num_pixel, num_heads=num_heads, in_num_head=in_num_head, + mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[i], norm_layer=norm_layer)) + self.blocks = nn.ModuleList(blocks) + self.norm = norm_layer(embed_dim) + + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.cls_token, std=.02) + trunc_normal_(self.patch_pos, std=.02) + trunc_normal_(self.pixel_pos, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'patch_pos', 'pixel_pos', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + pixel_embed = self.pixel_embed(x, self.pixel_pos) + + patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1)))) + patch_embed = torch.cat((self.cls_token.expand(B, -1, -1), patch_embed), dim=1) + patch_embed = patch_embed + self.patch_pos + patch_embed = self.pos_drop(patch_embed) + + for blk in self.blocks: + pixel_embed, patch_embed = blk(pixel_embed, patch_embed) + + patch_embed = self.norm(patch_embed) + return patch_embed[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + """ convert patch 
embedding weight from manual patchify + linear proj to conv""" + if state_dict['patch_pos'].shape != model.patch_pos.shape: + state_dict['patch_pos'] = resize_pos_embed(state_dict['patch_pos'], + model.patch_pos, getattr(model, 'num_tokens', 1), model.pixel_embed.grid_size) + return state_dict + + +def _create_tnt(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + TNT, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def tnt_s_patch16_224(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=16, embed_dim=384, in_dim=24, depth=12, num_heads=6, in_num_head=4, + qkv_bias=False, **kwargs) + model = _create_tnt('tnt_s_patch16_224', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def tnt_b_patch16_224(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=16, embed_dim=640, in_dim=40, depth=12, num_heads=10, in_num_head=4, + qkv_bias=False, **kwargs) + model = _create_tnt('tnt_b_patch16_224', pretrained=pretrained, **model_cfg) + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/tresnet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/tresnet.py new file mode 100644 index 0000000000..372bfb7bc0 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/tresnet.py @@ -0,0 +1,297 @@ +""" +TResNet: High Performance GPU-Dedicated Architecture +https://arxiv.org/pdf/2003.13630.pdf + +Original model: https://github.com/mrT23/TResNet + +""" +from collections import OrderedDict + +import torch +import torch.nn as nn + +from .helpers import build_model_with_cfg +from .layers import SpaceToDepthModule, BlurPool2d, InplaceAbn, ClassifierHead, SEModule +from .registry import register_model + +__all__ = ['tresnet_m', 'tresnet_l', 'tresnet_xl'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': (0, 0, 0), 'std': (1, 1, 1), + 'first_conv': 'body.conv1.0', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + 'tresnet_m': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/tresnet_m_1k_miil_83_1.pth'), + 'tresnet_m_miil_in21k': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/tresnet_m_miil_in21k.pth', num_classes=11221), + 'tresnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_81_5-235b486c.pth'), + 'tresnet_xl': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_82_0-a2d51b00.pth'), + 'tresnet_m_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_448-bc359d10.pth'), + 'tresnet_l_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth'), + 'tresnet_xl_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_448-8c1815de.pth') +} + + +def IABN2Float(module: nn.Module) -> nn.Module: 
+ """If `module` is IABN don't use half precision.""" + if isinstance(module, InplaceAbn): + module.float() + for child in module.children(): + IABN2Float(child) + return module + + +def conv2d_iabn(ni, nf, stride, kernel_size=3, groups=1, act_layer="leaky_relu", act_param=1e-2): + return nn.Sequential( + nn.Conv2d( + ni, nf, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False), + InplaceAbn(nf, act_layer=act_layer, act_param=act_param) + ) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, aa_layer=None): + super(BasicBlock, self).__init__() + if stride == 1: + self.conv1 = conv2d_iabn(inplanes, planes, stride=1, act_param=1e-3) + else: + if aa_layer is None: + self.conv1 = conv2d_iabn(inplanes, planes, stride=2, act_param=1e-3) + else: + self.conv1 = nn.Sequential( + conv2d_iabn(inplanes, planes, stride=1, act_param=1e-3), + aa_layer(channels=planes, filt_size=3, stride=2)) + + self.conv2 = conv2d_iabn(planes, planes, stride=1, act_layer="identity") + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + rd_chs = max(planes * self.expansion // 4, 64) + self.se = SEModule(planes * self.expansion, rd_channels=rd_chs) if use_se else None + + def forward(self, x): + if self.downsample is not None: + shortcut = self.downsample(x) + else: + shortcut = x + + out = self.conv1(x) + out = self.conv2(out) + + if self.se is not None: + out = self.se(out) + + out += shortcut + out = self.relu(out) + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, + act_layer="leaky_relu", aa_layer=None): + super(Bottleneck, self).__init__() + self.conv1 = conv2d_iabn( + inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer, act_param=1e-3) + if stride == 1: + self.conv2 = conv2d_iabn( + planes, planes, kernel_size=3, stride=1, act_layer=act_layer, act_param=1e-3) + else: + if aa_layer is None: + self.conv2 = conv2d_iabn( + planes, planes, kernel_size=3, stride=2, act_layer=act_layer, act_param=1e-3) + else: + self.conv2 = nn.Sequential( + conv2d_iabn(planes, planes, kernel_size=3, stride=1, act_layer=act_layer, act_param=1e-3), + aa_layer(channels=planes, filt_size=3, stride=2)) + + reduction_chs = max(planes * self.expansion // 8, 64) + self.se = SEModule(planes, rd_channels=reduction_chs) if use_se else None + + self.conv3 = conv2d_iabn( + planes, planes * self.expansion, kernel_size=1, stride=1, act_layer="identity") + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + if self.downsample is not None: + shortcut = self.downsample(x) + else: + shortcut = x + + out = self.conv1(x) + out = self.conv2(out) + if self.se is not None: + out = self.se(out) + + out = self.conv3(out) + out = out + shortcut # no inplace + out = self.relu(out) + + return out + + +class TResNet(nn.Module): + def __init__(self, layers, in_chans=3, num_classes=1000, width_factor=1.0, global_pool='fast', drop_rate=0.): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(TResNet, self).__init__() + + aa_layer = BlurPool2d + + # TResnet stages + self.inplanes = int(64 * width_factor) + self.planes = int(64 * width_factor) + conv1 = conv2d_iabn(in_chans * 16, self.planes, stride=1, kernel_size=3) + layer1 = self._make_layer( + BasicBlock, self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer) # 56x56 + 
layer2 = self._make_layer( + BasicBlock, self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer) # 28x28 + layer3 = self._make_layer( + Bottleneck, self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer) # 14x14 + layer4 = self._make_layer( + Bottleneck, self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer) # 7x7 + + # body + self.body = nn.Sequential(OrderedDict([ + ('SpaceToDepth', SpaceToDepthModule()), + ('conv1', conv1), + ('layer1', layer1), + ('layer2', layer2), + ('layer3', layer3), + ('layer4', layer4)])) + + self.feature_info = [ + dict(num_chs=self.planes, reduction=2, module=''), # Not with S2D? + dict(num_chs=self.planes, reduction=4, module='body.layer1'), + dict(num_chs=self.planes * 2, reduction=8, module='body.layer2'), + dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'), + dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4'), + ] + + # head + self.num_features = (self.planes * 8) * Bottleneck.expansion + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + # model initilization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu') + elif isinstance(m, nn.BatchNorm2d) or isinstance(m, InplaceAbn): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # residual connections special initialization + for m in self.modules(): + if isinstance(m, BasicBlock): + m.conv2[1].weight = nn.Parameter(torch.zeros_like(m.conv2[1].weight)) # BN to zero + if isinstance(m, Bottleneck): + m.conv3[1].weight = nn.Parameter(torch.zeros_like(m.conv3[1].weight)) # BN to zero + if isinstance(m, nn.Linear): + m.weight.data.normal_(0, 0.01) + + def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + layers = [] + if stride == 2: + # avg pooling before 1x1 conv + layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False)) + layers += [conv2d_iabn( + self.inplanes, planes * block.expansion, kernel_size=1, stride=1, act_layer="identity")] + downsample = nn.Sequential(*layers) + + layers = [] + layers.append(block( + self.inplanes, planes, stride, downsample, use_se=use_se, aa_layer=aa_layer)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block(self.inplanes, planes, use_se=use_se, aa_layer=aa_layer)) + return nn.Sequential(*layers) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='fast'): + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + return self.body(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_tresnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + TResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True), + **kwargs) + + +@register_model +def tresnet_m(pretrained=False, **kwargs): + model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) + return _create_tresnet('tresnet_m', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_m_miil_in21k(pretrained=False, **kwargs): + model_kwargs = dict(layers=[3, 4, 11, 
3], **kwargs) + return _create_tresnet('tresnet_m_miil_in21k', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_l(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 18, 3], width_factor=1.2, **kwargs) + return _create_tresnet('tresnet_l', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_xl(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 24, 3], width_factor=1.3, **kwargs) + return _create_tresnet('tresnet_xl', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_m_448(pretrained=False, **kwargs): + model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) + return _create_tresnet('tresnet_m_448', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_l_448(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 18, 3], width_factor=1.2, **kwargs) + return _create_tresnet('tresnet_l_448', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_xl_448(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 24, 3], width_factor=1.3, **kwargs) + return _create_tresnet('tresnet_xl_448', pretrained=pretrained, **model_kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/twins.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/twins.py new file mode 100644 index 0000000000..4aed09d90f --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/twins.py @@ -0,0 +1,422 @@ +""" Twins +A PyTorch impl of : `Twins: Revisiting the Design of Spatial Attention in Vision Transformers` + - https://arxiv.org/pdf/2104.13840.pdf + +Code/weights from https://github.com/Meituan-AutoML/Twins, original copyright/license info below + +""" +# -------------------------------------------------------- +# Twins +# Copyright (c) 2021 Meituan +# Licensed under The Apache 2.0 License [see LICENSE for details] +# Written by Xinjie Li, Xiangxiang Chu +# -------------------------------------------------------- +import math +from copy import deepcopy +from typing import Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .layers import Mlp, DropPath, to_2tuple, trunc_normal_ +from .registry import register_model +from .vision_transformer import Attention +from .helpers import build_model_with_cfg, overlay_external_default_cfg + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embeds.0.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'twins_pcpvt_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth', + ), + 'twins_pcpvt_base': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_base-e5ecb09b.pth', + ), + 'twins_pcpvt_large': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_large-d273f802.pth', + ), + 'twins_svt_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_small-42e5f78c.pth', + ), + 'twins_svt_base': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_base-c2265010.pth', + ), + 'twins_svt_large': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_large-90f6aaa9.pth', + ), +} + +Size_ = Tuple[int, int] + + +class LocallyGroupedAttn(nn.Module): + """ LSA: self attention within a group + """ + def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., ws=1): + assert ws != 1 + super(LocallyGroupedAttn, self).__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." + + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=True) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.ws = ws + + def forward(self, x, size: Size_): + # There are two implementations for this function, zero padding or mask. We don't observe obvious difference for + # both. You can choose any one, we recommend forward_padding because it's neat. However, + # the masking implementation is more reasonable and accurate. + B, N, C = x.shape + H, W = size + x = x.view(B, H, W, C) + pad_l = pad_t = 0 + pad_r = (self.ws - W % self.ws) % self.ws + pad_b = (self.ws - H % self.ws) % self.ws + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + _h, _w = Hp // self.ws, Wp // self.ws + x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) + qkv = self.qkv(x).reshape( + B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) + q, k, v = qkv[0], qkv[1], qkv[2] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) + x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + x = x.reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + # def forward_mask(self, x, size: Size_): + # B, N, C = x.shape + # H, W = size + # x = x.view(B, H, W, C) + # pad_l = pad_t = 0 + # pad_r = (self.ws - W % self.ws) % self.ws + # pad_b = (self.ws - H % self.ws) % self.ws + # x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + # _, Hp, Wp, _ = x.shape + # _h, _w = Hp // self.ws, Wp // self.ws + # mask = torch.zeros((1, Hp, Wp), device=x.device) + # mask[:, -pad_b:, :].fill_(1) + # mask[:, :, -pad_r:].fill_(1) + # + # x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) # B, _h, _w, ws, ws, C + # mask = mask.reshape(1, _h, self.ws, _w, self.ws).transpose(2, 3).reshape(1, _h * _w, self.ws * self.ws) + # attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) # 1, _h*_w, ws*ws, ws*ws + # attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-1000.0)).masked_fill(attn_mask == 0, float(0.0)) + # qkv = self.qkv(x).reshape( + # B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) + # # n_h, B, _w*_h, nhead, ws*ws, dim + # q, k, v = qkv[0], qkv[1], qkv[2] # B, _h*_w, n_head, ws*ws, dim_head + # attn = (q @ k.transpose(-2, -1)) * self.scale # B, _h*_w, n_head, ws*ws, ws*ws + # attn = attn + attn_mask.unsqueeze(2) + # attn = attn.softmax(dim=-1) + # attn = self.attn_drop(attn) # attn @v -> B, _h*_w, n_head, ws*ws, dim_head + # attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) + # x = 
attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) + # if pad_r > 0 or pad_b > 0: + # x = x[:, :H, :W, :].contiguous() + # x = x.reshape(B, N, C) + # x = self.proj(x) + # x = self.proj_drop(x) + # return x + + +class GlobalSubSampleAttn(nn.Module): + """ GSA: using a key to summarize the information for a group to be efficient. + """ + def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., sr_ratio=1): + super().__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." + + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.q = nn.Linear(dim, dim, bias=True) + self.kv = nn.Linear(dim, dim * 2, bias=True) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + self.norm = nn.LayerNorm(dim) + else: + self.sr = None + self.norm = None + + def forward(self, x, size: Size_): + B, N, C = x.shape + q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + if self.sr is not None: + x = x.permute(0, 2, 1).reshape(B, C, *size) + x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1) + x = self.norm(x) + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + k, v = kv[0], kv[1] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, ws=None): + super().__init__() + self.norm1 = norm_layer(dim) + if ws is None: + self.attn = Attention(dim, num_heads, False, None, attn_drop, drop) + elif ws == 1: + self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, drop, sr_ratio) + else: + self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, drop, ws) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x, size: Size_): + x = x + self.drop_path(self.attn(self.norm1(x), size)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PosConv(nn.Module): + # PEG from https://arxiv.org/abs/2102.10882 + def __init__(self, in_chans, embed_dim=768, stride=1): + super(PosConv, self).__init__() + self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim), ) + self.stride = stride + + def forward(self, x, size: Size_): + B, N, C = x.shape + cnn_feat_token = x.transpose(1, 2).view(B, C, *size) + x = self.proj(cnn_feat_token) + if self.stride == 1: + x += cnn_feat_token + x = x.flatten(2).transpose(1, 2) + return x + + def no_weight_decay(self): + return ['proj.%d.weight' % i for i in range(4)] + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + self.img_size = img_size + self.patch_size = patch_size + assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \ + f"img_size {img_size} should be divided by patch_size {patch_size}." + self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1] + self.num_patches = self.H * self.W + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + self.norm = nn.LayerNorm(embed_dim) + + def forward(self, x) -> Tuple[torch.Tensor, Size_]: + B, C, H, W = x.shape + + x = self.proj(x).flatten(2).transpose(1, 2) + x = self.norm(x) + out_size = (H // self.patch_size[0], W // self.patch_size[1]) + + return x, out_size + + +class Twins(nn.Module): + """ Twins Vision Transfomer (Revisiting Spatial Attention) + + Adapted from PVT (PyramidVisionTransformer) class at https://github.com/whai362/PVT.git + """ + def __init__( + self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, embed_dims=(64, 128, 256, 512), + num_heads=(1, 2, 4, 8), mlp_ratios=(4, 4, 4, 4), drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=(3, 4, 6, 3), sr_ratios=(8, 4, 2, 1), wss=None, + block_cls=Block): + super().__init__() + self.num_classes = num_classes + self.depths = depths + self.embed_dims = embed_dims + self.num_features = embed_dims[-1] + + img_size = to_2tuple(img_size) + prev_chs = in_chans + self.patch_embeds = nn.ModuleList() + self.pos_drops = nn.ModuleList() + for i in range(len(depths)): + self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i])) + self.pos_drops.append(nn.Dropout(p=drop_rate)) + prev_chs = embed_dims[i] + img_size = tuple(t // patch_size for t in img_size) + patch_size = 2 + + self.blocks = nn.ModuleList() + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + cur = 0 + for k in range(len(depths)): + _block = nn.ModuleList([block_cls( + dim=embed_dims[k], num_heads=num_heads[k], mlp_ratio=mlp_ratios[k], drop=drop_rate, + attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, sr_ratio=sr_ratios[k], + ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])]) + self.blocks.append(_block) + cur += depths[k] + + self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for 
embed_dim in embed_dims]) + + self.norm = norm_layer(self.num_features) + + # classification head + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + # init weights + self.apply(self._init_weights) + + @torch.jit.ignore + def no_weight_decay(self): + return set(['pos_block.' + n for n, p in self.pos_block.named_parameters()]) + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + + def forward_features(self, x): + B = x.shape[0] + for i, (embed, drop, blocks, pos_blk) in enumerate( + zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)): + x, size = embed(x) + x = drop(x) + for j, blk in enumerate(blocks): + x = blk(x, size) + if j == 0: + x = pos_blk(x, size) # PEG here + if i < len(self.depths) - 1: + x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous() + x = self.norm(x) + return x.mean(dim=1) # GAP here + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_twins(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + Twins, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + return model + + +@register_model +def twins_pcpvt_small(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_pcpvt_base(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_base', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_pcpvt_large(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_large', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_small(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_base(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[96, 192, 384, 768], 
num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_base', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_large(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_large', pretrained=pretrained, **model_kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/vgg.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/vgg.py new file mode 100644 index 0000000000..8bea03e7ce --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/vgg.py @@ -0,0 +1,261 @@ +"""VGG + +Adapted from https://github.com/pytorch/vision 'vgg.py' (BSD-3-Clause) with a few changes for +timm functionality. + +Copyright 2021 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Union, List, Dict, Any, cast + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, ConvBnAct +from .registry import register_model + +__all__ = [ + 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', + 'vgg19_bn', 'vgg19', +] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'features.0', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + 'vgg11': _cfg(url='https://download.pytorch.org/models/vgg11-bbd30ac9.pth'), + 'vgg13': _cfg(url='https://download.pytorch.org/models/vgg13-c768596a.pth'), + 'vgg16': _cfg(url='https://download.pytorch.org/models/vgg16-397923af.pth'), + 'vgg19': _cfg(url='https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'), + 'vgg11_bn': _cfg(url='https://download.pytorch.org/models/vgg11_bn-6002323d.pth'), + 'vgg13_bn': _cfg(url='https://download.pytorch.org/models/vgg13_bn-abd245e5.pth'), + 'vgg16_bn': _cfg(url='https://download.pytorch.org/models/vgg16_bn-6c64b313.pth'), + 'vgg19_bn': _cfg(url='https://download.pytorch.org/models/vgg19_bn-c79401a0.pth'), +} + + +cfgs: Dict[str, List[Union[str, int]]] = { + 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], + 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], +} + + +class ConvMlp(nn.Module): + + def __init__(self, in_features=512, out_features=4096, kernel_size=7, mlp_ratio=1.0, + drop_rate: float = 0.2, act_layer: nn.Module = None, conv_layer: nn.Module = None): + super(ConvMlp, self).__init__() + self.input_kernel_size = kernel_size + mid_features = int(out_features * mlp_ratio) + self.fc1 = conv_layer(in_features, mid_features, kernel_size, bias=True) + self.act1 = act_layer(True) + self.drop = nn.Dropout(drop_rate) + self.fc2 = conv_layer(mid_features, out_features, 1, bias=True) + self.act2 = act_layer(True) + + def forward(self, x): + if x.shape[-2] < self.input_kernel_size or x.shape[-1] < self.input_kernel_size: + # keep the 
input size >= 7x7 + output_size = (max(self.input_kernel_size, x.shape[-2]), max(self.input_kernel_size, x.shape[-1])) + x = F.adaptive_avg_pool2d(x, output_size) + x = self.fc1(x) + x = self.act1(x) + x = self.drop(x) + x = self.fc2(x) + x = self.act2(x) + return x + + +class VGG(nn.Module): + + def __init__( + self, + cfg: List[Any], + num_classes: int = 1000, + in_chans: int = 3, + output_stride: int = 32, + mlp_ratio: float = 1.0, + act_layer: nn.Module = nn.ReLU, + conv_layer: nn.Module = nn.Conv2d, + norm_layer: nn.Module = None, + global_pool: str = 'avg', + drop_rate: float = 0., + ) -> None: + super(VGG, self).__init__() + assert output_stride == 32 + self.num_classes = num_classes + self.num_features = 4096 + self.drop_rate = drop_rate + self.feature_info = [] + prev_chs = in_chans + net_stride = 1 + pool_layer = nn.MaxPool2d + layers: List[nn.Module] = [] + for v in cfg: + last_idx = len(layers) - 1 + if v == 'M': + self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{last_idx}')) + layers += [pool_layer(kernel_size=2, stride=2)] + net_stride *= 2 + else: + v = cast(int, v) + conv2d = conv_layer(prev_chs, v, kernel_size=3, padding=1) + if norm_layer is not None: + layers += [conv2d, norm_layer(v), act_layer(inplace=True)] + else: + layers += [conv2d, act_layer(inplace=True)] + prev_chs = v + self.features = nn.Sequential(*layers) + self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{len(layers) - 1}')) + self.pre_logits = ConvMlp( + prev_chs, self.num_features, 7, mlp_ratio=mlp_ratio, + drop_rate=drop_rate, act_layer=act_layer, conv_layer=conv_layer) + self.head = ClassifierHead( + self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + self._initialize_weights() + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.head = ClassifierHead( + self.num_features, self.num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x: torch.Tensor) -> torch.Tensor: + x = self.features(x) + x = self.pre_logits(x) + return x + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.forward_features(x) + x = self.head(x) + return x + + def _initialize_weights(self) -> None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.constant_(m.bias, 0) + + +def _filter_fn(state_dict): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + for k, v in state_dict.items(): + k_r = k + k_r = k_r.replace('classifier.0', 'pre_logits.fc1') + k_r = k_r.replace('classifier.3', 'pre_logits.fc2') + k_r = k_r.replace('classifier.6', 'head.fc') + if 'classifier.0.weight' in k: + v = v.reshape(-1, 512, 7, 7) + if 'classifier.3.weight' in k: + v = v.reshape(-1, 4096, 1, 1) + out_dict[k_r] = v + return out_dict + + +def _create_vgg(variant: str, pretrained: bool, **kwargs: Any) -> VGG: + cfg = variant.split('_')[0] + # NOTE: VGG is one of the only models with stride==1 features, so indices are offset from other models + out_indices = kwargs.get('out_indices', (0, 1, 2, 3, 4, 5)) + model = build_model_with_cfg( + VGG, 
variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=cfgs[cfg], + feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), + pretrained_filter_fn=_filter_fn, + **kwargs) + return model + + +@register_model +def vgg11(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 11-layer model (configuration "A") from + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg11', pretrained=pretrained, **model_args) + + +@register_model +def vgg11_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 11-layer model (configuration "A") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg11_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg13(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 13-layer model (configuration "B") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg13', pretrained=pretrained, **model_args) + + +@register_model +def vgg13_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 13-layer model (configuration "B") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg13_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg16(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 16-layer model (configuration "D") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg16', pretrained=pretrained, **model_args) + + +@register_model +def vgg16_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 16-layer model (configuration "D") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg16_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg19(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 19-layer model (configuration "E") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg19', pretrained=pretrained, **model_args) + + +@register_model +def vgg19_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 19-layer model (configuration 'E') with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg19_bn', pretrained=pretrained, **model_args) \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/visformer.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/visformer.py new file mode 100644 index 0000000000..7740f38132 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/visformer.py @@ -0,0 +1,409 @@ +""" Visformer + +Paper: Visformer: The Vision-friendly Transformer - https://arxiv.org/abs/2104.12533 + +From original at https://github.com/danczs/Visformer + +""" +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, 
IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d, create_classifier +from .registry import register_model + + +__all__ = ['Visformer'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'head', + **kwargs + } + + +default_cfgs = dict( + visformer_tiny=_cfg(), + visformer_small=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/visformer_small-839e1f5b.pth' + ), +) + + +class SpatialMlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, + act_layer=nn.GELU, drop=0., group=8, spatial_conv=False): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.in_features = in_features + self.out_features = out_features + self.spatial_conv = spatial_conv + if self.spatial_conv: + if group < 2: # net setting + hidden_features = in_features * 5 // 6 + else: + hidden_features = in_features * 2 + self.hidden_features = hidden_features + self.group = group + self.drop = nn.Dropout(drop) + self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False) + self.act1 = act_layer() + if self.spatial_conv: + self.conv2 = nn.Conv2d( + hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False) + self.act2 = act_layer() + else: + self.conv2 = None + self.act2 = None + self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False) + + def forward(self, x): + x = self.conv1(x) + x = self.act1(x) + x = self.drop(x) + if self.conv2 is not None: + x = self.conv2(x) + x = self.act2(x) + x = self.conv3(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, head_dim_ratio=1., attn_drop=0., proj_drop=0.): + super().__init__() + self.dim = dim + self.num_heads = num_heads + head_dim = round(dim // num_heads * head_dim_ratio) + self.head_dim = head_dim + self.scale = head_dim ** -0.5 + self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, C, H, W = x.shape + x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3) + q, k, v = x[0], x[1], x[2] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + def __init__(self, dim, num_heads, head_dim_ratio=1., mlp_ratio=4., + drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm2d, + group=8, attn_disabled=False, spatial_conv=False): + super().__init__() + self.spatial_conv = spatial_conv + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + if attn_disabled: + self.norm1 = None + self.attn = None + else: + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, head_dim_ratio=head_dim_ratio, attn_drop=attn_drop, proj_drop=drop) + + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = SpatialMlp( + in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, + group=group, spatial_conv=spatial_conv) # new setting + + def forward(self, x): + if self.attn is not None: + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Visformer(nn.Module): + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, init_channels=32, embed_dim=384, + depth=12, num_heads=6, mlp_ratio=4., drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + norm_layer=LayerNorm2d, attn_stage='111', pos_embed=True, spatial_conv='111', + vit_stem=False, group=8, global_pool='avg', conv_init=False, embed_norm=None): + super().__init__() + img_size = to_2tuple(img_size) + self.num_classes = num_classes + self.embed_dim = embed_dim + self.init_channels = init_channels + self.img_size = img_size + self.vit_stem = vit_stem + self.conv_init = conv_init + if isinstance(depth, (list, tuple)): + self.stage_num1, self.stage_num2, self.stage_num3 = depth + depth = sum(depth) + else: + self.stage_num1 = self.stage_num3 = depth // 3 + self.stage_num2 = depth - self.stage_num1 - self.stage_num3 + self.pos_embed = pos_embed + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + + # stage 1 + if self.vit_stem: + self.stem = None + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dim, norm_layer=embed_norm, flatten=False) + img_size = [x // 16 for x in img_size] + else: + if self.init_channels is None: + self.stem = None + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 2, in_chans=in_chans, + embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False) + img_size = [x // 8 for x in img_size] + else: + self.stem = nn.Sequential( + nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False), + nn.BatchNorm2d(self.init_channels), + nn.ReLU(inplace=True) + ) + img_size = [x // 2 for x in img_size] + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 4, in_chans=self.init_channels, + embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False) + img_size = [x // 4 for x in img_size] + + if self.pos_embed: + if self.vit_stem: + self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) + else: + self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim//2, *img_size)) + self.pos_drop = nn.Dropout(p=drop_rate) + self.stage1 = nn.ModuleList([ + Block( + dim=embed_dim//2, num_heads=num_heads, head_dim_ratio=0.5, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[0] == '0'), spatial_conv=(spatial_conv[0] == '1') + ) + for i in range(self.stage_num1) + ]) + + # stage2 + if not self.vit_stem: + self.patch_embed2 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim // 2, + embed_dim=embed_dim, norm_layer=embed_norm, flatten=False) + img_size = [x // 2 for x in img_size] + if self.pos_embed: + self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) + self.stage2 = nn.ModuleList([ + Block( + dim=embed_dim, 
num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[1] == '0'), spatial_conv=(spatial_conv[1] == '1') + ) + for i in range(self.stage_num1, self.stage_num1+self.stage_num2) + ]) + + # stage 3 + if not self.vit_stem: + self.patch_embed3 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim, + embed_dim=embed_dim * 2, norm_layer=embed_norm, flatten=False) + img_size = [x // 2 for x in img_size] + if self.pos_embed: + self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size)) + self.stage3 = nn.ModuleList([ + Block( + dim=embed_dim*2, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[2] == '0'), spatial_conv=(spatial_conv[2] == '1') + ) + for i in range(self.stage_num1+self.stage_num2, depth) + ]) + + # head + self.num_features = embed_dim if self.vit_stem else embed_dim * 2 + self.norm = norm_layer(self.num_features) + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + # weights init + if self.pos_embed: + trunc_normal_(self.pos_embed1, std=0.02) + if not self.vit_stem: + trunc_normal_(self.pos_embed2, std=0.02) + trunc_normal_(self.pos_embed3, std=0.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + if self.conv_init: + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + else: + trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0.) 
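+ # get_classifier() / reset_classifier() follow the usual timm head interface,
+ # e.g. m = visformer_small(); m.reset_classifier(10) swaps in a fresh 10-class
+ # head while keeping the backbone weights.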
+ + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + if self.stem is not None: + x = self.stem(x) + + # stage 1 + x = self.patch_embed1(x) + if self.pos_embed: + x = x + self.pos_embed1 + x = self.pos_drop(x) + for b in self.stage1: + x = b(x) + + # stage 2 + if not self.vit_stem: + x = self.patch_embed2(x) + if self.pos_embed: + x = x + self.pos_embed2 + x = self.pos_drop(x) + for b in self.stage2: + x = b(x) + + # stage3 + if not self.vit_stem: + x = self.patch_embed3(x) + if self.pos_embed: + x = x + self.pos_embed3 + x = self.pos_drop(x) + for b in self.stage3: + x = b(x) + + x = self.norm(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + x = self.head(x) + return x + + +def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + model = build_model_with_cfg( + Visformer, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + return model + + +@register_model +def visformer_tiny(pretrained=False, **kwargs): + model_cfg = dict( + init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4., group=8, + attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, + embed_norm=nn.BatchNorm2d, **kwargs) + model = _create_visformer('visformer_tiny', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def visformer_small(pretrained=False, **kwargs): + model_cfg = dict( + init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4., group=8, + attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, + embed_norm=nn.BatchNorm2d, **kwargs) + model = _create_visformer('visformer_small', pretrained=pretrained, **model_cfg) + return model + + +# @register_model +# def visformer_net1(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=None, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=True, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net2(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net3(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net4(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net5(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111', +# spatial_conv='111', vit_stem=False, 
conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net6(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111', +# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net7(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=(6, 7, 7), num_heads=6, group=1, attn_stage='000', +# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model + + + + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/vision_transformer.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/vision_transformer.py new file mode 100644 index 0000000000..ca8f52defd --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/vision_transformer.py @@ -0,0 +1,896 @@ +""" Vision Transformer (ViT) in PyTorch + +A PyTorch implement of Vision Transformers as described in: + +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' + - https://arxiv.org/abs/2010.11929 + +`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` + - https://arxiv.org/abs/2106.10270 + +The official jax code is released and available at https://github.com/google-research/vision_transformer + +DeiT model defs and weights from https://github.com/facebookresearch/deit, +paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 + +Acknowledgments: +* The paper authors for releasing code and weights, thanks! +* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... 
check it out +for some einops/einsum fun +* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT +* Bert reference code checks against Huggingface Transformers and Tensorflow Bert + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +import logging +from functools import partial +from collections import OrderedDict +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg, named_apply, adapt_input_conv +from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_, lecun_normal_ +from .registry import register_model + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # patch models (weights from official Google JAX impl) + 'vit_tiny_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_tiny_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_patch32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_small_patch32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_small_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_base_patch32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz'), + 'vit_base_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_patch32_224': _cfg( + url='', # no official model weights for this combo, only for in21k + ), + 
'vit_large_patch32_384': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz'), + 'vit_large_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + + # patch models, imagenet21k (weights from official Google JAX impl) + 'vit_tiny_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_small_patch32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_small_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_base_patch32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_base_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_large_patch32_224_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth', + num_classes=21843), + 'vit_large_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz', + num_classes=21843), + 'vit_huge_patch14_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/imagenet21k/ViT-H_14.npz', + hf_hub='timm/vit_huge_patch14_224_in21k', + num_classes=21843), + + # SAM trained models (https://arxiv.org/abs/2106.01548) + 'vit_base_patch32_sam_224': _cfg( + url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz'), + 'vit_base_patch16_sam_224': _cfg( + url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz'), + + # deit models (FB weights) + 'deit_tiny_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'deit_small_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'deit_base_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'deit_base_patch16_384': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 384, 384), crop_pct=1.0), + 'deit_tiny_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')), + 'deit_small_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth', + 
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')), + 'deit_base_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')), + 'deit_base_distilled_patch16_384': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 384, 384), crop_pct=1.0, + classifier=('head', 'head_dist')), + + # ViT ImageNet-21K-P pretraining by MILL + 'vit_base_patch16_224_miil_in21k': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/vit_base_patch16_224_in21k_miil.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', num_classes=11221, + ), + 'vit_base_patch16_224_miil': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm' + '/vit_base_patch16_224_1k_miil_84_4.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', + ), +} + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class VisionTransformer(nn.Module): + """ Vision Transformer + + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` + - https://arxiv.org/abs/2010.11929 + + Includes distillation token & head support for `DeiT: Data-efficient Image Transformers` + - https://arxiv.org/abs/2012.12877 + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None, + act_layer=None, weight_init=''): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + distilled (bool): model includes a distillation token and head as in DeiT models + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + embed_layer (nn.Module): patch embedding layer + norm_layer: (nn.Module): normalization layer + weight_init: (str): weight init scheme + """ + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.num_tokens = 2 if distilled else 1 + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + + self.patch_embed = embed_layer( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.Sequential(*[ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, + attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + # Representation layer + if representation_size and not distilled: + self.num_features = representation_size + self.pre_logits = nn.Sequential(OrderedDict([ + ('fc', nn.Linear(embed_dim, representation_size)), + ('act', nn.Tanh()) + ])) + else: + self.pre_logits = nn.Identity() + + # Classifier head(s) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = None + if distilled: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + 
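+        # With distilled=True the model carries two classifier heads (`head` and
+        # `head_dist`); forward() returns both sets of logits during training and
+        # their average at inference time (see forward() below).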
+ self.init_weights(weight_init) + + def init_weights(self, mode=''): + assert mode in ('jax', 'jax_nlhb', 'nlhb', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. + trunc_normal_(self.pos_embed, std=.02) + if self.dist_token is not None: + trunc_normal_(self.dist_token, std=.02) + if mode.startswith('jax'): + # leave cls token as zeros to match jax impl + named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self) + else: + trunc_normal_(self.cls_token, std=.02) + self.apply(_init_vit_weights) + + def _init_weights(self, m): + # this fn left here for compat with downstream users + _init_vit_weights(m) + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path, prefix=''): + _load_weights(self, checkpoint_path, prefix) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token', 'dist_token'} + + def get_classifier(self): + if self.dist_token is None: + return self.head + else: + return self.head, self.head_dist + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + if self.num_tokens == 2: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks + if self.dist_token is None: + x = torch.cat((cls_token, x), dim=1) + else: + x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) + x = self.pos_drop(x + self.pos_embed) + x = self.blocks(x) + x = self.norm(x) + if self.dist_token is None: + return self.pre_logits(x[:, 0]) + else: + return x[:, 0], x[:, 1] + + def forward(self, x): + x = self.forward_features(x) + if self.head_dist is not None: + x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple + if self.training and not torch.jit.is_scripting(): + # during inference, return the average of both classifier predictions + return x, x_dist + else: + return (x + x_dist) / 2 + else: + x = self.head(x) + return x + + +def _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False): + """ ViT weight initialization + * When called without n, head_bias, jax_impl args it will behave exactly the same + as my original init for compatibility with prev hparam / downstream use cases (ie DeiT). 
+ * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + elif name.startswith('pre_logits'): + lecun_normal_(module.weight) + nn.init.zeros_(module.bias) + else: + if jax_impl: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + else: + trunc_normal_(module.weight, std=.02) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif jax_impl and isinstance(module, nn.Conv2d): + # NOTE conv was left to pytorch default in my original init + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)): + nn.init.zeros_(module.bias) + nn.init.ones_(module.weight) + + +@torch.no_grad() +def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): + """ Load weights from .npz checkpoints for official Google Brain Flax implementation + """ + import numpy as np + + def _n2p(w, t=True): + if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: + w = w.flatten() + if t: + if w.ndim == 4: + w = w.transpose([3, 2, 0, 1]) + elif w.ndim == 3: + w = w.transpose([2, 0, 1]) + elif w.ndim == 2: + w = w.transpose([1, 0]) + return torch.from_numpy(w) + + w = np.load(checkpoint_path) + if not prefix and 'opt/target/embedding/kernel' in w: + prefix = 'opt/target/' + + if hasattr(model.patch_embed, 'backbone'): + # hybrid + backbone = model.patch_embed.backbone + stem_only = not hasattr(backbone, 'stem') + stem = backbone if stem_only else backbone.stem + stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) + stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) + stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) + if not stem_only: + for i, stage in enumerate(backbone.stages): + for j, block in enumerate(stage.blocks): + bp = f'{prefix}block{i + 1}/unit{j + 1}/' + for r in range(3): + getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) + getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) + getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) + if block.downsample is not None: + block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) + block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) + block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) + embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) + else: + embed_conv_w = adapt_input_conv( + model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) + model.patch_embed.proj.weight.copy_(embed_conv_w) + model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) + model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) + pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) + if pos_embed_w.shape != model.pos_embed.shape: + pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights + pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + model.pos_embed.copy_(pos_embed_w) + model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) + model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) + 
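+    # The classifier head (and optional pre-logits fc) below are only copied when their
+    # shapes / keys match the checkpoint, so heads resized for fine-tuning are left untouched.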
if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: + model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) + model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) + if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: + model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) + model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) + for i, block in enumerate(model.blocks.children()): + block_prefix = f'{prefix}Transformer/encoderblock_{i}/' + mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' + block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) + block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) + block.attn.qkv.weight.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) + block.attn.qkv.bias.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) + block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) + block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) + for r in range(2): + getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) + getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) + block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) + block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) + + +def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()): + # Rescale the grid of position embeddings when loading from state_dict. Adapted from + # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 + _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) + ntok_new = posemb_new.shape[1] + if num_tokens: + posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:] + ntok_new -= num_tokens + else: + posemb_tok, posemb_grid = posemb[:, :0], posemb[0] + gs_old = int(math.sqrt(len(posemb_grid))) + if not len(gs_new): # backwards compatibility + gs_new = [int(math.sqrt(ntok_new))] * 2 + assert len(gs_new) >= 2 + _logger.info('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new) + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bicubic', align_corners=False) + posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1) + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + return posemb + + +def checkpoint_filter_fn(state_dict, model): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + if 'model' in state_dict: + # For deit models + state_dict = state_dict['model'] + for k, v in state_dict.items(): + if 'patch_embed.proj.weight' in k and len(v.shape) < 4: + # For old models that I trained prior to conv based patchification + O, I, H, W = model.patch_embed.proj.weight.shape + v = v.reshape(O, -1, H, W) + elif k == 'pos_embed' and v.shape != model.pos_embed.shape: + # To resize pos embedding when using model at different size from pretrained weights + v = resize_pos_embed( + v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + out_dict[k] = v + return out_dict + + +def _create_vision_transformer(variant, pretrained=False, 
default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + # NOTE this extra code to support handling of repr size for in21k pretrained models + default_num_classes = default_cfg['num_classes'] + num_classes = kwargs.get('num_classes', default_num_classes) + repr_size = kwargs.pop('representation_size', None) + if repr_size is not None and num_classes != default_num_classes: + # Remove representation layer if fine-tuning. This may not always be the desired action, + # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? + _logger.warning("Removing representation layer for fine-tuning.") + repr_size = None + + model = build_model_with_cfg( + VisionTransformer, variant, pretrained, + default_cfg=default_cfg, + representation_size=repr_size, + pretrained_filter_fn=checkpoint_filter_fn, + pretrained_custom_load='npz' in default_cfg['url'], + **kwargs) + return model + + +@register_model +def vit_tiny_patch16_224(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16) + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_patch16_384(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16) @ 384x384. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_224(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/32) + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_384(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/32) at 384x384. + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_224(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_384(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k, source https://github.com/google-research/vision_transformer. 
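+    Minimal usage sketch (names as defined in this file; output shape assumes the
+    default 1000-class config):
+        model = vit_base_patch32_224(pretrained=False)
+        logits = model(torch.randn(1, 3, 224, 224))  # -> (1, 1000)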
+ """ + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_384(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_384(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_224(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights. + """ + model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_384(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_224(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_384(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. 
+ """ + model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_sam_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) w/ SAM pretrained weights. Paper: https://arxiv.org/abs/2106.01548 + """ + # NOTE original SAM weights release worked with representation_size=768 + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, representation_size=0, **kwargs) + model = _create_vision_transformer('vit_base_patch16_sam_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_sam_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/32) w/ SAM pretrained weights. Paper: https://arxiv.org/abs/2106.01548 + """ + # NOTE original SAM weights release worked with representation_size=768 + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, representation_size=0, **kwargs) + model = _create_vision_transformer('vit_base_patch32_sam_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict( + patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. 
+ NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights + """ + model_kwargs = dict( + patch_size=32, embed_dim=1024, depth=24, num_heads=16, representation_size=1024, **kwargs) + model = _create_vision_transformer('vit_large_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_huge_patch14_224_in21k(pretrained=False, **kwargs): + """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights + """ + model_kwargs = dict( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, representation_size=1280, **kwargs) + model = _create_vision_transformer('vit_huge_patch14_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_tiny_patch16_224(pretrained=False, **kwargs): + """ DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('deit_tiny_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_small_patch16_224(pretrained=False, **kwargs): + """ DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('deit_small_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_base_patch16_224(pretrained=False, **kwargs): + """ DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. 
+ """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('deit_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_base_patch16_384(pretrained=False, **kwargs): + """ DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('deit_base_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer( + 'deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def deit_small_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer( + 'deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def deit_base_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + 'deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def deit_base_distilled_patch16_384(pretrained=False, **kwargs): + """ DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + 'deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_miil_in21k(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_miil_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_miil(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). 
+ Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_miil', pretrained=pretrained, **model_kwargs) + return model \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/vision_transformer_hybrid.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/vision_transformer_hybrid.py new file mode 100644 index 0000000000..d5f0a5377e --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/vision_transformer_hybrid.py @@ -0,0 +1,363 @@ +""" Hybrid Vision Transformer (ViT) in PyTorch + +A PyTorch implement of the Hybrid Vision Transformers as described in: + +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' + - https://arxiv.org/abs/2010.11929 + +`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` + - https://arxiv.org/abs/2106.TODO + +NOTE These hybrid model definitions depend on code in vision_transformer.py. +They were moved here to keep file sizes sane. + +Hacked together by / Copyright 2021 Ross Wightman +""" +from copy import deepcopy +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .layers import StdConv2dSame, StdConv2d, to_2tuple +from .resnet import resnet26d, resnet50d +from .resnetv2 import ResNetV2, create_resnetv2_stem +from .registry import register_model +from timm.models.vision_transformer import _create_vision_transformer + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'patch_embed.backbone.stem.conv', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # hybrid in-1k models (weights from official JAX impl where they exist) + 'vit_tiny_r_s16_p8_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', + first_conv='patch_embed.backbone.conv'), + 'vit_tiny_r_s16_p8_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + first_conv='patch_embed.backbone.conv', input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_r26_s32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R26_S_32-i21k-300ep-lr_0.001-aug_light0-wd_0.03-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.03-res_224.npz', + ), + 'vit_small_r26_s32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_r26_s32_224': _cfg(), + 'vit_base_r50_s16_224': _cfg(), + 'vit_base_r50_s16_384': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_r50_s32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R50_L_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz' + ), + 
'vit_large_r50_s32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0 + ), + + # hybrid in-21k models (weights from official Google JAX impl where they exist) + 'vit_tiny_r_s16_p8_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843, crop_pct=0.9, first_conv='patch_embed.backbone.conv'), + 'vit_small_r26_s32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843, crop_pct=0.9), + 'vit_base_r50_s16_224_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth', + num_classes=21843, crop_pct=0.9), + 'vit_large_r50_s32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0.npz', + num_classes=21843, crop_pct=0.9), + + # hybrid models (using timm resnet backbones) + 'vit_small_resnet26d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_small_resnet50d_s16_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_base_resnet26d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_base_resnet50d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), +} + + +class HybridEmbed(nn.Module): + """ CNN Feature Map Embedding + Extract feature map from CNN, flatten, project to embedding dim. 
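+    For example, a 224x224 input through a stride-32 backbone gives a 7x7 feature map,
+    which with the default patch_size=1 becomes 49 tokens projected to embed_dim.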
+ """ + def __init__(self, backbone, img_size=224, patch_size=1, feature_size=None, in_chans=3, embed_dim=768): + super().__init__() + assert isinstance(backbone, nn.Module) + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.backbone = backbone + if feature_size is None: + with torch.no_grad(): + # NOTE Most reliable way of determining output dims is to run forward pass + training = backbone.training + if training: + backbone.eval() + o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1])) + if isinstance(o, (list, tuple)): + o = o[-1] # last feature if backbone outputs list/tuple of features + feature_size = o.shape[-2:] + feature_dim = o.shape[1] + backbone.train(training) + else: + feature_size = to_2tuple(feature_size) + if hasattr(self.backbone, 'feature_info'): + feature_dim = self.backbone.feature_info.channels()[-1] + else: + feature_dim = self.backbone.num_features + assert feature_size[0] % patch_size[0] == 0 and feature_size[1] % patch_size[1] == 0 + self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + x = self.backbone(x) + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +def _create_vision_transformer_hybrid(variant, backbone, pretrained=False, **kwargs): + embed_layer = partial(HybridEmbed, backbone=backbone) + kwargs.setdefault('patch_size', 1) # default patch size for hybrid models if not set + return _create_vision_transformer( + variant, pretrained=pretrained, embed_layer=embed_layer, default_cfg=default_cfgs[variant], **kwargs) + + +def _resnetv2(layers=(3, 4, 9), **kwargs): + """ ResNet-V2 backbone helper""" + padding_same = kwargs.get('padding_same', True) + stem_type = 'same' if padding_same else '' + conv_layer = partial(StdConv2dSame, eps=1e-8) if padding_same else partial(StdConv2d, eps=1e-8) + if len(layers): + backbone = ResNetV2( + layers=layers, num_classes=0, global_pool='', in_chans=kwargs.get('in_chans', 3), + preact=False, stem_type=stem_type, conv_layer=conv_layer) + else: + backbone = create_resnetv2_stem( + kwargs.get('in_chans', 3), stem_type=stem_type, preact=False, conv_layer=conv_layer) + return backbone + + +@register_model +def vit_tiny_r_s16_p8_224(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 224 x 224. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_r_s16_p8_384(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 384 x 384. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_224(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. 
+ """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_384(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r26_s32_224(pretrained=False, **kwargs): + """ R26+ViT-B/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r26_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_224(pretrained=False, **kwargs): + """ R50+ViT-B/S16 hybrid from original paper (https://arxiv.org/abs/2010.11929). + """ + backbone = _resnetv2((3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_384(pretrained=False, **kwargs): + """ R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + backbone = _resnetv2((3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50_384(pretrained=False, **kwargs): + # DEPRECATED this is forwarding to model def above for backwards compatibility + return vit_base_r50_s16_384(pretrained=pretrained, **kwargs) + + +@register_model +def vit_large_r50_s32_224(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_r50_s32_384(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_r_s16_p8_224_in21k(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid. ImageNet-21k. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_224_in21k(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. ImageNet-21k. 
+ """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_224_in21k(pretrained=False, **kwargs): + """ R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + """ + backbone = _resnetv2(layers=(3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, representation_size=768, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50_224_in21k(pretrained=False, **kwargs): + # DEPRECATED this is forwarding to model def above for backwards compatibility + return vit_base_r50_s16_224_in21k(pretrained=pretrained, **kwargs) + + +@register_model +def vit_large_r50_s32_224_in21k(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. ImageNet-21k. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_resnet26d_224(pretrained=False, **kwargs): + """ Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights. + """ + backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_resnet26d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_resnet50d_s16_224(pretrained=False, **kwargs): + """ Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights. + """ + backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[3]) + model_kwargs = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_resnet50d_s16_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet26d_224(pretrained=False, **kwargs): + """ Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights. + """ + backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_resnet26d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50d_224(pretrained=False, **kwargs): + """ Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. 
+ """ + backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_resnet50d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/vovnet.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/vovnet.py new file mode 100644 index 0000000000..ec5b3e8160 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/vovnet.py @@ -0,0 +1,406 @@ +""" VoVNet (V1 & V2) + +Papers: +* `An Energy and GPU-Computation Efficient Backbone Network` - https://arxiv.org/abs/1904.09730 +* `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + +Looked at https://github.com/youngwanLEE/vovnet-detectron2 & +https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py +for some reference, rewrote most of the code. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .registry import register_model +from .helpers import build_model_with_cfg +from .layers import ConvBnAct, SeparableConvBnAct, BatchNormAct2d, ClassifierHead, DropPath,\ + create_attn, create_norm_act, get_norm_act_layer + + +# model cfgs adapted from https://github.com/youngwanLEE/vovnet-detectron2 & +# https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py +model_cfgs = dict( + vovnet39a=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=False, + depthwise=False, + attn='', + ), + vovnet57a=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 4, 3], + residual=False, + depthwise=False, + attn='', + + ), + ese_vovnet19b_slim_dw=dict( + stem_chs=[64, 64, 64], + stage_conv_chs=[64, 80, 96, 112], + stage_out_chs=[112, 256, 384, 512], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=True, + attn='ese', + + ), + ese_vovnet19b_dw=dict( + stem_chs=[64, 64, 64], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=True, + attn='ese', + ), + ese_vovnet19b_slim=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[64, 80, 96, 112], + stage_out_chs=[112, 256, 384, 512], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=False, + attn='ese', + ), + ese_vovnet19b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=False, + attn='ese', + + ), + ese_vovnet39b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=True, + depthwise=False, + attn='ese', + ), + ese_vovnet57b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 4, 3], + 
residual=True, + depthwise=False, + attn='ese', + + ), + ese_vovnet99b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 3, 9, 3], + residual=True, + depthwise=False, + attn='ese', + ), + eca_vovnet39b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=True, + depthwise=False, + attn='eca', + ), +) +model_cfgs['ese_vovnet39b_evos'] = model_cfgs['ese_vovnet39b'] +model_cfgs['ese_vovnet99b_iabn'] = model_cfgs['ese_vovnet99b'] + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', + } + + +default_cfgs = dict( + vovnet39a=_cfg(url=''), + vovnet57a=_cfg(url=''), + ese_vovnet19b_slim_dw=_cfg(url=''), + ese_vovnet19b_dw=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ese_vovnet19b_dw-a8741004.pth'), + ese_vovnet19b_slim=_cfg(url=''), + ese_vovnet39b=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ese_vovnet39b-f912fe73.pth'), + ese_vovnet57b=_cfg(url=''), + ese_vovnet99b=_cfg(url=''), + eca_vovnet39b=_cfg(url=''), + ese_vovnet39b_evos=_cfg(url=''), + ese_vovnet99b_iabn=_cfg(url=''), +) + + +class SequentialAppendList(nn.Sequential): + def __init__(self, *args): + super(SequentialAppendList, self).__init__(*args) + + def forward(self, x: torch.Tensor, concat_list: List[torch.Tensor]) -> torch.Tensor: + for i, module in enumerate(self): + if i == 0: + concat_list.append(module(x)) + else: + concat_list.append(module(concat_list[-1])) + x = torch.cat(concat_list, dim=1) + return x + + +class OsaBlock(nn.Module): + + def __init__(self, in_chs, mid_chs, out_chs, layer_per_block, residual=False, + depthwise=False, attn='', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path=None): + super(OsaBlock, self).__init__() + + self.residual = residual + self.depthwise = depthwise + conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) + + next_in_chs = in_chs + if self.depthwise and next_in_chs != mid_chs: + assert not residual + self.conv_reduction = ConvBnAct(next_in_chs, mid_chs, 1, **conv_kwargs) + else: + self.conv_reduction = None + + mid_convs = [] + for i in range(layer_per_block): + if self.depthwise: + conv = SeparableConvBnAct(mid_chs, mid_chs, **conv_kwargs) + else: + conv = ConvBnAct(next_in_chs, mid_chs, 3, **conv_kwargs) + next_in_chs = mid_chs + mid_convs.append(conv) + self.conv_mid = SequentialAppendList(*mid_convs) + + # feature aggregation + next_in_chs = in_chs + layer_per_block * mid_chs + self.conv_concat = ConvBnAct(next_in_chs, out_chs, **conv_kwargs) + + if attn: + self.attn = create_attn(attn, out_chs) + else: + self.attn = None + + self.drop_path = drop_path + + def forward(self, x): + output = [x] + if self.conv_reduction is not None: + x = self.conv_reduction(x) + x = self.conv_mid(x, output) + x = self.conv_concat(x) + if self.attn is not None: + x = self.attn(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.residual: + x = x + output[0] + return x + + +class OsaStage(nn.Module): + + def __init__(self, in_chs, mid_chs, out_chs, block_per_stage, layer_per_block, downsample=True, + residual=True, 
depthwise=False, attn='ese', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, + drop_path_rates=None): + super(OsaStage, self).__init__() + + if downsample: + self.pool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) + else: + self.pool = None + + blocks = [] + for i in range(block_per_stage): + last_block = i == block_per_stage - 1 + if drop_path_rates is not None and drop_path_rates[i] > 0.: + drop_path = DropPath(drop_path_rates[i]) + else: + drop_path = None + blocks += [OsaBlock( + in_chs, mid_chs, out_chs, layer_per_block, residual=residual and i > 0, depthwise=depthwise, + attn=attn if last_block else '', norm_layer=norm_layer, act_layer=act_layer, drop_path=drop_path) + ] + in_chs = out_chs + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + if self.pool is not None: + x = self.pool(x) + x = self.blocks(x) + return x + + +class VovNet(nn.Module): + + def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0., stem_stride=4, + output_stride=32, norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path_rate=0.): + """ VovNet (v2) + """ + super(VovNet, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert stem_stride in (4, 2) + assert output_stride == 32 # FIXME support dilation + + stem_chs = cfg["stem_chs"] + stage_conv_chs = cfg["stage_conv_chs"] + stage_out_chs = cfg["stage_out_chs"] + block_per_stage = cfg["block_per_stage"] + layer_per_block = cfg["layer_per_block"] + conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) + + # Stem module + last_stem_stride = stem_stride // 2 + conv_type = SeparableConvBnAct if cfg["depthwise"] else ConvBnAct + self.stem = nn.Sequential(*[ + ConvBnAct(in_chans, stem_chs[0], 3, stride=2, **conv_kwargs), + conv_type(stem_chs[0], stem_chs[1], 3, stride=1, **conv_kwargs), + conv_type(stem_chs[1], stem_chs[2], 3, stride=last_stem_stride, **conv_kwargs), + ]) + self.feature_info = [dict( + num_chs=stem_chs[1], reduction=2, module=f'stem.{1 if stem_stride == 4 else 2}')] + current_stride = stem_stride + + # OSA stages + stage_dpr = torch.split(torch.linspace(0, drop_path_rate, sum(block_per_stage)), block_per_stage) + in_ch_list = stem_chs[-1:] + stage_out_chs[:-1] + stage_args = dict(residual=cfg["residual"], depthwise=cfg["depthwise"], attn=cfg["attn"], **conv_kwargs) + stages = [] + for i in range(4): # num_stages + downsample = stem_stride == 2 or i > 0 # first stage has no stride/downsample if stem_stride is 4 + stages += [OsaStage( + in_ch_list[i], stage_conv_chs[i], stage_out_chs[i], block_per_stage[i], layer_per_block, + downsample=downsample, drop_path_rates=stage_dpr[i], **stage_args) + ] + self.num_features = stage_out_chs[i] + current_stride *= 2 if downsample else 1 + self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')] + + self.stages = nn.Sequential(*stages) + + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) 
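+ # Init scheme: Conv weights get He/Kaiming fan-out init matched to ReLU,
+ # and BatchNorm affine params start as the identity transform (weight=1, bias=0).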
+ elif isinstance(m, nn.Linear): + nn.init.zeros_(m.bias) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + return self.stages(x) + + def forward(self, x): + x = self.forward_features(x) + return self.head(x) + + +def _create_vovnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + VovNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +@register_model +def vovnet39a(pretrained=False, **kwargs): + return _create_vovnet('vovnet39a', pretrained=pretrained, **kwargs) + + +@register_model +def vovnet57a(pretrained=False, **kwargs): + return _create_vovnet('vovnet57a', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet19b_slim_dw(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet19b_slim_dw', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet19b_dw(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet19b_dw', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet19b_slim(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet19b_slim', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet39b(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet39b', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet57b(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet57b', pretrained=pretrained, **kwargs) + + +@register_model +def ese_vovnet99b(pretrained=False, **kwargs): + return _create_vovnet('ese_vovnet99b', pretrained=pretrained, **kwargs) + + +@register_model +def eca_vovnet39b(pretrained=False, **kwargs): + return _create_vovnet('eca_vovnet39b', pretrained=pretrained, **kwargs) + + +# Experimental Models + +@register_model +def ese_vovnet39b_evos(pretrained=False, **kwargs): + def norm_act_fn(num_features, **nkwargs): + return create_norm_act('EvoNormSample', num_features, jit=False, **nkwargs) + return _create_vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=norm_act_fn, **kwargs) + + +@register_model +def ese_vovnet99b_iabn(pretrained=False, **kwargs): + norm_layer = get_norm_act_layer('iabn') + return _create_vovnet( + 'ese_vovnet99b_iabn', pretrained=pretrained, norm_layer=norm_layer, act_layer=nn.LeakyReLU, **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/xception.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/xception.py new file mode 100644 index 0000000000..86f558cb5b --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/xception.py @@ -0,0 +1,232 @@ +""" +Ported to pytorch thanks to [tstandley](https://github.com/tstandley/Xception-PyTorch) + +@author: tstandley +Adapted by cadene + +Creates an Xception Model as defined in: + +Francois Chollet +Xception: Deep Learning with Depthwise Separable Convolutions +https://arxiv.org/pdf/1610.02357.pdf + +This weights ported from the Keras implementation. 
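+
+A minimal usage sketch (illustrative only; it assumes the `xception` name registered
+at the bottom of this file and the usual timm factory):
+
+    import timm, torch
+    model = timm.create_model('xception', pretrained=False).eval()
+    with torch.no_grad():
+        logits = model(torch.randn(1, 3, 299, 299))   # 299x299 inputs, see note below
+    # logits.shape -> torch.Size([1, 1000])
+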
Achieves the following performance on the validation set: + +Loss:0.9173 Prec@1:78.892 Prec@5:94.292 + +REMEMBER to set your image size to 3x299x299 for both test and validation + +normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], + std=[0.5, 0.5, 0.5]) + +The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 +""" + +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['Xception'] + +default_cfgs = { + 'xception': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth', + 'input_size': (3, 299, 299), + 'pool_size': (10, 10), + 'crop_pct': 0.8975, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv1', + 'classifier': 'fc' + # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 + } +} + + +class SeparableConv2d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1): + super(SeparableConv2d, self).__init__() + + self.conv1 = nn.Conv2d( + in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=False) + self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=False) + + def forward(self, x): + x = self.conv1(x) + x = self.pointwise(x) + return x + + +class Block(nn.Module): + def __init__(self, in_channels, out_channels, reps, strides=1, start_with_relu=True, grow_first=True): + super(Block, self).__init__() + + if out_channels != in_channels or strides != 1: + self.skip = nn.Conv2d(in_channels, out_channels, 1, stride=strides, bias=False) + self.skipbn = nn.BatchNorm2d(out_channels) + else: + self.skip = None + + rep = [] + for i in range(reps): + if grow_first: + inc = in_channels if i == 0 else out_channels + outc = out_channels + else: + inc = in_channels + outc = in_channels if i < (reps - 1) else out_channels + rep.append(nn.ReLU(inplace=True)) + rep.append(SeparableConv2d(inc, outc, 3, stride=1, padding=1)) + rep.append(nn.BatchNorm2d(outc)) + + if not start_with_relu: + rep = rep[1:] + else: + rep[0] = nn.ReLU(inplace=False) + + if strides != 1: + rep.append(nn.MaxPool2d(3, strides, 1)) + self.rep = nn.Sequential(*rep) + + def forward(self, inp): + x = self.rep(inp) + + if self.skip is not None: + skip = self.skip(inp) + skip = self.skipbn(skip) + else: + skip = inp + + x += skip + return x + + +class Xception(nn.Module): + """ + Xception optimized for the ImageNet dataset, as specified in + https://arxiv.org/pdf/1610.02357.pdf + """ + + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg'): + """ Constructor + Args: + num_classes: number of classes + """ + super(Xception, self).__init__() + self.drop_rate = drop_rate + self.global_pool = global_pool + self.num_classes = num_classes + self.num_features = 2048 + + self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False) + self.bn1 = nn.BatchNorm2d(32) + self.act1 = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32, 64, 3, bias=False) + self.bn2 = nn.BatchNorm2d(64) + self.act2 = nn.ReLU(inplace=True) + + self.block1 = Block(64, 128, 2, 2, start_with_relu=False) + self.block2 = Block(128, 256, 2, 2) + self.block3 = Block(256, 728, 2, 2) + + self.block4 = Block(728, 728, 3, 1) + self.block5 = Block(728, 728, 3, 1) + self.block6 = 
Block(728, 728, 3, 1) + self.block7 = Block(728, 728, 3, 1) + + self.block8 = Block(728, 728, 3, 1) + self.block9 = Block(728, 728, 3, 1) + self.block10 = Block(728, 728, 3, 1) + self.block11 = Block(728, 728, 3, 1) + + self.block12 = Block(728, 1024, 2, 2, grow_first=False) + + self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1) + self.bn3 = nn.BatchNorm2d(1536) + self.act3 = nn.ReLU(inplace=True) + + self.conv4 = SeparableConv2d(1536, self.num_features, 3, 1, 1) + self.bn4 = nn.BatchNorm2d(self.num_features) + self.act4 = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='act2'), + dict(num_chs=128, reduction=4, module='block2.rep.0'), + dict(num_chs=256, reduction=8, module='block3.rep.0'), + dict(num_chs=728, reduction=16, module='block12.rep.0'), + dict(num_chs=2048, reduction=32, module='act4'), + ] + + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + # #------- init weights -------- + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + + x = self.block1(x) + x = self.block2(x) + x = self.block3(x) + x = self.block4(x) + x = self.block5(x) + x = self.block6(x) + x = self.block7(x) + x = self.block8(x) + x = self.block9(x) + x = self.block10(x) + x = self.block11(x) + x = self.block12(x) + + x = self.conv3(x) + x = self.bn3(x) + x = self.act3(x) + + x = self.conv4(x) + x = self.bn4(x) + x = self.act4(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate: + F.dropout(x, self.drop_rate, training=self.training) + x = self.fc(x) + return x + + +def _xception(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + Xception, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_cls='hook'), + **kwargs) + + +@register_model +def xception(pretrained=False, **kwargs): + return _xception('xception', pretrained=pretrained, **kwargs) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/xception_aligned.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/xception_aligned.py new file mode 100644 index 0000000000..ea7f5c05e0 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/xception_aligned.py @@ -0,0 +1,238 @@ +"""Pytorch impl of Aligned Xception 41, 65, 71 + +This is a correct, from scratch impl of Aligned Xception (Deeplab) models compatible with TF weights at +https://github.com/tensorflow/models/blob/master/research/deeplab/g3doc/model_zoo.md + +Hacked together by / Copyright 2020 Ross Wightman +""" +from functools import partial + +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, ConvBnAct, create_conv2d +from .layers.helpers import to_3tuple +from .registry import register_model + +__all__ = ['XceptionAligned'] + + +def 
_cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (10, 10), + 'crop_pct': 0.903, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + xception41=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_41-e6439c97.pth'), + xception65=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_65-c9ae96e8.pth'), + xception71=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_71-8eec7df1.pth'), +) + + +class SeparableConv2d(nn.Module): + def __init__( + self, inplanes, planes, kernel_size=3, stride=1, dilation=1, padding='', + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + super(SeparableConv2d, self).__init__() + self.kernel_size = kernel_size + self.dilation = dilation + + # depthwise convolution + self.conv_dw = create_conv2d( + inplanes, inplanes, kernel_size, stride=stride, + padding=padding, dilation=dilation, depthwise=True) + self.bn_dw = norm_layer(inplanes) + if act_layer is not None: + self.act_dw = act_layer(inplace=True) + else: + self.act_dw = None + + # pointwise convolution + self.conv_pw = create_conv2d(inplanes, planes, kernel_size=1) + self.bn_pw = norm_layer(planes) + if act_layer is not None: + self.act_pw = act_layer(inplace=True) + else: + self.act_pw = None + + def forward(self, x): + x = self.conv_dw(x) + x = self.bn_dw(x) + if self.act_dw is not None: + x = self.act_dw(x) + x = self.conv_pw(x) + x = self.bn_pw(x) + if self.act_pw is not None: + x = self.act_pw(x) + return x + + +class XceptionModule(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, pad_type='', + start_with_relu=True, no_skip=False, act_layer=nn.ReLU, norm_layer=None): + super(XceptionModule, self).__init__() + out_chs = to_3tuple(out_chs) + self.in_channels = in_chs + self.out_channels = out_chs[-1] + self.no_skip = no_skip + if not no_skip and (self.out_channels != self.in_channels or stride != 1): + self.shortcut = ConvBnAct( + in_chs, self.out_channels, 1, stride=stride, norm_layer=norm_layer, act_layer=None) + else: + self.shortcut = None + + separable_act_layer = None if start_with_relu else act_layer + self.stack = nn.Sequential() + for i in range(3): + if start_with_relu: + self.stack.add_module(f'act{i + 1}', nn.ReLU(inplace=i > 0)) + self.stack.add_module(f'conv{i + 1}', SeparableConv2d( + in_chs, out_chs[i], 3, stride=stride if i == 2 else 1, dilation=dilation, padding=pad_type, + act_layer=separable_act_layer, norm_layer=norm_layer)) + in_chs = out_chs[i] + + def forward(self, x): + skip = x + x = self.stack(x) + if self.shortcut is not None: + skip = self.shortcut(skip) + if not self.no_skip: + x = x + skip + return x + + +class XceptionAligned(nn.Module): + """Modified Aligned Xception + """ + + def __init__(self, block_cfg, num_classes=1000, in_chans=3, output_stride=32, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_rate=0., global_pool='avg'): + super(XceptionAligned, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + + layer_args = dict(act_layer=act_layer, norm_layer=norm_layer) + self.stem = nn.Sequential(*[ + ConvBnAct(in_chans, 32, kernel_size=3, stride=2, **layer_args), + ConvBnAct(32, 64, kernel_size=3, stride=1, **layer_args) + 
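+ # Stem: a stride-2 conv followed by a stride-1 conv, so overall reduction is 2x
+ # before the Xception blocks (curr_stride is initialised to 2 just below).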
]) + + curr_dilation = 1 + curr_stride = 2 + self.feature_info = [] + self.blocks = nn.Sequential() + for i, b in enumerate(block_cfg): + b['dilation'] = curr_dilation + if b['stride'] > 1: + self.feature_info += [dict( + num_chs=to_3tuple(b['out_chs'])[-2], reduction=curr_stride, module=f'blocks.{i}.stack.act3')] + next_stride = curr_stride * b['stride'] + if next_stride > output_stride: + curr_dilation *= b['stride'] + b['stride'] = 1 + else: + curr_stride = next_stride + self.blocks.add_module(str(i), XceptionModule(**b, **layer_args)) + self.num_features = self.blocks[-1].out_channels + + self.feature_info += [dict( + num_chs=self.num_features, reduction=curr_stride, module='blocks.' + str(len(self.blocks) - 1))] + + self.head = ClassifierHead( + in_chs=self.num_features, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.blocks(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _xception(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + XceptionAligned, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True, feature_cls='hook'), + **kwargs) + + +@register_model +def xception41(pretrained=False, **kwargs): + """ Modified Aligned Xception-41 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 8), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception41', pretrained=pretrained, **model_args) + + +@register_model +def xception65(pretrained=False, **kwargs): + """ Modified Aligned Xception-65 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 16), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception65', pretrained=pretrained, **model_args) + + +@register_model +def xception71(pretrained=False, **kwargs): + """ Modified Aligned Xception-71 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=1), + dict(in_chs=256, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=1), + dict(in_chs=728, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 16), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, 
momentum=.1), **kwargs) + return _xception('xception71', pretrained=pretrained, **model_args) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/models/xcit.py b/PyTorch/contrib/cv/classification/convmixer/timm/models/xcit.py new file mode 100644 index 0000000000..b7af3b262b --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/models/xcit.py @@ -0,0 +1,810 @@ +""" Cross-Covariance Image Transformer (XCiT) in PyTorch + +Same as the official implementation, with some minor adaptations. + - https://github.com/facebookresearch/xcit/blob/master/xcit.py + +Paper: + - https://arxiv.org/abs/2106.09681 +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. + +import math +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .vision_transformer import _cfg, Mlp +from .registry import register_model +from .layers import DropPath, trunc_normal_, to_2tuple +from .cait import ClassAttn + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # Patch size 16 + 'xcit_nano_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224.pth'), + 'xcit_nano_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224_dist.pth'), + 'xcit_nano_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224.pth'), + 'xcit_tiny_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224_dist.pth'), + 'xcit_tiny_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224.pth'), + 'xcit_tiny_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224_dist.pth'), + 'xcit_tiny_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224.pth'), + 'xcit_small_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224_dist.pth'), + 'xcit_small_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224.pth'), + 'xcit_small_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224_dist.pth'), + 'xcit_small_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_medium_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224.pth'), + 'xcit_medium_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224_dist.pth'), + 'xcit_medium_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_384_dist.pth', input_size=(3, 
384, 384)), + 'xcit_large_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224.pth'), + 'xcit_large_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224_dist.pth'), + 'xcit_large_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_384_dist.pth', input_size=(3, 384, 384)), + + # Patch size 8 + 'xcit_nano_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224.pth'), + 'xcit_nano_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224_dist.pth'), + 'xcit_nano_12_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224.pth'), + 'xcit_tiny_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224_dist.pth'), + 'xcit_tiny_12_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224.pth'), + 'xcit_tiny_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224_dist.pth'), + 'xcit_tiny_24_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224.pth'), + 'xcit_small_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224_dist.pth'), + 'xcit_small_12_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224.pth'), + 'xcit_small_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224_dist.pth'), + 'xcit_small_24_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_medium_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224.pth'), + 'xcit_medium_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224_dist.pth'), + 'xcit_medium_24_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_large_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224.pth'), + 'xcit_large_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224_dist.pth'), + 'xcit_large_24_p8_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_384_dist.pth', input_size=(3, 384, 384)), +} + + +class PositionalEncodingFourier(nn.Module): + """ + Positional encoding relying on a fourier kernel matching the one used in the "Attention is all of Need" paper. 
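+
+    Roughly: row/column indices are normalised to [0, 2*pi], expanded with sin/cos at
+    `hidden_dim` frequencies per axis, concatenated into a 2*hidden_dim channel map and
+    projected to `dim` channels by a 1x1 convolution, giving a (B, dim, H, W) tensor.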
+ Based on the official XCiT code + - https://github.com/facebookresearch/xcit/blob/master/xcit.py + """ + + def __init__(self, hidden_dim=32, dim=768, temperature=10000): + super().__init__() + self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1) + self.scale = 2 * math.pi + self.temperature = temperature + self.hidden_dim = hidden_dim + self.dim = dim + self.eps = 1e-6 + + def forward(self, B: int, H: int, W: int): + device = self.token_projection.weight.device + y_embed = torch.arange(1, H+1, dtype=torch.float32, device=device).unsqueeze(1).repeat(1, 1, W) + x_embed = torch.arange(1, W+1, dtype=torch.float32, device=device).repeat(1, H, 1) + y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale + dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=device) + dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim) + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3) + pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + pos = self.token_projection(pos) + return pos.repeat(B, 1, 1, 1) # (B, C, H, W) + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution + batch norm""" + return torch.nn.Sequential( + nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False), + nn.BatchNorm2d(out_planes) + ) + + +class ConvPatchEmbed(nn.Module): + """Image to Patch Embedding using multiple convolutional layers""" + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU): + super().__init__() + img_size = to_2tuple(img_size) + num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + if patch_size == 16: + self.proj = torch.nn.Sequential( + conv3x3(in_chans, embed_dim // 8, 2), + act_layer(), + conv3x3(embed_dim // 8, embed_dim // 4, 2), + act_layer(), + conv3x3(embed_dim // 4, embed_dim // 2, 2), + act_layer(), + conv3x3(embed_dim // 2, embed_dim, 2), + ) + elif patch_size == 8: + self.proj = torch.nn.Sequential( + conv3x3(in_chans, embed_dim // 4, 2), + act_layer(), + conv3x3(embed_dim // 4, embed_dim // 2, 2), + act_layer(), + conv3x3(embed_dim // 2, embed_dim, 2), + ) + else: + raise('For convolutional projection, patch size has to be in [8, 16]') + + def forward(self, x): + x = self.proj(x) + Hp, Wp = x.shape[2], x.shape[3] + x = x.flatten(2).transpose(1, 2) # (B, N, C) + return x, (Hp, Wp) + + +class LPI(nn.Module): + """ + Local Patch Interaction module that allows explicit communication between tokens in 3x3 windows to augment the + implicit communication performed by the block diagonal scatter attention. 
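+    In shapes: the (B, N, C) token sequence is reshaped to a (B, C, H, W) map,
+    filtered depthwise, and flattened back to (B, N, C), so tokens must follow the
+    H*W raster order produced by the patch embed.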
Implemented using 2 layers of separable + 3x3 convolutions with GeLU and BatchNorm2d + """ + + def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3): + super().__init__() + out_features = out_features or in_features + + padding = kernel_size // 2 + + self.conv1 = torch.nn.Conv2d( + in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features) + self.act = act_layer() + self.bn = nn.BatchNorm2d(in_features) + self.conv2 = torch.nn.Conv2d( + in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features) + + def forward(self, x, H: int, W: int): + B, N, C = x.shape + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.conv1(x) + x = self.act(x) + x = self.bn(x) + x = self.conv2(x) + x = x.reshape(B, C, N).permute(0, 2, 1) + return x + + +class ClassAttentionBlock(nn.Module): + """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239""" + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., tokens_norm=False): + super().__init__() + self.norm1 = norm_layer(dim) + + self.attn = ClassAttn( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + + if eta is not None: # LayerScale Initialization (no layerscale when None) + self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + else: + self.gamma1, self.gamma2 = 1.0, 1.0 + + # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 + self.tokens_norm = tokens_norm + + def forward(self, x): + x_norm1 = self.norm1(x) + x_attn = torch.cat([self.attn(x_norm1), x_norm1[:, 1:]], dim=1) + x = x + self.drop_path(self.gamma1 * x_attn) + if self.tokens_norm: + x = self.norm2(x) + else: + x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1) + x_res = x + cls_token = x[:, 0:1] + cls_token = self.gamma2 * self.mlp(cls_token) + x = torch.cat([cls_token, x[:, 1:]], dim=1) + x = x_res + self.drop_path(x) + return x + + +class XCA(nn.Module): + """ Cross-Covariance Attention (XCA) + Operation where the channels are updated using a weighted sum. 
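+    Each head therefore mixes its d_h per-head channels rather than its N tokens,
+    which keeps the attention cost linear in the sequence length N.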
The weights are obtained from the (softmax + normalized) Cross-covariance matrix (Q^T \\cdot K \\in d_h \\times d_h) + """ + + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + # Result of next line is (qkv, B, num (H)eads, (C')hannels per head, N) + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 4, 1) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + # Paper section 3.2 l2-Normalization and temperature scaling + q = torch.nn.functional.normalize(q, dim=-1) + k = torch.nn.functional.normalize(k, dim=-1) + attn = (q @ k.transpose(-2, -1)) * self.temperature + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + # (B, H, C', N), permute -> (B, N, H, C') + x = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @torch.jit.ignore + def no_weight_decay(self): + return {'temperature'} + + +class XCABlock(nn.Module): + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1.): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm3 = norm_layer(dim) + self.local_mp = LPI(in_features=dim, act_layer=act_layer) + + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + + self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + self.gamma3 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + + def forward(self, x, H: int, W: int): + x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x))) + # NOTE official code has 3 then 2, so keeping it the same to be consistent with loaded weights + # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 + x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W)) + x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x))) + return x + + +class XCiT(nn.Module): + """ + Based on timm and DeiT code bases + https://github.com/rwightman/pytorch-image-models/tree/master/timm + https://github.com/facebookresearch/deit/ + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=False): + """ + Args: + img_size (int, tuple): input image size + patch_size (int): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + drop_rate (float): dropout 
rate after positional embedding, and in XCA/CA projection + MLP + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate (constant across all layers) + norm_layer: (nn.Module): normalization layer + cls_attn_layers: (int) Depth of Class attention layers + use_pos_embed: (bool) whether to use positional encoding + eta: (float) layerscale initialization value + tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA + + Notes: + - Although `layer_norm` is user specifiable, there are hard-coded `BatchNorm2d`s in the local patch + interaction (class LPI) and the patch embedding (class ConvPatchEmbed) + """ + super().__init__() + img_size = to_2tuple(img_size) + assert (img_size[0] % patch_size == 0) and (img_size[0] % patch_size == 0), \ + '`patch_size` should divide image dimensions evenly' + + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + + self.patch_embed = ConvPatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer) + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.use_pos_embed = use_pos_embed + if use_pos_embed: + self.pos_embed = PositionalEncodingFourier(dim=embed_dim) + self.pos_drop = nn.Dropout(p=drop_rate) + + self.blocks = nn.ModuleList([ + XCABlock( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, + attn_drop=attn_drop_rate, drop_path=drop_path_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta) + for _ in range(depth)]) + + self.cls_attn_blocks = nn.ModuleList([ + ClassAttentionBlock( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, + attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, tokens_norm=tokens_norm) + for _ in range(cls_attn_layers)]) + + # Classifier head + self.norm = norm_layer(embed_dim) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + # Init weights + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + # x is (B, N, C). 
(Hp, Hw) is (height in units of patches, width in units of patches) + x, (Hp, Wp) = self.patch_embed(x) + + if self.use_pos_embed: + # `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C) + pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1) + x = x + pos_encoding + + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x, Hp, Wp) + + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + for blk in self.cls_attn_blocks: + x = blk(x) + + x = self.norm(x)[:, 0] + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + if 'model' in state_dict: + state_dict = state_dict['model'] + # For consistency with timm's transformer models while being compatible with official weights source we rename + # pos_embeder to pos_embed. Also account for use_pos_embed == False + use_pos_embed = getattr(model, 'pos_embed', None) is not None + pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')] + for k in pos_embed_keys: + if use_pos_embed: + state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k) + else: + del state_dict[k] + # timm's implementation of class attention in CaiT is slightly more efficient as it does not compute query vectors + # for all tokens, just the class token. To use official weights source we must split qkv into q, k, v + if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict(): + num_ca_blocks = len(model.cls_attn_blocks) + for i in range(num_ca_blocks): + qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight') + qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1]) + for j, subscript in enumerate('qkv'): + state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j] + qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None) + if qkv_bias is not None: + qkv_bias = qkv_bias.reshape(3, -1) + for j, subscript in enumerate('qkv'): + state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j] + return state_dict + + +def _create_xcit(variant, pretrained=False, default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + model = build_model_with_cfg( + XCiT, variant, pretrained, default_cfg=default_cfg, pretrained_filter_fn=checkpoint_filter_fn, **kwargs) + return model + + +@register_model +def xcit_nano_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, img_size=384, **kwargs) + model = _create_xcit('xcit_nano_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, 
**kwargs) + model = _create_xcit('xcit_tiny_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, 
depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +# Patch size 8x8 models +@register_model +def xcit_nano_12_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_224(pretrained=False, 
**kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def 
xcit_large_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/__init__.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/__init__.py new file mode 100644 index 0000000000..7ee4958eb5 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/__init__.py @@ -0,0 +1,15 @@ +from .adabelief import AdaBelief +from .adafactor import Adafactor +from .adahessian import Adahessian +from .adamp import AdamP +from .adamw import AdamW +from .lamb import Lamb +from .lars import Lars +from .lookahead import Lookahead +from .madgrad import MADGRAD +from .nadam import Nadam +from .nvnovograd import NvNovoGrad +from .radam import RAdam +from .rmsprop_tf import RMSpropTF +from .sgdp import SGDP +from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/adabelief.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/adabelief.py new file mode 100644 index 0000000000..951d715cc0 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/adabelief.py @@ -0,0 +1,201 @@ +import math +import torch +from torch.optim.optimizer import Optimizer + + +class AdaBelief(Optimizer): + r"""Implements AdaBelief algorithm. Modified from Adam in PyTorch + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-16) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + decoupled_decay (boolean, optional): (default: True) If set as True, then + the optimizer uses decoupled weight decay as in AdamW + fixed_decay (boolean, optional): (default: False) This is used when weight_decouple + is set as True. + When fixed_decay == True, the weight decay is performed as + $W_{new} = W_{old} - W_{old} \times decay$. + When fixed_decay == False, the weight decay is performed as + $W_{new} = W_{old} - W_{old} \times decay \times lr$. Note that in this case, the + weight decay ratio decreases with learning rate (lr). 
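# --- Editor's sketch (illustrative, not from the original patch) ------------
# The AdaBelief docstring above describes two decoupled weight-decay modes.
# A tiny numeric illustration of the difference, on a made-up 1-element weight:
import torch

w = torch.tensor([1.0])
lr, decay = 1e-3, 1e-2
w_fixed = w * (1.0 - decay)         # fixed_decay=True:  W_new = W_old - W_old * decay
w_scaled = w * (1.0 - lr * decay)   # fixed_decay=False: W_new = W_old - W_old * decay * lr
print(w_fixed.item(), w_scaled.item())  # 0.99 vs. ~0.99999; the scaled form shrinks with lr
# ---------------------------------------------------------------------------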
+ rectify (boolean, optional): (default: True) If set as True, then perform the rectified + update similar to RAdam + degenerated_to_sgd (boolean, optional) (default:True) If set as True, then perform SGD update + when variance of gradient is high + reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients, NeurIPS 2020 + + For a complete table of recommended hyperparameters, see https://github.com/juntang-zhuang/Adabelief-Optimizer' + For example train/args for EfficientNet see these gists + - link to train_scipt: https://gist.github.com/juntang-zhuang/0a501dd51c02278d952cf159bc233037 + - link to args.yaml: https://gist.github.com/juntang-zhuang/517ce3c27022b908bb93f78e4f786dc3 + """ + + def __init__( + self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16, weight_decay=0, amsgrad=False, + decoupled_decay=True, fixed_decay=False, rectify=True, degenerated_to_sgd=True): + + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + + if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict): + for param in params: + if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]): + param['buffer'] = [[None, None, None] for _ in range(10)] + + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, + degenerated_to_sgd=degenerated_to_sgd, decoupled_decay=decoupled_decay, rectify=rectify, + fixed_decay=fixed_decay, buffer=[[None, None, None] for _ in range(10)]) + super(AdaBelief, self).__init__(params, defaults) + + def __setstate__(self, state): + super(AdaBelief, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def reset(self): + for group in self.param_groups: + for p in group['params']: + state = self.state[p] + amsgrad = group['amsgrad'] + + # State initialization + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + + # Exponential moving average of squared gradient values + state['exp_avg_var'] = torch.zeros_like(p) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_var'] = torch.zeros_like(p) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError( + 'AdaBelief does not support sparse gradients, please consider SparseAdam instead') + + p_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_fp32 = p_fp32.float() + + amsgrad = group['amsgrad'] + beta1, beta2 = group['betas'] + state = self.state[p] + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p_fp32) + # Exponential moving average of squared gradient values + state['exp_avg_var'] = torch.zeros_like(p_fp32) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_var'] = torch.zeros_like(p_fp32) + + # perform weight decay, check if decoupled weight decay + if group['decoupled_decay']: + if not group['fixed_decay']: + p_fp32.mul_(1.0 - group['lr'] * group['weight_decay']) + else: + p_fp32.mul_(1.0 - group['weight_decay']) + else: + if group['weight_decay'] != 0: + grad.add_(p_fp32, alpha=group['weight_decay']) + + # get current state variable + exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + # Update first and second moment running average + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + grad_residual = grad - exp_avg + exp_avg_var.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2) + + if amsgrad: + max_exp_avg_var = state['max_exp_avg_var'] + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var) + + # Use the max. for normalizing running avg. 
of gradient + denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + else: + denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + + # update + if not group['rectify']: + # Default update + step_size = group['lr'] / bias_correction1 + p_fp32.addcdiv_(exp_avg, denom, value=-step_size) + else: + # Rectified update, forked from RAdam + buffered = group['buffer'][int(state['step'] % 10)] + if state['step'] == buffered[0]: + num_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + num_sma_max = 2 / (1 - beta2) - 1 + num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = num_sma + + # more conservative since it's an approximated value + if num_sma >= 5: + step_size = math.sqrt( + (1 - beta2_t) * + (num_sma - 4) / (num_sma_max - 4) * + (num_sma - 2) / num_sma * + num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) + elif group['degenerated_to_sgd']: + step_size = 1.0 / (1 - beta1 ** state['step']) + else: + step_size = -1 + buffered[2] = step_size + + if num_sma >= 5: + denom = exp_avg_var.sqrt().add_(group['eps']) + p_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr']) + elif step_size > 0: + p_fp32.add_(exp_avg, alpha=-step_size * group['lr']) + + if p.dtype in {torch.float16, torch.bfloat16}: + p.copy_(p_fp32) + + return loss diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/adafactor.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/adafactor.py new file mode 100644 index 0000000000..06057433a9 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/adafactor.py @@ -0,0 +1,167 @@ +""" Adafactor Optimizer + +Lifted from https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py + +Original header/copyright below. + +""" +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +import torch +import math + + +class Adafactor(torch.optim.Optimizer): + """Implements Adafactor algorithm. + This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` + (see https://arxiv.org/abs/1804.04235) + + Note that this optimizer internally adjusts the learning rate depending on the + *scale_parameter*, *relative_step* and *warmup_init* options. + + To use a manual (external) learning rate schedule you should set `scale_parameter=False` and + `relative_step=False`. 
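# --- Editor's sketch (illustrative, not from the original patch) ------------
# How the lr / relative_step interaction described above plays out with this
# constructor, where `relative_step = not lr`. The Linear model is a made-up
# stand-in, and the import assumes the timm package layout added by this patch.
import torch.nn as nn
from timm.optim import Adafactor

model = nn.Linear(16, 4)
# lr=None -> relative_step=True: the step size is derived from the step count.
opt_auto = Adafactor(model.parameters(), lr=None)
# An explicit lr disables relative_step; pair it with scale_parameter=False
# when driving the schedule entirely from outside, as the docstring suggests.
opt_manual = Adafactor(model.parameters(), lr=1e-3, scale_parameter=False)
# ---------------------------------------------------------------------------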
+ + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups + lr (float, optional): external learning rate (default: None) + eps (tuple[float, float]): regularization constants for square gradient + and parameter scale respectively (default: (1e-30, 1e-3)) + clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0) + decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8) + beta1 (float): coefficient used for computing running averages of gradient (default: None) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True) + warmup_init (bool): time-dependent learning rate computation depends on + whether warm-up initialization is being used (default: False) + """ + + def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0, + decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False): + relative_step = not lr + if warmup_init and not relative_step: + raise ValueError('warmup_init requires relative_step=True') + + beta1 = None if betas is None else betas[0] # make it compat with standard betas arg + defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate, + beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, + relative_step=relative_step, warmup_init=warmup_init) + super(Adafactor, self).__init__(params, defaults) + + @staticmethod + def _get_lr(param_group, param_state): + if param_group['relative_step']: + min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2 + lr_t = min(min_step, 1.0 / math.sqrt(param_state['step'])) + param_scale = 1.0 + if param_group['scale_parameter']: + param_scale = max(param_group['eps_scale'], param_state['RMS']) + param_group['lr'] = lr_t * param_scale + return param_group['lr'] + + @staticmethod + def _get_options(param_group, param_shape): + factored = len(param_shape) >= 2 + use_first_moment = param_group['beta1'] is not None + return factored, use_first_moment + + @staticmethod + def _rms(tensor): + return tensor.norm(2) / (tensor.numel() ** 0.5) + + def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col): + r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) + c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() + return torch.mul(r_factor, c_factor) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError('Adafactor does not support sparse gradients.') + + state = self.state[p] + + factored, use_first_moment = self._get_options(group, grad.shape) + # State Initialization + if len(state) == 0: + state['step'] = 0 + + if use_first_moment: + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(grad) + if factored: + state['exp_avg_sq_row'] = torch.zeros(grad.shape[:-1]).to(grad) + state['exp_avg_sq_col'] = torch.zeros(grad.shape[:-2] + grad.shape[-1:]).to(grad) + else: + state['exp_avg_sq'] = torch.zeros_like(grad) + + state['RMS'] = 0 + else: + if use_first_moment: + state['exp_avg'] = state['exp_avg'].to(grad) + if factored: + state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad) + state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad) + else: + state['exp_avg_sq'] = state['exp_avg_sq'].to(grad) + + p_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_fp32 = p_fp32.float() + + state['step'] += 1 + state['RMS'] = self._rms(p_fp32) + lr_t = self._get_lr(group, state) + + beta2t = 1.0 - math.pow(state['step'], group['decay_rate']) + update = grad ** 2 + group['eps'] + if factored: + exp_avg_sq_row = state['exp_avg_sq_row'] + exp_avg_sq_col = state['exp_avg_sq_col'] + + exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t) + exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t) + + # Approximation of exponential moving average of square of gradient + update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update.mul_(grad) + else: + exp_avg_sq = state['exp_avg_sq'] + + exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) + update = exp_avg_sq.rsqrt().mul_(grad) + + update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0)) + update.mul_(lr_t) + + if use_first_moment: + exp_avg = state['exp_avg'] + exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) + update = exp_avg + + if group['weight_decay'] != 0: + p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * lr_t) + + p_fp32.add_(-update) + if p.dtype in {torch.float16, torch.bfloat16}: + p.copy_(p_fp32) + + return loss diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/adahessian.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/adahessian.py new file mode 100644 index 0000000000..985c67ca68 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/adahessian.py @@ -0,0 +1,156 @@ +""" AdaHessian Optimizer + +Lifted from https://github.com/davda54/ada-hessian/blob/master/ada_hessian.py +Originally licensed MIT, Copyright 2020, David Samuel +""" +import torch + + +class Adahessian(torch.optim.Optimizer): + """ + Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning" + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups + lr (float, optional): learning rate (default: 0.1) + betas ((float, float), optional): coefficients used for computing running averages of gradient and the + squared hessian trace (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) + weight_decay (float, optional): 
weight decay (L2 penalty) (default: 0.0) + hessian_power (float, optional): exponent of the hessian trace (default: 1.0) + update_each (int, optional): compute the hessian trace approximation only after *this* number of steps + (to save time) (default: 1) + n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1) + """ + + def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, + hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= hessian_power <= 1.0: + raise ValueError(f"Invalid Hessian power value: {hessian_power}") + + self.n_samples = n_samples + self.update_each = update_each + self.avg_conv_kernel = avg_conv_kernel + + # use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training + self.seed = 2147483647 + self.generator = torch.Generator().manual_seed(self.seed) + + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power) + super(Adahessian, self).__init__(params, defaults) + + for p in self.get_params(): + p.hess = 0.0 + self.state[p]["hessian step"] = 0 + + @property + def is_second_order(self): + return True + + def get_params(self): + """ + Gets all parameters in all param_groups with gradients + """ + + return (p for group in self.param_groups for p in group['params'] if p.requires_grad) + + def zero_hessian(self): + """ + Zeros out the accumalated hessian traces. + """ + + for p in self.get_params(): + if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0: + p.hess.zero_() + + @torch.no_grad() + def set_hessian(self): + """ + Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter. + """ + + params = [] + for p in filter(lambda p: p.grad is not None, self.get_params()): + if self.state[p]["hessian step"] % self.update_each == 0: # compute the trace only each `update_each` step + params.append(p) + self.state[p]["hessian step"] += 1 + + if len(params) == 0: + return + + if self.generator.device != params[0].device: # hackish way of casting the generator to the right device + self.generator = torch.Generator(params[0].device).manual_seed(self.seed) + + grads = [p.grad for p in params] + + for i in range(self.n_samples): + # Rademacher distribution {-1.0, 1.0} + zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params] + h_zs = torch.autograd.grad( + grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1) + for h_z, z, p in zip(h_zs, zs, params): + p.hess += h_z * z / self.n_samples # approximate the expected values of z*(H@z) + + @torch.no_grad() + def step(self, closure=None): + """ + Performs a single optimization step. 
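# --- Editor's sketch (illustrative, not from the original patch) ------------
# Minimal Adahessian training step. Because set_hessian() above differentiates
# p.grad again (Hutchinson Hessian-vector products), the backward pass must be
# run with create_graph=True. Model and data here are toy stand-ins.
import torch
import torch.nn as nn
from timm.optim import Adahessian

model = nn.Linear(8, 2)
opt = Adahessian(model.parameters(), lr=0.1)
x, y = torch.randn(4, 8), torch.randn(4, 2)
loss = nn.functional.mse_loss(model(x), y)
loss.backward(create_graph=True)  # keeps the graph needed for Hessian-vector products
opt.step()
opt.zero_grad()
# ---------------------------------------------------------------------------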
+ Arguments: + closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None) + """ + + loss = None + if closure is not None: + loss = closure() + + self.zero_hessian() + self.set_hessian() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None or p.hess is None: + continue + + if self.avg_conv_kernel and p.dim() == 4: + p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone() + + # Perform correct stepweight decay as in AdamW + p.mul_(1 - group['lr'] * group['weight_decay']) + + state = self.state[p] + + # State initialization + if len(state) == 1: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of Hessian diagonal square values + state['exp_hessian_diag_sq'] = torch.zeros_like(p) + + exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq'] + beta1, beta2 = group['betas'] + state['step'] += 1 + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1) + exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2) + + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + k = group['hessian_power'] + denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps']) + + # make update + step_size = group['lr'] / bias_correction1 + p.addcdiv_(exp_avg, denom, value=-step_size) + + return loss diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/adamp.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/adamp.py new file mode 100644 index 0000000000..ee187633ab --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/adamp.py @@ -0,0 +1,105 @@ +""" +AdamP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/adamp.py + +Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 +Code: https://github.com/clovaai/AdamP + +Copyright (c) 2020-present NAVER Corp. +MIT license +""" + +import torch +import torch.nn.functional as F +from torch.optim.optimizer import Optimizer +import math + + +def _channel_view(x) -> torch.Tensor: + return x.reshape(x.size(0), -1) + + +def _layer_view(x) -> torch.Tensor: + return x.reshape(1, -1) + + +def projection(p, grad, perturb, delta: float, wd_ratio: float, eps: float): + wd = 1. 
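# --- Editor's sketch (illustrative, not from the original patch) ------------
# The scale-invariance test used by projection() in the AdamP code below,
# spelled out on made-up tensors: when every channel's |cos(grad, weight)| is
# below delta/sqrt(dim), the radial component is projected out of the update
# and weight decay is scaled down by wd_ratio.
import math
import torch
import torch.nn.functional as F

p = torch.randn(8, 16)   # hypothetical weight, one row per output channel
g = torch.randn(8, 16)
delta = 0.1
cos = F.cosine_similarity(g.reshape(g.size(0), -1), p.reshape(p.size(0), -1), dim=1).abs()
apply_projection = cos.max() < delta / math.sqrt(p.reshape(p.size(0), -1).size(1))
# ---------------------------------------------------------------------------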
+ expand_size = (-1,) + (1,) * (len(p.shape) - 1) + for view_func in [_channel_view, _layer_view]: + param_view = view_func(p) + grad_view = view_func(grad) + cosine_sim = F.cosine_similarity(grad_view, param_view, dim=1, eps=eps).abs_() + + # FIXME this is a problem for PyTorch XLA + if cosine_sim.max() < delta / math.sqrt(param_view.size(1)): + p_n = p / param_view.norm(p=2, dim=1).add_(eps).reshape(expand_size) + perturb -= p_n * view_func(p_n * perturb).sum(dim=1).reshape(expand_size) + wd = wd_ratio + return perturb, wd + + return perturb, wd + + +class AdamP(Optimizer): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False): + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, + delta=delta, wd_ratio=wd_ratio, nesterov=nesterov) + super(AdamP, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + + grad = p.grad + beta1, beta2 = group['betas'] + nesterov = group['nesterov'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + + # Adam + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + step_size = group['lr'] / bias_correction1 + + if nesterov: + perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom + else: + perturb = exp_avg / denom + + # Projection + wd_ratio = 1. + if len(p.shape) > 1: + perturb, wd_ratio = projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps']) + + # Weight decay + if group['weight_decay'] > 0: + p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio) + + # Step + p.add_(perturb, alpha=-step_size) + + return loss diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/adamw.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/adamw.py new file mode 100644 index 0000000000..66478bc6ef --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/adamw.py @@ -0,0 +1,122 @@ +""" AdamW Optimizer +Impl copied from PyTorch master + +NOTE: Builtin optim.AdamW is used by the factory, this impl only serves as a Python based reference, will be removed +someday +""" +import math +import torch +from torch.optim.optimizer import Optimizer + + +class AdamW(Optimizer): + r"""Implements AdamW algorithm. + + The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_. + The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_. 
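# --- Editor's sketch (illustrative, not from the original patch) ------------
# Constructing this reference AdamW. As the module docstring above notes, the
# optimizer factory normally uses the built-in torch.optim.AdamW; this class
# mirrors its constructor shape.
import torch.nn as nn
from timm.optim import AdamW

opt = AdamW(nn.Linear(8, 2).parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=1e-2)
# ---------------------------------------------------------------------------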
+ + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay coefficient (default: 1e-2) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=1e-2, amsgrad=False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, amsgrad=amsgrad) + super(AdamW, self).__init__(params, defaults) + + def __setstate__(self, state): + super(AdamW, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + + # Perform stepweight decay + p.data.mul_(1 - group['lr'] * group['weight_decay']) + + # Perform optimization step + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') + amsgrad = group['amsgrad'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + if amsgrad: + max_exp_avg_sq = state['max_exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) + # Use the max. for normalizing running avg. 
of gradient + denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + else: + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + + step_size = group['lr'] / bias_correction1 + + p.addcdiv_(exp_avg, denom, value=-step_size) + + return loss diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/lamb.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/lamb.py new file mode 100644 index 0000000000..12c7c49b8a --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/lamb.py @@ -0,0 +1,192 @@ +""" PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb + +This optimizer code was adapted from the following (starting with latest) +* https://github.com/HabanaAI/Model-References/blob/2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py +* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py +* https://github.com/cybertronai/pytorch-lamb + +Use FusedLamb if you can (GPU). The reason for including this variant of Lamb is to have a version that is +similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or cannot install/use APEX. + +In addition to some cleanup, this Lamb impl has been modified to support PyTorch XLA and has been tested on TPU. + +Original copyrights for above sources are below. + +Modifications Copyright 2021 Ross Wightman +""" +# Copyright (c) 2021, Habana Labs Ltd. All rights reserved. + +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# MIT License +# +# Copyright (c) 2019 cybertronai +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+import math + +import torch +from torch.optim import Optimizer + + +class Lamb(Optimizer): + """Implements a pure pytorch variant of FuseLAMB (NvLamb variant) optimizer from apex.optimizers.FusedLAMB + reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py + + LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its norm. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + grad_averaging (bool, optional): whether apply (1-beta2) to grad when + calculating running averages of gradient. (default: True) + max_grad_norm (float, optional): value used to clip global grad norm (default: 1.0) + trust_clip (bool): enable LAMBC trust ratio clipping (default: False) + always_adapt (boolean, optional): Apply adaptive learning rate to 0.0 + weight decay parameter (default: False) + + .. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes: + https://arxiv.org/abs/1904.00962 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__( + self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-6, + weight_decay=0.01, grad_averaging=True, max_grad_norm=1.0, trust_clip=False, always_adapt=False): + defaults = dict( + lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, + grad_averaging=grad_averaging, max_grad_norm=max_grad_norm, + trust_clip=trust_clip, always_adapt=always_adapt) + super().__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + device = self.param_groups[0]['params'][0].device + one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly + global_grad_norm = torch.zeros(1, device=device) + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.') + global_grad_norm.add_(grad.pow(2).sum()) + + global_grad_norm = torch.sqrt(global_grad_norm) + # FIXME it'd be nice to remove explicit tensor conversion of scalars when torch.where promotes + # scalar types properly https://github.com/pytorch/pytorch/issues/9190 + max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device) + clip_global_grad_norm = torch.where( + global_grad_norm > max_grad_norm, + global_grad_norm / max_grad_norm, + one_tensor) + + for group in self.param_groups: + bias_correction = 1 if group['bias_correction'] else 0 + beta1, beta2 = group['betas'] + grad_averaging = 1 if group['grad_averaging'] else 0 + beta3 = 1 - beta1 if grad_averaging else 1.0 + + # assume same step across group now to simplify things + # per parameter step can be easily support by making it tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + if bias_correction: + bias_correction1 = 1 - beta1 ** group['step'] + bias_correction2 = 1 - beta2 ** group['step'] + else: + bias_correction1, bias_correction2 = 1.0, 1.0 + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.div_(clip_global_grad_norm) + state = self.state[p] + + # State initialization + if len(state) == 0: + # Exponential moving average of gradient valuesa + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=beta3) # m_t + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) # v_t + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + update = (exp_avg / bias_correction1).div_(denom) + + weight_decay = group['weight_decay'] + if weight_decay != 0: + update.add_(p, alpha=weight_decay) + + if weight_decay != 0 or group['always_adapt']: + # Layer-wise LR adaptation. By default, skip adaptation on parameters that are + # excluded from weight decay, unless always_adapt == True, then always enabled. 
+ w_norm = p.norm(2.0) + g_norm = update.norm(2.0) + # FIXME nested where required since logical and/or not working in PT XLA + trust_ratio = torch.where( + w_norm > 0, + torch.where(g_norm > 0, w_norm / g_norm, one_tensor), + one_tensor, + ) + if group['trust_clip']: + # LAMBC trust clipping, upper bound fixed at one + trust_ratio = torch.minimum(trust_ratio, one_tensor) + update.mul_(trust_ratio) + + p.add_(update, alpha=-group['lr']) + + return loss diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/lars.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/lars.py new file mode 100644 index 0000000000..98198e675c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/lars.py @@ -0,0 +1,135 @@ +""" PyTorch LARS / LARC Optimizer + +An implementation of LARS (SGD) + LARC in PyTorch + +Based on: + * PyTorch SGD: https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 + * NVIDIA APEX LARC: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py + +Additional cleanup and modifications to properly support PyTorch XLA. + +Copyright 2021 Ross Wightman +""" +import torch +from torch.optim.optimizer import Optimizer + + +class Lars(Optimizer): + """ LARS for PyTorch + + Paper: `Large batch training of Convolutional Networks` - https://arxiv.org/pdf/1708.03888.pdf + + Args: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups. + lr (float, optional): learning rate (default: 1.0). + momentum (float, optional): momentum factor (default: 0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + dampening (float, optional): dampening for momentum (default: 0) + nesterov (bool, optional): enables Nesterov momentum (default: False) + trust_coeff (float): trust coefficient for computing adaptive lr / trust_ratio (default: 0.001) + eps (float): eps for division denominator (default: 1e-8) + trust_clip (bool): enable LARC trust ratio clipping (default: False) + always_adapt (bool): always apply LARS LR adapt, otherwise only when group weight_decay != 0 (default: False) + """ + + def __init__( + self, + params, + lr=1.0, + momentum=0, + dampening=0, + weight_decay=0, + nesterov=False, + trust_coeff=0.001, + eps=1e-8, + trust_clip=False, + always_adapt=False, + ): + if lr < 0.0: + raise ValueError(f"Invalid learning rate: {lr}") + if momentum < 0.0: + raise ValueError(f"Invalid momentum value: {momentum}") + if weight_decay < 0.0: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + if nesterov and (momentum <= 0 or dampening != 0): + raise ValueError("Nesterov momentum requires a momentum and zero dampening") + + defaults = dict( + lr=lr, + momentum=momentum, + dampening=dampening, + weight_decay=weight_decay, + nesterov=nesterov, + trust_coeff=trust_coeff, + eps=eps, + trust_clip=trust_clip, + always_adapt=always_adapt, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("nesterov", False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (callable, optional): A closure that reevaluates the model and returns the loss. 
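# --- Editor's sketch (illustrative, not from the original patch) ------------
# Constructing the LARS optimizer defined above. Layer-wise adaptation only
# kicks in for parameter groups with a non-zero weight_decay unless
# always_adapt=True; trust_clip=True gives LARC-style clipping.
import torch.nn as nn
from timm.optim import Lars

model = nn.Linear(64, 10)
opt = Lars(model.parameters(), lr=1.0, momentum=0.9, weight_decay=1e-4, trust_clip=True)
# ---------------------------------------------------------------------------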
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + device = self.param_groups[0]['params'][0].device + one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly + + for group in self.param_groups: + weight_decay = group['weight_decay'] + momentum = group['momentum'] + dampening = group['dampening'] + nesterov = group['nesterov'] + trust_coeff = group['trust_coeff'] + eps = group['eps'] + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + + # apply LARS LR adaptation, LARC clipping, weight decay + # ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py + if weight_decay != 0 or group['always_adapt']: + w_norm = p.norm(2.0) + g_norm = grad.norm(2.0) + trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps) + # FIXME nested where required since logical and/or not working in PT XLA + trust_ratio = torch.where( + w_norm > 0, + torch.where(g_norm > 0, trust_ratio, one_tensor), + one_tensor, + ) + if group['trust_clip']: + trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor) + grad.add(p, alpha=weight_decay) + grad.mul_(trust_ratio) + + # apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 + if momentum != 0: + param_state = self.state[p] + if 'momentum_buffer' not in param_state: + buf = param_state['momentum_buffer'] = torch.clone(grad).detach() + else: + buf = param_state['momentum_buffer'] + buf.mul_(momentum).add_(grad, alpha=1. - dampening) + if nesterov: + grad = grad.add(buf, alpha=momentum) + else: + grad = buf + + p.add_(grad, alpha=-group['lr']) + + return loss \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/lookahead.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/lookahead.py new file mode 100644 index 0000000000..462c3acd24 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/lookahead.py @@ -0,0 +1,61 @@ +""" Lookahead Optimizer Wrapper. 
+Implementation modified from: https://github.com/alphadl/lookahead.pytorch +Paper: `Lookahead Optimizer: k steps forward, 1 step back` - https://arxiv.org/abs/1907.08610 + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch.optim.optimizer import Optimizer +from collections import defaultdict + + +class Lookahead(Optimizer): + def __init__(self, base_optimizer, alpha=0.5, k=6): + # NOTE super().__init__() not called on purpose + if not 0.0 <= alpha <= 1.0: + raise ValueError(f'Invalid slow update rate: {alpha}') + if not 1 <= k: + raise ValueError(f'Invalid lookahead steps: {k}') + defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0) + self._base_optimizer = base_optimizer + self.param_groups = base_optimizer.param_groups + self.defaults = base_optimizer.defaults + self.defaults.update(defaults) + self.state = defaultdict(dict) + # manually add our defaults to the param groups + for name, default in defaults.items(): + for group in self._base_optimizer.param_groups: + group.setdefault(name, default) + + @torch.no_grad() + def update_slow(self, group): + for fast_p in group["params"]: + if fast_p.grad is None: + continue + param_state = self._base_optimizer.state[fast_p] + if 'lookahead_slow_buff' not in param_state: + param_state['lookahead_slow_buff'] = torch.empty_like(fast_p) + param_state['lookahead_slow_buff'].copy_(fast_p) + slow = param_state['lookahead_slow_buff'] + slow.add_(fast_p - slow, alpha=group['lookahead_alpha']) + fast_p.copy_(slow) + + def sync_lookahead(self): + for group in self._base_optimizer.param_groups: + self.update_slow(group) + + @torch.no_grad() + def step(self, closure=None): + loss = self._base_optimizer.step(closure) + for group in self._base_optimizer.param_groups: + group['lookahead_step'] += 1 + if group['lookahead_step'] % group['lookahead_k'] == 0: + self.update_slow(group) + return loss + + def state_dict(self): + return self._base_optimizer.state_dict() + + def load_state_dict(self, state_dict): + self._base_optimizer.load_state_dict(state_dict) + self.param_groups = self._base_optimizer.param_groups diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/madgrad.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/madgrad.py new file mode 100644 index 0000000000..a76713bf27 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/madgrad.py @@ -0,0 +1,184 @@ +""" PyTorch MADGRAD optimizer + +MADGRAD: https://arxiv.org/abs/2101.11075 + +Code from: https://github.com/facebookresearch/madgrad +""" +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import math +from typing import TYPE_CHECKING, Any, Callable, Optional + +import torch +import torch.optim + +if TYPE_CHECKING: + from torch.optim.optimizer import _params_t +else: + _params_t = Any + + +class MADGRAD(torch.optim.Optimizer): + """ + MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic + Optimization. + + .. _MADGRAD: https://arxiv.org/abs/2101.11075 + + MADGRAD is a general purpose optimizer that can be used in place of SGD or + Adam may converge faster and generalize better. Currently GPU-only. + Typically, the same learning rate schedule that is used for SGD or Adam may + be used. The overall learning rate is not comparable to either method and + should be determined by a hyper-parameter sweep. 
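# --- Editor's sketch (illustrative, not from the original patch) ------------
# Constructing MADGRAD with the caveats from the docstring above in mind: lr
# is not directly comparable to SGD/Adam values and weight decay can often be
# zero. The model is a toy stand-in.
import torch.nn as nn
from timm.optim import MADGRAD

model = nn.Linear(16, 4)
opt = MADGRAD(model.parameters(), lr=1e-2, momentum=0.9, weight_decay=0.0)
# ---------------------------------------------------------------------------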
+ + MADGRAD requires less weight decay than other methods, often as little as + zero. Momentum values used for SGD or Adam's beta1 should work here also. + + On sparse problems both weight_decay and momentum should be set to 0. + + Arguments: + params (iterable): + Iterable of parameters to optimize or dicts defining parameter groups. + lr (float): + Learning rate (default: 1e-2). + momentum (float): + Momentum value in the range [0,1) (default: 0.9). + weight_decay (float): + Weight decay, i.e. a L2 penalty (default: 0). + eps (float): + Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6). + """ + + def __init__( + self, + params: _params_t, + lr: float = 1e-2, + momentum: float = 0.9, + weight_decay: float = 0, + eps: float = 1e-6, + decoupled_decay: bool = False, + ): + if momentum < 0 or momentum >= 1: + raise ValueError(f"Momentum {momentum} must be in the range [0,1]") + if lr <= 0: + raise ValueError(f"Learning rate {lr} must be positive") + if weight_decay < 0: + raise ValueError(f"Weight decay {weight_decay} must be non-negative") + if eps < 0: + raise ValueError(f"Eps must be non-negative") + + defaults = dict( + lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay) + super().__init__(params, defaults) + + @property + def supports_memory_efficient_fp16(self) -> bool: + return False + + @property + def supports_flat_params(self) -> bool: + return True + + @torch.no_grad() + def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]: + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + eps = group['eps'] + lr = group['lr'] + eps + weight_decay = group['weight_decay'] + momentum = group['momentum'] + ck = 1 - momentum + + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + if momentum != 0.0 and grad.is_sparse: + raise RuntimeError("momentum != 0 is not compatible with sparse gradients") + + state = self.state[p] + if len(state) == 0: + state['step'] = 0 + state['grad_sum_sq'] = torch.zeros_like(p) + state['s'] = torch.zeros_like(p) + if momentum != 0: + state['x0'] = torch.clone(p).detach() + + state['step'] += 1 + grad_sum_sq = state['grad_sum_sq'] + s = state['s'] + lamb = lr * math.sqrt(state['step']) + + # Apply weight decay + if weight_decay != 0: + if group['decoupled_decay']: + p.mul_(1.0 - group['lr'] * weight_decay) + else: + if grad.is_sparse: + raise RuntimeError("weight_decay option is not compatible with sparse gradients") + grad.add_(p, alpha=weight_decay) + + if grad.is_sparse: + grad = grad.coalesce() + grad_val = grad._values() + + p_masked = p.sparse_mask(grad) + grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad) + s_masked = s.sparse_mask(grad) + + # Compute x_0 from other known quantities + rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps) + x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1) + + # Dense + sparse op + grad_sq = grad * grad + grad_sum_sq.add_(grad_sq, alpha=lamb) + grad_sum_sq_masked.add_(grad_sq, alpha=lamb) + + rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps) + + s.add_(grad, alpha=lamb) + s_masked._values().add_(grad_val, alpha=lamb) + + # update masked copy of p + p_kp1_masked_vals = 
x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1) + # Copy updated masked p to dense p using an add operation + p_masked._values().add_(p_kp1_masked_vals, alpha=-1) + p.add_(p_masked, alpha=-1) + else: + if momentum == 0: + # Compute x_0 from other known quantities + rms = grad_sum_sq.pow(1 / 3).add_(eps) + x0 = p.addcdiv(s, rms, value=1) + else: + x0 = state['x0'] + + # Accumulate second moments + grad_sum_sq.addcmul_(grad, grad, value=lamb) + rms = grad_sum_sq.pow(1 / 3).add_(eps) + + # Update s + s.add_(grad, alpha=lamb) + + # Step + if momentum == 0: + p.copy_(x0.addcdiv(s, rms, value=-1)) + else: + z = x0.addcdiv(s, rms, value=-1) + + # p is a moving average of z + p.mul_(1 - ck).add_(z, alpha=ck) + + return loss diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/nadam.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/nadam.py new file mode 100644 index 0000000000..6268d5d451 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/nadam.py @@ -0,0 +1,92 @@ +import math + +import torch +from torch.optim.optimizer import Optimizer + + +class Nadam(Optimizer): + """Implements Nadam algorithm (a variant of Adam based on Nesterov momentum). + + It has been proposed in `Incorporating Nesterov Momentum into Adam`__. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 2e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + schedule_decay (float, optional): momentum schedule decay (default: 4e-3) + + __ http://cs229.stanford.edu/proj2015/054_report.pdf + __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf + + Originally taken from: https://github.com/pytorch/pytorch/pull/1408 + NOTE: Has potential issues but does work well on some problems. + """ + + def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=0, schedule_decay=4e-3): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, schedule_decay=schedule_decay) + super(Nadam, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['m_schedule'] = 1. + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + + # Warming momentum schedule + m_schedule = state['m_schedule'] + schedule_decay = group['schedule_decay'] + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + eps = group['eps'] + state['step'] += 1 + t = state['step'] + bias_correction2 = 1 - beta2 ** t + + if group['weight_decay'] != 0: + grad = grad.add(p, alpha=group['weight_decay']) + + momentum_cache_t = beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay))) + momentum_cache_t_1 = beta1 * (1. 
- 0.5 * (0.96 ** ((t + 1) * schedule_decay))) + m_schedule_new = m_schedule * momentum_cache_t + m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1 + state['m_schedule'] = m_schedule_new + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2) + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps) + p.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new)) + p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next)) + + return loss diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/nvnovograd.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/nvnovograd.py new file mode 100644 index 0000000000..fda3f4a620 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/nvnovograd.py @@ -0,0 +1,120 @@ +""" Nvidia NovoGrad Optimizer. +Original impl by Nvidia from Jasper example: + - https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechRecognition/Jasper +Paper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks` + - https://arxiv.org/abs/1905.11286 +""" + +import torch +from torch.optim.optimizer import Optimizer +import math + + +class NvNovoGrad(Optimizer): + """ + Implements Novograd algorithm. + + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.95, 0.98)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + grad_averaging: gradient averaging + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + """ + + def __init__(self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8, + weight_decay=0, grad_averaging=False, amsgrad=False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, + grad_averaging=grad_averaging, + amsgrad=amsgrad) + + super(NvNovoGrad, self).__init__(params, defaults) + + def __setstate__(self, state): + super(NvNovoGrad, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
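# --- Editor's sketch (illustrative, not from the original patch) ------------
# NovoGrad keeps a single scalar second moment per parameter tensor (see the
# grad-norm accumulation below), so optimizer state is much smaller than
# Adam's. Construction otherwise mirrors Adam:
import torch.nn as nn
from timm.optim import NvNovoGrad

opt = NvNovoGrad(nn.Linear(8, 2).parameters(), lr=1e-3, betas=(0.95, 0.98),
                 weight_decay=1e-3, grad_averaging=True)
# ---------------------------------------------------------------------------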
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Sparse gradients are not supported.') + amsgrad = group['amsgrad'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + if amsgrad: + max_exp_avg_sq = state['max_exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + + norm = torch.sum(torch.pow(grad, 2)) + + if exp_avg_sq == 0: + exp_avg_sq.copy_(norm) + else: + exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2) + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) + # Use the max. for normalizing running avg. of gradient + denom = max_exp_avg_sq.sqrt().add_(group['eps']) + else: + denom = exp_avg_sq.sqrt().add_(group['eps']) + + grad.div_(denom) + if group['weight_decay'] != 0: + grad.add_(p, alpha=group['weight_decay']) + if group['grad_averaging']: + grad.mul_(1 - beta1) + exp_avg.mul_(beta1).add_(grad) + + p.add_(exp_avg, alpha=-group['lr']) + + return loss diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/optim_factory.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/optim_factory.py new file mode 100644 index 0000000000..e174915679 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/optim_factory.py @@ -0,0 +1,217 @@ +""" Optimizer Factory w/ Custom Weight Decay +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import Optional + +import torch +import torch.nn as nn +import torch.optim as optim + +from .adabelief import AdaBelief +from .adafactor import Adafactor +from .adahessian import Adahessian +from .adamp import AdamP +from .lamb import Lamb +from .lars import Lars +from .lookahead import Lookahead +from .madgrad import MADGRAD +from .nadam import Nadam +from .nvnovograd import NvNovoGrad +from .radam import RAdam +from .rmsprop_tf import RMSpropTF +from .sgdp import SGDP + +try: + from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD + has_apex = True +except ImportError: + has_apex = False + + +def add_weight_decay(model, weight_decay=1e-5, skip_list=()): + decay = [] + no_decay = [] + for name, param in model.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: + no_decay.append(param) + else: + decay.append(param) + return [ + {'params': no_decay, 'weight_decay': 0.}, + {'params': decay, 'weight_decay': weight_decay}] + + +def optimizer_kwargs(cfg): + """ cfg/argparse to kwargs helper + Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn. 
+ """ + kwargs = dict( + opt=cfg.opt, + lr=cfg.lr, + weight_decay=cfg.weight_decay, + momentum=cfg.momentum) + if getattr(cfg, 'opt_eps', None) is not None: + kwargs['eps'] = cfg.opt_eps + if getattr(cfg, 'opt_betas', None) is not None: + kwargs['betas'] = cfg.opt_betas + if getattr(cfg, 'opt_args', None) is not None: + kwargs.update(cfg.opt_args) + return kwargs + + +def create_optimizer(args, model, filter_bias_and_bn=True): + """ Legacy optimizer factory for backwards compatibility. + NOTE: Use create_optimizer_v2 for new code. + """ + return create_optimizer_v2( + model, + **optimizer_kwargs(cfg=args), + filter_bias_and_bn=filter_bias_and_bn, + ) + + +def create_optimizer_v2( + model_or_params, + opt: str = 'sgd', + lr: Optional[float] = None, + weight_decay: float = 0., + momentum: float = 0.9, + filter_bias_and_bn: bool = True, + **kwargs): + """ Create an optimizer. + + TODO currently the model is passed in and all parameters are selected for optimization. + For more general use an interface that allows selection of parameters to optimize and lr groups, one of: + * a filter fn interface that further breaks params into groups in a weight_decay compatible fashion + * expose the parameters interface and leave it up to caller + + Args: + model_or_params (nn.Module): model containing parameters to optimize + opt: name of optimizer to create + lr: initial learning rate + weight_decay: weight decay to apply in optimizer + momentum: momentum for momentum based optimizers (others may use betas via kwargs) + filter_bias_and_bn: filter out bias, bn and other 1d params from weight decay + **kwargs: extra optimizer specific kwargs to pass through + + Returns: + Optimizer + """ + if isinstance(model_or_params, nn.Module): + # a model was passed in, extract parameters and add weight decays to appropriate layers + if weight_decay and filter_bias_and_bn: + skip = {} + if hasattr(model_or_params, 'no_weight_decay'): + skip = model_or_params.no_weight_decay() + parameters = add_weight_decay(model_or_params, weight_decay, skip) + weight_decay = 0. 
+ else: + parameters = model_or_params.parameters() + else: + # iterable of parameters or param groups passed in + parameters = model_or_params + + opt_lower = opt.lower() + opt_split = opt_lower.split('_') + opt_lower = opt_split[-1] + if 'fused' in opt_lower: + assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' + + opt_args = dict(weight_decay=weight_decay, **kwargs) + if lr is not None: + opt_args.setdefault('lr', lr) + + # basic SGD & related + if opt_lower == 'sgd' or opt_lower == 'nesterov': + # NOTE 'sgd' refers to SGD + nesterov momentum for legacy / backwards compat reasons + opt_args.pop('eps', None) + optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args) + elif opt_lower == 'momentum': + opt_args.pop('eps', None) + optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args) + elif opt_lower == 'sgdp': + optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args) + + # adaptive + elif opt_lower == 'adam': + optimizer = optim.Adam(parameters, **opt_args) + elif opt_lower == 'adamw': + optimizer = optim.AdamW(parameters, **opt_args) + elif opt_lower == 'adamp': + optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args) + elif opt_lower == 'nadam': + try: + # NOTE PyTorch >= 1.10 should have native NAdam + optimizer = optim.Nadam(parameters, **opt_args) + except AttributeError: + optimizer = Nadam(parameters, **opt_args) + elif opt_lower == 'radam': + optimizer = RAdam(parameters, **opt_args) + elif opt_lower == 'adamax': + optimizer = optim.Adamax(parameters, **opt_args) + elif opt_lower == 'adabelief': + optimizer = AdaBelief(parameters, rectify=False, **opt_args) + elif opt_lower == 'radabelief': + optimizer = AdaBelief(parameters, rectify=True, **opt_args) + elif opt_lower == 'adadelta': + optimizer = optim.Adadelta(parameters, **opt_args) + elif opt_lower == 'adagrad': + opt_args.setdefault('eps', 1e-8) + optimizer = optim.Adagrad(parameters, **opt_args) + elif opt_lower == 'adafactor': + optimizer = Adafactor(parameters, **opt_args) + elif opt_lower == 'lamb': + optimizer = Lamb(parameters, **opt_args) + elif opt_lower == 'lambc': + optimizer = Lamb(parameters, trust_clip=True, **opt_args) + elif opt_lower == 'larc': + optimizer = Lars(parameters, momentum=momentum, trust_clip=True, **opt_args) + elif opt_lower == 'lars': + optimizer = Lars(parameters, momentum=momentum, **opt_args) + elif opt_lower == 'nlarc': + optimizer = Lars(parameters, momentum=momentum, trust_clip=True, nesterov=True, **opt_args) + elif opt_lower == 'nlars': + optimizer = Lars(parameters, momentum=momentum, nesterov=True, **opt_args) + elif opt_lower == 'madgrad': + optimizer = MADGRAD(parameters, momentum=momentum, **opt_args) + elif opt_lower == 'madgradw': + optimizer = MADGRAD(parameters, momentum=momentum, decoupled_decay=True, **opt_args) + elif opt_lower == 'novograd' or opt_lower == 'nvnovograd': + optimizer = NvNovoGrad(parameters, **opt_args) + elif opt_lower == 'rmsprop': + optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args) + elif opt_lower == 'rmsproptf': + optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args) + + # second order + elif opt_lower == 'adahessian': + optimizer = Adahessian(parameters, **opt_args) + + # NVIDIA fused optimizers, require APEX to be installed + elif opt_lower == 'fusedsgd': + opt_args.pop('eps', None) + optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args) + elif opt_lower 
== 'fusedmomentum': + opt_args.pop('eps', None) + optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args) + elif opt_lower == 'fusedadam': + optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args) + elif opt_lower == 'fusedadamw': + optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args) + elif opt_lower == 'fusedlamb': + optimizer = FusedLAMB(parameters, **opt_args) + elif opt_lower == 'fusednovograd': + opt_args.setdefault('betas', (0.95, 0.98)) + optimizer = FusedNovoGrad(parameters, **opt_args) + + else: + assert False and "Invalid optimizer" + raise ValueError + + if len(opt_split) > 1: + if opt_split[0] == 'lookahead': + optimizer = Lookahead(optimizer) + + return optimizer diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/radam.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/radam.py new file mode 100644 index 0000000000..eb8d22e06c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/radam.py @@ -0,0 +1,89 @@ +"""RAdam Optimizer. +Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam +Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265 +""" +import math +import torch +from torch.optim.optimizer import Optimizer + + +class RAdam(Optimizer): + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, + buffer=[[None, None, None] for _ in range(10)]) + super(RAdam, self).__init__(params, defaults) + + def __setstate__(self, state): + super(RAdam, self).__setstate__(state) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.float() + if grad.is_sparse: + raise RuntimeError('RAdam does not support sparse gradients') + + p_fp32 = p.float() + + state = self.state[p] + + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p_fp32) + state['exp_avg_sq'] = torch.zeros_like(p_fp32) + else: + state['exp_avg'] = state['exp_avg'].type_as(p_fp32) + state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + + state['step'] += 1 + buffered = group['buffer'][int(state['step'] % 10)] + if state['step'] == buffered[0]: + num_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + num_sma_max = 2 / (1 - beta2) - 1 + num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = num_sma + + # more conservative since it's an approximated value + if num_sma >= 5: + step_size = group['lr'] * math.sqrt( + (1 - beta2_t) * + (num_sma - 4) / (num_sma_max - 4) * + (num_sma - 2) / num_sma * + num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) + else: + step_size = group['lr'] / (1 - beta1 ** state['step']) + buffered[2] = step_size + + if group['weight_decay'] != 0: + p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr']) + + # more conservative since it's an approximated value + if num_sma >= 5: + denom = exp_avg_sq.sqrt().add_(group['eps']) + p_fp32.addcdiv_(exp_avg, denom, value=-step_size) + else: + p_fp32.add_(exp_avg, 
alpha=-step_size) + + p.copy_(p_fp32) + + return loss diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/rmsprop_tf.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/rmsprop_tf.py new file mode 100644 index 0000000000..0817887db3 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/rmsprop_tf.py @@ -0,0 +1,139 @@ +""" RMSProp modified to behave like Tensorflow impl + +Originally cut & paste from PyTorch RMSProp +https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py +Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE + +Modifications Copyright 2021 Ross Wightman +""" + +import torch +from torch.optim import Optimizer + + +class RMSpropTF(Optimizer): + """Implements RMSprop algorithm (TensorFlow style epsilon) + + NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt + and a few other modifications to closer match Tensorflow for matching hyper-params. + + Noteworthy changes include: + 1. Epsilon applied inside square-root + 2. square_avg initialized to ones + 3. LR scaling of update accumulated in momentum buffer + + Proposed by G. Hinton in his + `course `_. + + The centered version first appears in `Generating Sequences + With Recurrent Neural Networks `_. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-2) + momentum (float, optional): momentum factor (default: 0) + alpha (float, optional): smoothing (decay) constant (default: 0.9) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-10) + centered (bool, optional) : if ``True``, compute the centered RMSProp, + the gradient is normalized by an estimation of its variance + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101 + lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer + update as per defaults in Tensorflow + + """ + + def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False, + decoupled_decay=False, lr_in_momentum=True): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= momentum: + raise ValueError("Invalid momentum value: {}".format(momentum)) + if not 0.0 <= weight_decay: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) + if not 0.0 <= alpha: + raise ValueError("Invalid alpha value: {}".format(alpha)) + + defaults = dict( + lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay, + decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum) + super(RMSpropTF, self).__init__(params, defaults) + + def __setstate__(self, state): + super(RMSpropTF, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('momentum', 0) + group.setdefault('centered', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
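+
+        Example (illustrative sketch added for clarity; names other than RMSpropTF are
+        placeholders, not part of the original patch)::
+
+            optimizer = RMSpropTF(model.parameters(), lr=1e-2, alpha=0.9, eps=1e-10, momentum=0.9)
+            loss = loss_fn(model(inputs), targets)
+            loss.backward()
+            optimizer.step()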
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('RMSprop does not support sparse gradients') + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['square_avg'] = torch.ones_like(p) # PyTorch inits to zero + if group['momentum'] > 0: + state['momentum_buffer'] = torch.zeros_like(p) + if group['centered']: + state['grad_avg'] = torch.zeros_like(p) + + square_avg = state['square_avg'] + one_minus_alpha = 1. - group['alpha'] + + state['step'] += 1 + + if group['weight_decay'] != 0: + if group['decoupled_decay']: + p.mul_(1. - group['lr'] * group['weight_decay']) + else: + grad = grad.add(p, alpha=group['weight_decay']) + + # Tensorflow order of ops for updating squared avg + square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha) + # square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha) # PyTorch original + + if group['centered']: + grad_avg = state['grad_avg'] + grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha) + avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_() # eps in sqrt + # grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha) # PyTorch original + else: + avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt + + if group['momentum'] > 0: + buf = state['momentum_buffer'] + # Tensorflow accumulates the LR scaling in the momentum buffer + if group['lr_in_momentum']: + buf.mul_(group['momentum']).addcdiv_(grad, avg, value=group['lr']) + p.add_(-buf) + else: + # PyTorch scales the param update by LR + buf.mul_(group['momentum']).addcdiv_(grad, avg) + p.add_(buf, alpha=-group['lr']) + else: + p.addcdiv_(grad, avg, value=-group['lr']) + + return loss diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/optim/sgdp.py b/PyTorch/contrib/cv/classification/convmixer/timm/optim/sgdp.py new file mode 100644 index 0000000000..baf05fa55c --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/optim/sgdp.py @@ -0,0 +1,70 @@ +""" +SGDP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/sgdp.py + +Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 +Code: https://github.com/clovaai/AdamP + +Copyright (c) 2020-present NAVER Corp. 
+MIT license +""" + +import torch +import torch.nn.functional as F +from torch.optim.optimizer import Optimizer, required +import math + +from .adamp import projection + + +class SGDP(Optimizer): + def __init__(self, params, lr=required, momentum=0, dampening=0, + weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1): + defaults = dict( + lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, + nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio) + super(SGDP, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + weight_decay = group['weight_decay'] + momentum = group['momentum'] + dampening = group['dampening'] + nesterov = group['nesterov'] + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + state = self.state[p] + + # State initialization + if len(state) == 0: + state['momentum'] = torch.zeros_like(p) + + # SGD + buf = state['momentum'] + buf.mul_(momentum).add_(grad, alpha=1. - dampening) + if nesterov: + d_p = grad + momentum * buf + else: + d_p = buf + + # Projection + wd_ratio = 1. + if len(p.shape) > 1: + d_p, wd_ratio = projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps']) + + # Weight decay + if weight_decay != 0: + p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum)) + + # Step + p.add_(d_p, alpha=-group['lr']) + + return loss diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/__init__.py b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/__init__.py new file mode 100644 index 0000000000..408aef6d64 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/__init__.py @@ -0,0 +1,9 @@ +from .cosine_lr import CosineLRScheduler +from .multistep_lr import MultiStepLRScheduler +from .plateau_lr import PlateauLRScheduler +from .poly_lr import PolyLRScheduler +from .step_lr import StepLRScheduler +from .tanh_lr import TanhLRScheduler +from .onecycle_lr import OneCycleLRScheduler + +from .scheduler_factory import create_scheduler diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/cosine_lr.py b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/cosine_lr.py new file mode 100644 index 0000000000..84ee349ec2 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/cosine_lr.py @@ -0,0 +1,119 @@ +""" Cosine Scheduler + +Cosine LR schedule with warmup, cycle/restarts, noise, k-decay. + +Hacked together by / Copyright 2021 Ross Wightman +""" +import logging +import math +import numpy as np +import torch + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class CosineLRScheduler(Scheduler): + """ + Cosine decay with restarts. + This is described in the paper https://arxiv.org/abs/1608.03983. 
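+
+    A minimal usage sketch (added for clarity, not part of the original patch; ``train_one_epoch``
+    is a placeholder, and the epoch-driven call convention follows the Scheduler base class)::
+
+        scheduler = CosineLRScheduler(optimizer, t_initial=300, lr_min=1e-5,
+                                      warmup_t=5, warmup_lr_init=1e-6)
+        for epoch in range(300):
+            train_one_epoch(...)
+            scheduler.step(epoch + 1)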
+ + Inspiration from + https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py + + k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + k_decay=1.0, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: + _logger.warning("Cosine annealing scheduler will have no effect on the learning " + "rate since t_initial = t_mul = eta_mul = 1.") + self.t_initial = t_initial + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + self.k_decay = k_decay + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + k = self.k_decay + + if i < self.cycle_limit: + lrs = [ + self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 + math.cos(math.pi * t_curr ** k / t_i ** k)) + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/multistep_lr.py b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/multistep_lr.py new file mode 100644 index 0000000000..a5d5fe1980 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/multistep_lr.py @@ -0,0 +1,65 @@ +""" MultiStep LR Scheduler + +Basic multi step LR schedule with warmup, noise. 
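+
+Illustrative behaviour (added note, not part of the original patch): with decay_t=[30, 60] and
+decay_rate=0.1 the base LR is scaled by decay_rate ** k, where k counts how many entries of
+decay_t are <= t + 1 at epoch t (see MultiStepLRScheduler.get_curr_decay_steps below).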
+""" +import torch +import bisect +from timm.scheduler.scheduler import Scheduler +from typing import List + +class MultiStepLRScheduler(Scheduler): + """ + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + decay_t: List[int], + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + self.decay_t = decay_t + self.decay_rate = decay_rate + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.t_in_epochs = t_in_epochs + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def get_curr_decay_steps(self, t): + # find where in the array t goes, + # assumes self.decay_t is sorted + return bisect.bisect_right(self.decay_t, t+1) + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + lrs = [v * (self.decay_rate ** self.get_curr_decay_steps(t)) for v in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/onecycle_lr.py b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/onecycle_lr.py new file mode 100644 index 0000000000..0666235557 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/onecycle_lr.py @@ -0,0 +1,42 @@ +""" OneCycle Scheduler +""" +import logging +import math +import numpy as np +import torch +from .scheduler import Scheduler + +_logger = logging.getLogger(__name__) + +class OneCycleLRScheduler(Scheduler): + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + t_mul: float = 1., + lr_min: float = 0., + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + cycle_limit=0, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + assert warmup_t == 0, "this schedule has warmup built in" + assert t_initial > 0 + self.t_initial = t_initial + + def get_frac_epoch_values(self, frac_epoch: int): + sched = lambda t, lr_max: np.interp([t], [0, self.t_initial*2//5, self.t_initial*4//5, self.t_initial], + [0, lr_max, lr_max/20.0, 0])[0] + return [sched(frac_epoch, v) for v in self.base_values] + + def get_epoch_values(self, epoch: int): + return self.get_frac_epoch_values(epoch) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/plateau_lr.py b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/plateau_lr.py new file mode 100644 index 0000000000..4f2cacb65a --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/plateau_lr.py @@ -0,0 +1,113 @@ +""" Plateau Scheduler + +Adapts PyTorch plateau scheduler and allows application of noise, warmup. 
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch + +from .scheduler import Scheduler + + +class PlateauLRScheduler(Scheduler): + """Decay the LR by a factor every time the validation loss plateaus.""" + + def __init__(self, + optimizer, + decay_rate=0.1, + patience_t=10, + verbose=True, + threshold=1e-4, + cooldown_t=0, + warmup_t=0, + warmup_lr_init=0, + lr_min=0, + mode='max', + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize=True, + ): + super().__init__(optimizer, 'lr', initialize=initialize) + + self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + self.optimizer, + patience=patience_t, + factor=decay_rate, + verbose=verbose, + threshold=threshold, + cooldown=cooldown_t, + mode=mode, + min_lr=lr_min + ) + + self.noise_range = noise_range_t + self.noise_pct = noise_pct + self.noise_type = noise_type + self.noise_std = noise_std + self.noise_seed = noise_seed if noise_seed is not None else 42 + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + self.restore_lr = None + + def state_dict(self): + return { + 'best': self.lr_scheduler.best, + 'last_epoch': self.lr_scheduler.last_epoch, + } + + def load_state_dict(self, state_dict): + self.lr_scheduler.best = state_dict['best'] + if 'last_epoch' in state_dict: + self.lr_scheduler.last_epoch = state_dict['last_epoch'] + + # override the base class step fn completely + def step(self, epoch, metric=None): + if epoch <= self.warmup_t: + lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps] + super().update_groups(lrs) + else: + if self.restore_lr is not None: + # restore actual LR from before our last noise perturbation before stepping base + for i, param_group in enumerate(self.optimizer.param_groups): + param_group['lr'] = self.restore_lr[i] + self.restore_lr = None + + self.lr_scheduler.step(metric, epoch) # step the base scheduler + + if self.noise_range is not None: + if isinstance(self.noise_range, (list, tuple)): + apply_noise = self.noise_range[0] <= epoch < self.noise_range[1] + else: + apply_noise = epoch >= self.noise_range + if apply_noise: + self._apply_noise(epoch) + + def _apply_noise(self, epoch): + g = torch.Generator() + g.manual_seed(self.noise_seed + epoch) + if self.noise_type == 'normal': + while True: + # resample if noise out of percent limit, brute force but shouldn't spin much + noise = torch.randn(1, generator=g).item() + if abs(noise) < self.noise_pct: + break + else: + noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct + + # apply the noise on top of previous LR, cache the old value so we can restore for normal + # stepping of base scheduler + restore_lr = [] + for i, param_group in enumerate(self.optimizer.param_groups): + old_lr = float(param_group['lr']) + restore_lr.append(old_lr) + new_lr = old_lr + old_lr * noise + param_group['lr'] = new_lr + self.restore_lr = restore_lr diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/poly_lr.py b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/poly_lr.py new file mode 100644 index 0000000000..0c1e63b739 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/poly_lr.py @@ -0,0 +1,116 @@ +""" Polynomial Scheduler + +Polynomial LR schedule with warmup, noise. 
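+
+Added note (derived from the implementation below, not part of the original patch): within a cycle
+the schedule follows
+
+    lr(t) = lr_min + (lr_max - lr_min) * (1 - t ** k / t_i ** k) ** power
+
+where t_i is the current cycle length and k is the k-decay factor.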
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +import logging + +import torch + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class PolyLRScheduler(Scheduler): + """ Polynomial LR Scheduler w/ warmup, noise, and k-decay + + k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + power: float = 0.5, + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + k_decay=.5, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: + _logger.warning("Cosine annealing scheduler will have no effect on the learning " + "rate since t_initial = t_mul = eta_mul = 1.") + self.t_initial = t_initial + self.power = power + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + self.k_decay = k_decay + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + k = self.k_decay + + if i < self.cycle_limit: + lrs = [ + self.lr_min + (lr_max - self.lr_min) * (1 - t_curr ** k / t_i ** k) ** self.power + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/scheduler.py b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/scheduler.py new file mode 100644 index 0000000000..a00bfe28b7 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/scheduler.py @@ -0,0 +1,115 @@ +from typing import Dict, Any + +import torch + + +class Scheduler: + """ Parameter Scheduler Base Class + A scheduler base class 
that can be used to schedule any optimizer parameter groups. + + Unlike the builtin PyTorch schedulers, this is intended to be consistently called + * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value + * At the END of each optimizer update, after incrementing the update count, to calculate next update's value + + The schedulers built on this should try to remain as stateless as possible (for simplicity). + + This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch' + and -1 values for special behaviour. All epoch and update counts must be tracked in the training + code and explicitly passed in to the schedulers on the corresponding step or step_update call. + + Based on ideas from: + * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler + * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + param_group_field: str, + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize: bool = True) -> None: + self.optimizer = optimizer + self.param_group_field = param_group_field + self._initial_param_group_field = f"initial_{param_group_field}" + if initialize: + for i, group in enumerate(self.optimizer.param_groups): + if param_group_field not in group: + raise KeyError(f"{param_group_field} missing from param_groups[{i}]") + group.setdefault(self._initial_param_group_field, group[param_group_field]) + else: + for i, group in enumerate(self.optimizer.param_groups): + if self._initial_param_group_field not in group: + raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]") + self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups] + self.metric = None # any point to having this for all? 
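+        # (added comment) the noise settings below are consumed by _add_noise(), which optionally
+        # perturbs the scheduled values; noise_range_t controls when the perturbation is active.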
+ self.noise_range_t = noise_range_t + self.noise_pct = noise_pct + self.noise_type = noise_type + self.noise_std = noise_std + self.noise_seed = noise_seed if noise_seed is not None else 42 + self.update_groups(self.base_values) + + def state_dict(self) -> Dict[str, Any]: + return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + self.__dict__.update(state_dict) + + def get_epoch_values(self, epoch: int): + return None + + def get_frac_epoch_values(self, frac_epoch: float): + return None + + def get_update_values(self, num_updates: int): + return None + + def step(self, epoch: int, metric: float = None) -> None: + self.metric = metric + values = self.get_epoch_values(epoch) + if values is not None: + values = self._add_noise(values, epoch) + self.update_groups(values) + + def step_frac(self, frac_epoch: int, metric: float = None) -> None: + self.metric = metric + values = self.get_frac_epoch_values(frac_epoch) + if values is not None: + values = self._add_noise(values, frac_epoch) + self.update_groups(values) + + def step_update(self, num_updates: int, metric: float = None): + self.metric = metric + values = self.get_update_values(num_updates) + if values is not None: + values = self._add_noise(values, num_updates) + self.update_groups(values) + + def update_groups(self, values): + if not isinstance(values, (list, tuple)): + values = [values] * len(self.optimizer.param_groups) + for param_group, value in zip(self.optimizer.param_groups, values): + param_group[self.param_group_field] = value + + def _add_noise(self, lrs, t): + if self.noise_range_t is not None: + if isinstance(self.noise_range_t, (list, tuple)): + apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1] + else: + apply_noise = t >= self.noise_range_t + if apply_noise: + g = torch.Generator() + g.manual_seed(self.noise_seed + round(t)) + if self.noise_type == 'normal': + while True: + # resample if noise out of percent limit, brute force but shouldn't spin much + noise = torch.randn(1, generator=g).item() + if abs(noise) < self.noise_pct: + break + else: + noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct + lrs = [v + v * noise for v in lrs] + return lrs diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/scheduler_factory.py b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/scheduler_factory.py new file mode 100644 index 0000000000..122bdbdacc --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/scheduler_factory.py @@ -0,0 +1,124 @@ +""" Scheduler Factory +Hacked together by / Copyright 2021 Ross Wightman +""" +from .cosine_lr import CosineLRScheduler +from .multistep_lr import MultiStepLRScheduler +from .plateau_lr import PlateauLRScheduler +from .poly_lr import PolyLRScheduler +from .step_lr import StepLRScheduler +from .tanh_lr import TanhLRScheduler +from .onecycle_lr import OneCycleLRScheduler + + +def create_scheduler(args, optimizer): + num_epochs = args.epochs + + if getattr(args, 'lr_noise', None) is not None: + lr_noise = getattr(args, 'lr_noise') + if isinstance(lr_noise, (list, tuple)): + noise_range = [n * num_epochs for n in lr_noise] + if len(noise_range) == 1: + noise_range = noise_range[0] + else: + noise_range = lr_noise * num_epochs + else: + noise_range = None + noise_args = dict( + noise_range_t=noise_range, + noise_pct=getattr(args, 'lr_noise_pct', 0.67), + noise_std=getattr(args, 'lr_noise_std', 1.), + 
noise_seed=getattr(args, 'seed', 42), + ) + cycle_args = dict( + cycle_mul=getattr(args, 'lr_cycle_mul', 1.), + cycle_decay=getattr(args, 'lr_cycle_decay', 0.1), + cycle_limit=getattr(args, 'lr_cycle_limit', 1), + ) + + lr_scheduler = None + if args.sched == 'cosine': + lr_scheduler = CosineLRScheduler( + optimizer, + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + k_decay=getattr(args, 'lr_k_decay', 1.0), + **cycle_args, + **noise_args, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + elif args.sched == 'tanh': + lr_scheduler = TanhLRScheduler( + optimizer, + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + t_in_epochs=True, + **cycle_args, + **noise_args, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + elif args.sched == 'step': + lr_scheduler = StepLRScheduler( + optimizer, + decay_t=args.decay_epochs, + decay_rate=args.decay_rate, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + **noise_args, + ) + elif args.sched == 'multistep': + lr_scheduler = MultiStepLRScheduler( + optimizer, + decay_t=args.decay_epochs, + decay_rate=args.decay_rate, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + **noise_args, + ) + elif args.sched == 'plateau': + mode = 'min' if 'loss' in getattr(args, 'eval_metric', '') else 'max' + lr_scheduler = PlateauLRScheduler( + optimizer, + decay_rate=args.decay_rate, + patience_t=args.patience_epochs, + lr_min=args.min_lr, + mode=mode, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + cooldown_t=0, + **noise_args, + ) + elif args.sched == 'poly': + lr_scheduler = PolyLRScheduler( + optimizer, + power=args.decay_rate, # overloading 'decay_rate' as polynomial power + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + k_decay=getattr(args, 'lr_k_decay', 1.0), + **cycle_args, + **noise_args, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + elif args.sched == 'onecycle': + lr_scheduler = OneCycleLRScheduler( + optimizer, + t_initial=num_epochs, + t_mul=getattr(args, 'lr_cycle_mul', 1.), + lr_min=args.min_lr, + decay_rate=args.decay_rate, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + cycle_limit=getattr(args, 'lr_cycle_limit', 1), + t_in_epochs=True, + noise_range_t=noise_range, + noise_pct=getattr(args, 'lr_noise_pct', 0.67), + noise_std=getattr(args, 'lr_noise_std', 1.), + noise_seed=getattr(args, 'seed', 42), + ) + + return lr_scheduler, num_epochs diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/step_lr.py b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/step_lr.py new file mode 100644 index 0000000000..f797e1a8cf --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/step_lr.py @@ -0,0 +1,63 @@ +""" Step Scheduler + +Basic step LR schedule with warmup, noise. 
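+
+Illustrative behaviour (added note, not part of the original patch): with decay_t=30 and
+decay_rate=0.1 the base LR is scaled by decay_rate ** (t // decay_t), i.e. multiplied by 0.1
+every 30 epochs.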
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +import torch + +from .scheduler import Scheduler + + +class StepLRScheduler(Scheduler): + """ + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + decay_t: float, + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + self.decay_t = decay_t + self.decay_rate = decay_rate + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.t_in_epochs = t_in_epochs + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/tanh_lr.py b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/tanh_lr.py new file mode 100644 index 0000000000..f2d3c9cdb1 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/scheduler/tanh_lr.py @@ -0,0 +1,117 @@ +""" TanH Scheduler + +TanH schedule with warmup, cycle/restarts, noise. + +Hacked together by / Copyright 2021 Ross Wightman +""" +import logging +import math +import numpy as np +import torch + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class TanhLRScheduler(Scheduler): + """ + Hyberbolic-Tangent decay with restarts. 
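+    (Added note, derived from _get_lr below, not part of the original patch: within a cycle the LR
+    follows lr(t) = lr_min + 0.5 * (lr_max - lr_min) * (1 - tanh(lb * (1 - tr) + ub * tr)),
+    with tr = t_curr / t_i.)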
+ This is described in the paper https://arxiv.org/abs/1806.01593 + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + lb: float = -7., + ub: float = 3., + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + assert lb < ub + assert cycle_limit >= 0 + assert warmup_t >= 0 + assert warmup_lr_init >= 0 + self.lb = lb + self.ub = ub + self.t_initial = t_initial + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + if self.warmup_t: + t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t) + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + if i < self.cycle_limit: + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + + tr = t_curr / t_i + lrs = [ + self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 - math.tanh(self.lb * (1. 
- tr) + self.ub * tr)) + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/utils/__init__.py b/PyTorch/contrib/cv/classification/convmixer/timm/utils/__init__.py new file mode 100644 index 0000000000..d02e62d2d0 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/utils/__init__.py @@ -0,0 +1,13 @@ +from .agc import adaptive_clip_grad +from .checkpoint_saver import CheckpointSaver +from .clip_grad import dispatch_clip_grad +from .cuda import ApexScaler, NativeScaler +from .distributed import distribute_bn, reduce_tensor +from .jit import set_jit_legacy +from .log import setup_default_logging, FormatterNoInfo +from .metrics import AverageMeter, accuracy +from .misc import natural_key, add_bool_arg +from .model import unwrap_model, get_state_dict +from .model_ema import ModelEma, ModelEmaV2 +from .random import random_seed +from .summary import update_summary, get_outdir diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/utils/agc.py b/PyTorch/contrib/cv/classification/convmixer/timm/utils/agc.py new file mode 100644 index 0000000000..f51401726f --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/utils/agc.py @@ -0,0 +1,42 @@ +""" Adaptive Gradient Clipping + +An impl of AGC, as per (https://arxiv.org/abs/2102.06171): + +@article{brock2021high, + author={Andrew Brock and Soham De and Samuel L. 
Smith and Karen Simonyan}, + title={High-Performance Large-Scale Image Recognition Without Normalization}, + journal={arXiv preprint arXiv:}, + year={2021} +} + +Code references: + * Official JAX impl (paper authors): https://github.com/deepmind/deepmind-research/tree/master/nfnets + * Phil Wang's PyTorch gist: https://gist.github.com/lucidrains/0d6560077edac419ab5d3aa29e674d5c + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch + + +def unitwise_norm(x, norm_type=2.0): + if x.ndim <= 1: + return x.norm(norm_type) + else: + # works for nn.ConvNd and nn,Linear where output dim is first in the kernel/weight tensor + # might need special cases for other weights (possibly MHA) where this may not be true + return x.norm(norm_type, dim=tuple(range(1, x.ndim)), keepdim=True) + + +def adaptive_clip_grad(parameters, clip_factor=0.01, eps=1e-3, norm_type=2.0): + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + for p in parameters: + if p.grad is None: + continue + p_data = p.detach() + g_data = p.grad.detach() + max_norm = unitwise_norm(p_data, norm_type=norm_type).clamp_(min=eps).mul_(clip_factor) + grad_norm = unitwise_norm(g_data, norm_type=norm_type) + clipped_grad = g_data * (max_norm / grad_norm.clamp(min=1e-6)) + new_grads = torch.where(grad_norm < max_norm, g_data, clipped_grad) + p.grad.detach().copy_(new_grads) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/utils/checkpoint_saver.py b/PyTorch/contrib/cv/classification/convmixer/timm/utils/checkpoint_saver.py new file mode 100644 index 0000000000..6aad74ee52 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/utils/checkpoint_saver.py @@ -0,0 +1,150 @@ +""" Checkpoint Saver + +Track top-n training checkpoints and maintain recovery checkpoints on specified intervals. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import glob +import operator +import os +import logging + +import torch + +from .model import unwrap_model, get_state_dict + + +_logger = logging.getLogger(__name__) + + +class CheckpointSaver: + def __init__( + self, + model, + optimizer, + args=None, + model_ema=None, + amp_scaler=None, + checkpoint_prefix='checkpoint', + recovery_prefix='recovery', + checkpoint_dir='', + recovery_dir='', + decreasing=False, + max_history=10, + unwrap_fn=unwrap_model): + + # objects to save state_dicts of + self.model = model + self.optimizer = optimizer + self.args = args + self.model_ema = model_ema + self.amp_scaler = amp_scaler + + # state + self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness + self.best_epoch = None + self.best_metric = None + self.curr_recovery_file = '' + self.last_recovery_file = '' + + # config + self.checkpoint_dir = checkpoint_dir + self.recovery_dir = recovery_dir + self.save_prefix = checkpoint_prefix + self.recovery_prefix = recovery_prefix + self.extension = '.pth.tar' + self.decreasing = decreasing # a lower metric is better if True + self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs + self.max_history = max_history + self.unwrap_fn = unwrap_fn + assert self.max_history >= 1 + + def save_checkpoint(self, epoch, metric=None): + assert epoch >= 0 + tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension) + last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension) + self._save(tmp_save_path, epoch, metric) + if os.path.exists(last_save_path): + os.unlink(last_save_path) # required for Windows support. 
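+        # (added comment) checkpoints are written to a tmp file first and then renamed to 'last';
+        # ranked checkpoints and 'model_best' are created further below as hard links to 'last'.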
+ os.rename(tmp_save_path, last_save_path) + worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None + if (len(self.checkpoint_files) < self.max_history + or metric is None or self.cmp(metric, worst_file[1])): + if len(self.checkpoint_files) >= self.max_history: + self._cleanup_checkpoints(1) + filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension + save_path = os.path.join(self.checkpoint_dir, filename) + os.link(last_save_path, save_path) + self.checkpoint_files.append((save_path, metric)) + self.checkpoint_files = sorted( + self.checkpoint_files, key=lambda x: x[1], + reverse=not self.decreasing) # sort in descending order if a lower metric is not better + + checkpoints_str = "Current checkpoints:\n" + for c in self.checkpoint_files: + checkpoints_str += ' {}\n'.format(c) + _logger.info(checkpoints_str) + + if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)): + self.best_epoch = epoch + self.best_metric = metric + best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension) + if os.path.exists(best_save_path): + os.unlink(best_save_path) + os.link(last_save_path, best_save_path) + + return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch) + + def _save(self, save_path, epoch, metric=None): + save_state = { + 'epoch': epoch, + 'arch': type(self.model).__name__.lower(), + 'state_dict': get_state_dict(self.model, self.unwrap_fn), + 'optimizer': self.optimizer.state_dict(), + 'version': 2, # version < 2 increments epoch before save + } + if self.args is not None: + save_state['arch'] = self.args.model + save_state['args'] = self.args + if self.amp_scaler is not None: + save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict() + if self.model_ema is not None: + save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn) + if metric is not None: + save_state['metric'] = metric + torch.save(save_state, save_path) + + def _cleanup_checkpoints(self, trim=0): + trim = min(len(self.checkpoint_files), trim) + delete_index = self.max_history - trim + if delete_index < 0 or len(self.checkpoint_files) <= delete_index: + return + to_delete = self.checkpoint_files[delete_index:] + for d in to_delete: + try: + _logger.debug("Cleaning checkpoint: {}".format(d)) + os.remove(d[0]) + except Exception as e: + _logger.error("Exception '{}' while deleting checkpoint".format(e)) + self.checkpoint_files = self.checkpoint_files[:delete_index] + + def save_recovery(self, epoch, batch_idx=0): + assert epoch >= 0 + filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension + save_path = os.path.join(self.recovery_dir, filename) + self._save(save_path, epoch) + if os.path.exists(self.last_recovery_file): + try: + _logger.debug("Cleaning recovery: {}".format(self.last_recovery_file)) + os.remove(self.last_recovery_file) + except Exception as e: + _logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file)) + self.last_recovery_file = self.curr_recovery_file + self.curr_recovery_file = save_path + + def find_recovery(self): + recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix) + files = glob.glob(recovery_path + '*' + self.extension) + files = sorted(files) + return files[0] if len(files) else '' diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/utils/clip_grad.py b/PyTorch/contrib/cv/classification/convmixer/timm/utils/clip_grad.py new file mode 100644 index 0000000000..7eb40697a2 
--- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/utils/clip_grad.py @@ -0,0 +1,23 @@ +import torch + +from timm.utils.agc import adaptive_clip_grad + + +def dispatch_clip_grad(parameters, value: float, mode: str = 'norm', norm_type: float = 2.0): + """ Dispatch to gradient clipping method + + Args: + parameters (Iterable): model parameters to clip + value (float): clipping value/factor/norm, mode dependant + mode (str): clipping mode, one of 'norm', 'value', 'agc' + norm_type (float): p-norm, default 2.0 + """ + if mode == 'norm': + torch.nn.utils.clip_grad_norm_(parameters, value, norm_type=norm_type) + elif mode == 'value': + torch.nn.utils.clip_grad_value_(parameters, value) + elif mode == 'agc': + adaptive_clip_grad(parameters, value, norm_type=norm_type) + else: + assert False, f"Unknown clip mode ({mode})." + diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/utils/cuda.py b/PyTorch/contrib/cv/classification/convmixer/timm/utils/cuda.py new file mode 100644 index 0000000000..9e7bddf304 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/utils/cuda.py @@ -0,0 +1,55 @@ +""" CUDA / AMP utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch + +try: + from apex import amp + has_apex = True +except ImportError: + amp = None + has_apex = False + +from .clip_grad import dispatch_clip_grad + + +class ApexScaler: + state_dict_key = "amp" + + def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False): + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward(create_graph=create_graph) + if clip_grad is not None: + dispatch_clip_grad(amp.master_params(optimizer), clip_grad, mode=clip_mode) + optimizer.step() + + def state_dict(self): + if 'state_dict' in amp.__dict__: + return amp.state_dict() + + def load_state_dict(self, state_dict): + if 'load_state_dict' in amp.__dict__: + amp.load_state_dict(state_dict) + + +class NativeScaler: + state_dict_key = "amp_scaler" + + def __init__(self): + self._scaler = torch.cuda.amp.GradScaler() + + def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False): + self._scaler.scale(loss).backward(create_graph=create_graph) + if clip_grad is not None: + assert parameters is not None + self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place + dispatch_clip_grad(parameters, clip_grad, mode=clip_mode) + self._scaler.step(optimizer) + self._scaler.update() + + def state_dict(self): + return self._scaler.state_dict() + + def load_state_dict(self, state_dict): + self._scaler.load_state_dict(state_dict) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/utils/distributed.py b/PyTorch/contrib/cv/classification/convmixer/timm/utils/distributed.py new file mode 100644 index 0000000000..3c5dba8c1d --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/utils/distributed.py @@ -0,0 +1,28 @@ +""" Distributed training/validation utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import distributed as dist + +from .model import unwrap_model + + +def reduce_tensor(tensor, n): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= n + return rt + + +def distribute_bn(model, world_size, reduce=False): + # ensure every node has the same running bn stats + for bn_name, bn_buf in unwrap_model(model).named_buffers(recurse=True): + if ('running_mean' in bn_name) or ('running_var' 
in bn_name): + if reduce: + # average bn stats across whole group + torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM) + bn_buf /= float(world_size) + else: + # broadcast bn stats from rank 0 to whole group + torch.distributed.broadcast(bn_buf, 0) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/utils/jit.py b/PyTorch/contrib/cv/classification/convmixer/timm/utils/jit.py new file mode 100644 index 0000000000..185ab7a0d8 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/utils/jit.py @@ -0,0 +1,18 @@ +""" JIT scripting/tracing utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch + + +def set_jit_legacy(): + """ Set JIT executor to legacy w/ support for op fusion + This is hopefully a temporary need in 1.5/1.5.1/1.6 to restore performance due to changes + in the JIT exectutor. These API are not supported so could change. + """ + # + assert hasattr(torch._C, '_jit_set_profiling_executor'), "Old JIT behavior doesn't exist!" + torch._C._jit_set_profiling_executor(False) + torch._C._jit_set_profiling_mode(False) + torch._C._jit_override_can_fuse_on_gpu(True) + #torch._C._jit_set_texpr_fuser_enabled(True) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/utils/log.py b/PyTorch/contrib/cv/classification/convmixer/timm/utils/log.py new file mode 100644 index 0000000000..c99469e088 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/utils/log.py @@ -0,0 +1,28 @@ +""" Logging helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +import logging.handlers + + +class FormatterNoInfo(logging.Formatter): + def __init__(self, fmt='%(levelname)s: %(message)s'): + logging.Formatter.__init__(self, fmt) + + def format(self, record): + if record.levelno == logging.INFO: + return str(record.getMessage()) + return logging.Formatter.format(self, record) + + +def setup_default_logging(default_level=logging.INFO, log_path=''): + console_handler = logging.StreamHandler() + console_handler.setFormatter(FormatterNoInfo()) + logging.root.addHandler(console_handler) + logging.root.setLevel(default_level) + if log_path: + file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3) + file_formatter = logging.Formatter("%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s") + file_handler.setFormatter(file_formatter) + logging.root.addHandler(file_handler) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/utils/metrics.py b/PyTorch/contrib/cv/classification/convmixer/timm/utils/metrics.py new file mode 100644 index 0000000000..9fdbe13ef1 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/utils/metrics.py @@ -0,0 +1,32 @@ +""" Eval metrics and related + +Hacked together by / Copyright 2020 Ross Wightman +""" + + +class AverageMeter: + """Computes and stores the average and current value""" + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + maxk = min(max(topk), output.size()[1]) + batch_size = target.size(0) + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.reshape(1, -1).expand_as(pred)) + return [correct[:min(k, maxk)].reshape(-1).float().sum(0) * 100. 
/ batch_size for k in topk]
diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/utils/misc.py b/PyTorch/contrib/cv/classification/convmixer/timm/utils/misc.py
new file mode 100644
index 0000000000..39c0097c60
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/timm/utils/misc.py
@@ -0,0 +1,18 @@
+""" Misc utils
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import re
+
+
+def natural_key(string_):
+    """See http://www.codinghorror.com/blog/archives/001018.html"""
+    return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
+
+
+def add_bool_arg(parser, name, default=False, help=''):
+    dest_name = name.replace('-', '_')
+    group = parser.add_mutually_exclusive_group(required=False)
+    group.add_argument('--' + name, dest=dest_name, action='store_true', help=help)
+    group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help)
+    parser.set_defaults(**{dest_name: default})
diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/utils/model.py b/PyTorch/contrib/cv/classification/convmixer/timm/utils/model.py
new file mode 100644
index 0000000000..bd46e2f49c
--- /dev/null
+++ b/PyTorch/contrib/cv/classification/convmixer/timm/utils/model.py
@@ -0,0 +1,92 @@
+""" Model / state_dict utils
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+from .model_ema import ModelEma
+import torch
+import fnmatch
+
+def unwrap_model(model):
+    if isinstance(model, ModelEma):
+        return unwrap_model(model.ema)
+    else:
+        return model.module if hasattr(model, 'module') else model
+
+
+def get_state_dict(model, unwrap_fn=unwrap_model):
+    return unwrap_fn(model).state_dict()
+
+
+def avg_sq_ch_mean(model, input, output):
+    "calculate average channel square mean of output activations"
+    return torch.mean(output.mean(axis=[0,2,3])**2).item()
+
+
+def avg_ch_var(model, input, output):
+    "calculate average channel variance of output activations"
+    return torch.mean(output.var(axis=[0,2,3])).item()
+
+
+def avg_ch_var_residual(model, input, output):
+    "calculate average channel variance of output activations"
+    return torch.mean(output.var(axis=[0,2,3])).item()
+
+
+class ActivationStatsHook:
+    """Iterates through each of `model`'s modules and matches modules using unix pattern
+    matching based on `hook_fn_locs` and registers `hook_fn` to the module if there is
+    a match.
+
+    Arguments:
+        model (nn.Module): model from which we will extract the activation stats
+        hook_fn_locs (List[str]): List of `hook_fn` locations based on Unix type string
+            matching with the name of model's modules.
+        hook_fns (List[Callable]): List of hook functions to be registered at every
+            module matched by `hook_fn_locs`.
+
+    Inspiration from https://docs.fast.ai/callback.hook.html.
+
+    Refer to https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 for an example
+    on how to plot Signal Propagation Plots using `ActivationStatsHook`.
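+
+    Example (a minimal, illustrative sketch; the tiny ``nn.Sequential`` below simply
+    stands in for a real model):
+
+        >>> import torch
+        >>> import torch.nn as nn
+        >>> net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Conv2d(8, 8, 3))
+        >>> hook = ActivationStatsHook(net, hook_fn_locs=['0'], hook_fns=[avg_sq_ch_mean])
+        >>> _ = net(torch.randn(2, 3, 32, 32))
+        >>> hook.stats['avg_sq_ch_mean']  # one value appended per forward pass of module '0'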
+ """ + + def __init__(self, model, hook_fn_locs, hook_fns): + self.model = model + self.hook_fn_locs = hook_fn_locs + self.hook_fns = hook_fns + if len(hook_fn_locs) != len(hook_fns): + raise ValueError("Please provide `hook_fns` for each `hook_fn_locs`, \ + their lengths are different.") + self.stats = dict((hook_fn.__name__, []) for hook_fn in hook_fns) + for hook_fn_loc, hook_fn in zip(hook_fn_locs, hook_fns): + self.register_hook(hook_fn_loc, hook_fn) + + def _create_hook(self, hook_fn): + def append_activation_stats(module, input, output): + out = hook_fn(module, input, output) + self.stats[hook_fn.__name__].append(out) + return append_activation_stats + + def register_hook(self, hook_fn_loc, hook_fn): + for name, module in self.model.named_modules(): + if not fnmatch.fnmatch(name, hook_fn_loc): + continue + module.register_forward_hook(self._create_hook(hook_fn)) + + +def extract_spp_stats(model, + hook_fn_locs, + hook_fns, + input_shape=[8, 3, 224, 224]): + """Extract average square channel mean and variance of activations during + forward pass to plot Signal Propogation Plots (SPP). + + Paper: https://arxiv.org/abs/2101.08692 + + Example Usage: https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 + """ + x = torch.normal(0., 1., input_shape) + hook = ActivationStatsHook(model, hook_fn_locs=hook_fn_locs, hook_fns=hook_fns) + _ = model(x) + return hook.stats + \ No newline at end of file diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/utils/model_ema.py b/PyTorch/contrib/cv/classification/convmixer/timm/utils/model_ema.py new file mode 100644 index 0000000000..073d5c5ea1 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/utils/model_ema.py @@ -0,0 +1,126 @@ +""" Exponential Moving Average (EMA) of model updates + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +from collections import OrderedDict +from copy import deepcopy + +import torch +import torch.nn as nn + +_logger = logging.getLogger(__name__) + + +class ModelEma: + """ Model Exponential Moving Average (DEPRECATED) + + Keep a moving average of everything in the model state_dict (parameters and buffers). + This version is deprecated, it does not work with scripted models. Will be removed eventually. + + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + + A smoothed version of the weights is necessary for some training schemes to perform well. + E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use + RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA + smoothing of weights to match results. Pay attention to the decay constant you are using + relative to your update count per epoch. + + To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but + disable validation of the EMA weights. Validation will have to be done manually in a separate + process, or after the training stops converging. + + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. 
+ """ + def __init__(self, model, decay=0.9999, device='', resume=''): + # make a copy of the model for accumulating moving average of weights + self.ema = deepcopy(model) + self.ema.eval() + self.decay = decay + self.device = device # perform ema on different device from model if set + if device: + self.ema.to(device=device) + self.ema_has_module = hasattr(self.ema, 'module') + if resume: + self._load_checkpoint(resume) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def _load_checkpoint(self, checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + assert isinstance(checkpoint, dict) + if 'state_dict_ema' in checkpoint: + new_state_dict = OrderedDict() + for k, v in checkpoint['state_dict_ema'].items(): + # ema model may have been wrapped by DataParallel, and need module prefix + if self.ema_has_module: + name = 'module.' + k if not k.startswith('module') else k + else: + name = k + new_state_dict[name] = v + self.ema.load_state_dict(new_state_dict) + _logger.info("Loaded state_dict_ema") + else: + _logger.warning("Failed to find state_dict_ema, starting from loaded model weights") + + def update(self, model): + # correct a mismatch in state dict keys + needs_module = hasattr(model, 'module') and not self.ema_has_module + with torch.no_grad(): + msd = model.state_dict() + for k, ema_v in self.ema.state_dict().items(): + if needs_module: + k = 'module.' + k + model_v = msd[k].detach() + if self.device: + model_v = model_v.to(device=self.device) + ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v) + + +class ModelEmaV2(nn.Module): + """ Model Exponential Moving Average V2 + + Keep a moving average of everything in the model state_dict (parameters and buffers). + V2 of this module is simpler, it does not match params/buffers based on name but simply + iterates in order. It works with torchscript (JIT of full model). + + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + + A smoothed version of the weights is necessary for some training schemes to perform well. + E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use + RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA + smoothing of weights to match results. Pay attention to the decay constant you are using + relative to your update count per epoch. + + To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but + disable validation of the EMA weights. Validation will have to be done manually in a separate + process, or after the training stops converging. + + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. 
+ """ + def __init__(self, model, decay=0.9999, device=None): + super(ModelEmaV2, self).__init__() + # make a copy of the model for accumulating moving average of weights + self.module = deepcopy(model) + self.module.eval() + self.decay = decay + self.device = device # perform ema on different device from model if set + if self.device is not None: + self.module.to(device=device) + + def _update(self, model, update_fn): + with torch.no_grad(): + for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): + if self.device is not None: + model_v = model_v.to(device=self.device) + ema_v.copy_(update_fn(ema_v, model_v)) + + def update(self, model): + self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m) + + def set(self, model): + self._update(model, update_fn=lambda e, m: m) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/utils/random.py b/PyTorch/contrib/cv/classification/convmixer/timm/utils/random.py new file mode 100644 index 0000000000..a9679983e9 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/utils/random.py @@ -0,0 +1,9 @@ +import random +import numpy as np +import torch + + +def random_seed(seed=42, rank=0): + torch.manual_seed(seed + rank) + np.random.seed(seed + rank) + random.seed(seed + rank) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/utils/summary.py b/PyTorch/contrib/cv/classification/convmixer/timm/utils/summary.py new file mode 100644 index 0000000000..9f5af9a085 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/utils/summary.py @@ -0,0 +1,39 @@ +""" Summary utilities + +Hacked together by / Copyright 2020 Ross Wightman +""" +import csv +import os +from collections import OrderedDict +try: + import wandb +except ImportError: + pass + +def get_outdir(path, *paths, inc=False): + outdir = os.path.join(path, *paths) + if not os.path.exists(outdir): + os.makedirs(outdir) + elif inc: + count = 1 + outdir_inc = outdir + '-' + str(count) + while os.path.exists(outdir_inc): + count = count + 1 + outdir_inc = outdir + '-' + str(count) + assert count < 100 + outdir = outdir_inc + os.makedirs(outdir) + return outdir + + +def update_summary(epoch, train_metrics, eval_metrics, filename, write_header=False, log_wandb=False): + rowd = OrderedDict(epoch=epoch) + rowd.update([('train_' + k, v) for k, v in train_metrics.items()]) + rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()]) + if log_wandb: + wandb.log(rowd) + with open(filename, mode='a') as cf: + dw = csv.DictWriter(cf, fieldnames=rowd.keys()) + if write_header: # first iteration (epoch == 1 can't be used) + dw.writeheader() + dw.writerow(rowd) diff --git a/PyTorch/contrib/cv/classification/convmixer/timm/version.py b/PyTorch/contrib/cv/classification/convmixer/timm/version.py new file mode 100644 index 0000000000..2b8877c505 --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/timm/version.py @@ -0,0 +1 @@ +__version__ = '0.5.0' diff --git a/PyTorch/contrib/cv/classification/convmixer/train_npu.py b/PyTorch/contrib/cv/classification/convmixer/train_npu.py new file mode 100644 index 0000000000..059c3b09dd --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/train_npu.py @@ -0,0 +1,959 @@ +#!/usr/bin/env python3 +""" ImageNet Training Script + +This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet +training results with some of the latest networks and training techniques. 
It favours canonical PyTorch +and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed +and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit. + +This script was started from an early version of the PyTorch ImageNet example +(https://github.com/pytorch/examples/tree/master/imagenet) + +NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples +(https://github.com/NVIDIA/apex/tree/master/examples/imagenet) + +Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) +""" +import argparse +import time +import yaml +import os +import logging +from collections import OrderedDict +from contextlib import suppress +from datetime import datetime + +import torch +import torch.nn as nn +import torchvision.utils +from torch.nn.parallel import DistributedDataParallel as NativeDDP + +from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset +from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint, \ + convert_splitbn_model, model_parameters +from timm.utils import * +from timm.loss import * +from timm.optim import create_optimizer_v2, optimizer_kwargs +from timm.scheduler import create_scheduler +from timm.utils import ApexScaler, NativeScaler + +try: + from apex import amp + from apex.parallel import DistributedDataParallel as ApexDDP + from apex.parallel import convert_syncbn_model + + has_apex = True +except ImportError: + has_apex = False + +has_native_amp = False +try: + if getattr(torch.npu.amp, 'autocast') is not None: + has_native_amp = True +except AttributeError: + pass + +try: + import wandb + + has_wandb = True +except ImportError: + has_wandb = False + +torch.backends.cudnn.benchmark = True +_logger = logging.getLogger('train') + +# The first arg parser parses out only the --config argument, this argument is used to +# load a yaml file containing key-values that override the defaults for the main parser below +config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False) +parser.add_argument('-c', '--config', default='', type=str, metavar='FILE', + help='YAML config file specifying default arguments') + +parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') + +# Dataset / Model parameters +parser.add_argument('data_dir', metavar='DIR', + help='path to dataset') +parser.add_argument('--dataset', '-d', metavar='NAME', default='', + help='dataset type (default: ImageFolder/ImageTar if empty)') +parser.add_argument('--train-split', metavar='NAME', default='train', + help='dataset train split (default: train)') +parser.add_argument('--val-split', metavar='NAME', default='validation', + help='dataset validation split (default: validation)') +parser.add_argument('--model', default='resnet50', type=str, metavar='MODEL', + help='Name of model to train (default: "resnet50"') +parser.add_argument('--pretrained', action='store_true', default=False, + help='Start with pretrained version of specified network (if avail)') +parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH', + help='Initialize model from this checkpoint (default: none)') +parser.add_argument('--resume', default='', type=str, metavar='PATH', + help='Resume full model and optimizer state from checkpoint (default: none)') +parser.add_argument('--no-resume-opt', action='store_true', default=False, + help='prevent resume of optimizer state when resuming 
model') +parser.add_argument('--num-classes', type=int, default=None, metavar='N', + help='number of label classes (Model default if None)') +parser.add_argument('--gp', default=None, type=str, metavar='POOL', + help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') +parser.add_argument('--img-size', type=int, default=None, metavar='N', + help='Image patch size (default: None => model default)') +parser.add_argument('--input-size', default=None, nargs=3, type=int, + metavar='N N N', + help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty') +parser.add_argument('--crop-pct', default=None, type=float, + metavar='N', help='Input image center crop percent (for validation only)') +parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', + help='Override mean pixel value of dataset') +parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', + help='Override std deviation of of dataset') +parser.add_argument('--interpolation', default='', type=str, metavar='NAME', + help='Image resize interpolation type (overrides model)') +parser.add_argument('-b', '--batch-size', type=int, default=128, metavar='N', + help='input batch size for training (default: 128)') +parser.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N', + help='validation batch size override (default: None)') + +# Optimizer parameters +parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', + help='Optimizer (default: "sgd"') +parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', + help='Optimizer Epsilon (default: None, use opt default)') +parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', + help='Optimizer Betas (default: None, use opt default)') +parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='Optimizer momentum (default: 0.9)') +parser.add_argument('--weight-decay', type=float, default=2e-5, + help='weight decay (default: 2e-5)') +parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', + help='Clip gradient norm (default: None, no clipping)') +parser.add_argument('--clip-mode', type=str, default='norm', + help='Gradient clipping mode. 
One of ("norm", "value", "agc")') + +# Learning rate schedule parameters +parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER', + help='LR scheduler (default: "step"') +parser.add_argument('--lr', type=float, default=0.05, metavar='LR', + help='learning rate (default: 0.05)') +parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', + help='learning rate noise on/off epoch percentages') +parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', + help='learning rate noise limit percent (default: 0.67)') +parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', + help='learning rate noise std-dev (default: 1.0)') +parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT', + help='learning rate cycle len multiplier (default: 1.0)') +parser.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT', + help='amount to decay each learning rate cycle (default: 0.5)') +parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N', + help='learning rate cycle limit, cycles enabled if > 1') +parser.add_argument('--lr-k-decay', type=float, default=1.0, + help='learning rate k-decay for cosine/poly (default: 1.0)') +parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR', + help='warmup learning rate (default: 0.0001)') +parser.add_argument('--min-lr', type=float, default=1e-6, metavar='LR', + help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') +parser.add_argument('--epochs', type=int, default=300, metavar='N', + help='number of epochs to train (default: 300)') +parser.add_argument('--epoch-repeats', type=float, default=0., metavar='N', + help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).') +parser.add_argument('--start-epoch', default=None, type=int, metavar='N', + help='manual epoch number (useful on restarts)') +parser.add_argument('--decay-epochs', type=float, default=100, metavar='N', + help='epoch interval to decay LR') +parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N', + help='epochs to warmup LR, if scheduler supports') +parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', + help='epochs to cooldown LR at min_lr, after cyclic schedule ends') +parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', + help='patience epochs for Plateau LR scheduler (default: 10') +parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', + help='LR decay rate (default: 0.1)') + +# Augmentation & regularization parameters +parser.add_argument('--no-aug', action='store_true', default=False, + help='Disable all training augmentation, override other train aug args') +parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT', + help='Random resize scale (default: 0.08 1.0)') +parser.add_argument('--ratio', type=float, nargs='+', default=[3. / 4., 4. / 3.], metavar='RATIO', + help='Random resize aspect ratio (default: 0.75 1.33)') +parser.add_argument('--hflip', type=float, default=0.5, + help='Horizontal flip training aug probability') +parser.add_argument('--vflip', type=float, default=0., + help='Vertical flip training aug probability') +parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', + help='Color jitter factor (default: 0.4)') +parser.add_argument('--aa', type=str, default=None, metavar='NAME', + help='Use AutoAugment policy. 
"v0" or "original". (default: None)'), +parser.add_argument('--aug-repeats', type=int, default=0, + help='Number of augmentation repetitions (distributed training only) (default: 0)') +parser.add_argument('--aug-splits', type=int, default=0, + help='Number of augmentation splits (default: 0, valid: 0 or >=2)') +parser.add_argument('--jsd-loss', action='store_true', default=False, + help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.') +parser.add_argument('--bce-loss', action='store_true', default=False, + help='Enable BCE loss w/ Mixup/CutMix use.') +parser.add_argument('--bce-target-thresh', type=float, default=None, + help='Threshold for binarizing softened BCE targets (default: None, disabled)') +parser.add_argument('--reprob', type=float, default=0., metavar='PCT', + help='Random erase prob (default: 0.)') +parser.add_argument('--remode', type=str, default='pixel', + help='Random erase mode (default: "pixel")') +parser.add_argument('--recount', type=int, default=1, + help='Random erase count (default: 1)') +parser.add_argument('--resplit', action='store_true', default=False, + help='Do not random erase first (clean) augmentation split') +parser.add_argument('--mixup', type=float, default=0.0, + help='mixup alpha, mixup enabled if > 0. (default: 0.)') +parser.add_argument('--cutmix', type=float, default=0.0, + help='cutmix alpha, cutmix enabled if > 0. (default: 0.)') +parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None, + help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') +parser.add_argument('--mixup-prob', type=float, default=1.0, + help='Probability of performing mixup or cutmix when either/both is enabled') +parser.add_argument('--mixup-switch-prob', type=float, default=0.5, + help='Probability of switching to cutmix when both mixup and cutmix enabled') +parser.add_argument('--mixup-mode', type=str, default='batch', + help='How to apply mixup/cutmix params. 
Per "batch", "pair", or "elem"') +parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N', + help='Turn off mixup after this epoch, disabled if 0 (default: 0)') +parser.add_argument('--smoothing', type=float, default=0.1, + help='Label smoothing (default: 0.1)') +parser.add_argument('--train-interpolation', type=str, default='random', + help='Training interpolation (random, bilinear, bicubic default: "random")') +parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', + help='Dropout rate (default: 0.)') +parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT', + help='Drop connect rate, DEPRECATED, use drop-path (default: None)') +parser.add_argument('--drop-path', type=float, default=None, metavar='PCT', + help='Drop path rate (default: None)') +parser.add_argument('--drop-block', type=float, default=None, metavar='PCT', + help='Drop block rate (default: None)') + +# Batch norm parameters (only works with gen_efficientnet based models currently) +parser.add_argument('--bn-tf', action='store_true', default=False, + help='Use Tensorflow BatchNorm defaults for models that support it (default: False)') +parser.add_argument('--bn-momentum', type=float, default=None, + help='BatchNorm momentum override (if not None)') +parser.add_argument('--bn-eps', type=float, default=None, + help='BatchNorm epsilon override (if not None)') +parser.add_argument('--sync-bn', action='store_true', + help='Enable NVIDIA Apex or Torch synchronized BatchNorm.') +parser.add_argument('--dist-bn', type=str, default='reduce', + help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")') +parser.add_argument('--split-bn', action='store_true', + help='Enable separate BN layers per augmentation split.') + +# Model Exponential Moving Average +parser.add_argument('--model-ema', action='store_true', default=False, + help='Enable tracking moving average of model weights') +parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, + help='Force ema to be tracked on CPU, rank=0 node only. 
Disables EMA validation.') +parser.add_argument('--model-ema-decay', type=float, default=0.9998, + help='decay factor for model weights moving average (default: 0.9998)') + +# Misc +parser.add_argument('--seed', type=int, default=42, metavar='S', + help='random seed (default: 42)') +parser.add_argument('--worker-seeding', type=str, default='all', + help='worker seed mode (default: all)') +parser.add_argument('--log-interval', type=int, default=50, metavar='N', + help='how many batches to wait before logging training status') +parser.add_argument('--recovery-interval', type=int, default=0, metavar='N', + help='how many batches to wait before writing recovery checkpoint') +parser.add_argument('--checkpoint-hist', type=int, default=10, metavar='N', + help='number of checkpoints to keep (default: 10)') +parser.add_argument('-j', '--workers', type=int, default=4, metavar='N', + help='how many training processes to use (default: 4)') +parser.add_argument('--save-images', action='store_true', default=False, + help='save images of input bathes every log interval for debugging') +parser.add_argument('--amp', action='store_true', default=False, + help='use NVIDIA Apex AMP or Native AMP for mixed precision training') +parser.add_argument('--apex-amp', action='store_true', default=False, + help='Use NVIDIA Apex AMP mixed precision') +parser.add_argument('--native-amp', action='store_true', default=False, + help='Use Native Torch AMP mixed precision') +parser.add_argument('--no-ddp-bb', action='store_true', default=False, + help='Force broadcast buffers for native DDP to off.') +parser.add_argument('--channels-last', action='store_true', default=False, + help='Use channels_last memory layout') +parser.add_argument('--pin-mem', action='store_true', default=False, + help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') +parser.add_argument('--no-prefetcher', action='store_true', default=False, + help='disable fast prefetcher') +parser.add_argument('--output', default='', type=str, metavar='PATH', + help='path to output folder (default: none, current dir)') +parser.add_argument('--experiment', default='', type=str, metavar='NAME', + help='name of train experiment, name of sub-folder for output') +parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC', + help='Best metric (default: "top1"') +parser.add_argument('--tta', type=int, default=0, metavar='N', + help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)') +parser.add_argument("--local_rank", default=0, type=int) +parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False, + help='use the multi-epochs-loader to save time at the beginning of every epoch') +parser.add_argument('--torchscript', dest='torchscript', action='store_true', + help='convert model torchscript for inference') +parser.add_argument('--log-wandb', action='store_true', default=False, + help='log training and validation metrics to wandb') +# ============================================================================= +parser.add_argument('--prof', default=False, action='store_true') +parser.add_argument('--device', default='npu', type=str, help='npu or gpu') +parser.add_argument('--performance_1p', default=False, action='store_true') + + +def _parse_args(): + # Do we have a config file to parse? 
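+    # Precedence: argparse defaults < values from the YAML file passed via -c/--config
+    # < arguments given explicitly on the command line. Illustrative example
+    # ('convmixer.yml' is only a placeholder file name):
+    #
+    #   python train_npu.py /path/to/imagenet -c convmixer.yml --batch-size 64
+    #
+    # where convmixer.yml could contain, for instance:
+    #   model: convmixer_1536_20
+    #   opt: adamw
+    #   epochs: 150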
+ args_config, remaining = config_parser.parse_known_args() + if args_config.config: + with open(args_config.config, 'r') as f: + cfg = yaml.safe_load(f) + parser.set_defaults(**cfg) + + # The main arg parser parses the rest of the args, the usual + # defaults will have been overridden if config file specified. + args = parser.parse_args(remaining) + + # Cache the args as a text string to save them in the output dir later + args_text = yaml.safe_dump(args.__dict__, default_flow_style=False) + return args, args_text + + +# ======================================================================================= +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def init_distributed_mode(args): + import torch.multiprocessing as mp + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.npu.device_count() + else: + args.distributed = False + return + + args.distributed = True + + torch.npu.set_device(args.gpu) + + print('| distributed init (rank {}): {} {}'.format( + args.rank, args.device, args.gpu), flush=True) + print("world size: ", args.world_size) + torch.distributed.init_process_group(backend='hccl', world_size=args.world_size, rank=args.rank) + torch.distributed.barrier() + setup_for_distributed(args.rank == 0) + + +# ======================================================================================= + +def main(): + setup_default_logging() + args, args_text = _parse_args() + + args.train_split = os.path.join(args.data_dir, "train") + args.val_split = os.path.join(args.data_dir, "val") + + if args.log_wandb: + if has_wandb: + wandb.init(project=args.experiment, config=args) + else: + _logger.warning("You've requested to log metrics to wandb but package not found. " + "Metrics not being logged to wandb, try `pip install wandb`") + + args.prefetcher = not args.no_prefetcher + args.distributed = False + if 'WORLD_SIZE' in os.environ: + args.distributed = int(os.environ['WORLD_SIZE']) > 1 + args.world_size = 1 + args.rank = 0 # global rank + if args.local_rank == 0: + print("==========================distributed===============================") + if args.distributed: + init_distributed_mode(args) + # torch.npu.set_device(args.rank) + # torch.distributed.init_process_group(backend='hccl', world_size=args.world_size, rank=args.rank) + # args.world_size = torch.distributed.get_world_size() + # args.rank = torch.distributed.get_rank() + # _logger.info('Training in distributed mode with multiple processes, 1 NPU per process. Process %d, total %d.' 
+ # % (args.rank, args.world_size)) + else: + _logger.info('Training with a single process on 1 NPU.') + assert args.rank >= 0 + if args.local_rank == 0: + print("==========================distributed finish===============================") + + # resolve AMP arguments based on PyTorch / Apex availability + use_amp = None + if args.amp: + if has_apex: + args.apex_amp = True + elif has_native_amp: + args.native_amp = True + if args.apex_amp and has_apex: + use_amp = 'apex' + elif args.native_amp and has_native_amp: + use_amp = 'native' + elif args.apex_amp or args.native_amp: + _logger.warning("Neither APEX or native Torch AMP is available, using float32. " + "Install NVIDA apex or upgrade to PyTorch 1.6") + + random_seed(args.seed, args.rank) + + model = create_model( + args.model, + pretrained=args.pretrained, + num_classes=args.num_classes, + drop_rate=args.drop, + drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path + drop_path_rate=args.drop_path, + drop_block_rate=args.drop_block, + global_pool=args.gp, + bn_tf=args.bn_tf, + bn_momentum=args.bn_momentum, + bn_eps=args.bn_eps, + scriptable=args.torchscript, + checkpoint_path=args.initial_checkpoint) + if args.num_classes is None: + assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.' + args.num_classes = model.num_classes # FIXME handle model default vs config num_classes more elegantly + + if args.local_rank == 0: + _logger.info( + f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}') + + data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0) + + # setup augmentation batch splits for contrastive loss or split bn + num_aug_splits = 0 + if args.aug_splits > 0: + assert args.aug_splits > 1, 'A split of 1 makes no sense' + num_aug_splits = args.aug_splits + + # enable split bn (separate bn stats per batch-portion) + if args.split_bn: + assert num_aug_splits > 1 or args.resplit + model = convert_splitbn_model(model, max(num_aug_splits, 2)) + + # move model to NPU, enable channels last layout if set + model.npu() + if args.channels_last: + model = model.to(memory_format=torch.channels_last) + + # setup synchronized BatchNorm for distributed training + if args.distributed and args.sync_bn: + assert not args.split_bn + if has_apex and use_amp == 'apex': + # Apex SyncBN preferred unless native amp is activated + model = convert_syncbn_model(model) + else: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + if args.local_rank == 0: + _logger.info( + 'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using ' + 'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.') + + if args.torchscript: + assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model' + assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model' + model = torch.jit.script(model) + + optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args)) + + # setup automatic mixed-precision (AMP) loss scaling and op casting + amp_autocast = suppress # do nothing + loss_scaler = None + if use_amp == 'apex': + model, optimizer = amp.initialize(model, optimizer, opt_level='O2', loss_scale=128.0) + # model, optimizer = amp.initialize(model, optimizer, opt_level='O1', loss_scale=128.0) + loss_scaler = ApexScaler() + if args.local_rank == 0: + _logger.info('Using NVIDIA APEX AMP. 
Training in mixed precision.') + elif use_amp == 'native': + amp_autocast = torch.npu.amp.autocast + loss_scaler = NativeScaler() + if args.local_rank == 0: + _logger.info('Using native Torch AMP. Training in mixed precision.') + else: + if args.local_rank == 0: + _logger.info('AMP not enabled. Training in float32.') + + # optionally resume from a checkpoint + resume_epoch = None + if args.resume: + resume_epoch = resume_checkpoint( + model, args.resume, + optimizer=None if args.no_resume_opt else optimizer, + loss_scaler=None if args.no_resume_opt else loss_scaler, + log_info=args.local_rank == 0) + + # setup exponential moving average of model weights, SWA could be used here too + model_ema = None + if args.model_ema: + # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper + model_ema = ModelEmaV2( + model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else None) + if args.resume: + load_checkpoint(model_ema.module, args.resume, use_ema=True) + + # setup distributed training + if args.distributed: + if has_apex and use_amp == 'apex': + # Apex DDP preferred unless native amp is activated + if args.local_rank == 0: + _logger.info("Using NVIDIA APEX DistributedDataParallel.") + model = NativeDDP(model, device_ids=[args.local_rank], broadcast_buffers=False) + else: + if args.local_rank == 0: + _logger.info("Using native Torch DistributedDataParallel.") + model = NativeDDP(model, device_ids=[args.local_rank], broadcast_buffers=not args.no_ddp_bb) + # NOTE: EMA model does not need to be wrapped by DDP + + # setup learning rate schedule and starting epoch + lr_scheduler, num_epochs = create_scheduler(args, optimizer) + start_epoch = 0 + if args.start_epoch is not None: + # a specified start_epoch will always override the resume epoch + start_epoch = args.start_epoch + elif resume_epoch is not None: + start_epoch = resume_epoch + if lr_scheduler is not None and start_epoch > 0: + lr_scheduler.step(start_epoch) + + if args.local_rank == 0: + _logger.info('Scheduled epochs: {}'.format(num_epochs)) + + # create the train and eval datasets + dataset_train = create_dataset( + args.dataset, + root=args.data_dir, split=args.train_split, is_training=True, + batch_size=args.batch_size, repeats=args.epoch_repeats) + dataset_eval = create_dataset( + args.dataset, root=args.data_dir, split=args.val_split, is_training=False, batch_size=args.batch_size) + + # setup mixup / cutmix + collate_fn = None + mixup_fn = None + mixup_active = args.mixup > 0 or args.cutmix > 0. 
or args.cutmix_minmax is not None + if mixup_active: + mixup_args = dict( + mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, + prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, + label_smoothing=args.smoothing, num_classes=args.num_classes) + if args.prefetcher: + assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup) + collate_fn = FastCollateMixup(**mixup_args) + else: + mixup_fn = Mixup(**mixup_args) + + # wrap dataset in AugMix helper + if num_aug_splits > 1: + dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits) + + # create data loaders w/ augmentation pipeiine + train_interpolation = args.train_interpolation + if args.no_aug or not train_interpolation: + train_interpolation = data_config['interpolation'] + loader_train = create_loader( + dataset_train, + input_size=data_config['input_size'], + batch_size=args.batch_size, + is_training=True, + use_prefetcher=args.prefetcher, + no_aug=args.no_aug, + re_prob=args.reprob, + re_mode=args.remode, + re_count=args.recount, + re_split=args.resplit, + scale=args.scale, + ratio=args.ratio, + hflip=args.hflip, + vflip=args.vflip, + color_jitter=args.color_jitter, + auto_augment=args.aa, + num_aug_repeats=args.aug_repeats, + num_aug_splits=num_aug_splits, + interpolation=train_interpolation, + mean=data_config['mean'], + std=data_config['std'], + num_workers=args.workers, + distributed=args.distributed, + collate_fn=collate_fn, + pin_memory=args.pin_mem, + use_multi_epochs_loader=args.use_multi_epochs_loader, + worker_seeding=args.worker_seeding, + ) + + loader_eval = create_loader( + dataset_eval, + input_size=data_config['input_size'], + batch_size=args.validation_batch_size or args.batch_size, + is_training=False, + use_prefetcher=args.prefetcher, + interpolation=data_config['interpolation'], + mean=data_config['mean'], + std=data_config['std'], + num_workers=args.workers, + distributed=args.distributed, + crop_pct=data_config['crop_pct'], + pin_memory=args.pin_mem, + ) + + # setup loss function + if args.jsd_loss: + assert num_aug_splits > 1 # JSD only valid with aug splits set + train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing) + elif mixup_active: + # smoothing is handled with mixup target transform which outputs sparse, soft targets + if args.bce_loss: + train_loss_fn = BinaryCrossEntropy(target_threshold=args.bce_target_thresh) + else: + train_loss_fn = SoftTargetCrossEntropy() + elif args.smoothing: + if args.bce_loss: + train_loss_fn = BinaryCrossEntropy(smoothing=args.smoothing, target_threshold=args.bce_target_thresh) + else: + train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing) + else: + train_loss_fn = nn.CrossEntropyLoss() + train_loss_fn = train_loss_fn.npu() + validate_loss_fn = nn.CrossEntropyLoss().npu() + + # setup checkpoint saver and eval metric tracking + eval_metric = args.eval_metric + best_metric = None + best_epoch = None + saver = None + output_dir = None + if args.rank == 0: + if args.experiment: + exp_name = args.experiment + else: + exp_name = '-'.join([ + datetime.now().strftime("%Y%m%d-%H%M%S"), + safe_model_name(args.model), + str(data_config['input_size'][-1]) + ]) + output_dir = get_outdir(args.output if args.output else './output/train', exp_name) + decreasing = True if eval_metric == 'loss' else False + saver = CheckpointSaver( + model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler, + 
checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing, max_history=args.checkpoint_hist) + with open(os.path.join(output_dir, 'args.yaml'), 'w') as f: + f.write(args_text) + + start_time = time.time() + if args.prof: + profiling(model, loader_train, optimizer, train_loss_fn, args, + lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir, + amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn) + print("prof done...") + else: + try: + for epoch in range(start_epoch, num_epochs): + if args.distributed and hasattr(loader_train.sampler, 'set_epoch'): + loader_train.sampler.set_epoch(epoch) + + train_metrics = train_one_epoch( + epoch, model, loader_train, optimizer, train_loss_fn, args, + lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir, + amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn) + + if args.distributed and args.dist_bn in ('broadcast', 'reduce'): + if args.local_rank == 0: + _logger.info("Distributing BatchNorm running means and vars") + distribute_bn(model, args.world_size, args.dist_bn == 'reduce') + + eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast) + + if model_ema is not None and not args.model_ema_force_cpu: + if args.distributed and args.dist_bn in ('broadcast', 'reduce'): + distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce') + ema_eval_metrics = validate( + model_ema.module, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast, + log_suffix=' (EMA)') + eval_metrics = ema_eval_metrics + + if lr_scheduler is not None: + # step LR for next epoch + lr_scheduler.step(epoch + 1, eval_metrics[eval_metric]) + + if output_dir is not None: + update_summary( + epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'), + write_header=best_metric is None, log_wandb=args.log_wandb and has_wandb) + + if saver is not None: + # save proper checkpoint with eval metric + save_metric = eval_metrics[eval_metric] + best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric) + + except KeyboardInterrupt: + pass + if best_metric is not None: + _logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch)) + + total_time = time.time() - start_time + print('Training time: ', total_time) + + +def train_one_epoch( + epoch, model, loader, optimizer, loss_fn, args, + lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress, + loss_scaler=None, model_ema=None, mixup_fn=None): + if args.mixup_off_epoch and epoch >= args.mixup_off_epoch: + if args.prefetcher and loader.mixup_enabled: + loader.mixup_enabled = False + elif mixup_fn is not None: + mixup_fn.mixup_enabled = False + + second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order + batch_time_m = AverageMeter() + data_time_m = AverageMeter() + losses_m = AverageMeter() + + model.train() + + end = time.time() + last_idx = len(loader) - 1 + num_updates = epoch * len(loader) + for batch_idx, (input, target) in enumerate(loader): + # ================================================== + if args.performance_1p and batch_idx > 5000: + break + last_batch = batch_idx == last_idx + data_time_m.update(time.time() - end) + + if lr_scheduler is not None: + lr_scheduler.step_frac(epoch + (batch_idx + 1) / len(loader)) + + if not args.prefetcher: + input, target = input.npu(), target.npu() + if mixup_fn is not None: + input, target = mixup_fn(input, target) + if args.channels_last: + input = 
input.contiguous(memory_format=torch.channels_last) + + with amp_autocast(): + output = model(input) + loss = loss_fn(output, target) + + if not args.distributed: + losses_m.update(loss.item(), input.size(0)) + + optimizer.zero_grad() + if loss_scaler is not None: + loss_scaler( + loss, optimizer, + clip_grad=args.clip_grad, clip_mode=args.clip_mode, + parameters=model_parameters(model, exclude_head='agc' in args.clip_mode), + create_graph=second_order) + else: + loss.backward(create_graph=second_order) + if args.clip_grad is not None: + dispatch_clip_grad( + model_parameters(model, exclude_head='agc' in args.clip_mode), + value=args.clip_grad, mode=args.clip_mode) + optimizer.step() + + if model_ema is not None: + model_ema.update(model) + + torch.npu.synchronize() + num_updates += 1 + batch_time_m.update(time.time() - end) + if last_batch or batch_idx % args.log_interval == 0: + lrl = [param_group['lr'] for param_group in optimizer.param_groups] + lr = sum(lrl) / len(lrl) + + if args.distributed: + reduced_loss = reduce_tensor(loss.data, args.world_size) + losses_m.update(reduced_loss.item(), input.size(0)) + + if args.local_rank == 0: + _logger.info( + 'Train: {} [{:>4d}/{} ({:>3.0f}%)] ' + 'Loss: {loss.val:#.4g} ({loss.avg:#.3g}) ' + 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ' + '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) ' + 'LR: {lr:.3e} ' + 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format( + epoch, + batch_idx, len(loader), + 100. * batch_idx / last_idx, + loss=losses_m, + batch_time=batch_time_m, + rate=input.size(0) * args.world_size / batch_time_m.val, + rate_avg=input.size(0) * args.world_size / batch_time_m.avg, + lr=lr, + data_time=data_time_m)) + + if args.save_images and output_dir: + torchvision.utils.save_image( + input, + os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx), + padding=0, + normalize=True) + + if saver is not None and args.recovery_interval and ( + last_batch or (batch_idx + 1) % args.recovery_interval == 0): + saver.save_recovery(epoch, batch_idx=batch_idx) + + if lr_scheduler is not None: + lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg) + + end = time.time() + # end for + + if hasattr(optimizer, 'sync_lookahead'): + optimizer.sync_lookahead() + + return OrderedDict([('loss', losses_m.avg)]) + + +def profiling(model, loader, optimizer, loss_fn, args, + lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress, + loss_scaler=None, model_ema=None, mixup_fn=None): + def update(input, target, model, optimizer, loss_fn, args, second_order, + amp_autocast=suppress, loss_scaler=None, mixup_fn=None): + + if not args.prefetcher: + input, target = input.npu(), target.npu() + if mixup_fn is not None: + input, target = mixup_fn(input, target) + if args.channels_last: + input = input.contiguous(memory_format=torch.channels_last) + + with amp_autocast(): + output = model(input) + loss = loss_fn(output, target) + optimizer.zero_grad() + if loss_scaler is not None: + loss_scaler( + loss, optimizer, + clip_grad=args.clip_grad, clip_mode=args.clip_mode, + parameters=model_parameters(model, exclude_head='agc' in args.clip_mode), + create_graph=second_order) + else: + loss.backward(create_graph=second_order) + if args.clip_grad is not None: + dispatch_clip_grad( + model_parameters(model, exclude_head='agc' in args.clip_mode), + value=args.clip_grad, mode=args.clip_mode) + optimizer.step() + + # ================================================================================= + second_order = hasattr(optimizer, 
'is_second_order') and optimizer.is_second_order + + model.train() + last_idx = len(loader) - 1 + for batch_idx, (input, target) in enumerate(loader): + last_batch = batch_idx == last_idx + + if not args.prefetcher: + input, target = input.npu(), target.npu() + if mixup_fn is not None: + input, target = mixup_fn(input, target) + if args.channels_last: + input = input.contiguous(memory_format=torch.channels_last) + + if batch_idx < 200: + update(input, target, model, optimizer, loss_fn, args, second_order, amp_autocast, loss_scaler, mixup_fn) + else: + if args.device == 'npu': + with torch.autograd.profiler.profile(use_npu=True) as prof: + update(input, target, model, optimizer, loss_fn, args, second_order, amp_autocast, loss_scaler, + mixup_fn) + else: + with torch.autograd.profiler.profile(use_cuda=True) as prof: + update(input, target, model, optimizer, loss_fn, args, second_order, amp_autocast, loss_scaler, + mixup_fn) + prof.export_chrome_trace("output.prof") + break + + if last_batch or batch_idx % args.log_interval == 0: + lrl = [param_group['lr'] for param_group in optimizer.param_groups] + lr = sum(lrl) / len(lrl) + + # end for + + +def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''): + batch_time_m = AverageMeter() + losses_m = AverageMeter() + top1_m = AverageMeter() + top5_m = AverageMeter() + + model.eval() + + end = time.time() + last_idx = len(loader) - 1 + with torch.no_grad(): + for batch_idx, (input, target) in enumerate(loader): + last_batch = batch_idx == last_idx + if not args.prefetcher: + input = input.npu() + target = target.npu() + if args.channels_last: + input = input.contiguous(memory_format=torch.channels_last) + + with amp_autocast(): + output = model(input) + if isinstance(output, (tuple, list)): + output = output[0] + + # augmentation reduction + reduce_factor = args.tta + if reduce_factor > 1: + output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2) + target = target[0:target.size(0):reduce_factor] + + loss = loss_fn(output, target) + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + + if args.distributed: + reduced_loss = reduce_tensor(loss.data, args.world_size) + acc1 = reduce_tensor(acc1, args.world_size) + acc5 = reduce_tensor(acc5, args.world_size) + else: + reduced_loss = loss.data + + torch.npu.synchronize() + + losses_m.update(reduced_loss.item(), input.size(0)) + top1_m.update(acc1.item(), output.size(0)) + top5_m.update(acc5.item(), output.size(0)) + + batch_time_m.update(time.time() - end) + end = time.time() + if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0): + log_name = 'Test' + log_suffix + _logger.info( + '{0}: [{1:>4d}/{2}] ' + 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) ' + 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) ' + 'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' + 'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format( + log_name, batch_idx, last_idx, batch_time=batch_time_m, + loss=losses_m, top1=top1_m, top5=top5_m)) + + metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)]) + + return metrics + + +if __name__ == '__main__': + main() diff --git a/PyTorch/contrib/cv/classification/convmixer/validate_npu.py b/PyTorch/contrib/cv/classification/convmixer/validate_npu.py new file mode 100644 index 0000000000..f820fd6d9a --- /dev/null +++ b/PyTorch/contrib/cv/classification/convmixer/validate_npu.py @@ -0,0 +1,352 @@ +#!/usr/bin/env python3 +""" ImageNet Validation Script + +This is intended to be a lean and easily modifiable 
+models or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes
+canonical PyTorch, standard Python style, and good performance. Repurpose as you see fit.
+
+Hacked together by Ross Wightman (https://github.com/rwightman)
+"""
+import argparse
+import os
+import csv
+import glob
+import time
+import logging
+import torch
+import torch.nn as nn
+import torch.nn.parallel
+from collections import OrderedDict
+from contextlib import suppress
+
+from timm.models import create_model, apply_test_time_pool, load_checkpoint, is_model, list_models
+from timm.data import create_dataset, create_loader, resolve_data_config, RealLabelsImagenet
+from timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_legacy
+
+has_apex = False
+try:
+    from apex import amp
+    has_apex = True
+except ImportError:
+    pass
+
+has_native_amp = False
+try:
+    if getattr(torch.npu.amp, 'autocast') is not None:
+        has_native_amp = True
+except AttributeError:
+    pass
+
+torch.backends.cudnn.benchmark = True
+_logger = logging.getLogger('validate')
+
+
+parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
+parser.add_argument('data', metavar='DIR',
+                    help='path to dataset')
+parser.add_argument('--dataset', '-d', metavar='NAME', default='',
+                    help='dataset type (default: ImageFolder/ImageTar if empty)')
+parser.add_argument('--split', metavar='NAME', default='validation',
+                    help='dataset split (default: validation)')
+parser.add_argument('--model', '-m', metavar='NAME', default='dpn92',
+                    help='model architecture (default: dpn92)')
+parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
+                    help='number of data loading workers (default: 4)')
+parser.add_argument('-b', '--batch-size', default=256, type=int,
+                    metavar='N', help='mini-batch size (default: 256)')
+parser.add_argument('--img-size', default=None, type=int,
+                    metavar='N', help='Input image dimension, uses model default if empty')
+parser.add_argument('--input-size', default=None, nargs=3, type=int,
+                    metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
+parser.add_argument('--crop-pct', default=None, type=float,
+                    metavar='N', help='Input image center crop pct')
+parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
+                    help='Override mean pixel value of dataset')
+parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
+                    help='Override std deviation of dataset')
+parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
+                    help='Image resize interpolation type (overrides model)')
+parser.add_argument('--num-classes', type=int, default=None,
+                    help='Number of classes in dataset')
+parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
+                    help='path to class to idx mapping file (default: "")')
+parser.add_argument('--gp', default=None, type=str, metavar='POOL',
+                    help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
+parser.add_argument('--log-freq', default=10, type=int,
+                    metavar='N', help='batch logging frequency (default: 10)')
+parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
+                    help='path to latest checkpoint (default: none)')
+parser.add_argument('--pretrained', dest='pretrained', action='store_true',
+                    help='use pre-trained model')
+parser.add_argument('--num-gpu', type=int, default=1,
+                    help='Number of GPUs to use')
+parser.add_argument('--no-test-pool', dest='no_test_pool', action='store_true',
+                    help='disable test time pool')
+parser.add_argument('--no-prefetcher', action='store_true', default=False,
+                    help='disable fast prefetcher')
+parser.add_argument('--pin-mem', action='store_true', default=False,
+                    help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
+parser.add_argument('--channels-last', action='store_true', default=False,
+                    help='Use channels_last memory layout')
+parser.add_argument('--amp', action='store_true', default=False,
+                    help='Use AMP mixed precision. Defaults to Apex, fallback to native Torch AMP.')
+parser.add_argument('--apex-amp', action='store_true', default=False,
+                    help='Use NVIDIA Apex AMP mixed precision')
+parser.add_argument('--native-amp', action='store_true', default=False,
+                    help='Use Native Torch AMP mixed precision')
+parser.add_argument('--tf-preprocessing', action='store_true', default=False,
+                    help='Use Tensorflow preprocessing pipeline (requires CPU TF installed)')
+parser.add_argument('--use-ema', dest='use_ema', action='store_true',
+                    help='use ema version of weights if present')
+parser.add_argument('--torchscript', dest='torchscript', action='store_true',
+                    help='convert model to torchscript for inference')
+parser.add_argument('--legacy-jit', dest='legacy_jit', action='store_true',
+                    help='use legacy jit mode for pytorch 1.5/1.5.1/1.6 to get back fusion performance')
+parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
+                    help='Output csv file for validation results (summary)')
+parser.add_argument('--real-labels', default='', type=str, metavar='FILENAME',
+                    help='Real labels JSON file for imagenet evaluation')
+parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME',
+                    help='Valid label indices txt file for validation of partial label space')
+
+
+def validate(args):
+    # might as well try to validate something
+    args.pretrained = args.pretrained or not args.checkpoint
+    args.prefetcher = not args.no_prefetcher
+    amp_autocast = suppress  # do nothing
+    if args.amp:
+        if has_apex:
+            args.apex_amp = True
+        elif has_native_amp:
+            args.native_amp = True
+        else:
+            _logger.warning("Neither APEX nor native Torch AMP is available.")
+    assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set."
+    if args.native_amp:
+        amp_autocast = torch.npu.amp.autocast
+        _logger.info('Validating in mixed precision with native PyTorch AMP.')
+    elif args.apex_amp:
+        _logger.info('Validating in mixed precision with NVIDIA APEX AMP.')
+    else:
+        _logger.info('Validating in float32. AMP not enabled.')
+
+    if args.legacy_jit:
+        set_jit_legacy()
+
+    # create model
+    model = create_model(
+        args.model,
+        pretrained=args.pretrained,
+        num_classes=args.num_classes,
+        in_chans=3,
+        global_pool=args.gp,
+        scriptable=args.torchscript)
+    if args.num_classes is None:
+        assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
+        args.num_classes = model.num_classes
+
+    if args.checkpoint:
+        load_checkpoint(model, args.checkpoint, args.use_ema)
+
+    param_count = sum([m.numel() for m in model.parameters()])
+    _logger.info('Model %s created, param count: %d' % (args.model, param_count))
+
+    data_config = resolve_data_config(vars(args), model=model, use_test_size=True, verbose=True)
+    test_time_pool = False
+    if not args.no_test_pool:
+        model, test_time_pool = apply_test_time_pool(model, data_config, use_test_size=True)
+
+    if args.torchscript:
+        torch.jit.optimized_execution(True)
+        model = torch.jit.script(model)
+
+    model = model.npu()
+    if args.apex_amp:
+        model = amp.initialize(model, opt_level='O1')
+
+    if args.channels_last:
+        model = model.to(memory_format=torch.channels_last)
+
+    if args.num_gpu > 1:
+        model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))
+
+    criterion = nn.CrossEntropyLoss().npu()
+
+    args.data = os.path.join(args.data, "val")
+    dataset = create_dataset(
+        root=args.data, name=args.dataset, split=args.split,
+        load_bytes=args.tf_preprocessing, class_map=args.class_map)
+
+    if args.valid_labels:
+        with open(args.valid_labels, 'r') as f:
+            valid_labels = {int(line.rstrip()) for line in f}
+            valid_labels = [i in valid_labels for i in range(args.num_classes)]
+    else:
+        valid_labels = None
+
+    if args.real_labels:
+        real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels)
+    else:
+        real_labels = None
+
+    crop_pct = 1.0 if test_time_pool else data_config['crop_pct']
+    loader = create_loader(
+        dataset,
+        input_size=data_config['input_size'],
+        batch_size=args.batch_size,
+        use_prefetcher=args.prefetcher,
+        interpolation=data_config['interpolation'],
+        mean=data_config['mean'],
+        std=data_config['std'],
+        num_workers=args.workers,
+        crop_pct=crop_pct,
+        pin_memory=args.pin_mem,
+        tf_preprocessing=args.tf_preprocessing)
+
+    batch_time = AverageMeter()
+    losses = AverageMeter()
+    top1 = AverageMeter()
+    top5 = AverageMeter()
+
+    model.eval()
+    with torch.no_grad():
+        # warmup, reduce variability of first batch time, especially for comparing torchscript vs non
+        input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).npu()
+        if args.channels_last:
+            input = input.contiguous(memory_format=torch.channels_last)
+        model(input)
+        end = time.time()
+        for batch_idx, (input, target) in enumerate(loader):
+            if args.no_prefetcher:
+                target = target.npu()
+                input = input.npu()
+            if args.channels_last:
+                input = input.contiguous(memory_format=torch.channels_last)
+
+            # compute output
+            with amp_autocast():
+                output = model(input)
+
+            if valid_labels is not None:
+                output = output[:, valid_labels]
+            loss = criterion(output, target)
+
+            if real_labels is not None:
+                real_labels.add_result(output)
+
+            # measure accuracy and record loss
+            acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5))
+            losses.update(loss.item(), input.size(0))
+            top1.update(acc1.item(), input.size(0))
+            top5.update(acc5.item(), input.size(0))
+
+            # measure elapsed time
+            batch_time.update(time.time() - end)
+            end = time.time()
+
+            if batch_idx % args.log_freq == 0:
+                _logger.info(
+                    'Test: [{0:>4d}/{1}] '
+                    'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
+                    'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
+                    'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '
+                    'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format(
+                        batch_idx, len(loader), batch_time=batch_time,
+                        rate_avg=input.size(0) / batch_time.avg,
+                        loss=losses, top1=top1, top5=top5))
+
+    if real_labels is not None:
+        # real labels mode replaces topk values at the end
+        top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5)
+    else:
+        top1a, top5a = top1.avg, top5.avg
+    results = OrderedDict(
+        top1=round(top1a, 4), top1_err=round(100 - top1a, 4),
+        top5=round(top5a, 4), top5_err=round(100 - top5a, 4),
+        param_count=round(param_count / 1e6, 2),
+        img_size=data_config['input_size'][-1],
+        crop_pct=crop_pct,
+        interpolation=data_config['interpolation'])
+
+    _logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})'.format(
+        results['top1'], results['top1_err'], results['top5'], results['top5_err']))
+
+    return results
+
+
+def main():
+    setup_default_logging()
+    args = parser.parse_args()
+    model_cfgs = []
+    model_names = []
+    if os.path.isdir(args.checkpoint):
+        # validate all checkpoints in a path with same model
+        checkpoints = glob.glob(args.checkpoint + '/*.pth.tar')
+        checkpoints += glob.glob(args.checkpoint + '/*.pth')
+        model_names = list_models(args.model)
+        model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)]
+    else:
+        if args.model == 'all':
+            # validate all models in a list of names with pretrained checkpoints
+            args.pretrained = True
+            model_names = list_models(pretrained=True, exclude_filters=['*_in21k', '*_in22k'])
+            model_cfgs = [(n, '') for n in model_names]
+        elif not is_model(args.model):
+            # model name doesn't exist, try as wildcard filter
+            model_names = list_models(args.model)
+            model_cfgs = [(n, '') for n in model_names]
+
+        if not model_cfgs and os.path.isfile(args.model):
+            with open(args.model) as f:
+                model_names = [line.rstrip() for line in f]
+            model_cfgs = [(n, None) for n in model_names if n]
+
+    if len(model_cfgs):
+        results_file = args.results_file or './results-all.csv'
+        _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
+        results = []
+        try:
+            start_batch_size = args.batch_size
+            for m, c in model_cfgs:
+                batch_size = start_batch_size
+                args.model = m
+                args.checkpoint = c
+                result = OrderedDict(model=args.model)
+                r = {}
+                while not r and batch_size >= args.num_gpu:
+                    torch.npu.empty_cache()
+                    try:
+                        args.batch_size = batch_size
+                        print('Validating with batch size: %d' % args.batch_size)
+                        r = validate(args)
+                    except RuntimeError as e:
+                        if batch_size <= args.num_gpu:
+                            print("Validation failed with no ability to reduce batch size. Exiting.")
+                            raise e
+                        batch_size = max(batch_size // 2, args.num_gpu)
+                        print("Validation failed, reducing batch size by 50%")
+                result.update(r)
+                if args.checkpoint:
+                    result['checkpoint'] = args.checkpoint
+                results.append(result)
+        except KeyboardInterrupt:
+            pass
+        results = sorted(results, key=lambda x: x['top1'], reverse=True)
+        if len(results):
+            write_results(results_file, results)
+    else:
+        validate(args)
+
+
+def write_results(results_file, results):
+    with open(results_file, mode='w') as cf:
+        dw = csv.DictWriter(cf, fieldnames=results[0].keys())
+        dw.writeheader()
+        for r in results:
+            dw.writerow(r)
+        cf.flush()
+
+
+if __name__ == '__main__':
+    main()
-- 
Gitee
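
A note on the one-shot profiling in train_npu.py above: after 200 warm-up iterations a single training step is run under the autograd profiler and exported as a Chrome trace. A minimal, self-contained sketch of the same pattern follows; the toy model and input are invented for illustration, and on GPU/NPU builds the corresponding use_cuda=True / use_npu=True flag would be passed.

import torch

# toy stand-ins for the real model and batch used in train_npu.py
model = torch.nn.Linear(16, 4)
inp = torch.randn(32, 16)

with torch.autograd.profiler.profile() as prof:  # add use_cuda=True / use_npu=True on accelerators
    loss = model(inp).sum()
    loss.backward()
prof.export_chrome_trace("output.prof")  # open the trace in chrome://tracing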
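
The --tta branch of validate() in train_npu.py folds the outputs of reduce_factor augmented copies of each sample back into a single prediction via unfold(...).mean(...). A small illustrative sketch with made-up shapes (8 rows = 4 samples x 2 augmentations):

import torch

reduce_factor = 2                    # stand-in for args.tta
output = torch.randn(8, 1000)        # augmented copies stacked along dim 0
target = torch.arange(8)             # labels repeated per augmented copy

output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)  # -> shape (4, 1000)
target = target[0:target.size(0):reduce_factor]                      # one label per sample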
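
The --valid-labels path in validate_npu.py builds a boolean column mask so that only a subset of the model's classes is scored. A hypothetical, self-contained sketch (the class count and kept indices are invented):

import torch

num_classes = 5
keep = {1, 3}                                        # indices as read from the --valid-labels file
valid_labels = [i in keep for i in range(num_classes)]

logits = torch.randn(2, num_classes)
restricted = logits[:, valid_labels]                 # shape (2, 2): only the kept classes remain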
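
main() in validate_npu.py retries a failing configuration with a halved batch size until validation succeeds or the floor (args.num_gpu) is reached. A hypothetical stand-alone helper capturing the same idea; run_with_backoff and its arguments are invented for illustration:

def run_with_backoff(validate_fn, start_batch_size, floor=1):
    """Call validate_fn(batch_size), halving the batch size on RuntimeError (e.g. out of memory)."""
    batch_size = start_batch_size
    while batch_size >= floor:
        try:
            return validate_fn(batch_size)
        except RuntimeError:
            if batch_size <= floor:
                raise  # nothing left to reduce
            batch_size = max(batch_size // 2, floor)
            print('Validation failed, retrying with batch size %d' % batch_size)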